ccci_private_log.c

#include <linux/slab.h>
#include <linux/proc_fs.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/poll.h>
#include <linux/uaccess.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/timer.h>
#include <linux/types.h>
#include <linux/ktime.h>
#include <mt-plat/mt_ccci_common.h>
#include "ccci_util_log.h"

#define CCCI_LOG_BUF_SIZE 4096 /* must be a power of 2 */
#define CCCI_LOG_MAX_WRITE 512

/* extern u64 local_clock(void); */
/*
 * Lock-protected ring buffer.  read_pos and write_pos are always kept
 * masked into [0, CCCI_LOG_BUF_SIZE).  Equal positions are ambiguous
 * (empty or completely full), so last_ops records which side moved last:
 * after a write (0) an equal pair means "full", after a read (1) "empty".
 */
struct ccci_ring_buffer {
	void *buffer;
	unsigned int size;
	unsigned int read_pos;
	unsigned int write_pos;
	atomic_t last_ops; /* 0 for write; 1 for read */
	atomic_t reader_cnt;
	wait_queue_head_t log_wq;
	spinlock_t write_lock;
};
struct ccci_ring_buffer ccci_log_buf;
int ccci_log_write(const char *fmt, ...)
{
	va_list args;
	int write_len, first_half;
	unsigned long flags;
	char temp_log[CCCI_LOG_MAX_WRITE];
	int this_cpu;
	char state = irqs_disabled() ? '-' : ' ';
	u64 ts_nsec = local_clock();
	unsigned long rem_nsec = do_div(ts_nsec, 1000000000);

	if (unlikely(ccci_log_buf.buffer == NULL))
		return -ENODEV;

	preempt_disable();
	this_cpu = smp_processor_id();
	preempt_enable();

	/* printk-style prefix: timestamp, irq state, cpu, pid, task name */
	write_len = snprintf(temp_log, sizeof(temp_log), "[%5lu.%06lu]%c(%x)[%d:%s]",
		(unsigned long)ts_nsec, rem_nsec / 1000, state, this_cpu,
		current->pid, current->comm);
	va_start(args, fmt);
	write_len += vsnprintf(temp_log + write_len, sizeof(temp_log) - write_len, fmt, args);
	va_end(args);
	/* vsnprintf() returns the would-be length when it truncates; clamp
	 * it so the memcpy below never runs past temp_log */
	if (write_len >= CCCI_LOG_MAX_WRITE)
		write_len = CCCI_LOG_MAX_WRITE - 1;

	spin_lock_irqsave(&ccci_log_buf.write_lock, flags);
	if (ccci_log_buf.write_pos + write_len > CCCI_LOG_BUF_SIZE) {
		/* wrapping: split the copy at the end of the buffer */
		first_half = CCCI_LOG_BUF_SIZE - ccci_log_buf.write_pos;
		memcpy(ccci_log_buf.buffer + ccci_log_buf.write_pos, temp_log, first_half);
		memcpy(ccci_log_buf.buffer, temp_log + first_half, write_len - first_half);
	} else {
		memcpy(ccci_log_buf.buffer + ccci_log_buf.write_pos, temp_log, write_len);
	}
	ccci_log_buf.write_pos = (ccci_log_buf.write_pos + write_len) & (CCCI_LOG_BUF_SIZE - 1);
	atomic_set(&ccci_log_buf.last_ops, 0);
	spin_unlock_irqrestore(&ccci_log_buf.write_lock, flags);
	wake_up_all(&ccci_log_buf.log_wq);

	return write_len;
}
EXPORT_SYMBOL(ccci_log_write);
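
/*
 * Example (hypothetical caller): since ccci_log_write() is exported, any
 * kernel code that links against this module can log through the ring
 * buffer with printk-style formatting; "md_id" below is illustrative
 * only, not a symbol defined in this file:
 *
 *	ccci_log_write("md%d: power on\n", md_id);
 *
 * Messages longer than CCCI_LOG_MAX_WRITE bytes are truncated.
 */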
static ssize_t ccci_log_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
{
	unsigned int available, read_len, first_half;
	unsigned long flags;
	int ret;

retry:
	spin_lock_irqsave(&ccci_log_buf.write_lock, flags);
	available = (ccci_log_buf.write_pos - ccci_log_buf.read_pos) & (CCCI_LOG_BUF_SIZE - 1);
	/* equal positions right after a write means the buffer is full */
	if (available == 0 && !atomic_read(&ccci_log_buf.last_ops))
		available = CCCI_LOG_BUF_SIZE;
	if (!available) {
		spin_unlock_irqrestore(&ccci_log_buf.write_lock, flags);
		if (file->f_flags & O_NONBLOCK)
			return -EAGAIN;
		ret = wait_event_interruptible(ccci_log_buf.log_wq,
			!atomic_read(&ccci_log_buf.last_ops));
		if (ret == -ERESTARTSYS)
			return -EINTR;
		goto retry;
	}
	read_len = size < available ? size : available;
	if (ccci_log_buf.read_pos + read_len > CCCI_LOG_BUF_SIZE) {
		/* wrapping: split the copy at the end of the buffer */
		first_half = CCCI_LOG_BUF_SIZE - ccci_log_buf.read_pos;
		ret = copy_to_user(buf, ccci_log_buf.buffer + ccci_log_buf.read_pos, first_half);
		ret |= copy_to_user(buf + first_half, ccci_log_buf.buffer, read_len - first_half);
	} else {
		ret = copy_to_user(buf, ccci_log_buf.buffer + ccci_log_buf.read_pos, read_len);
	}
	ccci_log_buf.read_pos = (ccci_log_buf.read_pos + read_len) & (CCCI_LOG_BUF_SIZE - 1);
	atomic_set(&ccci_log_buf.last_ops, 1);
	spin_unlock_irqrestore(&ccci_log_buf.write_lock, flags);

	return ret ? -EFAULT : read_len;
}
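
/*
 * Worked wrap-around example: with CCCI_LOG_BUF_SIZE = 4096, read_pos =
 * 4090 and read_len = 16, first_half = 6 bytes come from offsets
 * 4090..4095, the remaining 10 bytes from offsets 0..9, and read_pos
 * advances to (4090 + 16) & 4095 = 10.  The mask only works because the
 * buffer size is a power of 2, as the #define above requires.
 */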
static unsigned int ccci_log_poll(struct file *fp, struct poll_table_struct *poll)
{
	unsigned int mask = 0;

	poll_wait(fp, &ccci_log_buf.log_wq, poll);
	/* last_ops == 0 means the latest operation was a write,
	 * i.e. there is unread data */
	if (!atomic_read(&ccci_log_buf.last_ops))
		mask |= POLLIN | POLLRDNORM;

	return mask;
}
static int ccci_log_open(struct inode *inode, struct file *file)
{
	/* allow a single reader; increment-then-test closes the race that
	 * a separate atomic_read() check would leave open */
	if (atomic_inc_return(&ccci_log_buf.reader_cnt) > 1) {
		atomic_dec(&ccci_log_buf.reader_cnt);
		return -EBUSY;
	}
	return 0;
}
static int ccci_log_close(struct inode *inode, struct file *file)
{
	atomic_dec(&ccci_log_buf.reader_cnt);
	return 0;
}

static const struct file_operations ccci_log_fops = {
	.read = ccci_log_read,
	.open = ccci_log_open,
	.release = ccci_log_close,
	.poll = ccci_log_poll,
};
void ccci_log_init(void)
{
	struct proc_dir_entry *ccci_log_proc;

	/* set up the ring buffer before exposing the proc entry */
	ccci_log_buf.buffer = kmalloc(CCCI_LOG_BUF_SIZE, GFP_KERNEL);
	if (ccci_log_buf.buffer == NULL) {
		CCCI_UTIL_INF_MSG("fail to allocate log buffer\n");
		return;
	}
	ccci_log_buf.size = CCCI_LOG_BUF_SIZE;
	spin_lock_init(&ccci_log_buf.write_lock);
	init_waitqueue_head(&ccci_log_buf.log_wq);
	atomic_set(&ccci_log_buf.last_ops, 1);
	atomic_set(&ccci_log_buf.reader_cnt, 0);

	ccci_log_proc = proc_create("ccci_log", 0664, NULL, &ccci_log_fops);
	if (ccci_log_proc == NULL) {
		CCCI_UTIL_INF_MSG("fail to create proc entry for log\n");
		kfree(ccci_log_buf.buffer);
		ccci_log_buf.buffer = NULL;
	}
}
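
/*
 * Minimal userspace sketch (assumption: the node appears as
 * /proc/ccci_log, matching the proc_create() call above, and only one
 * reader may hold it open at a time):
 *
 *	int fd = open("/proc/ccci_log", O_RDONLY);
 *	char buf[256];
 *	ssize_t n;
 *
 *	while ((n = read(fd, buf, sizeof(buf))) > 0)
 *		fwrite(buf, 1, n, stdout);
 *
 * A blocking read sleeps in log_wq until the next ccci_log_write();
 * with O_NONBLOCK an empty buffer returns -EAGAIN instead, and
 * poll()/select() can be used to wait for POLLIN.
 */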