/* aed-process.c */
#include <linux/atomic.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/delay.h>
#include <linux/linkage.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/ratelimit.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>
#include <linux/semaphore.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/stacktrace.h>
#include <linux/uaccess.h>

#include <asm/memory.h>
#include <asm/stacktrace.h>
#include <asm/traps.h>

#include <mrdump.h>

#include "aed.h"
/*
 * Synchronization state shared between the CPU requesting a backtrace
 * and the other online CPUs parked in per_cpu_get_bt().
 */
struct bt_sync {
	atomic_t cpus_report;	/* remote CPUs yet to acknowledge the current phase */
	atomic_t cpus_lock;	/* 1: remote CPUs must spin; 0: they may resume */
};
  25. static void per_cpu_get_bt(void *info)
  26. {
  27. int timeout_max = 500000;
  28. struct bt_sync *s = (struct bt_sync *)info;
  29. if (atomic_read(&s->cpus_lock) == 0)
  30. return;
  31. atomic_dec(&s->cpus_report);
  32. while (atomic_read(&s->cpus_lock) == 1) {
  33. if (timeout_max-- > 0)
  34. udelay(1);
  35. else
  36. break;
  37. }
  38. atomic_dec(&s->cpus_report);
  39. }
  40. static int aed_save_trace(struct stackframe *frame, void *d)
  41. {
  42. struct aee_process_bt *trace = d;
  43. unsigned int id = trace->nr_entries;
  44. /* use static var, not support concurrency */
  45. static unsigned long stack;
  46. int ret = 0;
  47. if (id >= AEE_NR_FRAME)
  48. return -1;
  49. if (id == 0)
  50. stack = frame->sp;
  51. if (frame->fp < stack || frame->fp > ALIGN(stack, THREAD_SIZE))
  52. ret = -1;
  53. #if 0
  54. if (ret == 0 && in_exception_text(addr)) {
  55. #ifdef __aarch64__
  56. excp_regs = (void *)(frame->fp + 0x10);
  57. frame->pc = excp_regs->reg_pc - 4;
  58. #else
  59. excp_regs = (void *)(frame->fp + 4);
  60. frame->pc = excp_regs->reg_pc;
  61. frame->lr = excp_regs->reg_lr;
  62. #endif
  63. frame->sp = excp_regs->reg_sp;
  64. frame->fp = excp_regs->reg_fp;
  65. }
  66. #endif
  67. trace->entries[id].pc = frame->pc;
  68. snprintf(trace->entries[id].pc_symbol, AEE_SZ_SYMBOL_S, "%pS", (void *)frame->pc);
  69. #ifndef __aarch64__
  70. trace->entries[id].lr = frame->lr;
  71. snprintf(trace->entries[id].lr_symbol, AEE_SZ_SYMBOL_L, "%pS", (void *)frame->lr);
  72. #endif
  73. ++trace->nr_entries;
  74. return ret;
  75. }
/*
 * Capture a kernel backtrace of @tsk into @bt.
 *
 * Seeds a stackframe either from the task's context-switch state (when
 * @tsk is sleeping) or from the live registers (when walking ourselves),
 * then hands it to walk_stackframe() with aed_save_trace() as the
 * per-frame callback.
 *
 * Uses a static entry buffer, so only one capture may run at a time;
 * callers are serialized by process_bt_sem.
 */
static void aed_get_bt(struct task_struct *tsk, struct aee_process_bt *bt)
{
	struct stackframe frame;
	unsigned long stack_address;
	/* Shared result buffer — see serialization note above. */
	static struct aee_bt_frame aed_backtrace_buffer[AEE_NR_FRAME];

	bt->nr_entries = 0;
	bt->entries = aed_backtrace_buffer;
	memset(&frame, 0, sizeof(struct stackframe));
	if (tsk != current) {
		/* Sleeping task: start from its saved switch_to() registers. */
		frame.fp = thread_saved_fp(tsk);
		frame.sp = thread_saved_sp(tsk);
#ifdef __aarch64__
		frame.pc = thread_saved_pc(tsk);
#else
		frame.lr = thread_saved_pc(tsk);
		frame.pc = 0xffffffff;	/* sentinel: real pc unknown on arm32 */
#endif
	} else {
		/* Walking ourselves: seed from the live registers here. */
		register unsigned long current_sp asm("sp");

		frame.fp = (unsigned long)__builtin_frame_address(0);
		frame.sp = current_sp;
#ifdef __aarch64__
		frame.pc = (unsigned long)__builtin_return_address(0);
#else
		frame.lr = (unsigned long)__builtin_return_address(0);
		frame.pc = (unsigned long)aed_get_bt;
#endif
	}
	/* Sanity-check sp points at a plausible, mapped kernel stack
	 * before walking; otherwise just log and return an empty trace.
	 */
	stack_address = ALIGN(frame.sp, THREAD_SIZE);
	if ((stack_address >= (PAGE_OFFSET + THREAD_SIZE)) && virt_addr_valid(stack_address))
		walk_stackframe(&frame, aed_save_trace, bt);
	else
		LOGD("%s: Invalid sp value %lx\n", __func__, frame.sp);
}
  110. static DEFINE_SEMAPHORE(process_bt_sem);
  111. int aed_get_process_bt(struct aee_process_bt *bt)
  112. {
  113. int nr_cpus, err;
  114. struct bt_sync s;
  115. struct task_struct *task;
  116. int timeout_max = 500000;
  117. if (down_interruptible(&process_bt_sem) < 0)
  118. return -ERESTARTSYS;
  119. err = 0;
  120. if (bt->pid > 0) {
  121. task = find_task_by_vpid(bt->pid);
  122. if (task == NULL) {
  123. err = -EINVAL;
  124. goto exit;
  125. }
  126. } else {
  127. err = -EINVAL;
  128. goto exit;
  129. }
  130. err = mutex_lock_killable(&task->signal->cred_guard_mutex);
  131. if (err)
  132. goto exit;
  133. if (!ptrace_may_access(task, PTRACE_MODE_ATTACH)) {
  134. mutex_unlock(&task->signal->cred_guard_mutex);
  135. err = -EPERM;
  136. goto exit;
  137. }
  138. mutex_unlock(&task->signal->cred_guard_mutex);
  139. get_online_cpus();
  140. preempt_disable();
  141. nr_cpus = num_online_cpus();
  142. atomic_set(&s.cpus_report, nr_cpus - 1);
  143. atomic_set(&s.cpus_lock, 1);
  144. smp_call_function(per_cpu_get_bt, &s, 0);
  145. while (atomic_read(&s.cpus_report) != 0) {
  146. if (timeout_max-- > 0)
  147. udelay(1);
  148. else
  149. break;
  150. }
  151. aed_get_bt(task, bt);
  152. atomic_set(&s.cpus_report, nr_cpus - 1);
  153. atomic_set(&s.cpus_lock, 0);
  154. timeout_max = 500000;
  155. while (atomic_read(&s.cpus_report) != 0) {
  156. if (timeout_max-- > 0)
  157. udelay(1);
  158. else
  159. break;
  160. }
  161. preempt_enable();
  162. put_online_cpus();
  163. exit:
  164. up(&process_bt_sem);
  165. return err;
  166. }