/*
 * monitor_debug_out.c - debug dump routines for the MTK scheduler monitor
 * (IRQ/IPI counters, timer ISR timestamps, and blocked/stopped-event traces).
 */
  1. #include <linux/proc_fs.h>
  2. #include <linux/sched.h>
  3. #include <linux/kallsyms.h>
  4. #include <linux/utsname.h>
  5. #include <linux/uaccess.h>
  6. #include <linux/module.h>
  7. #include <linux/pid.h>
  8. #include "mt_sched_mon.h"
  9. #include <mt-plat/aee.h>
  10. #include <linux/stacktrace.h>
  11. #include "internal.h"
  12. #ifdef CONFIG_MT_SCHED_MONITOR
/*
 * get_sched_block_events() - fetch the per-cpu sched_block_event pointers
 * for ISR, softirq, tasklet, hrtimer and soft-timer monitoring.
 * NOTE: this macro relies on a variable named 'cpu' being in scope at the
 * call site (see mt_dump_sched_traces / mt_aee_dump_sched_traces).
 */
#define get_sched_block_events(isr, softirq, tasklet, hrtimer, soft_timer) \
do { \
	isr = &per_cpu(ISR_mon, cpu); \
	softirq = &per_cpu(SoftIRQ_mon, cpu); \
	tasklet = &per_cpu(tasklet_mon, cpu); \
	hrtimer = &per_cpu(hrt_mon, cpu); \
	soft_timer = &per_cpu(sft_mon, cpu); \
} while (0)
/*
 * get_sched_stop_events() - fetch the per-cpu sched_stop_event pointers for
 * preempt-disable and IRQ-disable monitoring.
 * NOTE: like get_sched_block_events(), this expects 'cpu' in caller scope.
 */
#define get_sched_stop_events(preempt_disable_event, irq_disable_event) \
do { \
	preempt_disable_event = &per_cpu(Preempt_disable_mon, cpu); \
	irq_disable_event = &per_cpu(IRQ_disable_mon, cpu); \
} while (0)
  26. #include <linux/irqnr.h>
  27. #include <linux/kernel_stat.h>
  28. #include <asm/hardirq.h>
  29. void mt_show_last_irq_counts(void)
  30. {
  31. int irq, cpu;
  32. unsigned long flags;
  33. spin_lock_irqsave(&mt_irq_count_lock, flags);
  34. for (cpu = 0; cpu < num_possible_cpus(); cpu++) {
  35. pr_err("Last irq counts record at [%llu] ns\n", per_cpu(save_irq_count_time, cpu));
  36. pr_err("CPU%d state:%s\n", cpu, cpu_online(cpu) ? "online" : "offline");
  37. for (irq = 0; irq < nr_irqs && irq < MAX_NR_IRQS; irq++) {
  38. if (per_cpu(irq_count_mon, cpu).irqs[irq] != 0)
  39. pr_err("[%3d] = %8d\n", irq, per_cpu(irq_count_mon, cpu).irqs[irq]);
  40. }
  41. }
  42. #ifdef CONFIG_SMP
  43. for (cpu = 0; cpu < num_possible_cpus(); cpu++) {
  44. for (irq = 0; irq < NR_IPI; irq++) {
  45. if (per_cpu(ipi_count_mon, cpu).ipis[irq] != 0)
  46. pr_err("(CPU#%d)IPI[%3d] = %8d\n", cpu, irq,
  47. per_cpu(ipi_count_mon, cpu).ipis[irq]);
  48. }
  49. }
  50. #endif
  51. spin_unlock_irqrestore(&mt_irq_count_lock, flags);
  52. }
  53. void mt_show_current_irq_counts(void)
  54. {
  55. int irq, cpu, count;
  56. unsigned long flags;
  57. unsigned long long t_cur, t_diff = 0;
  58. t_cur = sched_clock();
  59. spin_lock_irqsave(&mt_irq_count_lock, flags);
  60. pr_err("=========================================\nIRQ Status:\n");
  61. for (cpu = 0; cpu < num_possible_cpus(); cpu++) {
  62. t_diff = t_cur - per_cpu(save_irq_count_time, cpu);
  63. pr_err("Current irq counts record at [%llu] ns.(last at %llu, diff:+%llu ns)\n", t_cur,
  64. per_cpu(save_irq_count_time, cpu), usec_high(t_diff));
  65. pr_err("CPU%d state:%s\n", cpu, cpu_online(cpu) ? "online" : "offline");
  66. for (irq = 0; irq < nr_irqs && irq < MAX_NR_IRQS; irq++) {
  67. count = kstat_irqs_cpu(irq, cpu);
  68. if (count != 0)
  69. pr_err(" IRQ[%3d:%14s] = %8d, (+%d times in %lld us)\n", irq,
  70. isr_name(irq), count, count - per_cpu(irq_count_mon, cpu).irqs[irq],
  71. usec_high(t_diff));
  72. }
  73. }
  74. #ifdef CONFIG_SMP
  75. for (cpu = 0; cpu < num_possible_cpus(); cpu++) {
  76. pr_err("Local IRQ on CPU#%d:\n", cpu);
  77. for (irq = 0; irq < NR_IPI; irq++) {
  78. count = __get_irq_stat(cpu, ipi_irqs[irq]);
  79. if (count != 0)
  80. pr_err(" IRQ[%2d: IPI] = %8d,(+%d times in %lld us)\n", irq, count,
  81. count - per_cpu(ipi_count_mon, cpu).ipis[irq], usec_high(t_diff));
  82. }
  83. }
  84. #endif
  85. spin_unlock_irqrestore(&mt_irq_count_lock, flags);
  86. }
  87. void mt_show_timer_info(void)
  88. {
  89. int cpu;
  90. for_each_possible_cpu(cpu) {
  91. pr_err("[TimerISR#%d] last timer ISR start:%llu ns, end:%llu ns\n", cpu,
  92. per_cpu(local_timer_ts, cpu), per_cpu(local_timer_te, cpu));
  93. }
  94. }
  95. /* for MTK fiq debug log mechanism*/
  96. static void mt_aee_show_current_irq_counts(void)
  97. {
  98. int irq, cpu, count;
  99. unsigned long long t_cur, t_diff;
  100. t_cur = sched_clock();
  101. /* spin_lock_irqsave(&mt_irq_count_lock, flags); */
  102. aee_wdt_printf("\nIRQ Status\n");
  103. for (cpu = 0; cpu < num_possible_cpus(); cpu++) {
  104. t_diff = t_cur - per_cpu(save_irq_count_time, cpu);
  105. aee_wdt_printf("Dur:%lld us,(now:%lld,last:%lld)\n", usec_high(t_diff), usec_high(t_cur),
  106. usec_high(per_cpu(save_irq_count_time, cpu)));
  107. aee_wdt_printf("CPU%d state:%s\n", cpu, cpu_online(cpu) ? "online" : "offline");
  108. for (irq = 0; irq < nr_irqs && irq < MAX_NR_IRQS; irq++) {
  109. count = kstat_irqs_cpu(irq, cpu);
  110. if (count != 0)
  111. aee_wdt_printf(" %d:%s +%d(%d)\n", irq, isr_name(irq),
  112. count - per_cpu(irq_count_mon, cpu).irqs[irq], count);
  113. }
  114. }
  115. #ifdef CONFIG_SMP
  116. for (cpu = 0; cpu < num_possible_cpus(); cpu++) {
  117. aee_wdt_printf("CPU#%d:\n", cpu);
  118. for (irq = 0; irq < NR_IPI; irq++) {
  119. count = __get_irq_stat(cpu, ipi_irqs[irq]);
  120. if (count != 0)
  121. aee_wdt_printf(" %d:IPI +%d(%d)\n", irq,
  122. count - per_cpu(ipi_count_mon, cpu).ipis[irq], count);
  123. }
  124. }
  125. #endif
  126. /* spin_unlock_irqrestore(&mt_irq_count_lock, flags); */
  127. }
  128. static void mt_aee_show_timer_info(void)
  129. {
  130. int cpu;
  131. for_each_possible_cpu(cpu) {
  132. aee_wdt_printf("[TimerISR#%d]last s:%llu e:%llu ns\n", cpu,
  133. per_cpu(local_timer_ts, cpu), per_cpu(local_timer_te, cpu));
  134. }
  135. }
  136. void mt_dump_sched_traces(void)
  137. {
  138. struct sched_block_event *b_isr, *b_sq, *b_tk, *b_hrt, *b_sft;
  139. struct sched_stop_event *e_irq, *e_pmpt;
  140. int cpu;
  141. for_each_possible_cpu(cpu) {
  142. get_sched_block_events(b_isr, b_sq, b_tk, b_hrt, b_sft);
  143. get_sched_stop_events(e_pmpt, e_irq);
  144. pr_err("==CPU[%d]==\n", cpu);
  145. if (b_isr->cur_event == 0)
  146. pr_err("[ISR]last_irq:%d, dur:%llu ns (s:%llu, e:%llu)\n\n",
  147. (int)b_isr->last_event, b_isr->last_te - b_isr->last_ts,
  148. b_isr->last_ts, b_isr->last_te);
  149. else {
  150. pr_err("[In ISR]Current irq:%d, Start:%llu (elapsed: %llu ns),",
  151. (int)b_isr->cur_event, b_isr->cur_ts,
  152. sched_clock() - b_isr->cur_ts);
  153. pr_err(" last irq#:%d, last_start:%llu, last_end:%llu\n\n",
  154. (int)b_isr->last_event, b_isr->last_ts, b_isr->last_te);
  155. }
  156. if (b_sq->cur_event == 0)
  157. pr_err("[Softirq]last#:%d, dur:%llu ns (s:%llu, e:%llu)\n\n",
  158. (int)b_sq->last_event, b_sq->last_te - b_sq->last_ts,
  159. b_sq->last_ts, b_sq->last_te);
  160. else {
  161. pr_err("[In Softirq]Current softirq#:%d, Start:%llu(elapsed: %llu ns),",
  162. (int)b_sq->cur_event, b_sq->cur_ts,
  163. sched_clock() - b_sq->cur_ts);
  164. pr_err(" last softirq#:%d(dur:%llu ns), last_start:%llu, last_end:%llu\n\n",
  165. (int)b_sq->last_event, b_sq->last_te - b_sq->last_ts,
  166. b_sq->last_ts, b_sq->last_te);
  167. }
  168. if (b_tk->cur_event == 0) {
  169. pr_err("[Tasklet]\n Occurs %d times in last SoftIRQ duration\n",
  170. (int)b_tk->last_count);
  171. pr_err("last fn:%pS, dur:%llu ns (s:%llu, e:%llu)\n\n",
  172. (void *)b_tk->last_event, b_tk->last_te - b_tk->last_ts,
  173. b_tk->last_ts, b_tk->last_te);
  174. } else {
  175. pr_err("[In Tasklet]\n Occurs: cur:%d, last:%d\n Current:%pS,",
  176. (int)b_tk->cur_count, (int)b_tk->last_count,
  177. (void *)b_tk->cur_event);
  178. pr_err(" Start:%llu(elapsed: %llu ns), last#:%pS(dur:%llu ns),",
  179. b_tk->cur_ts, sched_clock() - b_tk->cur_ts,
  180. (void *)b_tk->last_event, b_tk->last_te - b_tk->last_ts);
  181. pr_err(" last_start:%llu, last_end:%llu\n\n",
  182. b_tk->last_ts, b_tk->last_te);
  183. }
  184. if (b_hrt->cur_event == 0) {
  185. pr_err("[HRTimer]\n Occurs %d times in last ISR duration\n",
  186. (int)b_hrt->last_count);
  187. pr_err(" last fn:%pS, dur:%llu ns (s:%llu, e:%llu)\n\n",
  188. (void *)b_hrt->last_event,
  189. b_hrt->last_te - b_hrt->last_ts,
  190. b_hrt->last_ts, b_hrt->last_te);
  191. } else {
  192. pr_err("[In HRTimer]\n Occurs: cur:%d, last:%d\n Current:%pS, ",
  193. (int)b_tk->cur_count, (int)b_tk->last_count,
  194. (void *)b_hrt->cur_event);
  195. pr_err("Start:%llu(elapsed: %llu ns), last#:%pS(dur:%llu ns), ",
  196. b_hrt->cur_ts, sched_clock() - b_hrt->cur_ts,
  197. (void *)b_hrt->last_event, b_hrt->last_te - b_hrt->last_ts);
  198. pr_err("last_start:%llu, last_end:%llu\n\n",
  199. b_hrt->last_ts, b_hrt->last_te);
  200. }
  201. if (b_sft->cur_event == 0) {
  202. pr_err("[SoftTimer]\n Occurs %d times in last SoftIRQ duration\n",
  203. (int)b_sft->last_count);
  204. pr_err(" last fn:%pS, dur:%llu ns (s:%llu, e:%llu)\n\n",
  205. (void *)b_sft->last_event,
  206. b_sft->last_te - b_sft->last_ts,
  207. b_sft->last_ts, b_sft->last_te);
  208. } else {
  209. pr_err("[In SoftTimer]\n Occurs: cur:%d, last:%d\n Current:%pS,",
  210. (int)b_sft->cur_count, (int)b_sft->last_count,
  211. (void *)b_sft->cur_event);
  212. pr_err(" Start:%llu(elapsed: %llu ns), last#:%pS(dur:%llu ns), ",
  213. b_sft->cur_ts, sched_clock() - b_sft->cur_ts,
  214. (void *)b_sft->last_event, b_sft->last_te - b_sft->last_ts);
  215. pr_err("last_start:%llu, last_end:%llu\n\n",
  216. b_sft->last_ts, b_sft->last_te);
  217. }
  218. /**** Dump Stop Events ****/
  219. if (e_irq->cur_ts == 0)
  220. pr_err("[IRQ disable] last duration:%llu ns (s: %llu, e: %llu)\n\n",
  221. e_irq->last_te - e_irq->last_ts, e_irq->last_ts, e_irq->last_te);
  222. else
  223. pr_err
  224. ("[IRQ disable] cur_ts:%llu(elapsed:%llu ns), last duration:%llu ns(s: %llu, e: %llu)\n\n",
  225. e_irq->cur_ts, sched_clock() - e_irq->cur_ts,
  226. e_irq->last_te - e_irq->last_ts,
  227. e_irq->last_ts, e_irq->last_te);
  228. if (e_pmpt->cur_ts == 0)
  229. pr_err("[Preempt disable] last duration:%llu ns(s: %llu, e: %llu)\n\n",
  230. e_pmpt->last_te - e_pmpt->last_ts,
  231. e_pmpt->last_ts, e_pmpt->last_te);
  232. else
  233. pr_err
  234. ("[Preempt disable] cur_ts:%llu(elapsed:%llu ns), last duration:%llu ns(s: %llu, e: %llu)\n\n",
  235. e_pmpt->cur_ts, sched_clock() - e_pmpt->cur_ts,
  236. e_pmpt->last_te - e_pmpt->last_ts, e_pmpt->last_ts, e_pmpt->last_te);
  237. }
  238. mt_show_current_irq_counts();
  239. mt_show_timer_info();
  240. }
  241. void mt_aee_dump_sched_traces(void)
  242. {
  243. struct sched_block_event *b_isr, *b_sq, *b_tk, *b_hrt, *b_sft;
  244. struct sched_stop_event *e_irq, *e_pmpt;
  245. int cpu;
  246. for_each_possible_cpu(cpu) {
  247. get_sched_block_events(b_isr, b_sq, b_tk, b_hrt, b_sft);
  248. get_sched_stop_events(e_pmpt, e_irq);
  249. aee_wdt_printf("CPU%d\n", cpu);
  250. b_isr->cur_event == 0 ?
  251. aee_wdt_printf("[ISR]last#%d,dur:%lld s:%lld\n\n",
  252. (int)b_isr->last_event,
  253. usec_high(b_isr->last_te - b_isr->last_ts),
  254. usec_high(b_isr->
  255. last_ts)) :
  256. aee_wdt_printf
  257. ("[In ISR]Current irq#:%d, Start:%lld (elapsed: %lld), last irq#:%d, s:%lld, e:%lld\n\n",
  258. (int)b_isr->cur_event, usec_high(b_isr->cur_ts),
  259. usec_high(sched_clock() - b_isr->cur_ts), (int)b_isr->last_event,
  260. usec_high(b_isr->last_ts), usec_high(b_isr->last_te));
  261. if (b_sq->cur_event == 0)
  262. aee_wdt_printf("[Softirq]last#%d,dur:%lld s:%lld\n\n",
  263. (int)b_sq->last_event, usec_high(b_sq->last_te - b_sq->last_ts),
  264. usec_high(b_sq->last_ts));
  265. else {
  266. aee_wdt_printf("[In Softirq]Current softirq#:%d, Start:%lld(elapsed: %lld),",
  267. (int)b_sq->cur_event, usec_high(b_sq->cur_ts),
  268. usec_high(sched_clock() - b_sq->cur_ts));
  269. aee_wdt_printf(" last softirq#:%d(dur:%lld), s:%lld, e:%lld\n\n",
  270. (int)b_sq->last_event,
  271. usec_high(b_sq->last_te - b_sq->last_ts),
  272. usec_high(b_sq->last_ts), usec_high(b_sq->last_te));
  273. }
  274. if (b_tk->cur_event == 0)
  275. aee_wdt_printf("[Tasklet]%d/SoftIRQ\n %pS dur:%lld s:%lld\n\n",
  276. (int)b_tk->last_count, (void *)b_tk->last_event,
  277. usec_high(b_tk->last_te - b_tk->last_ts),
  278. usec_high(b_tk->last_ts));
  279. else {
  280. aee_wdt_printf(
  281. "[In Tasklet]\n Occurs: cur:%d, last:%d\n Current:%pS, Start:%lld(elapsed: %lld),",
  282. (int)b_tk->cur_count, (int)b_tk->last_count, (void *)b_tk->cur_event,
  283. usec_high(b_tk->cur_ts), usec_high(sched_clock() - b_tk->cur_ts));
  284. aee_wdt_printf(" last#:%pS(dur:%lld), last_start:%lld, last_end:%lld\n\n",
  285. (void *)b_tk->last_event, usec_high(b_tk->last_te - b_tk->last_ts),
  286. usec_high(b_tk->last_ts), usec_high(b_tk->last_te));
  287. }
  288. if (b_hrt->cur_event == 0)
  289. aee_wdt_printf("[HRTimer]%d/ISR\n %pS dur:%lld s:%lld\n\n",
  290. (int)b_hrt->last_count, (void *)b_hrt->last_event,
  291. usec_high(b_hrt->last_te - b_hrt->last_ts),
  292. usec_high(b_hrt->last_ts));
  293. else {
  294. aee_wdt_printf
  295. ("[In HRTimer]\n Occurs: cur:%d, last:%d\n Current:%pS, Start:%lld(elapsed: %lld),",
  296. (int)b_tk->cur_count, (int)b_tk->last_count, (void *)b_hrt->cur_event,
  297. usec_high(b_hrt->cur_ts), usec_high(sched_clock() - b_hrt->cur_ts));
  298. aee_wdt_printf(" last#:%pS(dur:%lld), last_start:%lld, last_end:%lld\n\n",
  299. (void *)b_hrt->last_event, usec_high(b_hrt->last_te - b_hrt->last_ts),
  300. usec_high(b_hrt->last_ts), usec_high(b_hrt->last_te));
  301. }
  302. if (b_sft->cur_event == 0)
  303. aee_wdt_printf("[SoftTimer]%d/SoftIRQ\n %pS dur:%lld s:%lld\n\n",
  304. (int)b_sft->last_count, (void *)b_sft->last_event,
  305. usec_high(b_sft->last_te - b_sft->last_ts),
  306. usec_high(b_sft->last_ts));
  307. else {
  308. aee_wdt_printf
  309. ("[In SoftTimer]\n Occurs: cur:%d, last:%d\n Current:%pS, Start:%lld(elapsed: %lld),",
  310. (int)b_sft->cur_count, (int)b_sft->last_count, (void *)b_sft->cur_event,
  311. usec_high(b_sft->cur_ts), usec_high(sched_clock() - b_sft->cur_ts));
  312. aee_wdt_printf(" last#:%pS(dur:%lld), last_start:%lld, last_end:%lld\n\n",
  313. (void *)b_sft->last_event, usec_high(b_sft->last_te - b_sft->last_ts),
  314. usec_high(b_sft->last_ts), usec_high(b_sft->last_te));
  315. }
  316. /**** Dump Stop Events ****/
  317. /*
  318. * e_irq->cur_ts == 0?
  319. aee_wdt_printf("[IRQ disable] last duration:%llu ns (s: %llu, e: %llu)\n\n",
  320. e_irq->last_te - e_irq->last_ts, e_irq->last_ts, e_irq->last_te):
  321. aee_wdt_printf("[IRQ disable] cur_ts:%llu(elapsed:%llu ns), "
  322. "last duration:%llu ns(s: %llu, e: %llu)\n\n",
  323. e_irq->cur_ts, sched_clock() - e_irq->cur_ts, e_irq->last_te - e_irq->last_ts,"
  324. " e_irq->last_ts, e_irq->last_te);
  325. e_pmpt->cur_ts == 0?
  326. aee_wdt_printf("[Preempt disable] last duration:%llu ns(s: %llu, e: %llu)\n\n",
  327. e_pmpt->last_te - e_pmpt->last_ts, e_pmpt->last_ts, e_pmpt->last_te):
  328. aee_wdt_printf("[Preempt disable] cur_ts:%llu(elapsed:%llu ns), "
  329. "last duration:%llu ns(s: %llu, e: %llu)\n\n",
  330. e_pmpt->cur_ts, sched_clock() - e_pmpt->cur_ts, e_pmpt->last_te - e_pmpt->last_ts,"
  331. " e_pmpt->last_ts, e_pmpt->last_te);
  332. */
  333. }
  334. mt_aee_show_current_irq_counts();
  335. mt_aee_show_timer_info();
  336. }
  337. #else
/*
 * CONFIG_MT_SCHED_MONITOR disabled: provide empty stubs so external callers
 * of these dump routines still link and become no-ops.
 */
void mt_dump_sched_traces(void)
{
}
void mt_aee_dump_sched_traces(void)
{
}
void mt_show_last_irq_counts(void)
{
}
void mt_show_current_irq_counts(void)
{
}
void mt_show_timer_info(void)
{
}
  353. #endif