/* wdt-handler.c */

#include <linux/module.h>
#include <linux/slab.h>
#include <mt-plat/aee.h>
#include <linux/utsname.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/smp.h>
#ifdef CONFIG_MT_SCHED_MONITOR
#include "mt_sched_mon.h"
#endif
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/hardirq.h>
#include <linux/mm.h>
#include <linux/stacktrace.h>
#include <asm/stacktrace.h>
#include <asm/memory.h>
#include <asm/traps.h>
#include <asm/fiq_smp_call.h>
#include <mach/wd_api.h>
#ifndef __aarch64__
#include <smp.h>
#include <mach/irqs.h>
#endif
#include "aee-common.h"

#undef WDT_DEBUG_VERBOSE
/* #define WDT_DEBUG_VERBOSE */

/* Some chips have no IRQ dump; provide weak stubs to avoid build errors */
__weak void mt_irq_dump(void)
{
}

__weak void mt_dump_sched_traces(void)
{
}

#define THREAD_INFO(sp) ((struct thread_info *) \
			 ((unsigned long)(sp) & ~(THREAD_SIZE - 1)))

#define WDT_PERCPU_LOG_SIZE	1024
#define WDT_LOG_DEFAULT_SIZE	4096
#define WDT_SAVE_STACK_SIZE	128
#define MAX_EXCEPTION_FRAME	16

/* NR_CPUS may not equal the real CPU count; buffers are allocated at init */
static char *wdt_percpu_log_buf[NR_CPUS];
static int wdt_percpu_log_length[NR_CPUS];
static char wdt_log_buf[WDT_LOG_DEFAULT_SIZE];
static int wdt_percpu_preempt_cnt[NR_CPUS];
static unsigned long wdt_percpu_stackframe[NR_CPUS][MAX_EXCEPTION_FRAME];
static int wdt_log_length;
static atomic_t wdt_enter_fiq;

struct stacks_buffer {
	char bin_buf[WDT_SAVE_STACK_SIZE];
	int real_len;
	unsigned long top;
	unsigned long bottom;
};
static struct stacks_buffer stacks_buffer_bin[NR_CPUS];

struct regs_buffer {
	struct pt_regs regs;
	int real_len;
};
static struct regs_buffer regs_buffer_bin[NR_CPUS];
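
/* Nonzero once a CPU has entered the WDT FIQ flow (see aee_wdt_fiq_info) */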
int in_fiq_handler(void)
{
	return atomic_read(&wdt_enter_fiq);
}
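
/* Replay the buffered WDT logs once the normal exception path is reached:
 * per-CPU buffers and backtraces first, then the shared log buffer in
 * printk-sized chunks, then the stacks of runnable tasks.
 */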
void aee_wdt_dump_info(void)
{
	char *printk_buf = wdt_log_buf;
	struct task_struct *task;
	int cpu, i;

	task = &init_task;
	aee_rr_rec_fiq_step(AEE_FIQ_STEP_KE_WDT_INFO);
	if (wdt_log_length == 0) {
		LOGE("\n No log for WDT\n");
		mt_dump_sched_traces();
#ifdef CONFIG_SCHED_DEBUG
		/* sysrq_sched_debug_show_at_KE(); */
#endif
		return;
	}

	aee_rr_rec_fiq_step(AEE_FIQ_STEP_KE_WDT_PERCPU);
	LOGE("==========================================");
	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		if ((wdt_percpu_log_buf[cpu]) && (wdt_percpu_log_length[cpu])) {
			/* LOGE("=====> wdt_percpu_log_buf[%d], length=%d ", cpu, wdt_percpu_log_length[cpu]); */
			LOGE("%s", wdt_percpu_log_buf[cpu]);
			LOGE("Backtrace : ");
			for (i = 0; i < MAX_EXCEPTION_FRAME; i++) {
				if (wdt_percpu_stackframe[cpu][i] == 0)
					break;
				LOGE("%08lx, ", wdt_percpu_stackframe[cpu][i]);
			}
			LOGE("==========================================");
		}
	}

	aee_rr_rec_fiq_step(AEE_FIQ_STEP_KE_WDT_LOG);
	/* printk uses a 1024-byte temporary buffer; advance by only 1020 bytes
	 * per chunk so characters at the truncation boundary are not lost.
	 */
	while (wdt_log_length > 0) {
		LOGE("%s", printk_buf);
		printk_buf += 1020;
		wdt_log_length -= 1020;
	}
#ifdef CONFIG_SCHED_DEBUG
	aee_rr_rec_fiq_step(AEE_FIQ_STEP_KE_SCHED_DEBUG);
	/* sysrq_sched_debug_show_at_KE(); */
#endif
	for_each_process(task) {
		if (task->state == 0) {
			LOGE("PID: %d, name: %s\n", task->pid, task->comm);
			show_stack(task, NULL);
			LOGE("\n");
		}
	}
	aee_rr_rec_fiq_step(AEE_FIQ_STEP_KE_WDT_DONE);
}
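
/* Append a formatted message to the given CPU's per-CPU log buffer */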
void aee_wdt_percpu_printf(int cpu, const char *fmt, ...)
{
	va_list args;

	if (wdt_percpu_log_buf[cpu] == NULL)
		return;

	va_start(args, fmt);
	wdt_percpu_log_length[cpu] +=
	    vsnprintf((wdt_percpu_log_buf[cpu] + wdt_percpu_log_length[cpu]),
		      (WDT_PERCPU_LOG_SIZE - wdt_percpu_log_length[cpu]), fmt, args);
	va_end(args);
}
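
/* Append a formatted message to the shared WDT log buffer */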
void aee_wdt_printf(const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	wdt_log_length += vsnprintf((wdt_log_buf + wdt_log_length),
				    (sizeof(wdt_log_buf) - wdt_log_length), fmt, args);
	va_end(args);
}
#if defined(CONFIG_FIQ_GLUE)
/* Save registers into the binary buffer; calls may come from any CPU */
static void aee_dump_cpu_reg_bin(int cpu, void *regs_ptr)
{
	memcpy(&(regs_buffer_bin[cpu].regs), regs_ptr, sizeof(struct pt_regs));
	regs_buffer_bin[cpu].real_len = sizeof(struct pt_regs);
	aee_wdt_percpu_printf(cpu, "pc : %08lx, lr : %08lx, cpsr : %08lx\n",
			      ((struct pt_regs *)regs_ptr)->ARM_pc,
			      ((struct pt_regs *)regs_ptr)->ARM_lr,
			      ((struct pt_regs *)regs_ptr)->ARM_cpsr);
	aee_wdt_percpu_printf(cpu, "sp : %08lx, ip : %08lx, fp : %08lx\n",
			      ((struct pt_regs *)regs_ptr)->ARM_sp,
			      ((struct pt_regs *)regs_ptr)->ARM_ip,
			      ((struct pt_regs *)regs_ptr)->ARM_fp);
	aee_wdt_percpu_printf(cpu, "r10 : %08lx, r9 : %08lx, r8 : %08lx\n",
			      ((struct pt_regs *)regs_ptr)->ARM_r10,
			      ((struct pt_regs *)regs_ptr)->ARM_r9,
			      ((struct pt_regs *)regs_ptr)->ARM_r8);
	aee_wdt_percpu_printf(cpu, "r7 : %08lx, r6 : %08lx, r5 : %08lx\n",
			      ((struct pt_regs *)regs_ptr)->ARM_r7,
			      ((struct pt_regs *)regs_ptr)->ARM_r6,
			      ((struct pt_regs *)regs_ptr)->ARM_r5);
	aee_wdt_percpu_printf(cpu, "r4 : %08lx, r3 : %08lx, r2 : %08lx\n",
			      ((struct pt_regs *)regs_ptr)->ARM_r4,
			      ((struct pt_regs *)regs_ptr)->ARM_r3,
			      ((struct pt_regs *)regs_ptr)->ARM_r2);
	aee_wdt_percpu_printf(cpu, "r1 : %08lx, r0 : %08lx\n",
			      ((struct pt_regs *)regs_ptr)->ARM_r1,
			      ((struct pt_regs *)regs_ptr)->ARM_r0);
}
/* Dump the stack into the binary buffer and record the backtrace */
static void aee_wdt_dump_stack_bin(unsigned int cpu, unsigned long bottom, unsigned long top)
{
	int i, count = 0;
	unsigned long p, fp;
	unsigned long high;
	struct stackframe cur_frame;
	struct pt_regs *exp_regs;

	stacks_buffer_bin[cpu].real_len =
	    aee_dump_stack_top_binary(stacks_buffer_bin[cpu].bin_buf,
				      sizeof(stacks_buffer_bin[cpu].bin_buf), bottom, top);
	stacks_buffer_bin[cpu].top = top;
	stacks_buffer_bin[cpu].bottom = bottom;

	/* stack addresses must lie in the kernel address range */
	if (bottom & 3) {
		aee_wdt_percpu_printf(cpu, "%s bottom unaligned %08lx\n", __func__, bottom);
		return;
	}
	if (!((bottom >= (PAGE_OFFSET + THREAD_SIZE)) && virt_addr_valid(bottom))) {
		aee_wdt_percpu_printf(cpu, "%s bottom out of kernel addr space %08lx\n", __func__,
				      bottom);
		return;
	}
	if (!((top >= (PAGE_OFFSET + THREAD_SIZE)) && virt_addr_valid(top))) {
		aee_wdt_percpu_printf(cpu, "%s top out of kernel addr space %08lx\n", __func__,
				      top);
		return;
	}

	aee_wdt_percpu_printf(cpu, "stack (0x%08lx to 0x%08lx)\n", bottom, top);
	for (p = bottom; p < top; p += 4) {
		unsigned long val;

		if (count == 0)
			aee_wdt_percpu_printf(cpu, "%04lx: ", p & 0xffff);
		val = *((unsigned long *)(p));
		aee_wdt_percpu_printf(cpu, "%08lx ", val);
		count++;
		if (count == 8) {
			aee_wdt_percpu_printf(cpu, "\n");
			count = 0;
		}
	}

	/* save backtrace addresses by walking the frame pointers */
	high = ALIGN(bottom, THREAD_SIZE);
	/* cur_frame.pc = regs_buffer_bin[cpu].regs.ARM_pc; */
	cur_frame.lr = regs_buffer_bin[cpu].regs.ARM_lr;
	cur_frame.fp = regs_buffer_bin[cpu].regs.ARM_fp;
	/* cur_frame.sp = regs_buffer_bin[cpu].regs.ARM_sp; */
	for (i = 0; i < MAX_EXCEPTION_FRAME; i++) {
		wdt_percpu_stackframe[cpu][i] = cur_frame.lr;
		fp = cur_frame.fp;
		if ((fp < (bottom + 12)) || ((fp + 4) >= high)) {
			/* aee_wdt_percpu_printf(cpu, "\n fp %08lx invalid\n", fp); */
			return;
		}
		cur_frame.fp = *(unsigned long *)(fp - 12);
		/* cur_frame.sp = *(unsigned long *)(fp - 8); */
		cur_frame.lr = *(unsigned long *)(fp - 4);
		cur_frame.pc = *(unsigned long *)(fp);
		if (!((cur_frame.lr >= (PAGE_OFFSET + THREAD_SIZE)) &&
		      virt_addr_valid(cur_frame.lr))) {
			/* aee_wdt_percpu_printf(cpu, "\n lr %08lx invalid\n", cur_frame.lr); */
			return;
		}
		if (in_exception_text(cur_frame.pc)) {
			exp_regs = (struct pt_regs *)(fp + 4);
			cur_frame.lr = exp_regs->ARM_pc;
		}
	}
}
#endif /* CONFIG_FIQ_GLUE */
/* save binary register and stack value into ram console */
static void aee_save_reg_stack_sram(int cpu)
{
	int i;
	char str_buf[256];
	int len = 0;

	if (regs_buffer_bin[cpu].real_len != 0) {
		snprintf(str_buf, sizeof(str_buf),
			 "\n\ncpu %d preempt=%lx, softirq=%lx, hardirq=%lx ", cpu,
			 ((wdt_percpu_preempt_cnt[cpu] & PREEMPT_MASK) >> PREEMPT_SHIFT),
			 ((wdt_percpu_preempt_cnt[cpu] & SOFTIRQ_MASK) >> SOFTIRQ_SHIFT),
			 ((wdt_percpu_preempt_cnt[cpu] & HARDIRQ_MASK) >> HARDIRQ_SHIFT));
		aee_sram_fiq_log(str_buf);
		memset_io(str_buf, 0, sizeof(str_buf));
		snprintf(str_buf, sizeof(str_buf),
			 "\ncpu %d r0->r10 fp ip sp lr pc cpsr orig_r0\n", cpu);
		aee_sram_fiq_log(str_buf);
		aee_sram_fiq_save_bin((char *)&(regs_buffer_bin[cpu].regs),
				      regs_buffer_bin[cpu].real_len);
	}

	if (stacks_buffer_bin[cpu].real_len > 0) {
		memset_io(str_buf, 0, sizeof(str_buf));
		snprintf(str_buf, sizeof(str_buf), "\ncpu %d stack [%08lx %08lx]\n",
			 cpu, stacks_buffer_bin[cpu].bottom, stacks_buffer_bin[cpu].top);
		aee_sram_fiq_log(str_buf);
		aee_sram_fiq_save_bin(stacks_buffer_bin[cpu].bin_buf,
				      stacks_buffer_bin[cpu].real_len);
		memset_io(str_buf, 0, sizeof(str_buf));
		len = snprintf(str_buf, sizeof(str_buf), "\ncpu %d backtrace : ", cpu);
		for (i = 0; i < MAX_EXCEPTION_FRAME; i++) {
			if (wdt_percpu_stackframe[cpu][i] == 0)
				break;
			len += snprintf((str_buf + len), (sizeof(str_buf) - len),
					"%08lx, ", wdt_percpu_stackframe[cpu][i]);
		}
		aee_sram_fiq_log(str_buf);
	}
	mrdump_mini_per_cpu_regs(cpu, &regs_buffer_bin[cpu].regs);
}
#ifdef CONFIG_SMP
#ifdef CONFIG_FIQ_GLUE
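/* FIQ IPI handler run on the other CPUs: switch onto the SVC stack passed in
 * by the FIQ glue, record this CPU's registers and a stack snapshot, mark the
 * CPU offline, then spin with FIQ/IRQ disabled.
 */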
void aee_fiq_ipi_cpu_stop(void *arg, void *regs, void *svc_sp)
{
	int cpu = 0;
	register int sp asm("sp");
	struct pt_regs *ptregs = (struct pt_regs *)regs;

	asm volatile ("mov %0, %1\n\t" "mov fp, %2\n\t" : "=r" (sp) : "r" (svc_sp), "r" (ptregs->ARM_fp));
	cpu = get_HW_cpuid();
	if (!cpu_possible(cpu)) {
		aee_wdt_printf("aee_fiq_ipi_cpu_stop at incorrect CPU %d ?\n", cpu);
		local_fiq_disable();
		local_irq_disable();
		while (1)
			cpu_relax();
	}
	aee_wdt_percpu_printf(cpu, "CPU%u: stopping by FIQ\n", cpu);
	wdt_percpu_preempt_cnt[cpu] = preempt_count();
	aee_wdt_percpu_printf(cpu, "preempt=%lx, softirq=%lx, hardirq=%lx\n",
			      ((wdt_percpu_preempt_cnt[cpu] & PREEMPT_MASK) >> PREEMPT_SHIFT),
			      ((wdt_percpu_preempt_cnt[cpu] & SOFTIRQ_MASK) >> SOFTIRQ_SHIFT),
			      ((wdt_percpu_preempt_cnt[cpu] & HARDIRQ_MASK) >> HARDIRQ_SHIFT));
	aee_dump_cpu_reg_bin(cpu, regs);
	aee_wdt_dump_stack_bin(cpu, ((struct pt_regs *)regs)->ARM_sp,
			       ((struct pt_regs *)regs)->ARM_sp + WDT_SAVE_STACK_SIZE);
	set_cpu_online(cpu, false);
	local_fiq_disable();
	local_irq_disable();
	while (1)
		cpu_relax();
}
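
/* Stop the other CPUs via an FIQ-based cross call, waiting up to roughly one
 * second for them to go offline.
 */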
void aee_smp_send_stop(void)
{
	unsigned long timeout;
	struct cpumask mask;
	int cpu = 0;

	cpumask_copy(&mask, cpu_online_mask);
	cpu = get_HW_cpuid();
	cpumask_clear_cpu(cpu, &mask);
	/* mt_fiq_printf("\n fiq_smp_call_function\n"); */
	fiq_smp_call_function(aee_fiq_ipi_cpu_stop, NULL, 0);

	/* Wait up to one second for other CPUs to stop */
	timeout = USEC_PER_SEC;
	while (num_online_cpus() > 1 && timeout--)
		udelay(1);
	if (num_online_cpus() > 1)
		aee_wdt_printf("WDT: failed to stop other CPUs in FIQ\n");
}
#else
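/* Non-FIQ fallback: on arm64 use the generic smp_send_stop(); on arm the IPI
 * path is currently disabled (see the FIXME below), so this only waits.
 */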
void aee_smp_send_stop(void)
{
	unsigned long timeout;
	struct cpumask mask;
	int cpu = 0;

	cpumask_copy(&mask, cpu_online_mask);
#ifdef __aarch64__
	smp_send_stop();
#else
	cpu = get_HW_cpuid();
	cpumask_clear_cpu(cpu, &mask);
	/* irq_raise_softirq(&mask, IPI_CPU_STOP); //FIXME : temporarily disable for atf project bring up */
#endif

	/* Wait up to one second for other CPUs to stop */
	timeout = USEC_PER_SEC;
	while (num_online_cpus() > 1 && timeout--)
		udelay(1);
	if (num_online_cpus() > 1)
		aee_wdt_printf("WDT: failed to stop other CPUs\n");
}
#endif /* CONFIG_FIQ_GLUE */
#endif /* CONFIG_SMP */
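
/* Main watchdog-timeout dump path: kick the watchdog once more, stop the
 * other CPUs, flush the per-CPU register/stack snapshots to SRAM, log the
 * timestamp, then trigger a kernel exception via BUG().
 */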
void aee_wdt_irq_info(void)
{
	unsigned long long t;
	unsigned long nanosec_rem;
	int res = 0, cpu;
	struct wd_api *wd_api = NULL;

	res = get_wd_api(&wd_api);
	aee_rr_rec_fiq_step(AEE_FIQ_STEP_WDT_IRQ_KICK);
	if (res)
		aee_wdt_printf("aee_wdt_irq_info, get wd api error\n");
	else
		wd_api->wd_restart(WD_TYPE_NOLOCK);

	aee_rr_rec_fiq_step(AEE_FIQ_STEP_WDT_IRQ_SMP_STOP);
#ifdef CONFIG_SMP
	aee_smp_send_stop();
#endif

	aee_rr_rec_fiq_step(AEE_FIQ_STEP_WDT_IRQ_STACK);
	for (cpu = 1; cpu < NR_CPUS; cpu++)
		aee_save_reg_stack_sram(cpu);
	aee_sram_fiq_log("\n\n");

	aee_rr_rec_fiq_step(AEE_FIQ_STEP_WDT_IRQ_TIME);
	t = cpu_clock(smp_processor_id());
	nanosec_rem = do_div(t, 1000000000);
	aee_wdt_printf("\nQwdt at [%5lu.%06lu] ", (unsigned long)t, nanosec_rem / 1000);

#ifdef WDT_DEBUG_VERBOSE
	aee_rr_rec_fiq_step(AEE_FIQ_STEP_WDT_IRQ_GIC);
	mt_irq_dump();
	aee_rr_rec_fiq_step(AEE_FIQ_STEP_WDT_IRQ_LOCALTIMER);
	wdt_log_length +=
	    dump_localtimer_info((wdt_log_buf + wdt_log_length),
				 (sizeof(wdt_log_buf) - wdt_log_length));
	aee_rr_rec_fiq_step(AEE_FIQ_STEP_WDT_IRQ_IDLE);
	wdt_log_length +=
	    dump_idle_info((wdt_log_buf + wdt_log_length), (sizeof(wdt_log_buf) - wdt_log_length));
#endif
#ifdef CONFIG_MT_SCHED_MONITOR
	aee_rr_rec_fiq_step(AEE_FIQ_STEP_WDT_IRQ_SCHED);
	mt_aee_dump_sched_traces();
#endif
	aee_sram_fiq_log(wdt_log_buf);

	/* disable lock proving here to avoid dump_stack() from __debug_locks_off() */
	xchg(&debug_locks, 0);
	aee_rr_rec_fiq_step(AEE_FIQ_STEP_WDT_IRQ_DONE);
	aee_rr_rec_exp_type(1);
	BUG();
}

#if defined(CONFIG_FIQ_GLUE)
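/* WDT FIQ entry point (via FIQ glue): switch onto the SVC stack, record this
 * CPU's registers and stack to SRAM, then let exactly one CPU continue into
 * aee_wdt_irq_info(); any later CPU spins here forever.
 */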
void aee_wdt_fiq_info(void *arg, void *regs, void *svc_sp)
{
	register int sp asm("sp");
	struct pt_regs *ptregs = (struct pt_regs *)regs;
	int cpu = 0;

	asm volatile ("mov %0, %1\n\t" "mov fp, %2\n\t" : "=r" (sp) : "r" (svc_sp), "r" (ptregs->ARM_fp));
	aee_rr_rec_fiq_step(AEE_FIQ_STEP_WDT_FIQ_INFO);
	/* mt_fiq_printf("\n Triggered :cpu-%d\n", cpu); */
	cpu = get_HW_cpuid();
	if (!cpu_possible(cpu)) {
		aee_wdt_printf("FIQ: Watchdog time out at incorrect CPU %d ?\n", cpu);
		cpu = 0;
	}
	aee_wdt_percpu_printf(cpu, "CPU %d FIQ: Watchdog time out\n", cpu);
	wdt_percpu_preempt_cnt[cpu] = preempt_count();
	aee_wdt_percpu_printf(cpu, "preempt=%lx, softirq=%lx, hardirq=%lx\n",
			      ((wdt_percpu_preempt_cnt[cpu] & PREEMPT_MASK) >> PREEMPT_SHIFT),
			      ((wdt_percpu_preempt_cnt[cpu] & SOFTIRQ_MASK) >> SOFTIRQ_SHIFT),
			      ((wdt_percpu_preempt_cnt[cpu] & HARDIRQ_MASK) >> HARDIRQ_SHIFT));
	aee_dump_cpu_reg_bin(cpu, regs);
	aee_rr_rec_fiq_step(AEE_FIQ_STEP_WDT_FIQ_STACK);
	aee_wdt_dump_stack_bin(cpu, ((struct pt_regs *)regs)->ARM_sp,
			       ((struct pt_regs *)regs)->ARM_sp + WDT_SAVE_STACK_SIZE);
	aee_save_reg_stack_sram(cpu);

	if (atomic_xchg(&wdt_enter_fiq, 1) != 0) {
		aee_rr_rec_fiq_step(AEE_FIQ_STEP_WDT_FIQ_LOOP);
		aee_wdt_percpu_printf(cpu, "Other CPU already enter WDT FIQ handler\n");
		/* loop forever here to avoid SMP deadlock risk during panic flow */
		while (1)
			;
	}
	aee_rr_rec_fiq_step(AEE_FIQ_STEP_WDT_FIQ_DONE);
	aee_wdt_irq_info();
}
#endif /* CONFIG_FIQ_GLUE */
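
/* Allocate the per-CPU log buffers and clear all WDT dump state at boot */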
static int __init aee_wdt_init(void)
{
	int i;

	atomic_set(&wdt_enter_fiq, 0);
	for (i = 0; i < NR_CPUS; i++) {
		wdt_percpu_log_buf[i] = kzalloc(WDT_PERCPU_LOG_SIZE, GFP_KERNEL);
		if (wdt_percpu_log_buf[i] == NULL)
			LOGE("\n aee_wdt_init : kzalloc fail\n");
		wdt_percpu_log_length[i] = 0;
		wdt_percpu_preempt_cnt[i] = 0;
	}
	memset_io(wdt_log_buf, 0, sizeof(wdt_log_buf));
	memset_io(regs_buffer_bin, 0, sizeof(regs_buffer_bin));
	memset_io(stacks_buffer_bin, 0, sizeof(stacks_buffer_bin));
	memset_io(wdt_percpu_stackframe, 0, sizeof(wdt_percpu_stackframe));
	return 0;
}
module_init(aee_wdt_init);