/*
 * Kernel Debug Core
 *
 * Maintainer: Jason Wessel <jason.wessel@windriver.com>
 *
 * Copyright (C) 2000-2001 VERITAS Software Corporation.
 * Copyright (C) 2002-2004 Timesys Corporation
 * Copyright (C) 2003-2004 Amit S. Kale <amitkale@linsyssoft.com>
 * Copyright (C) 2004 Pavel Machek <pavel@ucw.cz>
 * Copyright (C) 2004-2006 Tom Rini <trini@kernel.crashing.org>
 * Copyright (C) 2004-2006 LinSysSoft Technologies Pvt. Ltd.
 * Copyright (C) 2005-2009 Wind River Systems, Inc.
 * Copyright (C) 2007 MontaVista Software, Inc.
 * Copyright (C) 2008 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Contributors at various stages not listed above:
 *  Jason Wessel ( jason.wessel@windriver.com )
 *  George Anzinger <george@mvista.com>
 *  Anurekh Saxena (anurekh.saxena@timesys.com)
 *  Lake Stevens Instrument Division (Glenn Engel)
 *  Jim Kingdon, Cygnus Support.
 *
 * Original KGDB stub: David Grothe <dave@gcom.com>,
 * Tigran Aivazian <tigran@sco.com>
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2. This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 */
#include <linux/pid_namespace.h>
#include <linux/clocksource.h>
#include <linux/serial_core.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/console.h>
#include <linux/threads.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/sysrq.h>
#include <linux/reboot.h>
#include <linux/init.h>
#include <linux/kgdb.h>
#include <linux/kdb.h>
#include <linux/pid.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/vmacache.h>
#include <linux/rcupdate.h>

#include <asm/cacheflush.h>
#include <asm/byteorder.h>
#include <linux/atomic.h>

#include "debug_core.h"
static int kgdb_break_asap;

struct debuggerinfo_struct kgdb_info[NR_CPUS];

/**
 * kgdb_connected - Is a host GDB connected to us?
 */
int kgdb_connected;
EXPORT_SYMBOL_GPL(kgdb_connected);

/* All the KGDB handlers are installed */
int kgdb_io_module_registered;

/* Guard for recursive entry */
static int exception_level;

struct kgdb_io *dbg_io_ops;
static DEFINE_SPINLOCK(kgdb_registration_lock);
/* Action for the reboot notifier, a global so kdb can change it */
static int kgdbreboot;
/* kgdb console driver is loaded */
static int kgdb_con_registered;
/* determine if kgdb console output should be used */
static int kgdb_use_con;
/* Flag for alternate operations for early debugging */
bool dbg_is_early = true;
/* Next cpu to become the master debug core */
int dbg_switch_cpu;
/* Flag for entering kdb when a panic occurs */
static bool break_on_panic = true;
/* Flag for entering kdb when an exception occurs */
static bool break_on_exception = true;

/* Use kdb or gdbserver mode */
int dbg_kdb_mode = 1;

static int __init opt_kgdb_con(char *str)
{
	kgdb_use_con = 1;
	return 0;
}

early_param("kgdbcon", opt_kgdb_con);

module_param(kgdb_use_con, int, 0644);
module_param(kgdbreboot, int, 0644);
module_param(break_on_panic, bool, 0644);
module_param(break_on_exception, bool, 0644);
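
/*
 * Example (illustrative sketch only, not exhaustive): with a serial I/O
 * driver the debugger is typically enabled from the kernel command line,
 * e.g.
 *
 *	kgdboc=ttyS0,115200 kgdbcon kgdbwait
 *
 * "kgdbcon" and "kgdbwait" are handled by the early_param() hooks in this
 * file.  The module_param() knobs above (kgdb_use_con, kgdbreboot,
 * break_on_panic, break_on_exception) can also be changed at run time via
 * their entries under /sys/module/.../parameters/; the exact module name
 * in that path depends on how this file is built.
 */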
/*
 * Holds information about breakpoints in a kernel. These breakpoints are
 * added and removed by gdb.
 */
static struct kgdb_bkpt kgdb_break[KGDB_MAX_BREAKPOINTS] = {
	[0 ... KGDB_MAX_BREAKPOINTS-1] = { .state = BP_UNDEFINED }
};

/*
 * The CPU# of the active CPU, or -1 if none:
 */
atomic_t kgdb_active = ATOMIC_INIT(-1);
EXPORT_SYMBOL_GPL(kgdb_active);
static DEFINE_RAW_SPINLOCK(dbg_master_lock);
static DEFINE_RAW_SPINLOCK(dbg_slave_lock);
/*
 * We use NR_CPUS not PERCPU, in case kgdb is used to debug early
 * bootup code (which might not have percpu set up yet):
 */
static atomic_t masters_in_kgdb;
static atomic_t slaves_in_kgdb;
static atomic_t kgdb_break_tasklet_var;
atomic_t kgdb_setting_breakpoint;

struct task_struct *kgdb_usethread;
struct task_struct *kgdb_contthread;

int kgdb_single_step;
static pid_t kgdb_sstep_pid;

/* to keep track of the CPU which is doing the single stepping */
atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);

/*
 * If you are debugging a problem where roundup (the collection of
 * all other CPUs) is a problem [this should be extremely rare],
 * then use the nokgdbroundup option to avoid roundup. In that case
 * the other CPUs might interfere with your debugging context, so
 * use this with care:
 */
static int kgdb_do_roundup = 1;

static int __init opt_nokgdbroundup(char *str)
{
	kgdb_do_roundup = 0;
	return 0;
}

early_param("nokgdbroundup", opt_nokgdbroundup);
/*
 * Finally, some KGDB code :-)
 */

/*
 * Weak aliases for breakpoint management,
 * can be overridden by architectures when needed:
 */
int __weak kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
{
	int err;

	err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr,
				BREAK_INSTR_SIZE);
	if (err)
		return err;
	err = probe_kernel_write((char *)bpt->bpt_addr,
				 arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE);
	return err;
}

int __weak kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
{
	return probe_kernel_write((char *)bpt->bpt_addr,
				  (char *)bpt->saved_instr, BREAK_INSTR_SIZE);
}
int __weak kgdb_validate_break_address(unsigned long addr)
{
	struct kgdb_bkpt tmp;
	int err;

	/* Validate setting the breakpoint and then removing it.  If the
	 * remove fails, the kernel needs to emit a bad message because we
	 * are in deep trouble not being able to put things back the way we
	 * found them.
	 */
	tmp.bpt_addr = addr;
	err = kgdb_arch_set_breakpoint(&tmp);
	if (err)
		return err;
	err = kgdb_arch_remove_breakpoint(&tmp);
	if (err)
		printk(KERN_ERR "KGDB: Critical breakpoint error, kernel "
		       "memory destroyed at: %lx", addr);
	return err;
}
unsigned long __weak kgdb_arch_pc(int exception, struct pt_regs *regs)
{
	return instruction_pointer(regs);
}

int __weak kgdb_arch_init(void)
{
	return 0;
}

int __weak kgdb_skipexception(int exception, struct pt_regs *regs)
{
	return 0;
}
/*
 * Some architectures need cache flushes when we set/clear a
 * breakpoint:
 */
static void kgdb_flush_swbreak_addr(unsigned long addr)
{
	if (!CACHE_FLUSH_IS_SAFE)
		return;

	if (current->mm) {
		int i;

		for (i = 0; i < VMACACHE_SIZE; i++) {
			if (!current->vmacache[i])
				continue;
			flush_cache_range(current->vmacache[i],
					  addr, addr + BREAK_INSTR_SIZE);
		}
	}

	/* Force flush instruction cache if it was outside the mm */
	flush_icache_range(addr, addr + BREAK_INSTR_SIZE);
}
/*
 * SW breakpoint management:
 */
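/*
 * Life cycle of a software breakpoint slot in kgdb_break[], as implemented
 * by the helpers below (summary added for readability, derived from the
 * code itself):
 *
 *	BP_UNDEFINED or BP_REMOVED --dbg_set_sw_break()---------> BP_SET
 *	BP_SET  --dbg_activate_sw_breakpoints()----------------> BP_ACTIVE
 *		  (breakpoint instruction written to memory)
 *	BP_ACTIVE --dbg_deactivate_sw_breakpoints()------------> BP_SET
 *		  (original instruction restored)
 *	BP_SET  --dbg_remove_sw_break()------------------------> BP_REMOVED
 *	any state --dbg_remove_all_break()---------------------> BP_UNDEFINED
 */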
int dbg_activate_sw_breakpoints(void)
{
	int error;
	int ret = 0;
	int i;

	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
		if (kgdb_break[i].state != BP_SET)
			continue;

		error = kgdb_arch_set_breakpoint(&kgdb_break[i]);
		if (error) {
			ret = error;
			printk(KERN_INFO "KGDB: BP install failed: %lx",
			       kgdb_break[i].bpt_addr);
			continue;
		}

		kgdb_flush_swbreak_addr(kgdb_break[i].bpt_addr);
		kgdb_break[i].state = BP_ACTIVE;
	}
	return ret;
}
int dbg_set_sw_break(unsigned long addr)
{
	int err = kgdb_validate_break_address(addr);
	int breakno = -1;
	int i;

	if (err)
		return err;

	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
		if ((kgdb_break[i].state == BP_SET) &&
		    (kgdb_break[i].bpt_addr == addr))
			return -EEXIST;
	}
	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
		if (kgdb_break[i].state == BP_REMOVED &&
		    kgdb_break[i].bpt_addr == addr) {
			breakno = i;
			break;
		}
	}

	if (breakno == -1) {
		for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
			if (kgdb_break[i].state == BP_UNDEFINED) {
				breakno = i;
				break;
			}
		}
	}

	if (breakno == -1)
		return -E2BIG;

	kgdb_break[breakno].state = BP_SET;
	kgdb_break[breakno].type = BP_BREAKPOINT;
	kgdb_break[breakno].bpt_addr = addr;

	return 0;
}
int dbg_deactivate_sw_breakpoints(void)
{
	int error;
	int ret = 0;
	int i;

	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
		if (kgdb_break[i].state != BP_ACTIVE)
			continue;
		error = kgdb_arch_remove_breakpoint(&kgdb_break[i]);
		if (error) {
			printk(KERN_INFO "KGDB: BP remove failed: %lx\n",
			       kgdb_break[i].bpt_addr);
			ret = error;
		}

		kgdb_flush_swbreak_addr(kgdb_break[i].bpt_addr);
		kgdb_break[i].state = BP_SET;
	}
	return ret;
}
int dbg_remove_sw_break(unsigned long addr)
{
	int i;

	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
		if ((kgdb_break[i].state == BP_SET) &&
		    (kgdb_break[i].bpt_addr == addr)) {
			kgdb_break[i].state = BP_REMOVED;
			return 0;
		}
	}
	return -ENOENT;
}

int kgdb_isremovedbreak(unsigned long addr)
{
	int i;

	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
		if ((kgdb_break[i].state == BP_REMOVED) &&
		    (kgdb_break[i].bpt_addr == addr))
			return 1;
	}
	return 0;
}
int dbg_remove_all_break(void)
{
	int error;
	int i;

	/* Clear memory breakpoints. */
	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
		if (kgdb_break[i].state != BP_ACTIVE)
			goto setundefined;
		error = kgdb_arch_remove_breakpoint(&kgdb_break[i]);
		if (error)
			printk(KERN_ERR "KGDB: breakpoint remove failed: %lx\n",
			       kgdb_break[i].bpt_addr);
setundefined:
		kgdb_break[i].state = BP_UNDEFINED;
	}

	/* Clear hardware breakpoints. */
	if (arch_kgdb_ops.remove_all_hw_break)
		arch_kgdb_ops.remove_all_hw_break();

	return 0;
}
/*
 * Return true if there is a valid kgdb I/O module.  If no debugger is
 * attached, a message can also be printed to the console about waiting
 * for the debugger to attach.
 *
 * The print_wait argument should only be true when called from inside
 * the core kgdb_handle_exception, because it will wait for the
 * debugger to attach.
 */
static int kgdb_io_ready(int print_wait)
{
	if (!dbg_io_ops)
		return 0;
	if (kgdb_connected)
		return 1;
	if (atomic_read(&kgdb_setting_breakpoint))
		return 1;
	if (print_wait) {
#ifdef CONFIG_KGDB_KDB
		if (!dbg_kdb_mode)
			printk(KERN_CRIT "KGDB: waiting... or $3#33 for KDB\n");
#else
		printk(KERN_CRIT "KGDB: Waiting for remote debugger\n");
#endif
	}
	return 1;
}
static int kgdb_reenter_check(struct kgdb_state *ks)
{
	unsigned long addr;

	if (atomic_read(&kgdb_active) != raw_smp_processor_id())
		return 0;

	/* Panic on recursive debugger calls: */
	exception_level++;
	addr = kgdb_arch_pc(ks->ex_vector, ks->linux_regs);
	dbg_deactivate_sw_breakpoints();

	/*
	 * If the breakpoint was removed successfully at the place the
	 * exception occurred, try to recover and print a warning to the
	 * end user, because the user planted a breakpoint in a place
	 * that KGDB needs in order to function.
	 */
	if (dbg_remove_sw_break(addr) == 0) {
		exception_level = 0;
		kgdb_skipexception(ks->ex_vector, ks->linux_regs);
		dbg_activate_sw_breakpoints();
		printk(KERN_CRIT "KGDB: re-enter error: breakpoint removed %lx\n",
			addr);
		WARN_ON_ONCE(1);

		return 1;
	}
	dbg_remove_all_break();
	kgdb_skipexception(ks->ex_vector, ks->linux_regs);

	if (exception_level > 1) {
		dump_stack();
		panic("Recursive entry to debugger");
	}

	printk(KERN_CRIT "KGDB: re-enter exception: ALL breakpoints killed\n");
#ifdef CONFIG_KGDB_KDB
	/* Allow kdb to debug itself one level */
	return 0;
#endif
	dump_stack();
	panic("Recursive entry to debugger");

	return 1;
}
static void dbg_touch_watchdogs(void)
{
	touch_softlockup_watchdog_sync();
	clocksource_touch_watchdog();
	rcu_cpu_stall_reset();
}
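
/*
 * kgdb_cpu_enter() - common entry/park point for every CPU that drops
 * into the debugger (descriptive summary added for readability).
 *
 * The CPU that took the exception enters with DCPU_WANT_MASTER and spins
 * until it can take dbg_master_lock and become the single "master" CPU
 * talking to the front end (kdb or the gdb serial stub).  The remaining
 * CPUs are rounded up and enter with DCPU_IS_SLAVE; they spin until
 * dbg_slave_lock is released by the master.  On the way out the master
 * restores software/hardware breakpoints, touches the watchdogs and
 * re-enables interrupts via local_irq_restore().
 */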
static int kgdb_cpu_enter(struct kgdb_state *ks, struct pt_regs *regs,
		int exception_state)
{
	unsigned long flags;
	int sstep_tries = 100;
	int error;
	int cpu;
	int trace_on = 0;
	int online_cpus = num_online_cpus();

	kgdb_info[ks->cpu].enter_kgdb++;
	kgdb_info[ks->cpu].exception_state |= exception_state;

	if (exception_state == DCPU_WANT_MASTER)
		atomic_inc(&masters_in_kgdb);
	else
		atomic_inc(&slaves_in_kgdb);

	if (arch_kgdb_ops.disable_hw_break)
		arch_kgdb_ops.disable_hw_break(regs);

acquirelock:
	/*
	 * Interrupts will be restored by the 'trap return' code, except when
	 * single stepping.
	 */
	local_irq_save(flags);

	cpu = ks->cpu;
	kgdb_info[cpu].debuggerinfo = regs;
	kgdb_info[cpu].task = current;
	kgdb_info[cpu].ret_state = 0;
	kgdb_info[cpu].irq_depth = hardirq_count() >> HARDIRQ_SHIFT;

	/* Make sure the above info reaches the primary CPU */
	smp_mb();

	if (exception_level == 1) {
		if (raw_spin_trylock(&dbg_master_lock))
			atomic_xchg(&kgdb_active, cpu);
		goto cpu_master_loop;
	}
	/*
	 * The CPU will loop if it is a slave, or it will loop requesting
	 * to become the kgdb master CPU until it acquires the
	 * kgdb_active lock:
	 */
	while (1) {
cpu_loop:
		if (kgdb_info[cpu].exception_state & DCPU_NEXT_MASTER) {
			kgdb_info[cpu].exception_state &= ~DCPU_NEXT_MASTER;
			goto cpu_master_loop;
		} else if (kgdb_info[cpu].exception_state & DCPU_WANT_MASTER) {
			if (raw_spin_trylock(&dbg_master_lock)) {
				atomic_xchg(&kgdb_active, cpu);
				break;
			}
		} else if (kgdb_info[cpu].exception_state & DCPU_IS_SLAVE) {
			if (!raw_spin_is_locked(&dbg_slave_lock))
				goto return_normal;
		} else {
return_normal:
			/* Return to normal operation by executing any
			 * hw breakpoint fixup.
			 */
			if (arch_kgdb_ops.correct_hw_break)
				arch_kgdb_ops.correct_hw_break();
			if (trace_on)
				tracing_on();
			kgdb_info[cpu].exception_state &=
				~(DCPU_WANT_MASTER | DCPU_IS_SLAVE);
			kgdb_info[cpu].enter_kgdb--;
			smp_mb__before_atomic();
			atomic_dec(&slaves_in_kgdb);
			dbg_touch_watchdogs();
			local_irq_restore(flags);
			return 0;
		}
		cpu_relax();
	}
	/*
	 * For single stepping, try to only enter on the processor
	 * that was single stepping.  To guard against a deadlock, the
	 * kernel will only try for the value of sstep_tries before
	 * giving up and continuing on.
	 */
	if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
	    (kgdb_info[cpu].task &&
	     kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
		atomic_set(&kgdb_active, -1);
		raw_spin_unlock(&dbg_master_lock);
		dbg_touch_watchdogs();
		local_irq_restore(flags);

		goto acquirelock;
	}

	if (!kgdb_io_ready(1)) {
		kgdb_info[cpu].ret_state = 1;
		goto kgdb_restore; /* No I/O connection, resume the system */
	}

	/*
	 * Don't enter if we have hit a removed breakpoint.
	 */
	if (kgdb_skipexception(ks->ex_vector, ks->linux_regs))
		goto kgdb_restore;

	/* Call the I/O driver's pre_exception routine */
	if (dbg_io_ops->pre_exception)
		dbg_io_ops->pre_exception();
	/*
	 * Get the passive CPU lock which will hold all the non-primary
	 * CPUs in a spin state while the debugger is active:
	 */
	if (!kgdb_single_step)
		raw_spin_lock(&dbg_slave_lock);
#ifdef CONFIG_SMP
	/* If send_ready set, slaves are already waiting */
	if (ks->send_ready)
		atomic_set(ks->send_ready, 1);

	/* Signal the other CPUs to enter kgdb_wait() */
	else if ((!kgdb_single_step) && kgdb_do_roundup)
		kgdb_roundup_cpus(flags);
#endif

	/*
	 * Wait for the other CPUs to be notified and be waiting for us:
	 */
	while (kgdb_do_roundup && (atomic_read(&masters_in_kgdb) +
				atomic_read(&slaves_in_kgdb)) != online_cpus)
		cpu_relax();

	/*
	 * At this point the primary processor is completely
	 * in the debugger and all secondary CPUs are quiescent
	 */
	dbg_deactivate_sw_breakpoints();
	kgdb_single_step = 0;
	kgdb_contthread = current;
	exception_level = 0;
	trace_on = tracing_is_on();
	if (trace_on)
		tracing_off();

	while (1) {
cpu_master_loop:
		if (dbg_kdb_mode) {
			kgdb_connected = 1;
			error = kdb_stub(ks);
			if (error == -1)
				continue;
			kgdb_connected = 0;
		} else {
			error = gdb_serial_stub(ks);
		}

		if (error == DBG_PASS_EVENT) {
			dbg_kdb_mode = !dbg_kdb_mode;
		} else if (error == DBG_SWITCH_CPU_EVENT) {
			kgdb_info[dbg_switch_cpu].exception_state |=
				DCPU_NEXT_MASTER;
			goto cpu_loop;
		} else {
			kgdb_info[cpu].ret_state = error;
			break;
		}
	}

	/* Call the I/O driver's post_exception routine */
	if (dbg_io_ops->post_exception)
		dbg_io_ops->post_exception();

	if (!kgdb_single_step) {
		raw_spin_unlock(&dbg_slave_lock);
		/* Wait till all the CPUs have quit from the debugger. */
		while (kgdb_do_roundup && atomic_read(&slaves_in_kgdb))
			cpu_relax();
	}

kgdb_restore:
	if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
		int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
		if (kgdb_info[sstep_cpu].task)
			kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
		else
			kgdb_sstep_pid = 0;
	}
	if (arch_kgdb_ops.correct_hw_break)
		arch_kgdb_ops.correct_hw_break();
	if (trace_on)
		tracing_on();

	kgdb_info[cpu].exception_state &=
		~(DCPU_WANT_MASTER | DCPU_IS_SLAVE);
	kgdb_info[cpu].enter_kgdb--;
	smp_mb__before_atomic();
	atomic_dec(&masters_in_kgdb);
	/* Free kgdb_active */
	atomic_set(&kgdb_active, -1);
	raw_spin_unlock(&dbg_master_lock);
	dbg_touch_watchdogs();
	local_irq_restore(flags);

	return kgdb_info[cpu].ret_state;
}
/*
 * kgdb_handle_exception() - main entry point from a kernel exception
 *
 * Locking hierarchy:
 *	interface locks, if any (begin_session)
 *	kgdb lock (kgdb_active)
 */
int
kgdb_handle_exception(int evector, int signo, int ecode, struct pt_regs *regs)
{
	struct kgdb_state kgdb_var;
	struct kgdb_state *ks = &kgdb_var;
	int ret = 0;

	if (arch_kgdb_ops.enable_nmi)
		arch_kgdb_ops.enable_nmi(0);

	if (unlikely(signo != SIGTRAP && !break_on_exception))
		return 1;

	memset(ks, 0, sizeof(struct kgdb_state));
	ks->cpu = raw_smp_processor_id();
	ks->ex_vector = evector;
	ks->signo = signo;
	ks->err_code = ecode;
	ks->linux_regs = regs;

	if (kgdb_reenter_check(ks))
		goto out; /* Ouch, double exception ! */
	if (kgdb_info[ks->cpu].enter_kgdb != 0)
		goto out;

	ret = kgdb_cpu_enter(ks, regs, DCPU_WANT_MASTER);
out:
	if (arch_kgdb_ops.enable_nmi)
		arch_kgdb_ops.enable_nmi(1);
	return ret;
}
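
/*
 * Example (illustrative sketch only; MY_BP_VECTOR and the handler name are
 * hypothetical): an architecture's breakpoint trap handler typically
 * funnels into the core by passing its trap vector, a signal number and
 * the saved register set, e.g.:
 *
 *	static int my_arch_bp_handler(struct pt_regs *regs)
 *	{
 *		return kgdb_handle_exception(MY_BP_VECTOR, SIGTRAP, 0, regs);
 *	}
 *
 * A zero return from kgdb_handle_exception() means the event was consumed
 * by the debugger; a non-zero return means the caller should treat it as
 * unhandled.
 */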
/*
 * GDB places a breakpoint at this function to find out about dynamically
 * loaded objects.  It's not defined static so that only one instance with
 * this name exists in the kernel.
 */
static int module_event(struct notifier_block *self, unsigned long val,
	void *data)
{
	return 0;
}

static struct notifier_block dbg_module_load_nb = {
	.notifier_call	= module_event,
};
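
/*
 * kgdb_nmicallback() - roundup entry for a CPU interrupted by the master
 * (descriptive summary added for readability).
 *
 * Called from the architecture's roundup IPI/NMI handler with this CPU's
 * number and saved registers.  If a debug session is in progress
 * (dbg_master_lock is held) and this CPU has not already entered the
 * debugger, it parks here as a slave.  Returns 0 if the CPU entered the
 * debugger, 1 otherwise.
 */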
int kgdb_nmicallback(int cpu, void *regs)
{
#ifdef CONFIG_SMP
	struct kgdb_state kgdb_var;
	struct kgdb_state *ks = &kgdb_var;

	memset(ks, 0, sizeof(struct kgdb_state));
	ks->cpu = cpu;
	ks->linux_regs = regs;

	if (kgdb_info[ks->cpu].enter_kgdb == 0 &&
			raw_spin_is_locked(&dbg_master_lock)) {
		kgdb_cpu_enter(ks, regs, DCPU_IS_SLAVE);
		return 0;
	}
#endif
	return 1;
}
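
/*
 * kgdb_nmicallin() - enter the debugger as master from an NMI-style path
 * (descriptive summary added for readability, based on the send_ready
 * handling in kgdb_cpu_enter()).
 *
 * The caller supplies send_ready; when it is set the core skips the normal
 * CPU roundup and instead sets *send_ready to 1, releasing slave CPUs the
 * caller has already captured.  Returns 0 if this CPU entered the debugger
 * as master, 1 if kgdb was not ready or the CPU is already in the debugger.
 */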
int kgdb_nmicallin(int cpu, int trapnr, void *regs, int err_code,
		   atomic_t *send_ready)
{
#ifdef CONFIG_SMP
	if (!kgdb_io_ready(0) || !send_ready)
		return 1;

	if (kgdb_info[cpu].enter_kgdb == 0) {
		struct kgdb_state kgdb_var;
		struct kgdb_state *ks = &kgdb_var;

		memset(ks, 0, sizeof(struct kgdb_state));
		ks->cpu = cpu;
		ks->ex_vector = trapnr;
		ks->signo = SIGTRAP;
		ks->err_code = err_code;
		ks->linux_regs = regs;
		ks->send_ready = send_ready;
		kgdb_cpu_enter(ks, regs, DCPU_WANT_MASTER);
		return 0;
	}
#endif
	return 1;
}
static void kgdb_console_write(struct console *co, const char *s,
			       unsigned count)
{
	unsigned long flags;

	/* If we're debugging, or KGDB has not connected, don't try
	 * to print. */
	if (!kgdb_connected || atomic_read(&kgdb_active) != -1 || dbg_kdb_mode)
		return;

	local_irq_save(flags);
	gdbstub_msg_write(s, count);
	local_irq_restore(flags);
}

static struct console kgdbcons = {
	.name		= "kgdb",
	.write		= kgdb_console_write,
	.flags		= CON_PRINTBUFFER | CON_ENABLED,
	.index		= -1,
};
#ifdef CONFIG_MAGIC_SYSRQ
static void sysrq_handle_dbg(int key)
{
	if (!dbg_io_ops) {
		printk(KERN_CRIT "ERROR: No KGDB I/O module available\n");
		return;
	}
	if (!kgdb_connected) {
#ifdef CONFIG_KGDB_KDB
		if (!dbg_kdb_mode)
			printk(KERN_CRIT "KGDB or $3#33 for KDB\n");
#else
		printk(KERN_CRIT "Entering KGDB\n");
#endif
	}

	kgdb_breakpoint();
}

static struct sysrq_key_op sysrq_dbg_op = {
	.handler	= sysrq_handle_dbg,
	.help_msg	= "debug(g)",
	.action_msg	= "DEBUG",
};
#endif
static int kgdb_panic_event(struct notifier_block *self,
			    unsigned long val,
			    void *data)
{
	if (!break_on_panic)
		return NOTIFY_DONE;

	if (dbg_kdb_mode)
		kdb_printf("PANIC: %s\n", (char *)data);

	kgdb_breakpoint();

	return NOTIFY_DONE;
}

static struct notifier_block kgdb_panic_event_nb = {
	.notifier_call	= kgdb_panic_event,
	.priority	= INT_MAX,
};

void __weak kgdb_arch_late(void)
{
}

void __init dbg_late_init(void)
{
	dbg_is_early = false;
	if (kgdb_io_module_registered)
		kgdb_arch_late();
	kdb_init(KDB_INIT_FULL);
}
static int
dbg_notify_reboot(struct notifier_block *this, unsigned long code, void *x)
{
	/*
	 * Take the following action on reboot notify depending on value:
	 *    1 == Enter debugger
	 *    0 == [the default] detach debug client
	 *   -1 == Do nothing... and use this until the board resets
	 */
	switch (kgdbreboot) {
	case 1:
		kgdb_breakpoint();
	case -1:
		goto done;
	}
	if (!dbg_kdb_mode)
		gdbstub_exit(code);
done:
	return NOTIFY_DONE;
}

static struct notifier_block dbg_reboot_notifier = {
	.notifier_call		= dbg_notify_reboot,
	.next			= NULL,
	.priority		= INT_MAX,
};
static void kgdb_register_callbacks(void)
{
	if (!kgdb_io_module_registered) {
		kgdb_io_module_registered = 1;
		kgdb_arch_init();
		if (!dbg_is_early)
			kgdb_arch_late();
		register_module_notifier(&dbg_module_load_nb);
		register_reboot_notifier(&dbg_reboot_notifier);
		atomic_notifier_chain_register(&panic_notifier_list,
					       &kgdb_panic_event_nb);
#ifdef CONFIG_MAGIC_SYSRQ
		register_sysrq_key('g', &sysrq_dbg_op);
#endif
		if (kgdb_use_con && !kgdb_con_registered) {
			register_console(&kgdbcons);
			kgdb_con_registered = 1;
		}
	}
}

static void kgdb_unregister_callbacks(void)
{
	/*
	 * When this routine is called KGDB should unregister from the
	 * panic handler and clean up, making sure it is not handling any
	 * break exceptions at the time.
	 */
	if (kgdb_io_module_registered) {
		kgdb_io_module_registered = 0;
		unregister_reboot_notifier(&dbg_reboot_notifier);
		unregister_module_notifier(&dbg_module_load_nb);
		atomic_notifier_chain_unregister(&panic_notifier_list,
						 &kgdb_panic_event_nb);
		kgdb_arch_exit();
#ifdef CONFIG_MAGIC_SYSRQ
		unregister_sysrq_key('g', &sysrq_dbg_op);
#endif
		if (kgdb_con_registered) {
			unregister_console(&kgdbcons);
			kgdb_con_registered = 0;
		}
	}
}
/*
 * There are times when a tasklet needs to be used instead of a compiled-in
 * breakpoint so as to cause an exception outside a kgdb I/O module, such
 * as is the case with kgdboe, where calling a breakpoint in the I/O
 * driver itself would be fatal.
 */
static void kgdb_tasklet_bpt(unsigned long ing)
{
	kgdb_breakpoint();
	atomic_set(&kgdb_break_tasklet_var, 0);
}

static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
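
/*
 * kgdb_schedule_breakpoint() - arm the breakpoint tasklet above exactly
 * once (descriptive summary added for readability).  If a breakpoint is
 * already pending, a debug session is already active, or kgdb_breakpoint()
 * is currently executing, the request is silently dropped.
 */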
void kgdb_schedule_breakpoint(void)
{
	if (atomic_read(&kgdb_break_tasklet_var) ||
		atomic_read(&kgdb_active) != -1 ||
		atomic_read(&kgdb_setting_breakpoint))
		return;
	atomic_inc(&kgdb_break_tasklet_var);
	tasklet_schedule(&kgdb_tasklet_breakpoint);
}
EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);

static void kgdb_initial_breakpoint(void)
{
	kgdb_break_asap = 0;

	printk(KERN_CRIT "kgdb: Waiting for connection from remote gdb...\n");
	kgdb_breakpoint();
}
/**
 *	kgdb_register_io_module - register KGDB IO module
 *	@new_dbg_io_ops: the io ops vector
 *
 *	Register it with the KGDB core.
 */
int kgdb_register_io_module(struct kgdb_io *new_dbg_io_ops)
{
	int err;

	spin_lock(&kgdb_registration_lock);

	if (dbg_io_ops) {
		spin_unlock(&kgdb_registration_lock);

		printk(KERN_ERR "kgdb: Another I/O driver is already "
				"registered with KGDB.\n");
		return -EBUSY;
	}

	if (new_dbg_io_ops->init) {
		err = new_dbg_io_ops->init();
		if (err) {
			spin_unlock(&kgdb_registration_lock);
			return err;
		}
	}

	dbg_io_ops = new_dbg_io_ops;

	spin_unlock(&kgdb_registration_lock);

	printk(KERN_INFO "kgdb: Registered I/O driver %s.\n",
	       new_dbg_io_ops->name);

	/* Arm KGDB now. */
	kgdb_register_callbacks();

	if (kgdb_break_asap)
		kgdb_initial_breakpoint();

	return 0;
}
EXPORT_SYMBOL_GPL(kgdb_register_io_module);
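
/*
 * Example (illustrative sketch only; the "my_dbg" names are hypothetical):
 * a minimal polled I/O driver registers itself roughly like this, usually
 * from its own init code:
 *
 *	static struct kgdb_io my_dbg_io_ops = {
 *		.name		= "my_dbg",
 *		.read_char	= my_dbg_get_char,
 *		.write_char	= my_dbg_put_char,
 *	};
 *
 *	err = kgdb_register_io_module(&my_dbg_io_ops);
 *
 * and tears itself down with kgdb_unregister_io_module(&my_dbg_io_ops)
 * once the debugger is known to be detached.
 */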
/**
 *	kgdb_unregister_io_module - unregister KGDB IO module
 *	@old_dbg_io_ops: the io ops vector
 *
 *	Unregister it with the KGDB core.
 */
void kgdb_unregister_io_module(struct kgdb_io *old_dbg_io_ops)
{
	BUG_ON(kgdb_connected);

	/*
	 * KGDB is no longer able to communicate out, so
	 * unregister our callbacks and reset state.
	 */
	kgdb_unregister_callbacks();

	spin_lock(&kgdb_registration_lock);

	WARN_ON_ONCE(dbg_io_ops != old_dbg_io_ops);
	dbg_io_ops = NULL;

	spin_unlock(&kgdb_registration_lock);

	printk(KERN_INFO
		"kgdb: Unregistered I/O driver %s, debugger disabled.\n",
		old_dbg_io_ops->name);
}
EXPORT_SYMBOL_GPL(kgdb_unregister_io_module);
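
/*
 * dbg_io_get_char() - common polled-input helper for both front ends
 * (descriptive summary added for readability).  NO_POLL_CHAR from the I/O
 * driver is mapped to -1, and in kdb mode DEL (127) is translated to
 * backspace (8) so line editing behaves the same across terminals.
 */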
int dbg_io_get_char(void)
{
	int ret = dbg_io_ops->read_char();
	if (ret == NO_POLL_CHAR)
		return -1;
	if (!dbg_kdb_mode)
		return ret;
	if (ret == 127)
		return 8;
	return ret;
}
/**
 * kgdb_breakpoint - generate breakpoint exception
 *
 * This function will generate a breakpoint exception.  It is used at the
 * beginning of a program to sync up with a debugger and can be used
 * otherwise as a quick means to stop program execution and "break" into
 * the debugger.
 */
noinline void kgdb_breakpoint(void)
{
	atomic_inc(&kgdb_setting_breakpoint);
	wmb(); /* Sync point before breakpoint */
	arch_kgdb_breakpoint();
	wmb(); /* Sync point after breakpoint */
	atomic_dec(&kgdb_setting_breakpoint);
}
EXPORT_SYMBOL_GPL(kgdb_breakpoint);

static int __init opt_kgdb_wait(char *str)
{
	kgdb_break_asap = 1;

	kdb_init(KDB_INIT_EARLY);
	if (kgdb_io_module_registered)
		kgdb_initial_breakpoint();

	return 0;
}

early_param("kgdbwait", opt_kgdb_wait);