debug_core.c

/*
 * Kernel Debug Core
 *
 * Maintainer: Jason Wessel <jason.wessel@windriver.com>
 *
 * Copyright (C) 2000-2001 VERITAS Software Corporation.
 * Copyright (C) 2002-2004 Timesys Corporation
 * Copyright (C) 2003-2004 Amit S. Kale <amitkale@linsyssoft.com>
 * Copyright (C) 2004 Pavel Machek <pavel@ucw.cz>
 * Copyright (C) 2004-2006 Tom Rini <trini@kernel.crashing.org>
 * Copyright (C) 2004-2006 LinSysSoft Technologies Pvt. Ltd.
 * Copyright (C) 2005-2009 Wind River Systems, Inc.
 * Copyright (C) 2007 MontaVista Software, Inc.
 * Copyright (C) 2008 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Contributors at various stages not listed above:
 * Jason Wessel ( jason.wessel@windriver.com )
 * George Anzinger <george@mvista.com>
 * Anurekh Saxena (anurekh.saxena@timesys.com)
 * Lake Stevens Instrument Division (Glenn Engel)
 * Jim Kingdon, Cygnus Support.
 *
 * Original KGDB stub: David Grothe <dave@gcom.com>,
 * Tigran Aivazian <tigran@sco.com>
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2. This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 */

#include <linux/pid_namespace.h>
#include <linux/clocksource.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/console.h>
#include <linux/threads.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/sysrq.h>
#include <linux/reboot.h>
#include <linux/init.h>
#include <linux/kgdb.h>
#include <linux/kdb.h>
#include <linux/pid.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/rcupdate.h>

#include <asm/cacheflush.h>
#include <asm/byteorder.h>
#include <linux/atomic.h>

#include "debug_core.h"

static int kgdb_break_asap;

struct debuggerinfo_struct kgdb_info[NR_CPUS];

/**
 * kgdb_connected - Is a host GDB connected to us?
 */
int kgdb_connected;
EXPORT_SYMBOL_GPL(kgdb_connected);

/* All the KGDB handlers are installed */
int kgdb_io_module_registered;

/* Guard for recursive entry */
static int exception_level;

struct kgdb_io *dbg_io_ops;
static DEFINE_SPINLOCK(kgdb_registration_lock);

/* Action for the reboot notifier; a global so kdb can change it */
static int kgdbreboot;
/* kgdb console driver is loaded */
static int kgdb_con_registered;
/* determine if kgdb console output should be used */
static int kgdb_use_con;
/* Flag for alternate operations for early debugging */
bool dbg_is_early = true;
/* Next cpu to become the master debug core */
int dbg_switch_cpu;

/* Use kdb or gdbserver mode */
int dbg_kdb_mode = 1;

static int __init opt_kgdb_con(char *str)
{
	kgdb_use_con = 1;
	return 0;
}

early_param("kgdbcon", opt_kgdb_con);

module_param(kgdb_use_con, int, 0644);
module_param(kgdbreboot, int, 0644);

/*
 * Holds information about breakpoints in a kernel. These breakpoints are
 * added and removed by gdb.
 */
static struct kgdb_bkpt kgdb_break[KGDB_MAX_BREAKPOINTS] = {
	[0 ... KGDB_MAX_BREAKPOINTS-1] = { .state = BP_UNDEFINED }
};

/*
 * The CPU# of the active CPU, or -1 if none:
 */
atomic_t kgdb_active = ATOMIC_INIT(-1);
EXPORT_SYMBOL_GPL(kgdb_active);

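/*
 * dbg_master_lock serializes which CPU owns the debugger (recorded in
 * kgdb_active), while dbg_slave_lock is held by the master CPU for as
 * long as the other CPUs must stay parked in the slave loop.
 */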
static DEFINE_RAW_SPINLOCK(dbg_master_lock);
static DEFINE_RAW_SPINLOCK(dbg_slave_lock);

/*
 * We use NR_CPUS not PERCPU, in case kgdb is used to debug early
 * bootup code (which might not have percpu set up yet):
 */
static atomic_t masters_in_kgdb;
static atomic_t slaves_in_kgdb;
static atomic_t kgdb_break_tasklet_var;
atomic_t kgdb_setting_breakpoint;

struct task_struct *kgdb_usethread;
struct task_struct *kgdb_contthread;

int kgdb_single_step;
static pid_t kgdb_sstep_pid;

/* to keep track of the CPU which is doing the single stepping */
atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);

/*
 * If you are debugging a problem where roundup (the collection of
 * all other CPUs) is a problem [this should be extremely rare],
 * then use the nokgdbroundup option to avoid roundup. In that case
 * the other CPUs might interfere with your debugging context, so
 * use this with care:
 */
static int kgdb_do_roundup = 1;

static int __init opt_nokgdbroundup(char *str)
{
	kgdb_do_roundup = 0;
	return 0;
}

early_param("nokgdbroundup", opt_nokgdbroundup);

/*
 * Finally, some KGDB code :-)
 */

/*
 * Weak aliases for breakpoint management,
 * can be overridden by architectures when needed:
 */
int __weak kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
{
	int err;

	err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr,
				BREAK_INSTR_SIZE);
	if (err)
		return err;
	err = probe_kernel_write((char *)bpt->bpt_addr,
				 arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE);
	return err;
}

int __weak kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
{
	return probe_kernel_write((char *)bpt->bpt_addr,
				  (char *)bpt->saved_instr, BREAK_INSTR_SIZE);
}

int __weak kgdb_validate_break_address(unsigned long addr)
{
	struct kgdb_bkpt tmp;
	int err;

	/* Validate setting the breakpoint and then removing it. If the
	 * remove fails, the kernel needs to emit a bad message because we
	 * are in deep trouble not being able to put things back the way we
	 * found them.
	 */
	tmp.bpt_addr = addr;
	err = kgdb_arch_set_breakpoint(&tmp);
	if (err)
		return err;
	err = kgdb_arch_remove_breakpoint(&tmp);
	if (err)
		printk(KERN_ERR "KGDB: Critical breakpoint error, kernel "
		       "memory destroyed at: %lx", addr);
	return err;
}

unsigned long __weak kgdb_arch_pc(int exception, struct pt_regs *regs)
{
	return instruction_pointer(regs);
}

int __weak kgdb_arch_init(void)
{
	return 0;
}

int __weak kgdb_skipexception(int exception, struct pt_regs *regs)
{
	return 0;
}

/*
 * Some architectures need cache flushes when we set/clear a
 * breakpoint:
 */
static void kgdb_flush_swbreak_addr(unsigned long addr)
{
	if (!CACHE_FLUSH_IS_SAFE)
		return;

	if (current->mm && current->mm->mmap_cache) {
		flush_cache_range(current->mm->mmap_cache,
				  addr, addr + BREAK_INSTR_SIZE);
	}
	/* Force flush instruction cache if it was outside the mm */
	flush_icache_range(addr, addr + BREAK_INSTR_SIZE);
}

/*
 * SW breakpoint management:
 */
int dbg_activate_sw_breakpoints(void)
{
	int error;
	int ret = 0;
	int i;

	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
		if (kgdb_break[i].state != BP_SET)
			continue;

		error = kgdb_arch_set_breakpoint(&kgdb_break[i]);
		if (error) {
			ret = error;
			printk(KERN_INFO "KGDB: BP install failed: %lx",
			       kgdb_break[i].bpt_addr);
			continue;
		}

		kgdb_flush_swbreak_addr(kgdb_break[i].bpt_addr);
		kgdb_break[i].state = BP_ACTIVE;
	}
	return ret;
}

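/*
 * Record a new software breakpoint at @addr: reject duplicates that are
 * already in the BP_SET state, prefer reusing a BP_REMOVED slot for the
 * same address, and otherwise take the first BP_UNDEFINED slot.  The
 * breakpoint instruction itself is only written out later by
 * dbg_activate_sw_breakpoints().
 */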
int dbg_set_sw_break(unsigned long addr)
{
	int err = kgdb_validate_break_address(addr);
	int breakno = -1;
	int i;

	if (err)
		return err;

	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
		if ((kgdb_break[i].state == BP_SET) &&
		    (kgdb_break[i].bpt_addr == addr))
			return -EEXIST;
	}

	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
		if (kgdb_break[i].state == BP_REMOVED &&
		    kgdb_break[i].bpt_addr == addr) {
			breakno = i;
			break;
		}
	}

	if (breakno == -1) {
		for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
			if (kgdb_break[i].state == BP_UNDEFINED) {
				breakno = i;
				break;
			}
		}
	}

	if (breakno == -1)
		return -E2BIG;

	kgdb_break[breakno].state = BP_SET;
	kgdb_break[breakno].type = BP_BREAKPOINT;
	kgdb_break[breakno].bpt_addr = addr;

	return 0;
}

int dbg_deactivate_sw_breakpoints(void)
{
	int error;
	int ret = 0;
	int i;

	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
		if (kgdb_break[i].state != BP_ACTIVE)
			continue;
		error = kgdb_arch_remove_breakpoint(&kgdb_break[i]);
		if (error) {
			printk(KERN_INFO "KGDB: BP remove failed: %lx\n",
			       kgdb_break[i].bpt_addr);
			ret = error;
		}

		kgdb_flush_swbreak_addr(kgdb_break[i].bpt_addr);
		kgdb_break[i].state = BP_SET;
	}
	return ret;
}

int dbg_remove_sw_break(unsigned long addr)
{
	int i;

	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
		if ((kgdb_break[i].state == BP_SET) &&
		    (kgdb_break[i].bpt_addr == addr)) {
			kgdb_break[i].state = BP_REMOVED;
			return 0;
		}
	}
	return -ENOENT;
}

int kgdb_isremovedbreak(unsigned long addr)
{
	int i;

	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
		if ((kgdb_break[i].state == BP_REMOVED) &&
		    (kgdb_break[i].bpt_addr == addr))
			return 1;
	}
	return 0;
}

int dbg_remove_all_break(void)
{
	int error;
	int i;

	/* Clear memory breakpoints. */
	for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
		if (kgdb_break[i].state != BP_ACTIVE)
			goto setundefined;
		error = kgdb_arch_remove_breakpoint(&kgdb_break[i]);
		if (error)
			printk(KERN_ERR "KGDB: breakpoint remove failed: %lx\n",
			       kgdb_break[i].bpt_addr);
setundefined:
		kgdb_break[i].state = BP_UNDEFINED;
	}

	/* Clear hardware breakpoints. */
	if (arch_kgdb_ops.remove_all_hw_break)
		arch_kgdb_ops.remove_all_hw_break();

	return 0;
}

/*
 * Return true if there is a valid kgdb I/O module.  Also, if no
 * debugger is attached, a message can be printed to the console about
 * waiting for the debugger to attach.
 *
 * The print_wait argument is only to be true when called from inside
 * the core kgdb_handle_exception, because it will wait for the
 * debugger to attach.
 */
static int kgdb_io_ready(int print_wait)
{
	if (!dbg_io_ops)
		return 0;
	if (kgdb_connected)
		return 1;
	if (atomic_read(&kgdb_setting_breakpoint))
		return 1;
	if (print_wait) {
#ifdef CONFIG_KGDB_KDB
		if (!dbg_kdb_mode)
			printk(KERN_CRIT "KGDB: waiting... or $3#33 for KDB\n");
#else
		printk(KERN_CRIT "KGDB: Waiting for remote debugger\n");
#endif
	}
	return 1;
}

static int kgdb_reenter_check(struct kgdb_state *ks)
{
	unsigned long addr;

	if (atomic_read(&kgdb_active) != raw_smp_processor_id())
		return 0;

	/* Panic on recursive debugger calls: */
	exception_level++;
	addr = kgdb_arch_pc(ks->ex_vector, ks->linux_regs);
	dbg_deactivate_sw_breakpoints();

	/*
	 * If the breakpoint was removed successfully at the place the
	 * exception occurred, try to recover and print a warning to the
	 * end user because the user planted a breakpoint in a place that
	 * KGDB needs in order to function.
	 */
	if (dbg_remove_sw_break(addr) == 0) {
		exception_level = 0;
		kgdb_skipexception(ks->ex_vector, ks->linux_regs);
		dbg_activate_sw_breakpoints();
		printk(KERN_CRIT "KGDB: re-enter error: breakpoint removed %lx\n",
			addr);
		WARN_ON_ONCE(1);

		return 1;
	}
	dbg_remove_all_break();
	kgdb_skipexception(ks->ex_vector, ks->linux_regs);

	if (exception_level > 1) {
		dump_stack();
		panic("Recursive entry to debugger");
	}

	printk(KERN_CRIT "KGDB: re-enter exception: ALL breakpoints killed\n");
#ifdef CONFIG_KGDB_KDB
	/* Allow kdb to debug itself one level */
	return 0;
#endif
	dump_stack();
	panic("Recursive entry to debugger");

	return 1;
}

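/*
 * While the CPUs sit in the debugger interrupts are off and no ticks are
 * serviced, so poke the soft-lockup, clocksource and RCU stall detectors
 * before resuming to keep them from firing spuriously.
 */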
static void dbg_touch_watchdogs(void)
{
	touch_softlockup_watchdog_sync();
	clocksource_touch_watchdog();
	rcu_cpu_stall_reset();
}

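/*
 * Common entry point for every CPU that drops into the debugger.  The
 * CPU that took the exception enters with DCPU_WANT_MASTER and competes
 * for dbg_master_lock; CPUs rounded up via kgdb_nmicallback() enter with
 * DCPU_IS_SLAVE and spin until the master releases dbg_slave_lock.
 */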
static int kgdb_cpu_enter(struct kgdb_state *ks, struct pt_regs *regs,
		int exception_state)
{
	unsigned long flags;
	int sstep_tries = 100;
	int error;
	int cpu;
	int trace_on = 0;
	int online_cpus = num_online_cpus();

	kgdb_info[ks->cpu].enter_kgdb++;
	kgdb_info[ks->cpu].exception_state |= exception_state;

	if (exception_state == DCPU_WANT_MASTER)
		atomic_inc(&masters_in_kgdb);
	else
		atomic_inc(&slaves_in_kgdb);

	if (arch_kgdb_ops.disable_hw_break)
		arch_kgdb_ops.disable_hw_break(regs);

acquirelock:
	/*
	 * Interrupts will be restored by the 'trap return' code, except when
	 * single stepping.
	 */
	local_irq_save(flags);

	cpu = ks->cpu;
	kgdb_info[cpu].debuggerinfo = regs;
	kgdb_info[cpu].task = current;
	kgdb_info[cpu].ret_state = 0;
	kgdb_info[cpu].irq_depth = hardirq_count() >> HARDIRQ_SHIFT;

	/* Make sure the above info reaches the primary CPU */
	smp_mb();

	if (exception_level == 1) {
		if (raw_spin_trylock(&dbg_master_lock))
			atomic_xchg(&kgdb_active, cpu);
		goto cpu_master_loop;
	}

	/*
	 * A CPU loops here either as a slave, or while trying to become
	 * the kgdb master CPU and acquire the kgdb_active lock:
	 */
	while (1) {
cpu_loop:
		if (kgdb_info[cpu].exception_state & DCPU_NEXT_MASTER) {
			kgdb_info[cpu].exception_state &= ~DCPU_NEXT_MASTER;
			goto cpu_master_loop;
		} else if (kgdb_info[cpu].exception_state & DCPU_WANT_MASTER) {
			if (raw_spin_trylock(&dbg_master_lock)) {
				atomic_xchg(&kgdb_active, cpu);
				break;
			}
		} else if (kgdb_info[cpu].exception_state & DCPU_IS_SLAVE) {
			if (!raw_spin_is_locked(&dbg_slave_lock))
				goto return_normal;
		} else {
return_normal:
			/* Return to normal operation by executing any
			 * hw breakpoint fixup.
			 */
			if (arch_kgdb_ops.correct_hw_break)
				arch_kgdb_ops.correct_hw_break();
			if (trace_on)
				tracing_on();
			kgdb_info[cpu].exception_state &=
				~(DCPU_WANT_MASTER | DCPU_IS_SLAVE);
			kgdb_info[cpu].enter_kgdb--;
			smp_mb__before_atomic_dec();
			atomic_dec(&slaves_in_kgdb);
			dbg_touch_watchdogs();
			local_irq_restore(flags);
			return 0;
		}
		cpu_relax();
	}

	/*
	 * For single stepping, try to only enter on the processor
	 * that was single stepping.  To guard against a deadlock, the
	 * kernel will only try for the value of sstep_tries before
	 * giving up and continuing on.
	 */
	if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
	    (kgdb_info[cpu].task &&
	     kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
		atomic_set(&kgdb_active, -1);
		raw_spin_unlock(&dbg_master_lock);
		dbg_touch_watchdogs();
		local_irq_restore(flags);

		goto acquirelock;
	}

	if (!kgdb_io_ready(1)) {
		kgdb_info[cpu].ret_state = 1;
		goto kgdb_restore; /* No I/O connection, resume the system */
	}

	/*
	 * Don't enter if we have hit a removed breakpoint.
	 */
	if (kgdb_skipexception(ks->ex_vector, ks->linux_regs))
		goto kgdb_restore;

	/* Call the I/O driver's pre_exception routine */
	if (dbg_io_ops->pre_exception)
		dbg_io_ops->pre_exception();

	/*
	 * Get the passive CPU lock which will hold all the non-primary
	 * CPUs in a spin state while the debugger is active
	 */
	if (!kgdb_single_step)
		raw_spin_lock(&dbg_slave_lock);

#ifdef CONFIG_SMP
	/* Signal the other CPUs to enter kgdb_wait() */
	if ((!kgdb_single_step) && kgdb_do_roundup)
		kgdb_roundup_cpus(flags);
#endif

	/*
	 * Wait for the other CPUs to be notified and be waiting for us:
	 */
	while (kgdb_do_roundup && (atomic_read(&masters_in_kgdb) +
				atomic_read(&slaves_in_kgdb)) != online_cpus)
		cpu_relax();

	/*
	 * At this point the primary processor is completely
	 * in the debugger and all secondary CPUs are quiescent
	 */
	dbg_deactivate_sw_breakpoints();
	kgdb_single_step = 0;
	kgdb_contthread = current;
	exception_level = 0;
	trace_on = tracing_is_on();
	if (trace_on)
		tracing_off();

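	/*
	 * Main debugger dispatch loop: hand control to the kdb shell or
	 * the gdb remote-serial stub.  DBG_PASS_EVENT flips between the
	 * two front ends, DBG_SWITCH_CPU_EVENT hands mastership to the
	 * CPU named in dbg_switch_cpu, and any other return code ends
	 * the debug session.
	 */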
	while (1) {
cpu_master_loop:
		if (dbg_kdb_mode) {
			kgdb_connected = 1;
			error = kdb_stub(ks);
			if (error == -1)
				continue;
			kgdb_connected = 0;
		} else {
			error = gdb_serial_stub(ks);
		}

		if (error == DBG_PASS_EVENT) {
			dbg_kdb_mode = !dbg_kdb_mode;
		} else if (error == DBG_SWITCH_CPU_EVENT) {
			kgdb_info[dbg_switch_cpu].exception_state |=
				DCPU_NEXT_MASTER;
			goto cpu_loop;
		} else {
			kgdb_info[cpu].ret_state = error;
			break;
		}
	}

	/* Call the I/O driver's post_exception routine */
	if (dbg_io_ops->post_exception)
		dbg_io_ops->post_exception();

	if (!kgdb_single_step) {
		raw_spin_unlock(&dbg_slave_lock);
		/* Wait till all the CPUs have quit from the debugger. */
		while (kgdb_do_roundup && atomic_read(&slaves_in_kgdb))
			cpu_relax();
	}

kgdb_restore:
	if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
		int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
		if (kgdb_info[sstep_cpu].task)
			kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
		else
			kgdb_sstep_pid = 0;
	}
	if (arch_kgdb_ops.correct_hw_break)
		arch_kgdb_ops.correct_hw_break();
	if (trace_on)
		tracing_on();

	kgdb_info[cpu].exception_state &=
		~(DCPU_WANT_MASTER | DCPU_IS_SLAVE);
	kgdb_info[cpu].enter_kgdb--;
	smp_mb__before_atomic_dec();
	atomic_dec(&masters_in_kgdb);
	/* Free kgdb_active */
	atomic_set(&kgdb_active, -1);
	raw_spin_unlock(&dbg_master_lock);
	dbg_touch_watchdogs();
	local_irq_restore(flags);

	return kgdb_info[cpu].ret_state;
}

/*
 * kgdb_handle_exception() - main entry point from a kernel exception
 *
 * Locking hierarchy:
 *	interface locks, if any (begin_session)
 *	kgdb lock (kgdb_active)
 */
int
kgdb_handle_exception(int evector, int signo, int ecode, struct pt_regs *regs)
{
	struct kgdb_state kgdb_var;
	struct kgdb_state *ks = &kgdb_var;
	int ret = 0;

	if (arch_kgdb_ops.enable_nmi)
		arch_kgdb_ops.enable_nmi(0);

	ks->cpu = raw_smp_processor_id();
	ks->ex_vector = evector;
	ks->signo = signo;
	ks->err_code = ecode;
	ks->kgdb_usethreadid = 0;
	ks->linux_regs = regs;

	if (kgdb_reenter_check(ks))
		goto out; /* Ouch, double exception ! */
	if (kgdb_info[ks->cpu].enter_kgdb != 0)
		goto out;

	ret = kgdb_cpu_enter(ks, regs, DCPU_WANT_MASTER);
out:
	if (arch_kgdb_ops.enable_nmi)
		arch_kgdb_ops.enable_nmi(1);
	return ret;
}

/*
 * GDB places a breakpoint at this function so that it is notified of
 * dynamically loaded objects.  It's not defined static so that only one
 * instance with this name exists in the kernel.
 */
static int module_event(struct notifier_block *self, unsigned long val,
	void *data)
{
	return 0;
}

static struct notifier_block dbg_module_load_nb = {
	.notifier_call = module_event,
};

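/*
 * kgdb_nmicallback() is the roundup entry point: architectures call it
 * from their NMI/IPI handler so that a CPU which did not take the
 * original exception can park itself as a slave while the master CPU
 * owns dbg_master_lock.  Returns 0 if this CPU entered the debugger.
 */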
int kgdb_nmicallback(int cpu, void *regs)
{
#ifdef CONFIG_SMP
	struct kgdb_state kgdb_var;
	struct kgdb_state *ks = &kgdb_var;

	memset(ks, 0, sizeof(struct kgdb_state));
	ks->cpu = cpu;
	ks->linux_regs = regs;

	if (kgdb_info[ks->cpu].enter_kgdb == 0 &&
			raw_spin_is_locked(&dbg_master_lock)) {
		kgdb_cpu_enter(ks, regs, DCPU_IS_SLAVE);
		return 0;
	}
#endif
	return 1;
}

static void kgdb_console_write(struct console *co, const char *s,
	unsigned count)
{
	unsigned long flags;

	/* If we're debugging, or KGDB has not connected, don't try
	 * to print. */
	if (!kgdb_connected || atomic_read(&kgdb_active) != -1 || dbg_kdb_mode)
		return;

	local_irq_save(flags);
	gdbstub_msg_write(s, count);
	local_irq_restore(flags);
}

static struct console kgdbcons = {
	.name = "kgdb",
	.write = kgdb_console_write,
	.flags = CON_PRINTBUFFER | CON_ENABLED,
	.index = -1,
};

#ifdef CONFIG_MAGIC_SYSRQ
static void sysrq_handle_dbg(int key)
{
	if (!dbg_io_ops) {
		printk(KERN_CRIT "ERROR: No KGDB I/O module available\n");
		return;
	}
	if (!kgdb_connected) {
#ifdef CONFIG_KGDB_KDB
		if (!dbg_kdb_mode)
			printk(KERN_CRIT "KGDB or $3#33 for KDB\n");
#else
		printk(KERN_CRIT "Entering KGDB\n");
#endif
	}

	kgdb_breakpoint();
}

static struct sysrq_key_op sysrq_dbg_op = {
	.handler = sysrq_handle_dbg,
	.help_msg = "debug(G)",
	.action_msg = "DEBUG",
};
#endif

static int kgdb_panic_event(struct notifier_block *self,
			    unsigned long val,
			    void *data)
{
	if (dbg_kdb_mode)
		kdb_printf("PANIC: %s\n", (char *)data);
	kgdb_breakpoint();
	return NOTIFY_DONE;
}

static struct notifier_block kgdb_panic_event_nb = {
	.notifier_call = kgdb_panic_event,
	.priority = INT_MAX,
};

void __weak kgdb_arch_late(void)
{
}

void __init dbg_late_init(void)
{
	dbg_is_early = false;
	if (kgdb_io_module_registered)
		kgdb_arch_late();
	kdb_init(KDB_INIT_FULL);
}

static int
dbg_notify_reboot(struct notifier_block *this, unsigned long code, void *x)
{
	/*
	 * Take the following action on reboot notify depending on value:
	 *    1 == Enter debugger
	 *    0 == [the default] detach debug client
	 *   -1 == Do nothing... and use this until the board resets
	 */
	switch (kgdbreboot) {
	case 1:
		kgdb_breakpoint();
		/* Fall through to the "do nothing" case */
	case -1:
		goto done;
	}
	if (!dbg_kdb_mode)
		gdbstub_exit(code);
done:
	return NOTIFY_DONE;
}

static struct notifier_block dbg_reboot_notifier = {
	.notifier_call = dbg_notify_reboot,
	.next = NULL,
	.priority = INT_MAX,
};

static void kgdb_register_callbacks(void)
{
	if (!kgdb_io_module_registered) {
		kgdb_io_module_registered = 1;
		kgdb_arch_init();
		if (!dbg_is_early)
			kgdb_arch_late();
		register_module_notifier(&dbg_module_load_nb);
		register_reboot_notifier(&dbg_reboot_notifier);
		atomic_notifier_chain_register(&panic_notifier_list,
					       &kgdb_panic_event_nb);
#ifdef CONFIG_MAGIC_SYSRQ
		register_sysrq_key('g', &sysrq_dbg_op);
#endif
		if (kgdb_use_con && !kgdb_con_registered) {
			register_console(&kgdbcons);
			kgdb_con_registered = 1;
		}
	}
}

static void kgdb_unregister_callbacks(void)
{
	/*
	 * When this routine is called KGDB should unregister from the
	 * panic handler and clean up, making sure it is not handling any
	 * break exceptions at the time.
	 */
	if (kgdb_io_module_registered) {
		kgdb_io_module_registered = 0;
		unregister_reboot_notifier(&dbg_reboot_notifier);
		unregister_module_notifier(&dbg_module_load_nb);
		atomic_notifier_chain_unregister(&panic_notifier_list,
						 &kgdb_panic_event_nb);
		kgdb_arch_exit();
#ifdef CONFIG_MAGIC_SYSRQ
		unregister_sysrq_key('g', &sysrq_dbg_op);
#endif
		if (kgdb_con_registered) {
			unregister_console(&kgdbcons);
			kgdb_con_registered = 0;
		}
	}
}

/*
 * There are times a tasklet needs to be used instead of a compiled-in
 * breakpoint so as to cause an exception outside a kgdb I/O module,
 * such as is the case with kgdboe, where calling a breakpoint in the
 * I/O driver itself would be fatal.
 */
static void kgdb_tasklet_bpt(unsigned long ing)
{
	kgdb_breakpoint();
	atomic_set(&kgdb_break_tasklet_var, 0);
}

static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);

void kgdb_schedule_breakpoint(void)
{
	if (atomic_read(&kgdb_break_tasklet_var) ||
		atomic_read(&kgdb_active) != -1 ||
		atomic_read(&kgdb_setting_breakpoint))
		return;
	atomic_inc(&kgdb_break_tasklet_var);
	tasklet_schedule(&kgdb_tasklet_breakpoint);
}
EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);

static void kgdb_initial_breakpoint(void)
{
	kgdb_break_asap = 0;

	printk(KERN_CRIT "kgdb: Waiting for connection from remote gdb...\n");
	kgdb_breakpoint();
}

/**
 * kgdb_register_io_module - register KGDB IO module
 * @new_dbg_io_ops: the io ops vector
 *
 * Register it with the KGDB core.
 */
int kgdb_register_io_module(struct kgdb_io *new_dbg_io_ops)
{
	int err;

	spin_lock(&kgdb_registration_lock);

	if (dbg_io_ops) {
		spin_unlock(&kgdb_registration_lock);

		printk(KERN_ERR "kgdb: Another I/O driver is already "
				"registered with KGDB.\n");
		return -EBUSY;
	}

	if (new_dbg_io_ops->init) {
		err = new_dbg_io_ops->init();
		if (err) {
			spin_unlock(&kgdb_registration_lock);
			return err;
		}
	}

	dbg_io_ops = new_dbg_io_ops;

	spin_unlock(&kgdb_registration_lock);

	printk(KERN_INFO "kgdb: Registered I/O driver %s.\n",
	       new_dbg_io_ops->name);

	/* Arm KGDB now. */
	kgdb_register_callbacks();

	if (kgdb_break_asap)
		kgdb_initial_breakpoint();

	return 0;
}
EXPORT_SYMBOL_GPL(kgdb_register_io_module);

/**
 * kgdb_unregister_io_module - unregister KGDB IO module
 * @old_dbg_io_ops: the io ops vector
 *
 * Unregister it with the KGDB core.
 */
void kgdb_unregister_io_module(struct kgdb_io *old_dbg_io_ops)
{
	BUG_ON(kgdb_connected);

	/*
	 * KGDB is no longer able to communicate out, so
	 * unregister our callbacks and reset state.
	 */
	kgdb_unregister_callbacks();

	spin_lock(&kgdb_registration_lock);

	WARN_ON_ONCE(dbg_io_ops != old_dbg_io_ops);
	dbg_io_ops = NULL;

	spin_unlock(&kgdb_registration_lock);

	printk(KERN_INFO
		"kgdb: Unregistered I/O driver %s, debugger disabled.\n",
		old_dbg_io_ops->name);
}
EXPORT_SYMBOL_GPL(kgdb_unregister_io_module);

int dbg_io_get_char(void)
{
	int ret = dbg_io_ops->read_char();
	if (ret == NO_POLL_CHAR)
		return -1;
	if (!dbg_kdb_mode)
		return ret;
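	/* kdb expects a backspace (8), so translate an incoming DEL (127) */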
	if (ret == 127)
		return 8;
	return ret;
}

/**
 * kgdb_breakpoint - generate breakpoint exception
 *
 * This function will generate a breakpoint exception.  It is used at the
 * beginning of a program to sync up with a debugger and can be used
 * otherwise as a quick means to stop program execution and "break" into
 * the debugger.
 */
void kgdb_breakpoint(void)
{
	atomic_inc(&kgdb_setting_breakpoint);
	wmb(); /* Sync point before breakpoint */
	arch_kgdb_breakpoint();
	wmb(); /* Sync point after breakpoint */
	atomic_dec(&kgdb_setting_breakpoint);
}
EXPORT_SYMBOL_GPL(kgdb_breakpoint);

static int __init opt_kgdb_wait(char *str)
{
	kgdb_break_asap = 1;

	kdb_init(KDB_INIT_EARLY);
	if (kgdb_io_module_registered)
		kgdb_initial_breakpoint();

	return 0;
}

early_param("kgdbwait", opt_kgdb_wait);