debug_core.c

/*
 * Kernel Debug Core
 *
 * Maintainer: Jason Wessel <jason.wessel@windriver.com>
 *
 * Copyright (C) 2000-2001 VERITAS Software Corporation.
 * Copyright (C) 2002-2004 Timesys Corporation
 * Copyright (C) 2003-2004 Amit S. Kale <amitkale@linsyssoft.com>
 * Copyright (C) 2004 Pavel Machek <pavel@suse.cz>
 * Copyright (C) 2004-2006 Tom Rini <trini@kernel.crashing.org>
 * Copyright (C) 2004-2006 LinSysSoft Technologies Pvt. Ltd.
 * Copyright (C) 2005-2009 Wind River Systems, Inc.
 * Copyright (C) 2007 MontaVista Software, Inc.
 * Copyright (C) 2008 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Contributors at various stages not listed above:
 *  Jason Wessel ( jason.wessel@windriver.com )
 *  George Anzinger <george@mvista.com>
 *  Anurekh Saxena (anurekh.saxena@timesys.com)
 *  Lake Stevens Instrument Division (Glenn Engel)
 *  Jim Kingdon, Cygnus Support.
 *
 * Original KGDB stub: David Grothe <dave@gcom.com>,
 * Tigran Aivazian <tigran@sco.com>
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2. This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 */

#include <linux/pid_namespace.h>
#include <linux/clocksource.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/console.h>
#include <linux/threads.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/sysrq.h>
#include <linux/init.h>
#include <linux/kgdb.h>
#include <linux/kdb.h>
#include <linux/pid.h>
#include <linux/smp.h>
#include <linux/mm.h>

#include <asm/cacheflush.h>
#include <asm/byteorder.h>
#include <asm/atomic.h>
#include <asm/system.h>

#include "debug_core.h"

static int kgdb_break_asap;

struct debuggerinfo_struct kgdb_info[NR_CPUS];

/**
 * kgdb_connected - Is a host GDB connected to us?
 */
int kgdb_connected;
EXPORT_SYMBOL_GPL(kgdb_connected);

/* All the KGDB handlers are installed */
int kgdb_io_module_registered;

/* Guard for recursive entry */
static int exception_level;

struct kgdb_io *dbg_io_ops;
static DEFINE_SPINLOCK(kgdb_registration_lock);

/* kgdb console driver is loaded */
static int kgdb_con_registered;
/* determine if kgdb console output should be used */
static int kgdb_use_con;

/* Next cpu to become the master debug core */
int dbg_switch_cpu;

/* Use kdb or gdbserver mode */
int dbg_kdb_mode = 1;

static int __init opt_kgdb_con(char *str)
{
        kgdb_use_con = 1;
        return 0;
}

early_param("kgdbcon", opt_kgdb_con);

module_param(kgdb_use_con, int, 0644);

/*
 * Holds information about breakpoints in a kernel. These breakpoints are
 * added and removed by gdb.
 */
static struct kgdb_bkpt kgdb_break[KGDB_MAX_BREAKPOINTS] = {
        [0 ... KGDB_MAX_BREAKPOINTS-1] = { .state = BP_UNDEFINED }
};
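
/*
 * Each slot in kgdb_break[] moves through a small state machine:
 * BP_UNDEFINED (slot free) -> BP_SET (requested by the debugger, original
 * instruction still in place) -> BP_ACTIVE (trap instruction written to
 * memory), back to BP_SET when deactivated, or to BP_REMOVED once the
 * debugger deletes it.  dbg_remove_all_break() returns every slot to
 * BP_UNDEFINED.  See the dbg_*_sw_break*() helpers below.
 */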

/*
 * The CPU# of the active CPU, or -1 if none:
 */
atomic_t kgdb_active = ATOMIC_INIT(-1);
EXPORT_SYMBOL_GPL(kgdb_active);

/*
 * We use NR_CPUS not PERCPU, in case kgdb is used to debug early
 * bootup code (which might not have percpu set up yet):
 */
static atomic_t passive_cpu_wait[NR_CPUS];
static atomic_t cpu_in_kgdb[NR_CPUS];
static atomic_t kgdb_break_tasklet_var;
atomic_t kgdb_setting_breakpoint;

struct task_struct *kgdb_usethread;
struct task_struct *kgdb_contthread;

int kgdb_single_step;
static pid_t kgdb_sstep_pid;

/* to keep track of the CPU which is doing the single stepping */
atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);

/*
 * If you are debugging a problem where roundup (the collection of
 * all other CPUs) is a problem [this should be extremely rare],
 * then use the nokgdbroundup option to avoid roundup. In that case
 * the other CPUs might interfere with your debugging context, so
 * use this with care:
 */
static int kgdb_do_roundup = 1;

static int __init opt_nokgdbroundup(char *str)
{
        kgdb_do_roundup = 0;
        return 0;
}

early_param("nokgdbroundup", opt_nokgdbroundup);

/*
 * Finally, some KGDB code :-)
 */

/*
 * Weak aliases for breakpoint management,
 * can be overridden by architectures when needed:
 */
int __weak kgdb_arch_set_breakpoint(unsigned long addr, char *saved_instr)
{
        int err;

        err = probe_kernel_read(saved_instr, (char *)addr, BREAK_INSTR_SIZE);
        if (err)
                return err;

        return probe_kernel_write((char *)addr, arch_kgdb_ops.gdb_bpt_instr,
                                  BREAK_INSTR_SIZE);
}

int __weak kgdb_arch_remove_breakpoint(unsigned long addr, char *bundle)
{
        return probe_kernel_write((char *)addr,
                                  (char *)bundle, BREAK_INSTR_SIZE);
}
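
/*
 * Note: the weak helpers above go through probe_kernel_read() and
 * probe_kernel_write(), so poking a bad address reports an error back to
 * the debugger instead of faulting the kernel outright.
 */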

int __weak kgdb_validate_break_address(unsigned long addr)
{
        char tmp_variable[BREAK_INSTR_SIZE];
        int err;

        /* Validate setting the breakpoint and then removing it.  If the
         * remove fails, the kernel needs to emit a bad message because we
         * are in deep trouble not being able to put things back the way we
         * found them.
         */
        err = kgdb_arch_set_breakpoint(addr, tmp_variable);
        if (err)
                return err;
        err = kgdb_arch_remove_breakpoint(addr, tmp_variable);
        if (err)
                printk(KERN_ERR "KGDB: Critical breakpoint error, kernel "
                       "memory destroyed at: %lx", addr);
        return err;
}

unsigned long __weak kgdb_arch_pc(int exception, struct pt_regs *regs)
{
        return instruction_pointer(regs);
}

int __weak kgdb_arch_init(void)
{
        return 0;
}

int __weak kgdb_skipexception(int exception, struct pt_regs *regs)
{
        return 0;
}

/**
 * kgdb_disable_hw_debug - Disable hardware debugging while we are in kgdb.
 * @regs: Current &struct pt_regs.
 *
 * This function will be called if the particular architecture must
 * disable hardware debugging while it is processing gdb packets or
 * handling an exception.
 */
void __weak kgdb_disable_hw_debug(struct pt_regs *regs)
{
}

/*
 * Some architectures need cache flushes when we set/clear a
 * breakpoint:
 */
static void kgdb_flush_swbreak_addr(unsigned long addr)
{
        if (!CACHE_FLUSH_IS_SAFE)
                return;

        if (current->mm && current->mm->mmap_cache) {
                flush_cache_range(current->mm->mmap_cache,
                                  addr, addr + BREAK_INSTR_SIZE);
        }
        /* Force flush instruction cache if it was outside the mm */
        flush_icache_range(addr, addr + BREAK_INSTR_SIZE);
}

/*
 * SW breakpoint management:
 */
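
/*
 * A breakpoint is "activated" when its trap instruction is actually
 * resident in kernel text and "deactivated" when the saved original
 * instruction has been written back.  kgdb_cpu_enter() deactivates all
 * software breakpoints on entry so that the debug core itself cannot
 * trip over them while it runs.
 */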

int dbg_activate_sw_breakpoints(void)
{
        unsigned long addr;
        int error;
        int ret = 0;
        int i;

        for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
                if (kgdb_break[i].state != BP_SET)
                        continue;

                addr = kgdb_break[i].bpt_addr;
                error = kgdb_arch_set_breakpoint(addr,
                                kgdb_break[i].saved_instr);
                if (error) {
                        ret = error;
                        printk(KERN_INFO "KGDB: BP install failed: %lx", addr);
                        continue;
                }

                kgdb_flush_swbreak_addr(addr);
                kgdb_break[i].state = BP_ACTIVE;
        }
        return ret;
}

int dbg_set_sw_break(unsigned long addr)
{
        int err = kgdb_validate_break_address(addr);
        int breakno = -1;
        int i;

        if (err)
                return err;

        for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
                if ((kgdb_break[i].state == BP_SET) &&
                    (kgdb_break[i].bpt_addr == addr))
                        return -EEXIST;
        }
        for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
                if (kgdb_break[i].state == BP_REMOVED &&
                    kgdb_break[i].bpt_addr == addr) {
                        breakno = i;
                        break;
                }
        }

        if (breakno == -1) {
                for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
                        if (kgdb_break[i].state == BP_UNDEFINED) {
                                breakno = i;
                                break;
                        }
                }
        }

        if (breakno == -1)
                return -E2BIG;

        kgdb_break[breakno].state = BP_SET;
        kgdb_break[breakno].type = BP_BREAKPOINT;
        kgdb_break[breakno].bpt_addr = addr;

        return 0;
}

int dbg_deactivate_sw_breakpoints(void)
{
        unsigned long addr;
        int error;
        int ret = 0;
        int i;

        for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
                if (kgdb_break[i].state != BP_ACTIVE)
                        continue;
                addr = kgdb_break[i].bpt_addr;
                error = kgdb_arch_remove_breakpoint(addr,
                                kgdb_break[i].saved_instr);
                if (error) {
                        printk(KERN_INFO "KGDB: BP remove failed: %lx\n", addr);
                        ret = error;
                }

                kgdb_flush_swbreak_addr(addr);
                kgdb_break[i].state = BP_SET;
        }
        return ret;
}

int dbg_remove_sw_break(unsigned long addr)
{
        int i;

        for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
                if ((kgdb_break[i].state == BP_SET) &&
                    (kgdb_break[i].bpt_addr == addr)) {
                        kgdb_break[i].state = BP_REMOVED;
                        return 0;
                }
        }
        return -ENOENT;
}

int kgdb_isremovedbreak(unsigned long addr)
{
        int i;

        for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
                if ((kgdb_break[i].state == BP_REMOVED) &&
                    (kgdb_break[i].bpt_addr == addr))
                        return 1;
        }
        return 0;
}

int dbg_remove_all_break(void)
{
        unsigned long addr;
        int error;
        int i;

        /* Clear memory breakpoints. */
        for (i = 0; i < KGDB_MAX_BREAKPOINTS; i++) {
                if (kgdb_break[i].state != BP_ACTIVE)
                        goto setundefined;
                addr = kgdb_break[i].bpt_addr;
                error = kgdb_arch_remove_breakpoint(addr,
                                kgdb_break[i].saved_instr);
                if (error)
                        printk(KERN_ERR "KGDB: breakpoint remove failed: %lx\n",
                               addr);
setundefined:
                kgdb_break[i].state = BP_UNDEFINED;
        }

        /* Clear hardware breakpoints. */
        if (arch_kgdb_ops.remove_all_hw_break)
                arch_kgdb_ops.remove_all_hw_break();

        return 0;
}

/*
 * Return true if there is a valid kgdb I/O module.  Also, if no
 * debugger is attached, a message can be printed to the console about
 * waiting for the debugger to attach.
 *
 * The print_wait argument should only be true when called from inside
 * the core kgdb_handle_exception, because it will wait for the
 * debugger to attach.
 */
static int kgdb_io_ready(int print_wait)
{
        if (!dbg_io_ops)
                return 0;
        if (kgdb_connected)
                return 1;
        if (atomic_read(&kgdb_setting_breakpoint))
                return 1;
        if (print_wait) {
#ifdef CONFIG_KGDB_KDB
                if (!dbg_kdb_mode)
                        printk(KERN_CRIT "KGDB: waiting... or $3#33 for KDB\n");
#else
                printk(KERN_CRIT "KGDB: Waiting for remote debugger\n");
#endif
        }
        return 1;
}

static int kgdb_reenter_check(struct kgdb_state *ks)
{
        unsigned long addr;

        if (atomic_read(&kgdb_active) != raw_smp_processor_id())
                return 0;

        /* Panic on recursive debugger calls: */
        exception_level++;
        addr = kgdb_arch_pc(ks->ex_vector, ks->linux_regs);
        dbg_deactivate_sw_breakpoints();

        /*
         * If the breakpoint was removed cleanly at the place the
         * exception occurred, try to recover and print a warning to the
         * end user, because the user planted a breakpoint in a place
         * that KGDB needs in order to function.
         */
        if (dbg_remove_sw_break(addr) == 0) {
                exception_level = 0;
                kgdb_skipexception(ks->ex_vector, ks->linux_regs);
                dbg_activate_sw_breakpoints();
                printk(KERN_CRIT "KGDB: re-enter error: breakpoint removed %lx\n",
                        addr);
                WARN_ON_ONCE(1);

                return 1;
        }
        dbg_remove_all_break();
        kgdb_skipexception(ks->ex_vector, ks->linux_regs);

        if (exception_level > 1) {
                dump_stack();
                panic("Recursive entry to debugger");
        }

        printk(KERN_CRIT "KGDB: re-enter exception: ALL breakpoints killed\n");
#ifdef CONFIG_KGDB_KDB
        /* Allow kdb to debug itself one level */
        return 0;
#endif
        dump_stack();
        panic("Recursive entry to debugger");

        return 1;
}

static void dbg_cpu_switch(int cpu, int next_cpu)
{
        /* Mark the cpu we are switching away from as a slave when it
         * holds the kgdb_active token.  This must be done so that the
         * cpus waiting in the debug core loop will not enter again
         * as the master. */
        if (cpu == atomic_read(&kgdb_active)) {
                kgdb_info[cpu].exception_state |= DCPU_IS_SLAVE;
                kgdb_info[cpu].exception_state &= ~DCPU_WANT_MASTER;
        }
        kgdb_info[next_cpu].exception_state |= DCPU_NEXT_MASTER;
}
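
/*
 * kgdb_cpu_enter() is the funnel that every CPU passes through once an
 * exception (or a roundup request) brings it into the debug core.  Exactly
 * one CPU wins the kgdb_active token and runs the kdb or gdb stub as the
 * master; the remaining CPUs spin as slaves on passive_cpu_wait[] until
 * the master releases them.
 */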

static int kgdb_cpu_enter(struct kgdb_state *ks, struct pt_regs *regs)
{
        unsigned long flags;
        int sstep_tries = 100;
        int error;
        int i, cpu;
        int trace_on = 0;
acquirelock:
        /*
         * Interrupts will be restored by the 'trap return' code, except when
         * single stepping.
         */
        local_irq_save(flags);

        cpu = ks->cpu;
        kgdb_info[cpu].debuggerinfo = regs;
        kgdb_info[cpu].task = current;
        kgdb_info[cpu].ret_state = 0;
        kgdb_info[cpu].irq_depth = hardirq_count() >> HARDIRQ_SHIFT;
        /*
         * Make sure the above info reaches the primary CPU before
         * our cpu_in_kgdb[] flag setting does:
         */
        atomic_inc(&cpu_in_kgdb[cpu]);

        if (exception_level == 1)
                goto cpu_master_loop;

        /*
         * CPU will loop if it is a slave or request to become a kgdb
         * master cpu and acquire the kgdb_active lock:
         */
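        /*
         * Three ways out of the loop below: a CPU flagged DCPU_NEXT_MASTER
         * jumps straight to the master loop, a CPU flagged DCPU_WANT_MASTER
         * breaks out once it owns kgdb_active, and a slave simply returns
         * to normal execution when passive_cpu_wait[] is dropped for it.
         */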
        while (1) {
cpu_loop:
                if (kgdb_info[cpu].exception_state & DCPU_NEXT_MASTER) {
                        kgdb_info[cpu].exception_state &= ~DCPU_NEXT_MASTER;
                        goto cpu_master_loop;
                } else if (kgdb_info[cpu].exception_state & DCPU_WANT_MASTER) {
                        if (atomic_cmpxchg(&kgdb_active, -1, cpu) == cpu)
                                break;
                } else if (kgdb_info[cpu].exception_state & DCPU_IS_SLAVE) {
                        if (!atomic_read(&passive_cpu_wait[cpu]))
                                goto return_normal;
                } else {
return_normal:
                        /* Return to normal operation by executing any
                         * hw breakpoint fixup.
                         */
                        if (arch_kgdb_ops.correct_hw_break)
                                arch_kgdb_ops.correct_hw_break();
                        if (trace_on)
                                tracing_on();
                        atomic_dec(&cpu_in_kgdb[cpu]);
                        touch_softlockup_watchdog_sync();
                        clocksource_touch_watchdog();
                        local_irq_restore(flags);

                        return 0;
                }
                cpu_relax();
        }

        /*
         * For single stepping, try to only enter on the processor
         * that was single stepping.  To guard against a deadlock, the
         * kernel will only try for the value of sstep_tries before
         * giving up and continuing on.
         */
        if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
            (kgdb_info[cpu].task &&
             kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
                atomic_set(&kgdb_active, -1);
                touch_softlockup_watchdog_sync();
                clocksource_touch_watchdog();
                local_irq_restore(flags);

                goto acquirelock;
        }

        if (!kgdb_io_ready(1)) {
                kgdb_info[cpu].ret_state = 1;
                goto kgdb_restore; /* No I/O connection, resume the system */
        }

        /*
         * Don't enter if we have hit a removed breakpoint.
         */
        if (kgdb_skipexception(ks->ex_vector, ks->linux_regs))
                goto kgdb_restore;

        /* Call the I/O driver's pre_exception routine */
        if (dbg_io_ops->pre_exception)
                dbg_io_ops->pre_exception();

        kgdb_disable_hw_debug(ks->linux_regs);

        /*
         * Get the passive CPU lock which will hold all the non-primary
         * CPUs in a spin state while the debugger is active
         */
        if (!kgdb_single_step) {
                for (i = 0; i < NR_CPUS; i++)
                        atomic_inc(&passive_cpu_wait[i]);
        }

#ifdef CONFIG_SMP
        /* Signal the other CPUs to enter kgdb_wait() */
        if ((!kgdb_single_step) && kgdb_do_roundup)
                kgdb_roundup_cpus(flags);
#endif

        /*
         * Wait for the other CPUs to be notified and be waiting for us:
         */
        for_each_online_cpu(i) {
                while (kgdb_do_roundup && !atomic_read(&cpu_in_kgdb[i]))
                        cpu_relax();
        }

        /*
         * At this point the primary processor is completely
         * in the debugger and all secondary CPUs are quiescent
         */
        dbg_deactivate_sw_breakpoints();
        kgdb_single_step = 0;
        kgdb_contthread = current;
        exception_level = 0;
        trace_on = tracing_is_on();
        if (trace_on)
                tracing_off();

        while (1) {
cpu_master_loop:
                if (dbg_kdb_mode) {
                        kgdb_connected = 1;
                        error = kdb_stub(ks);
                } else {
                        error = gdb_serial_stub(ks);
                }

                if (error == DBG_PASS_EVENT) {
                        dbg_kdb_mode = !dbg_kdb_mode;
                        kgdb_connected = 0;
                } else if (error == DBG_SWITCH_CPU_EVENT) {
                        dbg_cpu_switch(cpu, dbg_switch_cpu);
                        goto cpu_loop;
                } else {
                        kgdb_info[cpu].ret_state = error;
                        break;
                }
        }

        /* Call the I/O driver's post_exception routine */
        if (dbg_io_ops->post_exception)
                dbg_io_ops->post_exception();

        atomic_dec(&cpu_in_kgdb[ks->cpu]);

        if (!kgdb_single_step) {
                for (i = NR_CPUS-1; i >= 0; i--)
                        atomic_dec(&passive_cpu_wait[i]);
                /*
                 * Wait till all the CPUs have quit from the debugger,
                 * but allow a CPU that hit an exception and is
                 * waiting to become the master to remain in the debug
                 * core.
                 */
                for_each_online_cpu(i) {
                        while (kgdb_do_roundup &&
                               atomic_read(&cpu_in_kgdb[i]) &&
                               !(kgdb_info[i].exception_state &
                                 DCPU_WANT_MASTER))
                                cpu_relax();
                }
        }

kgdb_restore:
        if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
                int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
                if (kgdb_info[sstep_cpu].task)
                        kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
                else
                        kgdb_sstep_pid = 0;
        }
        if (trace_on)
                tracing_on();
        /* Free kgdb_active */
        atomic_set(&kgdb_active, -1);
        touch_softlockup_watchdog_sync();
        clocksource_touch_watchdog();
        local_irq_restore(flags);

        return kgdb_info[cpu].ret_state;
}

/*
 * kgdb_handle_exception() - main entry point from a kernel exception
 *
 * Locking hierarchy:
 *      interface locks, if any (begin_session)
 *      kgdb lock (kgdb_active)
 */
int
kgdb_handle_exception(int evector, int signo, int ecode, struct pt_regs *regs)
{
        struct kgdb_state kgdb_var;
        struct kgdb_state *ks = &kgdb_var;
        int ret;

        ks->cpu = raw_smp_processor_id();
        ks->ex_vector = evector;
        ks->signo = signo;
        ks->err_code = ecode;
        ks->kgdb_usethreadid = 0;
        ks->linux_regs = regs;

        if (kgdb_reenter_check(ks))
                return 0; /* Ouch, double exception ! */
        kgdb_info[ks->cpu].exception_state |= DCPU_WANT_MASTER;
        ret = kgdb_cpu_enter(ks, regs);
        kgdb_info[ks->cpu].exception_state &= ~(DCPU_WANT_MASTER |
                                                DCPU_IS_SLAVE);
        return ret;
}
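
/*
 * kgdb_nmicallback() is the entry point for the other CPUs, typically
 * invoked from the IPI/NMI sent by the architecture's kgdb_roundup_cpus()
 * implementation.  A CPU only joins as a slave here when a master is
 * already active on some other CPU.
 */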

int kgdb_nmicallback(int cpu, void *regs)
{
#ifdef CONFIG_SMP
        struct kgdb_state kgdb_var;
        struct kgdb_state *ks = &kgdb_var;

        memset(ks, 0, sizeof(struct kgdb_state));
        ks->cpu = cpu;
        ks->linux_regs = regs;

        if (!atomic_read(&cpu_in_kgdb[cpu]) &&
            atomic_read(&kgdb_active) != -1 &&
            atomic_read(&kgdb_active) != cpu) {
                kgdb_info[cpu].exception_state |= DCPU_IS_SLAVE;
                kgdb_cpu_enter(ks, regs);
                kgdb_info[cpu].exception_state &= ~DCPU_IS_SLAVE;
                return 0;
        }
#endif
        return 1;
}

static void kgdb_console_write(struct console *co, const char *s,
                               unsigned count)
{
        unsigned long flags;

        /* If we're debugging, or KGDB has not connected, don't try
         * to print. */
        if (!kgdb_connected || atomic_read(&kgdb_active) != -1 || dbg_kdb_mode)
                return;

        local_irq_save(flags);
        gdbstub_msg_write(s, count);
        local_irq_restore(flags);
}

static struct console kgdbcons = {
        .name   = "kgdb",
        .write  = kgdb_console_write,
        .flags  = CON_PRINTBUFFER | CON_ENABLED,
        .index  = -1,
};

#ifdef CONFIG_MAGIC_SYSRQ
static void sysrq_handle_dbg(int key, struct tty_struct *tty)
{
        if (!dbg_io_ops) {
                printk(KERN_CRIT "ERROR: No KGDB I/O module available\n");
                return;
        }
        if (!kgdb_connected) {
#ifdef CONFIG_KGDB_KDB
                if (!dbg_kdb_mode)
                        printk(KERN_CRIT "KGDB or $3#33 for KDB\n");
#else
                printk(KERN_CRIT "Entering KGDB\n");
#endif
        }

        kgdb_breakpoint();
}

static struct sysrq_key_op sysrq_dbg_op = {
        .handler        = sysrq_handle_dbg,
        .help_msg       = "debug(G)",
        .action_msg     = "DEBUG",
};
#endif

static int kgdb_panic_event(struct notifier_block *self,
                            unsigned long val,
                            void *data)
{
        if (dbg_kdb_mode)
                kdb_printf("PANIC: %s\n", (char *)data);
        kgdb_breakpoint();
        return NOTIFY_DONE;
}

static struct notifier_block kgdb_panic_event_nb = {
        .notifier_call  = kgdb_panic_event,
        .priority       = INT_MAX,
};

static void kgdb_register_callbacks(void)
{
        if (!kgdb_io_module_registered) {
                kgdb_io_module_registered = 1;
                kgdb_arch_init();
                atomic_notifier_chain_register(&panic_notifier_list,
                                               &kgdb_panic_event_nb);
#ifdef CONFIG_MAGIC_SYSRQ
                register_sysrq_key('g', &sysrq_dbg_op);
#endif
                if (kgdb_use_con && !kgdb_con_registered) {
                        register_console(&kgdbcons);
                        kgdb_con_registered = 1;
                }
        }
}

static void kgdb_unregister_callbacks(void)
{
        /*
         * When this routine is called KGDB should unregister from the
         * panic handler and clean up, making sure it is not handling any
         * break exceptions at the time.
         */
        if (kgdb_io_module_registered) {
                kgdb_io_module_registered = 0;
                atomic_notifier_chain_unregister(&panic_notifier_list,
                                                 &kgdb_panic_event_nb);
                kgdb_arch_exit();
#ifdef CONFIG_MAGIC_SYSRQ
                unregister_sysrq_key('g', &sysrq_dbg_op);
#endif
                if (kgdb_con_registered) {
                        unregister_console(&kgdbcons);
                        kgdb_con_registered = 0;
                }
        }
}

/*
 * There are times when a tasklet needs to be used instead of a compiled-in
 * breakpoint so as to cause an exception outside a kgdb I/O module, such
 * as is the case with kgdboe, where calling a breakpoint in the I/O
 * driver itself would be fatal.
 */
static void kgdb_tasklet_bpt(unsigned long ing)
{
        kgdb_breakpoint();
        atomic_set(&kgdb_break_tasklet_var, 0);
}

static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);

void kgdb_schedule_breakpoint(void)
{
        if (atomic_read(&kgdb_break_tasklet_var) ||
                atomic_read(&kgdb_active) != -1 ||
                atomic_read(&kgdb_setting_breakpoint))
                return;
        atomic_inc(&kgdb_break_tasklet_var);
        tasklet_schedule(&kgdb_tasklet_breakpoint);
}
EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);

static void kgdb_initial_breakpoint(void)
{
        kgdb_break_asap = 0;

        printk(KERN_CRIT "kgdb: Waiting for connection from remote gdb...\n");
        kgdb_breakpoint();
}

/**
 * kgdb_register_io_module - register KGDB IO module
 * @new_dbg_io_ops: the io ops vector
 *
 * Register it with the KGDB core.
 */
int kgdb_register_io_module(struct kgdb_io *new_dbg_io_ops)
{
        int err;

        spin_lock(&kgdb_registration_lock);

        if (dbg_io_ops) {
                spin_unlock(&kgdb_registration_lock);

                printk(KERN_ERR "kgdb: Another I/O driver is already "
                       "registered with KGDB.\n");
                return -EBUSY;
        }

        if (new_dbg_io_ops->init) {
                err = new_dbg_io_ops->init();
                if (err) {
                        spin_unlock(&kgdb_registration_lock);
                        return err;
                }
        }

        dbg_io_ops = new_dbg_io_ops;

        spin_unlock(&kgdb_registration_lock);

        printk(KERN_INFO "kgdb: Registered I/O driver %s.\n",
               new_dbg_io_ops->name);

        /* Arm KGDB now. */
        kgdb_register_callbacks();

        if (kgdb_break_asap)
                kgdb_initial_breakpoint();

        return 0;
}
EXPORT_SYMBOL_GPL(kgdb_register_io_module);
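
/*
 * Usage sketch (illustrative only, not part of this file): a polled I/O
 * driver fills in a struct kgdb_io and hands it to
 * kgdb_register_io_module().  Only hooks referenced by this file are shown,
 * and my_uart_poll_rx() is a hypothetical helper; a real driver also
 * provides a write_char() hook for the gdb stub (see <linux/kgdb.h> for
 * the full ops layout).
 *
 *      static int my_dbg_read_char(void)
 *      {
 *              int c = my_uart_poll_rx();
 *              return c < 0 ? NO_POLL_CHAR : c;
 *      }
 *
 *      static struct kgdb_io my_dbg_io_ops = {
 *              .name           = "my_dbg",
 *              .read_char      = my_dbg_read_char,
 *      };
 *
 *      err = kgdb_register_io_module(&my_dbg_io_ops);
 *      ...
 *      kgdb_unregister_io_module(&my_dbg_io_ops);
 */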

/**
 * kgdb_unregister_io_module - unregister KGDB IO module
 * @old_dbg_io_ops: the io ops vector
 *
 * Unregister it with the KGDB core.
 */
void kgdb_unregister_io_module(struct kgdb_io *old_dbg_io_ops)
{
        BUG_ON(kgdb_connected);

        /*
         * KGDB is no longer able to communicate out, so
         * unregister our callbacks and reset state.
         */
        kgdb_unregister_callbacks();

        spin_lock(&kgdb_registration_lock);

        WARN_ON_ONCE(dbg_io_ops != old_dbg_io_ops);
        dbg_io_ops = NULL;

        spin_unlock(&kgdb_registration_lock);

        printk(KERN_INFO
               "kgdb: Unregistered I/O driver %s, debugger disabled.\n",
               old_dbg_io_ops->name);
}
EXPORT_SYMBOL_GPL(kgdb_unregister_io_module);
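
/*
 * dbg_io_get_char() fetches one character from the registered polled I/O
 * driver.  NO_POLL_CHAR becomes -1 for the callers; in kdb mode, DEL (127)
 * is additionally mapped to backspace (8) so kdb's line editor sees a
 * backspace on terminals that send DEL.
 */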

int dbg_io_get_char(void)
{
        int ret = dbg_io_ops->read_char();
        if (ret == NO_POLL_CHAR)
                return -1;
        if (!dbg_kdb_mode)
                return ret;
        if (ret == 127)
                return 8;
        return ret;
}

/**
 * kgdb_breakpoint - generate breakpoint exception
 *
 * This function will generate a breakpoint exception.  It is used at the
 * beginning of a program to sync up with a debugger and can be used
 * otherwise as a quick means to stop program execution and "break" into
 * the debugger.
 */
void kgdb_breakpoint(void)
{
        atomic_inc(&kgdb_setting_breakpoint);
        wmb(); /* Sync point before breakpoint */
        arch_kgdb_breakpoint();
        wmb(); /* Sync point after breakpoint */
        atomic_dec(&kgdb_setting_breakpoint);
}
EXPORT_SYMBOL_GPL(kgdb_breakpoint);
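
/*
 * Note that kgdb_setting_breakpoint is raised around the trap above: it is
 * what lets kgdb_io_ready() treat a deliberately programmed breakpoint as
 * expected even before a debugger has attached, and what stops
 * kgdb_schedule_breakpoint() from queueing a second, redundant break.
 */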

static int __init opt_kgdb_wait(char *str)
{
        kgdb_break_asap = 1;

        kdb_init(KDB_INIT_EARLY);
        if (kgdb_io_module_registered)
                kgdb_initial_breakpoint();

        return 0;
}

early_param("kgdbwait", opt_kgdb_wait);
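
/*
 * "kgdbwait" only arms kgdb_break_asap here; the actual initial breakpoint
 * fires as soon as an I/O driver registers (see kgdb_register_io_module()
 * above), or immediately if one is already registered when the parameter
 * is parsed.
 */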