/* arch/x86/kernel/kgdb.c */
  1. /*
  2. * This program is free software; you can redistribute it and/or modify it
  3. * under the terms of the GNU General Public License as published by the
  4. * Free Software Foundation; either version 2, or (at your option) any
  5. * later version.
  6. *
  7. * This program is distributed in the hope that it will be useful, but
  8. * WITHOUT ANY WARRANTY; without even the implied warranty of
  9. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  10. * General Public License for more details.
  11. *
  12. */
  13. /*
  14. * Copyright (C) 2004 Amit S. Kale <amitkale@linsyssoft.com>
  15. * Copyright (C) 2000-2001 VERITAS Software Corporation.
  16. * Copyright (C) 2002 Andi Kleen, SuSE Labs
  17. * Copyright (C) 2004 LinSysSoft Technologies Pvt. Ltd.
  18. * Copyright (C) 2007 MontaVista Software, Inc.
  19. * Copyright (C) 2007-2008 Jason Wessel, Wind River Systems, Inc.
  20. */
  21. /****************************************************************************
  22. * Contributor: Lake Stevens Instrument Division$
  23. * Written by: Glenn Engel $
  24. * Updated by: Amit Kale<akale@veritas.com>
  25. * Updated by: Tom Rini <trini@kernel.crashing.org>
  26. * Updated by: Jason Wessel <jason.wessel@windriver.com>
  27. * Modified for 386 by Jim Kingdon, Cygnus Support.
  28. * Original kgdb, compatibility with 2.1.xx kernel by
  29. * David Grothe <dave@gcom.com>
  30. * Integrated into 2.2.5 kernel by Tigran Aivazian <tigran@sco.com>
  31. * X86_64 changes from Andi Kleen's patch merged by Jim Houston
  32. */
  33. #include <linux/spinlock.h>
  34. #include <linux/kdebug.h>
  35. #include <linux/string.h>
  36. #include <linux/kernel.h>
  37. #include <linux/ptrace.h>
  38. #include <linux/sched.h>
  39. #include <linux/delay.h>
  40. #include <linux/kgdb.h>
  41. #include <linux/init.h>
  42. #include <linux/smp.h>
  43. #include <linux/nmi.h>
  44. #include <linux/hw_breakpoint.h>
  45. #include <linux/uaccess.h>
  46. #include <linux/memory.h>
  47. #include <asm/debugreg.h>
  48. #include <asm/apicdef.h>
  49. #include <asm/system.h>
  50. #include <asm/apic.h>
  51. #include <asm/nmi.h>
  52. struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] =
  53. {
  54. #ifdef CONFIG_X86_32
  55. { "ax", 4, offsetof(struct pt_regs, ax) },
  56. { "cx", 4, offsetof(struct pt_regs, cx) },
  57. { "dx", 4, offsetof(struct pt_regs, dx) },
  58. { "bx", 4, offsetof(struct pt_regs, bx) },
  59. { "sp", 4, offsetof(struct pt_regs, sp) },
  60. { "bp", 4, offsetof(struct pt_regs, bp) },
  61. { "si", 4, offsetof(struct pt_regs, si) },
  62. { "di", 4, offsetof(struct pt_regs, di) },
  63. { "ip", 4, offsetof(struct pt_regs, ip) },
  64. { "flags", 4, offsetof(struct pt_regs, flags) },
  65. { "cs", 4, offsetof(struct pt_regs, cs) },
  66. { "ss", 4, offsetof(struct pt_regs, ss) },
  67. { "ds", 4, offsetof(struct pt_regs, ds) },
  68. { "es", 4, offsetof(struct pt_regs, es) },
  69. #else
  70. { "ax", 8, offsetof(struct pt_regs, ax) },
  71. { "bx", 8, offsetof(struct pt_regs, bx) },
  72. { "cx", 8, offsetof(struct pt_regs, cx) },
  73. { "dx", 8, offsetof(struct pt_regs, dx) },
  74. { "si", 8, offsetof(struct pt_regs, dx) },
  75. { "di", 8, offsetof(struct pt_regs, di) },
  76. { "bp", 8, offsetof(struct pt_regs, bp) },
  77. { "sp", 8, offsetof(struct pt_regs, sp) },
  78. { "r8", 8, offsetof(struct pt_regs, r8) },
  79. { "r9", 8, offsetof(struct pt_regs, r9) },
  80. { "r10", 8, offsetof(struct pt_regs, r10) },
  81. { "r11", 8, offsetof(struct pt_regs, r11) },
  82. { "r12", 8, offsetof(struct pt_regs, r12) },
  83. { "r13", 8, offsetof(struct pt_regs, r13) },
  84. { "r14", 8, offsetof(struct pt_regs, r14) },
  85. { "r15", 8, offsetof(struct pt_regs, r15) },
  86. { "ip", 8, offsetof(struct pt_regs, ip) },
  87. { "flags", 4, offsetof(struct pt_regs, flags) },
  88. { "cs", 4, offsetof(struct pt_regs, cs) },
  89. { "ss", 4, offsetof(struct pt_regs, ss) },
  90. { "ds", 4, -1 },
  91. { "es", 4, -1 },
  92. #endif
  93. { "fs", 4, -1 },
  94. { "gs", 4, -1 },
  95. };
  96. int dbg_set_reg(int regno, void *mem, struct pt_regs *regs)
  97. {
  98. if (
  99. #ifdef CONFIG_X86_32
  100. regno == GDB_SS || regno == GDB_FS || regno == GDB_GS ||
  101. #endif
  102. regno == GDB_SP || regno == GDB_ORIG_AX)
  103. return 0;
  104. if (dbg_reg_def[regno].offset != -1)
  105. memcpy((void *)regs + dbg_reg_def[regno].offset, mem,
  106. dbg_reg_def[regno].size);
  107. return 0;
  108. }
  109. char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
  110. {
  111. if (regno == GDB_ORIG_AX) {
  112. memcpy(mem, &regs->orig_ax, sizeof(regs->orig_ax));
  113. return "orig_ax";
  114. }
  115. if (regno >= DBG_MAX_REG_NUM || regno < 0)
  116. return NULL;
  117. if (dbg_reg_def[regno].offset != -1)
  118. memcpy(mem, (void *)regs + dbg_reg_def[regno].offset,
  119. dbg_reg_def[regno].size);
  120. #ifdef CONFIG_X86_32
  121. switch (regno) {
  122. case GDB_SS:
  123. if (!user_mode_vm(regs))
  124. *(unsigned long *)mem = __KERNEL_DS;
  125. break;
  126. case GDB_SP:
  127. if (!user_mode_vm(regs))
  128. *(unsigned long *)mem = kernel_stack_pointer(regs);
  129. break;
  130. case GDB_GS:
  131. case GDB_FS:
  132. *(unsigned long *)mem = 0xFFFF;
  133. break;
  134. }
  135. #endif
  136. return dbg_reg_def[regno].name;
  137. }
/**
 * sleeping_thread_to_gdb_regs - Convert ptrace regs to GDB regs
 * @gdb_regs: A pointer to hold the registers in the order GDB wants.
 * @p: The &struct task_struct of the desired process.
 *
 * Convert the register values of the sleeping process in @p to
 * the format that GDB expects.
 * This function is called when kgdb does not have access to the
 * &struct pt_regs and therefore it should fill the gdb registers
 * @gdb_regs with what has been saved in &struct thread_struct
 * thread field during switch_to.
 */
void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
{
#ifndef CONFIG_X86_32
	/* On 64-bit, flags/cs/ss occupy 32-bit slots in the gdb layout. */
	u32 *gdb_regs32 = (u32 *)gdb_regs;
#endif
	/*
	 * General purpose registers are not preserved across switch_to(),
	 * so report them as zero.
	 */
	gdb_regs[GDB_AX] = 0;
	gdb_regs[GDB_BX] = 0;
	gdb_regs[GDB_CX] = 0;
	gdb_regs[GDB_DX] = 0;
	gdb_regs[GDB_SI] = 0;
	gdb_regs[GDB_DI] = 0;
	/* Saved frame pointer is the first word on the switched-out stack. */
	gdb_regs[GDB_BP] = *(unsigned long *)p->thread.sp;
#ifdef CONFIG_X86_32
	gdb_regs[GDB_DS] = __KERNEL_DS;
	gdb_regs[GDB_ES] = __KERNEL_DS;
	gdb_regs[GDB_PS] = 0;
	gdb_regs[GDB_CS] = __KERNEL_CS;
	gdb_regs[GDB_PC] = p->thread.ip;
	gdb_regs[GDB_SS] = __KERNEL_DS;
	gdb_regs[GDB_FS] = 0xFFFF;
	gdb_regs[GDB_GS] = 0xFFFF;
#else
	/* Flags word sits 8 bytes above the saved sp on x86-64. */
	gdb_regs32[GDB_PS] = *(unsigned long *)(p->thread.sp + 8);
	gdb_regs32[GDB_CS] = __KERNEL_CS;
	gdb_regs32[GDB_SS] = __KERNEL_DS;
	gdb_regs[GDB_PC] = 0;
	gdb_regs[GDB_R8] = 0;
	gdb_regs[GDB_R9] = 0;
	gdb_regs[GDB_R10] = 0;
	gdb_regs[GDB_R11] = 0;
	gdb_regs[GDB_R12] = 0;
	gdb_regs[GDB_R13] = 0;
	gdb_regs[GDB_R14] = 0;
	gdb_regs[GDB_R15] = 0;
#endif
	gdb_regs[GDB_SP] = p->thread.sp;
}
/*
 * Shadow state for each hardware breakpoint slot.  HBP_NUM slots map
 * 1:1 onto the x86 debug address registers; ->pev is the per-cpu perf
 * event backing a slot once the hw_breakpoint layer is available.
 */
static struct hw_breakpoint {
	unsigned		enabled;	/* slot in use */
	unsigned long		addr;		/* breakpoint address */
	int			len;		/* X86_BREAKPOINT_LEN_* */
	int			type;		/* X86_BREAKPOINT_* */
	struct perf_event	* __percpu *pev; /* per-cpu backing events */
} breakinfo[HBP_NUM];

/* Shadow of DR7 used while dbg_is_early (before perf is usable). */
static unsigned long early_dr7;
/*
 * Re-arm every enabled breakpoint slot on the current CPU.  In early
 * debug mode the debug registers are programmed directly via the
 * early_dr7 shadow; otherwise the per-cpu perf events are synced and
 * (re)installed.
 */
static void kgdb_correct_hw_break(void)
{
	int breakno;

	for (breakno = 0; breakno < HBP_NUM; breakno++) {
		struct perf_event *bp;
		struct arch_hw_breakpoint *info;
		int val;
		int cpu = raw_smp_processor_id();

		if (!breakinfo[breakno].enabled)
			continue;
		if (dbg_is_early) {
			/* Program DR<breakno> and the DR7 shadow directly. */
			set_debugreg(breakinfo[breakno].addr, breakno);
			early_dr7 |= encode_dr7(breakno,
						breakinfo[breakno].len,
						breakinfo[breakno].type);
			set_debugreg(early_dr7, 7);
			continue;
		}
		bp = *per_cpu_ptr(breakinfo[breakno].pev, cpu);
		info = counter_arch_bp(bp);
		/* Skip slots that are already installed on this cpu. */
		if (bp->attr.disabled != 1)
			continue;
		/* Sync perf attrs and arch info with the requested bp. */
		bp->attr.bp_addr = breakinfo[breakno].addr;
		bp->attr.bp_len = breakinfo[breakno].len;
		bp->attr.bp_type = breakinfo[breakno].type;
		info->address = breakinfo[breakno].addr;
		info->len = breakinfo[breakno].len;
		info->type = breakinfo[breakno].type;
		val = arch_install_hw_breakpoint(bp);
		if (!val)
			bp->attr.disabled = 0;
	}
	if (!dbg_is_early)
		hw_breakpoint_restore();
}
/*
 * Reserve a breakpoint slot for @breakno on every online CPU.
 * Returns 0 on success.  On failure, the slots already reserved
 * (cnt - 1 of them: every CPU processed before the failing one) are
 * released and -1 is returned.
 */
static int hw_break_reserve_slot(int breakno)
{
	int cpu;
	int cnt = 0;
	struct perf_event **pevent;

	/* Early mode writes debug registers directly; nothing to reserve. */
	if (dbg_is_early)
		return 0;

	for_each_online_cpu(cpu) {
		cnt++;
		pevent = per_cpu_ptr(breakinfo[breakno].pev, cpu);
		if (dbg_reserve_bp_slot(*pevent))
			goto fail;
	}
	return 0;

fail:
	/* Unwind only the reservations that actually succeeded. */
	for_each_online_cpu(cpu) {
		cnt--;
		if (!cnt)
			break;
		pevent = per_cpu_ptr(breakinfo[breakno].pev, cpu);
		dbg_release_bp_slot(*pevent);
	}
	return -1;
}
  254. static int hw_break_release_slot(int breakno)
  255. {
  256. struct perf_event **pevent;
  257. int cpu;
  258. if (dbg_is_early)
  259. return 0;
  260. for_each_online_cpu(cpu) {
  261. pevent = per_cpu_ptr(breakinfo[breakno].pev, cpu);
  262. if (dbg_release_bp_slot(*pevent))
  263. /*
  264. * The debugger is responsible for handing the retry on
  265. * remove failure.
  266. */
  267. return -1;
  268. }
  269. return 0;
  270. }
  271. static int
  272. kgdb_remove_hw_break(unsigned long addr, int len, enum kgdb_bptype bptype)
  273. {
  274. int i;
  275. for (i = 0; i < HBP_NUM; i++)
  276. if (breakinfo[i].addr == addr && breakinfo[i].enabled)
  277. break;
  278. if (i == HBP_NUM)
  279. return -1;
  280. if (hw_break_release_slot(i)) {
  281. printk(KERN_ERR "Cannot remove hw breakpoint at %lx\n", addr);
  282. return -1;
  283. }
  284. breakinfo[i].enabled = 0;
  285. return 0;
  286. }
/*
 * Tear down every enabled breakpoint slot.  A slot that is still armed
 * on this CPU is first uninstalled (and kept enabled so the slot
 * bookkeeping can be released on a later pass); otherwise the slot is
 * released (or dropped from the early DR7 shadow) and marked free.
 */
static void kgdb_remove_all_hw_break(void)
{
	int i;
	int cpu = raw_smp_processor_id();
	struct perf_event *bp;

	for (i = 0; i < HBP_NUM; i++) {
		if (!breakinfo[i].enabled)
			continue;
		bp = *per_cpu_ptr(breakinfo[i].pev, cpu);
		if (!bp->attr.disabled) {
			/* Still armed here: uninstall, keep the slot. */
			arch_uninstall_hw_breakpoint(bp);
			bp->attr.disabled = 1;
			continue;
		}
		if (dbg_is_early)
			early_dr7 &= ~encode_dr7(i, breakinfo[i].len,
						 breakinfo[i].type);
		else if (hw_break_release_slot(i))
			printk(KERN_ERR "KGDB: hw bpt remove failed %lx\n",
			       breakinfo[i].addr);
		breakinfo[i].enabled = 0;
	}
}
  310. static int
  311. kgdb_set_hw_break(unsigned long addr, int len, enum kgdb_bptype bptype)
  312. {
  313. int i;
  314. for (i = 0; i < HBP_NUM; i++)
  315. if (!breakinfo[i].enabled)
  316. break;
  317. if (i == HBP_NUM)
  318. return -1;
  319. switch (bptype) {
  320. case BP_HARDWARE_BREAKPOINT:
  321. len = 1;
  322. breakinfo[i].type = X86_BREAKPOINT_EXECUTE;
  323. break;
  324. case BP_WRITE_WATCHPOINT:
  325. breakinfo[i].type = X86_BREAKPOINT_WRITE;
  326. break;
  327. case BP_ACCESS_WATCHPOINT:
  328. breakinfo[i].type = X86_BREAKPOINT_RW;
  329. break;
  330. default:
  331. return -1;
  332. }
  333. switch (len) {
  334. case 1:
  335. breakinfo[i].len = X86_BREAKPOINT_LEN_1;
  336. break;
  337. case 2:
  338. breakinfo[i].len = X86_BREAKPOINT_LEN_2;
  339. break;
  340. case 4:
  341. breakinfo[i].len = X86_BREAKPOINT_LEN_4;
  342. break;
  343. #ifdef CONFIG_X86_64
  344. case 8:
  345. breakinfo[i].len = X86_BREAKPOINT_LEN_8;
  346. break;
  347. #endif
  348. default:
  349. return -1;
  350. }
  351. breakinfo[i].addr = addr;
  352. if (hw_break_reserve_slot(i)) {
  353. breakinfo[i].addr = 0;
  354. return -1;
  355. }
  356. breakinfo[i].enabled = 1;
  357. return 0;
  358. }
/**
 * kgdb_disable_hw_debug - Disable hardware debugging while we in kgdb.
 * @regs: Current &struct pt_regs.
 *
 * This function will be called if the particular architecture must
 * disable hardware debugging while it is processing gdb packets or
 * handling exception.
 */
static void kgdb_disable_hw_debug(struct pt_regs *regs)
{
	int i;
	int cpu = raw_smp_processor_id();
	struct perf_event *bp;

	/* Disable hardware debugging while we are in kgdb: */
	set_debugreg(0UL, 7);
	for (i = 0; i < HBP_NUM; i++) {
		if (!breakinfo[i].enabled)
			continue;
		if (dbg_is_early) {
			/* Drop this slot from the DR7 shadow. */
			early_dr7 &= ~encode_dr7(i, breakinfo[i].len,
						 breakinfo[i].type);
			continue;
		}
		bp = *per_cpu_ptr(breakinfo[i].pev, cpu);
		if (bp->attr.disabled == 1)
			continue;
		arch_uninstall_hw_breakpoint(bp);
		bp->attr.disabled = 1;
	}
}
#ifdef CONFIG_SMP
/**
 * kgdb_roundup_cpus - Get other CPUs into a holding pattern
 * @flags: Current IRQ state
 *
 * On SMP systems, we need to get the attention of the other CPUs
 * and get them be in a known state. This should do what is needed
 * to get the other CPUs to call kgdb_wait(). Note that on some arches,
 * the NMI approach is not used for rounding up all the CPUs. For example,
 * in case of MIPS, smp_call_function() is used to roundup CPUs. In
 * this case, we have to make sure that interrupts are enabled before
 * calling smp_call_function(). The argument to this function is
 * the flags that will be used when restoring the interrupts. There is
 * local_irq_save() call before kgdb_roundup_cpus().
 *
 * On non-SMP systems, this is not called.
 */
void kgdb_roundup_cpus(unsigned long flags)
{
	/* NMI the other CPUs so they trap into kgdb_nmi_handler(). */
	apic->send_IPI_allbutself(APIC_DM_NMI);
}
#endif
/**
 * kgdb_arch_handle_exception - Handle architecture specific GDB packets.
 * @e_vector: The error vector of the exception that happened.
 * @signo: The signal number of the exception that happened.
 * @err_code: The error code of the exception that happened.
 * @remcomInBuffer: The buffer of the packet we have read.
 * @remcomOutBuffer: The buffer of %BUFMAX bytes to write a packet into.
 * @linux_regs: The &struct pt_regs of the current process.
 *
 * This function MUST handle the 'c' and 's' command packets,
 * as well packets to set / remove a hardware breakpoint, if used.
 * If there are additional packets which the hardware needs to handle,
 * they are handled here. The code should return -1 if it wants to
 * process more packets, and a %0 or %1 if it wants to exit from the
 * kgdb callback.
 */
int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
			       char *remcomInBuffer, char *remcomOutBuffer,
			       struct pt_regs *linux_regs)
{
	unsigned long addr;
	char *ptr;

	switch (remcomInBuffer[0]) {
	case 'c':
	case 's':
		/* try to read optional parameter, pc unchanged if no parm */
		ptr = &remcomInBuffer[1];
		if (kgdb_hex2long(&ptr, &addr))
			linux_regs->ip = addr;
		/* fall through - continue/step share the resume logic below */
	case 'D':
	case 'k':
		/* clear the trace bit */
		linux_regs->flags &= ~X86_EFLAGS_TF;
		atomic_set(&kgdb_cpu_doing_single_step, -1);
		/* set the trace bit if we're stepping */
		if (remcomInBuffer[0] == 's') {
			linux_regs->flags |= X86_EFLAGS_TF;
			atomic_set(&kgdb_cpu_doing_single_step,
				   raw_smp_processor_id());
		}
		return 0;
	}

	/* this means that we do not want to exit from the handler: */
	return -1;
}
static inline int
single_step_cont(struct pt_regs *regs, struct die_args *args)
{
	/*
	 * Single step exception from kernel space to user space so
	 * eat the exception and continue the process:
	 */
	printk(KERN_ERR "KGDB: trap/step from kernel to user space, "
			"resuming...\n");
	/* Issue a synthetic 'c' (continue) to the exception handler. */
	kgdb_arch_handle_exception(args->trapnr, args->signr,
				   args->err, "c", "", regs);
	/*
	 * Reset the BS bit in dr6 (pointed by args->err) to
	 * denote completion of processing.  NOTE: args->err carries a
	 * pointer to dr6 smuggled through the long err field; ERR_PTR()
	 * is only used here to cast it back to a pointer.
	 */
	(*(unsigned long *)ERR_PTR(args->err)) &= ~DR_STEP;

	return NOTIFY_STOP;
}
  474. static int was_in_debug_nmi[NR_CPUS];
  475. static int kgdb_nmi_handler(unsigned int cmd, struct pt_regs *regs)
  476. {
  477. switch (cmd) {
  478. case NMI_LOCAL:
  479. if (atomic_read(&kgdb_active) != -1) {
  480. /* KGDB CPU roundup */
  481. kgdb_nmicallback(raw_smp_processor_id(), regs);
  482. was_in_debug_nmi[raw_smp_processor_id()] = 1;
  483. touch_nmi_watchdog();
  484. return NMI_HANDLED;
  485. }
  486. break;
  487. case NMI_UNKNOWN:
  488. if (was_in_debug_nmi[raw_smp_processor_id()]) {
  489. was_in_debug_nmi[raw_smp_processor_id()] = 0;
  490. return NMI_HANDLED;
  491. }
  492. break;
  493. default:
  494. /* do nothing */
  495. break;
  496. }
  497. return NMI_DONE;
  498. }
/*
 * Common die-notifier body (interrupts already off).  Decides whether
 * the event belongs to kgdb and, if so, enters the debugger core.
 */
static int __kgdb_notify(struct die_args *args, unsigned long cmd)
{
	struct pt_regs *regs = args->regs;

	switch (cmd) {
	case DIE_DEBUG:
		if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
			/* kgdb requested this single step. */
			if (user_mode(regs))
				return single_step_cont(regs, args);
			break;
		} else if (test_thread_flag(TIF_SINGLESTEP))
			/* This means a user thread is single stepping
			 * a system call which should be ignored
			 */
			return NOTIFY_DONE;
		/* fall through */
	default:
		/* User-space faults are never kgdb's business. */
		if (user_mode(regs))
			return NOTIFY_DONE;
	}

	if (kgdb_handle_exception(args->trapnr, args->signr, cmd, regs))
		return NOTIFY_DONE;

	/* Must touch watchdog before return to normal operation */
	touch_nmi_watchdog();
	return NOTIFY_STOP;
}
  524. int kgdb_ll_trap(int cmd, const char *str,
  525. struct pt_regs *regs, long err, int trap, int sig)
  526. {
  527. struct die_args args = {
  528. .regs = regs,
  529. .str = str,
  530. .err = err,
  531. .trapnr = trap,
  532. .signr = sig,
  533. };
  534. if (!kgdb_io_module_registered)
  535. return NOTIFY_DONE;
  536. return __kgdb_notify(&args, cmd);
  537. }
  538. static int
  539. kgdb_notify(struct notifier_block *self, unsigned long cmd, void *ptr)
  540. {
  541. unsigned long flags;
  542. int ret;
  543. local_irq_save(flags);
  544. ret = __kgdb_notify(ptr, cmd);
  545. local_irq_restore(flags);
  546. return ret;
  547. }
/* Die notifier used to route traps (int3, debug) into kgdb. */
static struct notifier_block kgdb_notifier = {
	.notifier_call	= kgdb_notify,
};
  551. /**
  552. * kgdb_arch_init - Perform any architecture specific initalization.
  553. *
  554. * This function will handle the initalization of any architecture
  555. * specific callbacks.
  556. */
  557. int kgdb_arch_init(void)
  558. {
  559. int retval;
  560. retval = register_die_notifier(&kgdb_notifier);
  561. if (retval)
  562. goto out;
  563. retval = register_nmi_handler(NMI_LOCAL, kgdb_nmi_handler,
  564. 0, "kgdb");
  565. if (retval)
  566. goto out1;
  567. retval = register_nmi_handler(NMI_UNKNOWN, kgdb_nmi_handler,
  568. 0, "kgdb");
  569. if (retval)
  570. goto out2;
  571. return retval;
  572. out2:
  573. unregister_nmi_handler(NMI_LOCAL, "kgdb");
  574. out1:
  575. unregister_die_notifier(&kgdb_notifier);
  576. out:
  577. return retval;
  578. }
  579. static void kgdb_hw_overflow_handler(struct perf_event *event,
  580. struct perf_sample_data *data, struct pt_regs *regs)
  581. {
  582. struct task_struct *tsk = current;
  583. int i;
  584. for (i = 0; i < 4; i++)
  585. if (breakinfo[i].enabled)
  586. tsk->thread.debugreg6 |= (DR_TRAP0 << i);
  587. }
  588. void kgdb_arch_late(void)
  589. {
  590. int i, cpu;
  591. struct perf_event_attr attr;
  592. struct perf_event **pevent;
  593. /*
  594. * Pre-allocate the hw breakpoint structions in the non-atomic
  595. * portion of kgdb because this operation requires mutexs to
  596. * complete.
  597. */
  598. hw_breakpoint_init(&attr);
  599. attr.bp_addr = (unsigned long)kgdb_arch_init;
  600. attr.bp_len = HW_BREAKPOINT_LEN_1;
  601. attr.bp_type = HW_BREAKPOINT_W;
  602. attr.disabled = 1;
  603. for (i = 0; i < HBP_NUM; i++) {
  604. if (breakinfo[i].pev)
  605. continue;
  606. breakinfo[i].pev = register_wide_hw_breakpoint(&attr, NULL, NULL);
  607. if (IS_ERR((void * __force)breakinfo[i].pev)) {
  608. printk(KERN_ERR "kgdb: Could not allocate hw"
  609. "breakpoints\nDisabling the kernel debugger\n");
  610. breakinfo[i].pev = NULL;
  611. kgdb_arch_exit();
  612. return;
  613. }
  614. for_each_online_cpu(cpu) {
  615. pevent = per_cpu_ptr(breakinfo[i].pev, cpu);
  616. pevent[0]->hw.sample_period = 1;
  617. pevent[0]->overflow_handler = kgdb_hw_overflow_handler;
  618. if (pevent[0]->destroy != NULL) {
  619. pevent[0]->destroy = NULL;
  620. release_bp_slot(*pevent);
  621. }
  622. }
  623. }
  624. }
  625. /**
  626. * kgdb_arch_exit - Perform any architecture specific uninitalization.
  627. *
  628. * This function will handle the uninitalization of any architecture
  629. * specific callbacks, for dynamic registration and unregistration.
  630. */
  631. void kgdb_arch_exit(void)
  632. {
  633. int i;
  634. for (i = 0; i < 4; i++) {
  635. if (breakinfo[i].pev) {
  636. unregister_wide_hw_breakpoint(breakinfo[i].pev);
  637. breakinfo[i].pev = NULL;
  638. }
  639. }
  640. unregister_nmi_handler(NMI_UNKNOWN, "kgdb");
  641. unregister_nmi_handler(NMI_LOCAL, "kgdb");
  642. unregister_die_notifier(&kgdb_notifier);
  643. }
  644. /**
  645. *
  646. * kgdb_skipexception - Bail out of KGDB when we've been triggered.
  647. * @exception: Exception vector number
  648. * @regs: Current &struct pt_regs.
  649. *
  650. * On some architectures we need to skip a breakpoint exception when
  651. * it occurs after a breakpoint has been removed.
  652. *
  653. * Skip an int3 exception when it occurs after a breakpoint has been
  654. * removed. Backtrack eip by 1 since the int3 would have caused it to
  655. * increment by 1.
  656. */
  657. int kgdb_skipexception(int exception, struct pt_regs *regs)
  658. {
  659. if (exception == 3 && kgdb_isremovedbreak(regs->ip - 1)) {
  660. regs->ip -= 1;
  661. return 1;
  662. }
  663. return 0;
  664. }
  665. unsigned long kgdb_arch_pc(int exception, struct pt_regs *regs)
  666. {
  667. if (exception == 3)
  668. return instruction_pointer(regs) - 1;
  669. return instruction_pointer(regs);
  670. }
/* Apply a PC change requested by the debugger (e.g. gdb "set $pc"). */
void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long ip)
{
	regs->ip = ip;
}
/*
 * Plant a software breakpoint (int3) at bpt->bpt_addr, saving the
 * original instruction bytes for later removal.  When the direct
 * probe_kernel_write() fails (write-protected text under
 * CONFIG_DEBUG_RODATA), fall back to text_poke() and verify the result.
 */
int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
{
	int err;
	char opc[BREAK_INSTR_SIZE];

	bpt->type = BP_BREAKPOINT;
	/* Save the bytes about to be overwritten. */
	err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr,
				BREAK_INSTR_SIZE);
	if (err)
		return err;
	err = probe_kernel_write((char *)bpt->bpt_addr,
				 arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE);
#ifdef CONFIG_DEBUG_RODATA
	if (!err)
		return err;
	/*
	 * It is safe to call text_poke() because normal kernel execution
	 * is stopped on all cores, so long as the text_mutex is not locked.
	 */
	if (mutex_is_locked(&text_mutex))
		return -EBUSY;
	text_poke((void *)bpt->bpt_addr, arch_kgdb_ops.gdb_bpt_instr,
		  BREAK_INSTR_SIZE);
	/* Read back and confirm the poke actually landed. */
	err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
	if (err)
		return err;
	if (memcmp(opc, arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE))
		return -EINVAL;
	/* Mark for symmetric removal via text_poke() in remove_breakpoint. */
	bpt->type = BP_POKE_BREAKPOINT;
#endif /* CONFIG_DEBUG_RODATA */
	return err;
}
/*
 * Remove a software breakpoint by restoring the saved instruction
 * bytes.  Breakpoints planted via text_poke() (BP_POKE_BREAKPOINT)
 * are removed the same way when possible; any other case falls back
 * to a direct probe_kernel_write().
 */
int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
{
#ifdef CONFIG_DEBUG_RODATA
	int err;
	char opc[BREAK_INSTR_SIZE];

	if (bpt->type != BP_POKE_BREAKPOINT)
		goto knl_write;
	/*
	 * It is safe to call text_poke() because normal kernel execution
	 * is stopped on all cores, so long as the text_mutex is not locked.
	 */
	if (mutex_is_locked(&text_mutex))
		goto knl_write;
	text_poke((void *)bpt->bpt_addr, bpt->saved_instr, BREAK_INSTR_SIZE);
	/* Verify the restore; on mismatch retry with a direct write. */
	err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
	if (err || memcmp(opc, bpt->saved_instr, BREAK_INSTR_SIZE))
		goto knl_write;
	return err;

knl_write:
#endif /* CONFIG_DEBUG_RODATA */
	return probe_kernel_write((char *)bpt->bpt_addr,
				  (char *)bpt->saved_instr, BREAK_INSTR_SIZE);
}
/*
 * Architecture hooks exported to the kgdb core: the software breakpoint
 * instruction plus the hardware-breakpoint management callbacks above.
 */
struct kgdb_arch arch_kgdb_ops = {
	/* Breakpoint instruction: int3 */
	.gdb_bpt_instr		= { 0xcc },
	.flags			= KGDB_HW_BREAKPOINT,
	.set_hw_breakpoint	= kgdb_set_hw_break,
	.remove_hw_breakpoint	= kgdb_remove_hw_break,
	.disable_hw_break	= kgdb_disable_hw_debug,
	.remove_all_hw_break	= kgdb_remove_all_hw_break,
	.correct_hw_break	= kgdb_correct_hw_break,
};