/*
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 */
/*
 * Copyright (C) 2004 Amit S. Kale <amitkale@linsyssoft.com>
 * Copyright (C) 2000-2001 VERITAS Software Corporation.
 * Copyright (C) 2002 Andi Kleen, SuSE Labs
 * Copyright (C) 2004 LinSysSoft Technologies Pvt. Ltd.
 * Copyright (C) 2007 MontaVista Software, Inc.
 * Copyright (C) 2007-2008 Jason Wessel, Wind River Systems, Inc.
 */
/****************************************************************************
 * Contributor: Lake Stevens Instrument Division$
 * Written by: Glenn Engel $
 * Updated by: Amit Kale <akale@veritas.com>
 * Updated by: Tom Rini <trini@kernel.crashing.org>
 * Updated by: Jason Wessel <jason.wessel@windriver.com>
 * Modified for 386 by Jim Kingdon, Cygnus Support.
 * Original kgdb, compatibility with 2.1.xx kernel by
 * David Grothe <dave@gcom.com>
 * Integrated into 2.2.5 kernel by Tigran Aivazian <tigran@sco.com>
 * X86_64 changes from Andi Kleen's patch merged by Jim Houston
 */
#include <linux/spinlock.h>
#include <linux/kdebug.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/ptrace.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/kgdb.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/nmi.h>
#include <linux/hw_breakpoint.h>

#include <asm/debugreg.h>
#include <asm/apicdef.h>
#include <asm/apic.h>
#include <asm/nmi.h>
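
/*
 * Table describing how each GDB register maps onto struct pt_regs.
 * Entries with an offset of -1 (ds/es on 64-bit, fs/gs on both) have no
 * slot in pt_regs and are synthesized or ignored by the accessors below.
 */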
struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] =
{
#ifdef CONFIG_X86_32
        { "ax", 4, offsetof(struct pt_regs, ax) },
        { "cx", 4, offsetof(struct pt_regs, cx) },
        { "dx", 4, offsetof(struct pt_regs, dx) },
        { "bx", 4, offsetof(struct pt_regs, bx) },
        { "sp", 4, offsetof(struct pt_regs, sp) },
        { "bp", 4, offsetof(struct pt_regs, bp) },
        { "si", 4, offsetof(struct pt_regs, si) },
        { "di", 4, offsetof(struct pt_regs, di) },
        { "ip", 4, offsetof(struct pt_regs, ip) },
        { "flags", 4, offsetof(struct pt_regs, flags) },
        { "cs", 4, offsetof(struct pt_regs, cs) },
        { "ss", 4, offsetof(struct pt_regs, ss) },
        { "ds", 4, offsetof(struct pt_regs, ds) },
        { "es", 4, offsetof(struct pt_regs, es) },
#else
        { "ax", 8, offsetof(struct pt_regs, ax) },
        { "bx", 8, offsetof(struct pt_regs, bx) },
        { "cx", 8, offsetof(struct pt_regs, cx) },
        { "dx", 8, offsetof(struct pt_regs, dx) },
        { "si", 8, offsetof(struct pt_regs, si) },
        { "di", 8, offsetof(struct pt_regs, di) },
        { "bp", 8, offsetof(struct pt_regs, bp) },
        { "sp", 8, offsetof(struct pt_regs, sp) },
        { "r8", 8, offsetof(struct pt_regs, r8) },
        { "r9", 8, offsetof(struct pt_regs, r9) },
        { "r10", 8, offsetof(struct pt_regs, r10) },
        { "r11", 8, offsetof(struct pt_regs, r11) },
        { "r12", 8, offsetof(struct pt_regs, r12) },
        { "r13", 8, offsetof(struct pt_regs, r13) },
        { "r14", 8, offsetof(struct pt_regs, r14) },
        { "r15", 8, offsetof(struct pt_regs, r15) },
        { "ip", 8, offsetof(struct pt_regs, ip) },
        { "flags", 4, offsetof(struct pt_regs, flags) },
        { "cs", 4, offsetof(struct pt_regs, cs) },
        { "ss", 4, offsetof(struct pt_regs, ss) },
        { "ds", 4, -1 },
        { "es", 4, -1 },
#endif
        { "fs", 4, -1 },
        { "gs", 4, -1 },
};
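
/*
 * dbg_set_reg() writes a register value supplied by GDB back into the
 * saved pt_regs. The stack pointer, orig_ax and (on 32-bit) ss/fs/gs are
 * deliberately left untouched.
 */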
int dbg_set_reg(int regno, void *mem, struct pt_regs *regs)
{
        if (
#ifdef CONFIG_X86_32
            regno == GDB_SS || regno == GDB_FS || regno == GDB_GS ||
#endif
            regno == GDB_SP || regno == GDB_ORIG_AX)
                return 0;

        if (dbg_reg_def[regno].offset != -1)
                memcpy((void *)regs + dbg_reg_def[regno].offset, mem,
                       dbg_reg_def[regno].size);
        return 0;
}
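
/*
 * dbg_get_reg() copies one register out of pt_regs for GDB. Registers
 * that are not part of the kernel-mode trap frame (ss/sp/fs/gs on 32-bit)
 * are filled in with sensible kernel values instead.
 */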
char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
{
        if (regno == GDB_ORIG_AX) {
                memcpy(mem, &regs->orig_ax, sizeof(regs->orig_ax));
                return "orig_ax";
        }
        if (regno >= DBG_MAX_REG_NUM || regno < 0)
                return NULL;

        if (dbg_reg_def[regno].offset != -1)
                memcpy(mem, (void *)regs + dbg_reg_def[regno].offset,
                       dbg_reg_def[regno].size);

#ifdef CONFIG_X86_32
        switch (regno) {
        case GDB_SS:
                if (!user_mode_vm(regs))
                        *(unsigned long *)mem = __KERNEL_DS;
                break;
        case GDB_SP:
                if (!user_mode_vm(regs))
                        *(unsigned long *)mem = kernel_stack_pointer(regs);
                break;
        case GDB_GS:
        case GDB_FS:
                *(unsigned long *)mem = 0xFFFF;
                break;
        }
#endif
        return dbg_reg_def[regno].name;
}
/**
 * sleeping_thread_to_gdb_regs - Convert ptrace regs to GDB regs
 * @gdb_regs: A pointer to hold the registers in the order GDB wants.
 * @p: The &struct task_struct of the desired process.
 *
 * Convert the register values of the sleeping process in @p to
 * the format that GDB expects.
 * This function is called when kgdb does not have access to the
 * &struct pt_regs and therefore it should fill the gdb registers
 * @gdb_regs with what has been saved in &struct thread_struct
 * thread field during switch_to.
 */
void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
{
#ifndef CONFIG_X86_32
        u32 *gdb_regs32 = (u32 *)gdb_regs;
#endif
        gdb_regs[GDB_AX] = 0;
        gdb_regs[GDB_BX] = 0;
        gdb_regs[GDB_CX] = 0;
        gdb_regs[GDB_DX] = 0;
        gdb_regs[GDB_SI] = 0;
        gdb_regs[GDB_DI] = 0;
        gdb_regs[GDB_BP] = *(unsigned long *)p->thread.sp;
#ifdef CONFIG_X86_32
        gdb_regs[GDB_DS] = __KERNEL_DS;
        gdb_regs[GDB_ES] = __KERNEL_DS;
        gdb_regs[GDB_PS] = 0;
        gdb_regs[GDB_CS] = __KERNEL_CS;
        gdb_regs[GDB_PC] = p->thread.ip;
        gdb_regs[GDB_SS] = __KERNEL_DS;
        gdb_regs[GDB_FS] = 0xFFFF;
        gdb_regs[GDB_GS] = 0xFFFF;
#else
        gdb_regs32[GDB_PS] = *(unsigned long *)(p->thread.sp + 8);
        gdb_regs32[GDB_CS] = __KERNEL_CS;
        gdb_regs32[GDB_SS] = __KERNEL_DS;
        gdb_regs[GDB_PC] = 0;
        gdb_regs[GDB_R8] = 0;
        gdb_regs[GDB_R9] = 0;
        gdb_regs[GDB_R10] = 0;
        gdb_regs[GDB_R11] = 0;
        gdb_regs[GDB_R12] = 0;
        gdb_regs[GDB_R13] = 0;
        gdb_regs[GDB_R14] = 0;
        gdb_regs[GDB_R15] = 0;
#endif
        gdb_regs[GDB_SP] = p->thread.sp;
}
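
/*
 * Per-slot state for the x86 hardware breakpoints. Each slot remembers
 * the requested address/len/type and points at the per-cpu perf events
 * that back it once the perf layer is available.
 */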
static struct hw_breakpoint {
        unsigned                enabled;
        unsigned long           addr;
        int                     len;
        int                     type;
        struct perf_event       * __percpu *pev;
} breakinfo[HBP_NUM];

static unsigned long early_dr7;
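
/*
 * Reinstall every enabled breakpoint on the CPU running the debugger.
 * Before the perf layer is up (dbg_is_early) the debug registers are
 * programmed directly; afterwards the slots go through
 * arch_install_hw_breakpoint().
 */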
static void kgdb_correct_hw_break(void)
{
        int breakno;

        for (breakno = 0; breakno < HBP_NUM; breakno++) {
                struct perf_event *bp;
                struct arch_hw_breakpoint *info;
                int val;
                int cpu = raw_smp_processor_id();

                if (!breakinfo[breakno].enabled)
                        continue;
                if (dbg_is_early) {
                        set_debugreg(breakinfo[breakno].addr, breakno);
                        early_dr7 |= encode_dr7(breakno,
                                                breakinfo[breakno].len,
                                                breakinfo[breakno].type);
                        set_debugreg(early_dr7, 7);
                        continue;
                }
                bp = *per_cpu_ptr(breakinfo[breakno].pev, cpu);
                info = counter_arch_bp(bp);
                if (bp->attr.disabled != 1)
                        continue;
                bp->attr.bp_addr = breakinfo[breakno].addr;
                bp->attr.bp_len = breakinfo[breakno].len;
                bp->attr.bp_type = breakinfo[breakno].type;
                info->address = breakinfo[breakno].addr;
                info->len = breakinfo[breakno].len;
                info->type = breakinfo[breakno].type;
                val = arch_install_hw_breakpoint(bp);
                if (!val)
                        bp->attr.disabled = 0;
        }
        if (!dbg_is_early)
                hw_breakpoint_restore();
}
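
/*
 * Reserve a breakpoint slot on every online CPU, unwinding the
 * reservations already made if any CPU refuses.
 */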
static int hw_break_reserve_slot(int breakno)
{
        int cpu;
        int cnt = 0;
        struct perf_event **pevent;

        if (dbg_is_early)
                return 0;

        for_each_online_cpu(cpu) {
                cnt++;
                pevent = per_cpu_ptr(breakinfo[breakno].pev, cpu);
                if (dbg_reserve_bp_slot(*pevent))
                        goto fail;
        }

        return 0;

fail:
        for_each_online_cpu(cpu) {
                cnt--;
                if (!cnt)
                        break;
                pevent = per_cpu_ptr(breakinfo[breakno].pev, cpu);
                dbg_release_bp_slot(*pevent);
        }
        return -1;
}
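
/*
 * Release the per-cpu slots that back one breakpoint; a non-zero return
 * tells the caller the release must be retried later.
 */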
static int hw_break_release_slot(int breakno)
{
        struct perf_event **pevent;
        int cpu;

        if (dbg_is_early)
                return 0;

        for_each_online_cpu(cpu) {
                pevent = per_cpu_ptr(breakinfo[breakno].pev, cpu);
                if (dbg_release_bp_slot(*pevent))
                        /*
                         * The debugger is responsible for handling the
                         * retry on remove failure.
                         */
                        return -1;
        }
        return 0;
}
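
/*
 * Find the enabled slot that matches @addr and disable it, releasing its
 * per-cpu perf events.
 */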
static int
kgdb_remove_hw_break(unsigned long addr, int len, enum kgdb_bptype bptype)
{
        int i;

        for (i = 0; i < HBP_NUM; i++)
                if (breakinfo[i].addr == addr && breakinfo[i].enabled)
                        break;
        if (i == HBP_NUM)
                return -1;

        if (hw_break_release_slot(i)) {
                printk(KERN_ERR "Cannot remove hw breakpoint at %lx\n", addr);
                return -1;
        }
        breakinfo[i].enabled = 0;

        return 0;
}
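
/*
 * Tear down every active breakpoint. Installed breakpoints are
 * uninstalled first; anything else has its slot released (or its
 * early_dr7 bits cleared during early debugging).
 */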
static void kgdb_remove_all_hw_break(void)
{
        int i;
        int cpu = raw_smp_processor_id();
        struct perf_event *bp;

        for (i = 0; i < HBP_NUM; i++) {
                if (!breakinfo[i].enabled)
                        continue;
                bp = *per_cpu_ptr(breakinfo[i].pev, cpu);
                if (!bp->attr.disabled) {
                        arch_uninstall_hw_breakpoint(bp);
                        bp->attr.disabled = 1;
                        continue;
                }
                if (dbg_is_early)
                        early_dr7 &= ~encode_dr7(i, breakinfo[i].len,
                                                 breakinfo[i].type);
                else if (hw_break_release_slot(i))
                        printk(KERN_ERR "KGDB: hw bpt remove failed %lx\n",
                               breakinfo[i].addr);
                breakinfo[i].enabled = 0;
        }
}
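
/*
 * Claim a free slot and translate the generic kgdb breakpoint type and
 * length into the x86 debug-register encoding before reserving the
 * per-cpu perf slots.
 */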
static int
kgdb_set_hw_break(unsigned long addr, int len, enum kgdb_bptype bptype)
{
        int i;

        for (i = 0; i < HBP_NUM; i++)
                if (!breakinfo[i].enabled)
                        break;
        if (i == HBP_NUM)
                return -1;

        switch (bptype) {
        case BP_HARDWARE_BREAKPOINT:
                len = 1;
                breakinfo[i].type = X86_BREAKPOINT_EXECUTE;
                break;
        case BP_WRITE_WATCHPOINT:
                breakinfo[i].type = X86_BREAKPOINT_WRITE;
                break;
        case BP_ACCESS_WATCHPOINT:
                breakinfo[i].type = X86_BREAKPOINT_RW;
                break;
        default:
                return -1;
        }
        switch (len) {
        case 1:
                breakinfo[i].len = X86_BREAKPOINT_LEN_1;
                break;
        case 2:
                breakinfo[i].len = X86_BREAKPOINT_LEN_2;
                break;
        case 4:
                breakinfo[i].len = X86_BREAKPOINT_LEN_4;
                break;
#ifdef CONFIG_X86_64
        case 8:
                breakinfo[i].len = X86_BREAKPOINT_LEN_8;
                break;
#endif
        default:
                return -1;
        }
        breakinfo[i].addr = addr;
        if (hw_break_reserve_slot(i)) {
                breakinfo[i].addr = 0;
                return -1;
        }
        breakinfo[i].enabled = 1;

        return 0;
}
/**
 * kgdb_disable_hw_debug - Disable hardware debugging while we are in kgdb.
 * @regs: Current &struct pt_regs.
 *
 * This function will be called if the particular architecture must
 * disable hardware debugging while it is processing gdb packets or
 * handling an exception.
 */
static void kgdb_disable_hw_debug(struct pt_regs *regs)
{
        int i;
        int cpu = raw_smp_processor_id();
        struct perf_event *bp;

        /* Disable hardware debugging while we are in kgdb: */
        set_debugreg(0UL, 7);
        for (i = 0; i < HBP_NUM; i++) {
                if (!breakinfo[i].enabled)
                        continue;
                if (dbg_is_early) {
                        early_dr7 &= ~encode_dr7(i, breakinfo[i].len,
                                                 breakinfo[i].type);
                        continue;
                }
                bp = *per_cpu_ptr(breakinfo[i].pev, cpu);
                if (bp->attr.disabled == 1)
                        continue;
                arch_uninstall_hw_breakpoint(bp);
                bp->attr.disabled = 1;
        }
}
#ifdef CONFIG_SMP
/**
 * kgdb_roundup_cpus - Get other CPUs into a holding pattern
 * @flags: Current IRQ state
 *
 * On SMP systems, we need to get the attention of the other CPUs
 * and get them into a known state. This should do what is needed
 * to get the other CPUs to call kgdb_wait(). Note that on some arches,
 * the NMI approach is not used for rounding up all the CPUs. For example,
 * in case of MIPS, smp_call_function() is used to roundup CPUs. In
 * this case, we have to make sure that interrupts are enabled before
 * calling smp_call_function(). The argument to this function is
 * the flags that will be used when restoring the interrupts. There is
 * a local_irq_save() call before kgdb_roundup_cpus().
 *
 * On non-SMP systems, this is not called.
 */
void kgdb_roundup_cpus(unsigned long flags)
{
        apic->send_IPI_allbutself(APIC_DM_NMI);
}
#endif
/**
 * kgdb_arch_handle_exception - Handle architecture specific GDB packets.
 * @e_vector: The error vector of the exception that happened.
 * @signo: The signal number of the exception that happened.
 * @err_code: The error code of the exception that happened.
 * @remcomInBuffer: The buffer of the packet we have read.
 * @remcomOutBuffer: The buffer of %BUFMAX bytes to write a packet into.
 * @linux_regs: The &struct pt_regs of the current process.
 *
 * This function MUST handle the 'c' and 's' command packets,
 * as well as packets to set / remove a hardware breakpoint, if used.
 * If there are additional packets which the hardware needs to handle,
 * they are handled here. The code should return -1 if it wants to
 * process more packets, and a %0 or %1 if it wants to exit from the
 * kgdb callback.
 */
int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
                               char *remcomInBuffer, char *remcomOutBuffer,
                               struct pt_regs *linux_regs)
{
        unsigned long addr;
        char *ptr;

        switch (remcomInBuffer[0]) {
        case 'c':
        case 's':
                /* try to read optional parameter, pc unchanged if no parm */
                ptr = &remcomInBuffer[1];
                if (kgdb_hex2long(&ptr, &addr))
                        linux_regs->ip = addr;
        case 'D':
        case 'k':
                /* clear the trace bit */
                linux_regs->flags &= ~X86_EFLAGS_TF;
                atomic_set(&kgdb_cpu_doing_single_step, -1);

                /* set the trace bit if we're stepping */
                if (remcomInBuffer[0] == 's') {
                        linux_regs->flags |= X86_EFLAGS_TF;
                        atomic_set(&kgdb_cpu_doing_single_step,
                                   raw_smp_processor_id());
                }

                return 0;
        }

        /* this means that we do not want to exit from the handler: */
        return -1;
}
static inline int
single_step_cont(struct pt_regs *regs, struct die_args *args)
{
        /*
         * Single step exception from kernel space to user space so
         * eat the exception and continue the process:
         */
        printk(KERN_ERR "KGDB: trap/step from kernel to user space, "
                        "resuming...\n");
        kgdb_arch_handle_exception(args->trapnr, args->signr,
                                   args->err, "c", "", regs);
        /*
         * Reset the BS bit in dr6 (pointed by args->err) to
         * denote completion of processing
         */
        (*(unsigned long *)ERR_PTR(args->err)) &= ~DR_STEP;

        return NOTIFY_STOP;
}
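
/*
 * was_in_debug_nmi[] remembers which CPUs were rounded up with an NMI so
 * that the follow-up "unknown" NMI they may raise can be swallowed
 * instead of being reported as spurious.
 */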
static int was_in_debug_nmi[NR_CPUS];

static int kgdb_nmi_handler(unsigned int cmd, struct pt_regs *regs)
{
        switch (cmd) {
        case NMI_LOCAL:
                if (atomic_read(&kgdb_active) != -1) {
                        /* KGDB CPU roundup */
                        kgdb_nmicallback(raw_smp_processor_id(), regs);
                        was_in_debug_nmi[raw_smp_processor_id()] = 1;
                        touch_nmi_watchdog();
                        return NMI_HANDLED;
                }
                break;

        case NMI_UNKNOWN:
                if (was_in_debug_nmi[raw_smp_processor_id()]) {
                        was_in_debug_nmi[raw_smp_processor_id()] = 0;
                        return NMI_HANDLED;
                }
                break;
        default:
                /* do nothing */
                break;
        }
        return NMI_DONE;
}
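
/*
 * Common entry from the die notifier and kgdb_ll_trap(): decide whether
 * the event belongs to kgdb and, if so, hand it to the core debugger.
 */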
static int __kgdb_notify(struct die_args *args, unsigned long cmd)
{
        struct pt_regs *regs = args->regs;

        switch (cmd) {
        case DIE_DEBUG:
                if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
                        if (user_mode(regs))
                                return single_step_cont(regs, args);
                        break;
                } else if (test_thread_flag(TIF_SINGLESTEP))
                        /* This means a user thread is single stepping
                         * a system call which should be ignored
                         */
                        return NOTIFY_DONE;
                /* fall through */
        default:
                if (user_mode(regs))
                        return NOTIFY_DONE;
        }

        if (kgdb_handle_exception(args->trapnr, args->signr, cmd, regs))
                return NOTIFY_DONE;

        /* Must touch watchdog before return to normal operation */
        touch_nmi_watchdog();
        return NOTIFY_STOP;
}
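
/*
 * Low-level trap entry point: build a struct die_args and feed it to
 * __kgdb_notify() directly, bypassing the die notifier chain, provided
 * an I/O module has been registered.
 */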
int kgdb_ll_trap(int cmd, const char *str,
                 struct pt_regs *regs, long err, int trap, int sig)
{
        struct die_args args = {
                .regs   = regs,
                .str    = str,
                .err    = err,
                .trapnr = trap,
                .signr  = sig,
        };

        if (!kgdb_io_module_registered)
                return NOTIFY_DONE;

        return __kgdb_notify(&args, cmd);
}
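
/*
 * die notifier callback: run __kgdb_notify() with interrupts disabled.
 */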
static int
kgdb_notify(struct notifier_block *self, unsigned long cmd, void *ptr)
{
        unsigned long flags;
        int ret;

        local_irq_save(flags);
        ret = __kgdb_notify(ptr, cmd);
        local_irq_restore(flags);

        return ret;
}

static struct notifier_block kgdb_notifier = {
        .notifier_call  = kgdb_notify,
};
/**
 * kgdb_arch_init - Perform any architecture specific initialization.
 *
 * This function will handle the initialization of any architecture
 * specific callbacks.
 */
int kgdb_arch_init(void)
{
        int retval;

        retval = register_die_notifier(&kgdb_notifier);
        if (retval)
                goto out;

        retval = register_nmi_handler(NMI_LOCAL, kgdb_nmi_handler,
                                      0, "kgdb");
        if (retval)
                goto out1;

        retval = register_nmi_handler(NMI_UNKNOWN, kgdb_nmi_handler,
                                      0, "kgdb");
        if (retval)
                goto out2;

        return retval;

out2:
        unregister_nmi_handler(NMI_LOCAL, "kgdb");
out1:
        unregister_die_notifier(&kgdb_notifier);
out:
        return retval;
}
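
/*
 * Perf overflow callback for the kgdb breakpoints: flag every enabled
 * slot in the current thread's debugreg6 so the ensuing debug exception
 * is seen as a kgdb breakpoint hit.
 */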
static void kgdb_hw_overflow_handler(struct perf_event *event,
                struct perf_sample_data *data, struct pt_regs *regs)
{
        struct task_struct *tsk = current;
        int i;

        for (i = 0; i < 4; i++)
                if (breakinfo[i].enabled)
                        tsk->thread.debugreg6 |= (DR_TRAP0 << i);
}
void kgdb_arch_late(void)
{
        int i, cpu;
        struct perf_event_attr attr;
        struct perf_event **pevent;

        /*
         * Pre-allocate the hw breakpoint structures in the non-atomic
         * portion of kgdb because this operation requires mutexes to
         * complete.
         */
        hw_breakpoint_init(&attr);
        attr.bp_addr = (unsigned long)kgdb_arch_init;
        attr.bp_len = HW_BREAKPOINT_LEN_1;
        attr.bp_type = HW_BREAKPOINT_W;
        attr.disabled = 1;
        for (i = 0; i < HBP_NUM; i++) {
                if (breakinfo[i].pev)
                        continue;
                breakinfo[i].pev = register_wide_hw_breakpoint(&attr, NULL, NULL);
                if (IS_ERR((void * __force)breakinfo[i].pev)) {
                        printk(KERN_ERR "kgdb: Could not allocate hw "
                               "breakpoints\nDisabling the kernel debugger\n");
                        breakinfo[i].pev = NULL;
                        kgdb_arch_exit();
                        return;
                }
                for_each_online_cpu(cpu) {
                        pevent = per_cpu_ptr(breakinfo[i].pev, cpu);
                        pevent[0]->hw.sample_period = 1;
                        pevent[0]->overflow_handler = kgdb_hw_overflow_handler;
                        if (pevent[0]->destroy != NULL) {
                                pevent[0]->destroy = NULL;
                                release_bp_slot(*pevent);
                        }
                }
        }
}
/**
 * kgdb_arch_exit - Perform any architecture specific uninitialization.
 *
 * This function will handle the uninitialization of any architecture
 * specific callbacks, for dynamic registration and unregistration.
 */
void kgdb_arch_exit(void)
{
        int i;

        for (i = 0; i < 4; i++) {
                if (breakinfo[i].pev) {
                        unregister_wide_hw_breakpoint(breakinfo[i].pev);
                        breakinfo[i].pev = NULL;
                }
        }
        unregister_nmi_handler(NMI_UNKNOWN, "kgdb");
        unregister_nmi_handler(NMI_LOCAL, "kgdb");
        unregister_die_notifier(&kgdb_notifier);
}
/**
 * kgdb_skipexception - Bail out of KGDB when we've been triggered.
 * @exception: Exception vector number
 * @regs: Current &struct pt_regs.
 *
 * On some architectures we need to skip a breakpoint exception when
 * it occurs after a breakpoint has been removed.
 *
 * Skip an int3 exception when it occurs after a breakpoint has been
 * removed. Backtrack eip by 1 since the int3 would have caused it to
 * increment by 1.
 */
int kgdb_skipexception(int exception, struct pt_regs *regs)
{
        if (exception == 3 && kgdb_isremovedbreak(regs->ip - 1)) {
                regs->ip -= 1;
                return 1;
        }
        return 0;
}
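
/*
 * An int3 trap leaves the saved ip pointing just past the breakpoint
 * byte, so report the address of the breakpoint itself for exception 3.
 */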
unsigned long kgdb_arch_pc(int exception, struct pt_regs *regs)
{
        if (exception == 3)
                return instruction_pointer(regs) - 1;
        return instruction_pointer(regs);
}

void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long ip)
{
        regs->ip = ip;
}
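
/*
 * Architecture hooks handed to the kgdb core: the single-byte int3
 * (0xcc) breakpoint instruction plus the hardware-breakpoint callbacks
 * defined above.
 */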
struct kgdb_arch arch_kgdb_ops = {
        /* Breakpoint instruction: */
        .gdb_bpt_instr          = { 0xcc },
        .flags                  = KGDB_HW_BREAKPOINT,
        .set_hw_breakpoint      = kgdb_set_hw_break,
        .remove_hw_breakpoint   = kgdb_remove_hw_break,
        .disable_hw_break       = kgdb_disable_hw_debug,
        .remove_all_hw_break    = kgdb_remove_all_hw_break,
        .correct_hw_break       = kgdb_correct_hw_break,
};