kgdb.c

/*
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 */
/*
 * Copyright (C) 2004 Amit S. Kale <amitkale@linsyssoft.com>
 * Copyright (C) 2000-2001 VERITAS Software Corporation.
 * Copyright (C) 2002 Andi Kleen, SuSE Labs
 * Copyright (C) 2004 LinSysSoft Technologies Pvt. Ltd.
 * Copyright (C) 2007 MontaVista Software, Inc.
 * Copyright (C) 2007-2008 Jason Wessel, Wind River Systems, Inc.
 */
/****************************************************************************
 * Contributor: Lake Stevens Instrument Division$
 * Written by: Glenn Engel $
 * Updated by: Amit Kale <akale@veritas.com>
 * Updated by: Tom Rini <trini@kernel.crashing.org>
 * Updated by: Jason Wessel <jason.wessel@windriver.com>
 * Modified for 386 by Jim Kingdon, Cygnus Support.
 * Original kgdb, compatibility with 2.1.xx kernel by
 * David Grothe <dave@gcom.com>
 * Integrated into 2.2.5 kernel by Tigran Aivazian <tigran@sco.com>
 * X86_64 changes from Andi Kleen's patch merged by Jim Houston
 */

#include <linux/spinlock.h>
#include <linux/kdebug.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/ptrace.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/kgdb.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/nmi.h>
#include <linux/hw_breakpoint.h>

#include <asm/debugreg.h>
#include <asm/apicdef.h>
#include <asm/system.h>
#include <asm/apic.h>
#include <asm/nmi.h>

struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] =
{
#ifdef CONFIG_X86_32
        { "ax", 4, offsetof(struct pt_regs, ax) },
        { "cx", 4, offsetof(struct pt_regs, cx) },
        { "dx", 4, offsetof(struct pt_regs, dx) },
        { "bx", 4, offsetof(struct pt_regs, bx) },
        { "sp", 4, offsetof(struct pt_regs, sp) },
        { "bp", 4, offsetof(struct pt_regs, bp) },
        { "si", 4, offsetof(struct pt_regs, si) },
        { "di", 4, offsetof(struct pt_regs, di) },
        { "ip", 4, offsetof(struct pt_regs, ip) },
        { "flags", 4, offsetof(struct pt_regs, flags) },
        { "cs", 4, offsetof(struct pt_regs, cs) },
        { "ss", 4, offsetof(struct pt_regs, ss) },
        { "ds", 4, offsetof(struct pt_regs, ds) },
        { "es", 4, offsetof(struct pt_regs, es) },
#else
        { "ax", 8, offsetof(struct pt_regs, ax) },
        { "bx", 8, offsetof(struct pt_regs, bx) },
        { "cx", 8, offsetof(struct pt_regs, cx) },
        { "dx", 8, offsetof(struct pt_regs, dx) },
  72. { "si", 8, offsetof(struct pt_regs, dx) },
  73. { "di", 8, offsetof(struct pt_regs, di) },
  74. { "bp", 8, offsetof(struct pt_regs, bp) },
  75. { "sp", 8, offsetof(struct pt_regs, sp) },
  76. { "r8", 8, offsetof(struct pt_regs, r8) },
  77. { "r9", 8, offsetof(struct pt_regs, r9) },
  78. { "r10", 8, offsetof(struct pt_regs, r10) },
  79. { "r11", 8, offsetof(struct pt_regs, r11) },
  80. { "r12", 8, offsetof(struct pt_regs, r12) },
  81. { "r13", 8, offsetof(struct pt_regs, r13) },
  82. { "r14", 8, offsetof(struct pt_regs, r14) },
  83. { "r15", 8, offsetof(struct pt_regs, r15) },
  84. { "ip", 8, offsetof(struct pt_regs, ip) },
  85. { "flags", 4, offsetof(struct pt_regs, flags) },
  86. { "cs", 4, offsetof(struct pt_regs, cs) },
  87. { "ss", 4, offsetof(struct pt_regs, ss) },
  88. { "ds", 4, -1 },
  89. { "es", 4, -1 },
  90. #endif
  91. { "fs", 4, -1 },
  92. { "gs", 4, -1 },
  93. };
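
/*
 * dbg_set_reg() copies the value in @mem into the saved register slot
 * @regno of @regs.  SP, ORIG_AX and (on 32-bit) SS/FS/GS are treated as
 * read-only here, so writes to them are silently ignored.
 */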
int dbg_set_reg(int regno, void *mem, struct pt_regs *regs)
{
        if (
#ifdef CONFIG_X86_32
            regno == GDB_SS || regno == GDB_FS || regno == GDB_GS ||
#endif
            regno == GDB_SP || regno == GDB_ORIG_AX)
                return 0;

        if (dbg_reg_def[regno].offset != -1)
                memcpy((void *)regs + dbg_reg_def[regno].offset, mem,
                       dbg_reg_def[regno].size);
        return 0;
}
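
/*
 * dbg_get_reg() copies saved register @regno out of @regs into @mem and
 * returns the register's name, or NULL for an out-of-range index.  On
 * 32-bit kernels SS and SP are synthesized for traps that did not come
 * from user mode, and FS/GS are always reported as 0xFFFF.
 */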
char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
{
        if (regno == GDB_ORIG_AX) {
                memcpy(mem, &regs->orig_ax, sizeof(regs->orig_ax));
                return "orig_ax";
        }
        if (regno >= DBG_MAX_REG_NUM || regno < 0)
                return NULL;

        if (dbg_reg_def[regno].offset != -1)
                memcpy(mem, (void *)regs + dbg_reg_def[regno].offset,
                       dbg_reg_def[regno].size);

#ifdef CONFIG_X86_32
        switch (regno) {
        case GDB_SS:
                if (!user_mode_vm(regs))
                        *(unsigned long *)mem = __KERNEL_DS;
                break;
        case GDB_SP:
                if (!user_mode_vm(regs))
                        *(unsigned long *)mem = kernel_stack_pointer(regs);
                break;
        case GDB_GS:
        case GDB_FS:
                *(unsigned long *)mem = 0xFFFF;
                break;
        }
#endif
        return dbg_reg_def[regno].name;
}

/**
 * sleeping_thread_to_gdb_regs - Convert ptrace regs to GDB regs
 * @gdb_regs: A pointer to hold the registers in the order GDB wants.
 * @p: The &struct task_struct of the desired process.
 *
 * Convert the register values of the sleeping process in @p to
 * the format that GDB expects.
 * This function is called when kgdb does not have access to the
 * &struct pt_regs and therefore it should fill the gdb registers
 * @gdb_regs with what has been saved in &struct thread_struct
 * thread field during switch_to.
 */
void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
{
#ifndef CONFIG_X86_32
        u32 *gdb_regs32 = (u32 *)gdb_regs;
#endif
        gdb_regs[GDB_AX] = 0;
        gdb_regs[GDB_BX] = 0;
        gdb_regs[GDB_CX] = 0;
        gdb_regs[GDB_DX] = 0;
        gdb_regs[GDB_SI] = 0;
        gdb_regs[GDB_DI] = 0;
        gdb_regs[GDB_BP] = *(unsigned long *)p->thread.sp;
#ifdef CONFIG_X86_32
        gdb_regs[GDB_DS] = __KERNEL_DS;
        gdb_regs[GDB_ES] = __KERNEL_DS;
        gdb_regs[GDB_PS] = 0;
        gdb_regs[GDB_CS] = __KERNEL_CS;
        gdb_regs[GDB_PC] = p->thread.ip;
        gdb_regs[GDB_SS] = __KERNEL_DS;
        gdb_regs[GDB_FS] = 0xFFFF;
        gdb_regs[GDB_GS] = 0xFFFF;
#else
        gdb_regs32[GDB_PS] = *(unsigned long *)(p->thread.sp + 8);
        gdb_regs32[GDB_CS] = __KERNEL_CS;
        gdb_regs32[GDB_SS] = __KERNEL_DS;
        gdb_regs[GDB_PC] = 0;
        gdb_regs[GDB_R8] = 0;
        gdb_regs[GDB_R9] = 0;
        gdb_regs[GDB_R10] = 0;
        gdb_regs[GDB_R11] = 0;
        gdb_regs[GDB_R12] = 0;
        gdb_regs[GDB_R13] = 0;
        gdb_regs[GDB_R14] = 0;
        gdb_regs[GDB_R15] = 0;
#endif
        gdb_regs[GDB_SP] = p->thread.sp;
}
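
/*
 * Book-keeping for the hardware breakpoints kgdb manages: one slot per
 * x86 debug address register, each backed by per-cpu perf events once
 * the hw_breakpoint layer is available (i.e. once dbg_is_early is false).
 */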
static struct hw_breakpoint {
        unsigned enabled;
        unsigned long addr;
        int len;
        int type;
        struct perf_event * __percpu *pev;
} breakinfo[HBP_NUM];

static unsigned long early_dr7;
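
/*
 * Re-install every enabled kgdb breakpoint on the current CPU.  During
 * early debugging the debug registers are programmed directly; later on
 * the per-cpu perf events are updated and installed through the
 * hw_breakpoint layer instead.
 */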
static void kgdb_correct_hw_break(void)
{
        int breakno;

        for (breakno = 0; breakno < HBP_NUM; breakno++) {
                struct perf_event *bp;
                struct arch_hw_breakpoint *info;
                int val;
                int cpu = raw_smp_processor_id();

                if (!breakinfo[breakno].enabled)
                        continue;
                if (dbg_is_early) {
                        set_debugreg(breakinfo[breakno].addr, breakno);
                        early_dr7 |= encode_dr7(breakno,
                                                breakinfo[breakno].len,
                                                breakinfo[breakno].type);
                        set_debugreg(early_dr7, 7);
                        continue;
                }
                bp = *per_cpu_ptr(breakinfo[breakno].pev, cpu);
                info = counter_arch_bp(bp);
                if (bp->attr.disabled != 1)
                        continue;
                bp->attr.bp_addr = breakinfo[breakno].addr;
                bp->attr.bp_len = breakinfo[breakno].len;
                bp->attr.bp_type = breakinfo[breakno].type;
                info->address = breakinfo[breakno].addr;
                info->len = breakinfo[breakno].len;
                info->type = breakinfo[breakno].type;
                val = arch_install_hw_breakpoint(bp);
                if (!val)
                        bp->attr.disabled = 0;
        }
        if (!dbg_is_early)
                hw_breakpoint_restore();
}
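
/*
 * Reserve a breakpoint slot on every online CPU for breakpoint @breakno,
 * rolling back the CPUs already reserved if one of the reservations
 * fails.
 */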
static int hw_break_reserve_slot(int breakno)
{
        int cpu;
        int cnt = 0;
        struct perf_event **pevent;

        if (dbg_is_early)
                return 0;

        for_each_online_cpu(cpu) {
                cnt++;
                pevent = per_cpu_ptr(breakinfo[breakno].pev, cpu);
                if (dbg_reserve_bp_slot(*pevent))
                        goto fail;
        }

        return 0;

fail:
        for_each_online_cpu(cpu) {
                cnt--;
                if (!cnt)
                        break;
                pevent = per_cpu_ptr(breakinfo[breakno].pev, cpu);
                dbg_release_bp_slot(*pevent);
        }
        return -1;
}

static int hw_break_release_slot(int breakno)
{
        struct perf_event **pevent;
        int cpu;

        if (dbg_is_early)
                return 0;

        for_each_online_cpu(cpu) {
                pevent = per_cpu_ptr(breakinfo[breakno].pev, cpu);
                if (dbg_release_bp_slot(*pevent))
                        /*
                         * The debugger is responsible for handling the
                         * retry on remove failure.
                         */
                        return -1;
        }
        return 0;
}

static int
kgdb_remove_hw_break(unsigned long addr, int len, enum kgdb_bptype bptype)
{
        int i;

        for (i = 0; i < HBP_NUM; i++)
                if (breakinfo[i].addr == addr && breakinfo[i].enabled)
                        break;
        if (i == HBP_NUM)
                return -1;

        if (hw_break_release_slot(i)) {
                printk(KERN_ERR "Cannot remove hw breakpoint at %lx\n", addr);
                return -1;
        }
        breakinfo[i].enabled = 0;

        return 0;
}

static void kgdb_remove_all_hw_break(void)
{
        int i;
        int cpu = raw_smp_processor_id();
        struct perf_event *bp;

        for (i = 0; i < HBP_NUM; i++) {
                if (!breakinfo[i].enabled)
                        continue;
                bp = *per_cpu_ptr(breakinfo[i].pev, cpu);
                if (!bp->attr.disabled) {
                        arch_uninstall_hw_breakpoint(bp);
                        bp->attr.disabled = 1;
                        continue;
                }
                if (dbg_is_early)
                        early_dr7 &= ~encode_dr7(i, breakinfo[i].len,
                                                 breakinfo[i].type);
                else if (hw_break_release_slot(i))
                        printk(KERN_ERR "KGDB: hw bpt remove failed %lx\n",
                               breakinfo[i].addr);
                breakinfo[i].enabled = 0;
        }
}
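
/*
 * Claim a free slot in breakinfo[], translate the generic kgdb
 * breakpoint type and length into their x86 encodings, and reserve the
 * slot on all CPUs.  The debug registers themselves are only written
 * later, from kgdb_correct_hw_break().
 */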
static int
kgdb_set_hw_break(unsigned long addr, int len, enum kgdb_bptype bptype)
{
        int i;

        for (i = 0; i < HBP_NUM; i++)
                if (!breakinfo[i].enabled)
                        break;
        if (i == HBP_NUM)
                return -1;

        switch (bptype) {
        case BP_HARDWARE_BREAKPOINT:
                len = 1;
                breakinfo[i].type = X86_BREAKPOINT_EXECUTE;
                break;
        case BP_WRITE_WATCHPOINT:
                breakinfo[i].type = X86_BREAKPOINT_WRITE;
                break;
        case BP_ACCESS_WATCHPOINT:
                breakinfo[i].type = X86_BREAKPOINT_RW;
                break;
        default:
                return -1;
        }
        switch (len) {
        case 1:
                breakinfo[i].len = X86_BREAKPOINT_LEN_1;
                break;
        case 2:
                breakinfo[i].len = X86_BREAKPOINT_LEN_2;
                break;
        case 4:
                breakinfo[i].len = X86_BREAKPOINT_LEN_4;
                break;
#ifdef CONFIG_X86_64
        case 8:
                breakinfo[i].len = X86_BREAKPOINT_LEN_8;
                break;
#endif
        default:
                return -1;
        }
        breakinfo[i].addr = addr;
        if (hw_break_reserve_slot(i)) {
                breakinfo[i].addr = 0;
                return -1;
        }
        breakinfo[i].enabled = 1;

        return 0;
}

/**
 * kgdb_disable_hw_debug - Disable hardware debugging while we are in kgdb.
 * @regs: Current &struct pt_regs.
 *
 * This function will be called if the particular architecture must
 * disable hardware debugging while it is processing gdb packets or
 * handling an exception.
 */
static void kgdb_disable_hw_debug(struct pt_regs *regs)
{
        int i;
        int cpu = raw_smp_processor_id();
        struct perf_event *bp;

        /* Disable hardware debugging while we are in kgdb: */
        set_debugreg(0UL, 7);
        for (i = 0; i < HBP_NUM; i++) {
                if (!breakinfo[i].enabled)
                        continue;
                if (dbg_is_early) {
                        early_dr7 &= ~encode_dr7(i, breakinfo[i].len,
                                                 breakinfo[i].type);
                        continue;
                }
                bp = *per_cpu_ptr(breakinfo[i].pev, cpu);
                if (bp->attr.disabled == 1)
                        continue;
                arch_uninstall_hw_breakpoint(bp);
                bp->attr.disabled = 1;
        }
}

#ifdef CONFIG_SMP
/**
 * kgdb_roundup_cpus - Get other CPUs into a holding pattern
 * @flags: Current IRQ state
 *
 * On SMP systems, we need to get the attention of the other CPUs
 * and get them into a known state. This should do what is needed
 * to get the other CPUs to call kgdb_wait(). Note that on some arches,
 * the NMI approach is not used for rounding up all the CPUs. For example,
 * in case of MIPS, smp_call_function() is used to roundup CPUs. In
 * this case, we have to make sure that interrupts are enabled before
 * calling smp_call_function(). The argument to this function is
 * the flags that will be used when restoring the interrupts. There is
 * a local_irq_save() call before kgdb_roundup_cpus().
 *
 * On non-SMP systems, this is not called.
 */
void kgdb_roundup_cpus(unsigned long flags)
{
        apic->send_IPI_allbutself(APIC_DM_NMI);
}
#endif

/**
 * kgdb_arch_handle_exception - Handle architecture specific GDB packets.
 * @e_vector: The error vector of the exception that happened.
 * @signo: The signal number of the exception that happened.
 * @err_code: The error code of the exception that happened.
 * @remcomInBuffer: The buffer of the packet we have read.
 * @remcomOutBuffer: The buffer of %BUFMAX bytes to write a packet into.
 * @linux_regs: The &struct pt_regs of the current process.
 *
 * This function MUST handle the 'c' and 's' command packets,
 * as well as packets to set / remove a hardware breakpoint, if used.
 * If there are additional packets which the hardware needs to handle,
 * they are handled here. The code should return -1 if it wants to
 * process more packets, and a %0 or %1 if it wants to exit from the
 * kgdb callback.
 */
int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
                               char *remcomInBuffer, char *remcomOutBuffer,
                               struct pt_regs *linux_regs)
{
        unsigned long addr;
        char *ptr;

        switch (remcomInBuffer[0]) {
        case 'c':
        case 's':
                /* try to read optional parameter, pc unchanged if no parm */
                ptr = &remcomInBuffer[1];
                if (kgdb_hex2long(&ptr, &addr))
                        linux_regs->ip = addr;
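                /*
                 * Deliberate fall through: 'c' and 's' share the
                 * trace-flag handling below with 'D' and 'k'.
                 */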
        case 'D':
        case 'k':
                /* clear the trace bit */
                linux_regs->flags &= ~X86_EFLAGS_TF;
                atomic_set(&kgdb_cpu_doing_single_step, -1);

                /* set the trace bit if we're stepping */
                if (remcomInBuffer[0] == 's') {
                        linux_regs->flags |= X86_EFLAGS_TF;
                        atomic_set(&kgdb_cpu_doing_single_step,
                                   raw_smp_processor_id());
                }

                return 0;
        }

        /* this means that we do not want to exit from the handler: */
        return -1;
}

static inline int
single_step_cont(struct pt_regs *regs, struct die_args *args)
{
        /*
         * Single step exception from kernel space to user space so
         * eat the exception and continue the process:
         */
        printk(KERN_ERR "KGDB: trap/step from kernel to user space, "
                        "resuming...\n");
        kgdb_arch_handle_exception(args->trapnr, args->signr,
                                   args->err, "c", "", regs);
        /*
         * Reset the BS bit in dr6 (pointed by args->err) to
         * denote completion of processing
         */
        (*(unsigned long *)ERR_PTR(args->err)) &= ~DR_STEP;

        return NOTIFY_STOP;
}
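
/*
 * NMI handling for CPU roundup: a CPU that is pulled into the debugger
 * via an NMI remembers that fact and then swallows the next "unknown"
 * NMI so it is not reported as spurious.
 */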
static int was_in_debug_nmi[NR_CPUS];

static int kgdb_nmi_handler(unsigned int cmd, struct pt_regs *regs)
{
        switch (cmd) {
        case NMI_LOCAL:
                if (atomic_read(&kgdb_active) != -1) {
                        /* KGDB CPU roundup */
                        kgdb_nmicallback(raw_smp_processor_id(), regs);
                        was_in_debug_nmi[raw_smp_processor_id()] = 1;
                        touch_nmi_watchdog();
                        return NMI_HANDLED;
                }
                break;

        case NMI_UNKNOWN:
                if (was_in_debug_nmi[raw_smp_processor_id()]) {
                        was_in_debug_nmi[raw_smp_processor_id()] = 0;
                        return NMI_HANDLED;
                }
                break;
        default:
                /* do nothing */
                break;
        }
        return NMI_DONE;
}
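
/*
 * Common die-notifier backend: exceptions that belong to user space are
 * filtered out, everything else is handed to the generic
 * kgdb_handle_exception() entry point.
 */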
static int __kgdb_notify(struct die_args *args, unsigned long cmd)
{
        struct pt_regs *regs = args->regs;

        switch (cmd) {
        case DIE_DEBUG:
                if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
                        if (user_mode(regs))
                                return single_step_cont(regs, args);
                        break;
                } else if (test_thread_flag(TIF_SINGLESTEP))
                        /*
                         * This means a user thread is single stepping
                         * a system call which should be ignored.
                         */
                        return NOTIFY_DONE;
                /* fall through */
        default:
                if (user_mode(regs))
                        return NOTIFY_DONE;
        }

        if (kgdb_handle_exception(args->trapnr, args->signr, cmd, regs))
                return NOTIFY_DONE;

        /* Must touch the watchdog before returning to normal operation */
        touch_nmi_watchdog();
        return NOTIFY_STOP;
}

int kgdb_ll_trap(int cmd, const char *str,
                 struct pt_regs *regs, long err, int trap, int sig)
{
        struct die_args args = {
                .regs   = regs,
                .str    = str,
                .err    = err,
                .trapnr = trap,
                .signr  = sig,
        };

        if (!kgdb_io_module_registered)
                return NOTIFY_DONE;

        return __kgdb_notify(&args, cmd);
}

static int
kgdb_notify(struct notifier_block *self, unsigned long cmd, void *ptr)
{
        unsigned long flags;
        int ret;

        local_irq_save(flags);
        ret = __kgdb_notify(ptr, cmd);
        local_irq_restore(flags);

        return ret;
}

static struct notifier_block kgdb_notifier = {
        .notifier_call = kgdb_notify,
};

/**
 * kgdb_arch_init - Perform any architecture specific initialization.
 *
 * This function will handle the initialization of any architecture
 * specific callbacks.
 */
int kgdb_arch_init(void)
{
        int retval;

        retval = register_die_notifier(&kgdb_notifier);
        if (retval)
                goto out;

        retval = register_nmi_handler(NMI_LOCAL, kgdb_nmi_handler,
                                      0, "kgdb");
        if (retval)
                goto out1;

        retval = register_nmi_handler(NMI_UNKNOWN, kgdb_nmi_handler,
                                      0, "kgdb");
        if (retval)
                goto out2;

        return retval;

out2:
        unregister_nmi_handler(NMI_LOCAL, "kgdb");
out1:
        unregister_die_notifier(&kgdb_notifier);
out:
        return retval;
}
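
/*
 * Overflow handler attached to the perf events backing kgdb's hardware
 * breakpoints: it flags every enabled kgdb breakpoint in the task's
 * debugreg6 shadow so the debug-exception path can see that one fired.
 */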
static void kgdb_hw_overflow_handler(struct perf_event *event,
                struct perf_sample_data *data, struct pt_regs *regs)
{
        struct task_struct *tsk = current;
        int i;

        for (i = 0; i < 4; i++)
                if (breakinfo[i].enabled)
                        tsk->thread.debugreg6 |= (DR_TRAP0 << i);
}

void kgdb_arch_late(void)
{
        int i, cpu;
        struct perf_event_attr attr;
        struct perf_event **pevent;

        /*
         * Pre-allocate the hw breakpoint structures in the non-atomic
         * portion of kgdb because this operation requires mutexes to
         * complete.
         */
        hw_breakpoint_init(&attr);
        attr.bp_addr = (unsigned long)kgdb_arch_init;
        attr.bp_len = HW_BREAKPOINT_LEN_1;
        attr.bp_type = HW_BREAKPOINT_W;
        attr.disabled = 1;
        for (i = 0; i < HBP_NUM; i++) {
                if (breakinfo[i].pev)
                        continue;
                breakinfo[i].pev = register_wide_hw_breakpoint(&attr, NULL, NULL);
                if (IS_ERR((void * __force)breakinfo[i].pev)) {
                        printk(KERN_ERR "kgdb: Could not allocate hw "
                               "breakpoints\nDisabling the kernel debugger\n");
                        breakinfo[i].pev = NULL;
                        kgdb_arch_exit();
                        return;
                }
                for_each_online_cpu(cpu) {
                        pevent = per_cpu_ptr(breakinfo[i].pev, cpu);
                        pevent[0]->hw.sample_period = 1;
                        pevent[0]->overflow_handler = kgdb_hw_overflow_handler;
                        if (pevent[0]->destroy != NULL) {
                                pevent[0]->destroy = NULL;
                                release_bp_slot(*pevent);
                        }
                }
        }
}

/**
 * kgdb_arch_exit - Perform any architecture specific uninitialization.
 *
 * This function will handle the uninitialization of any architecture
 * specific callbacks, for dynamic registration and unregistration.
 */
void kgdb_arch_exit(void)
{
        int i;

        for (i = 0; i < 4; i++) {
                if (breakinfo[i].pev) {
                        unregister_wide_hw_breakpoint(breakinfo[i].pev);
                        breakinfo[i].pev = NULL;
                }
        }
        unregister_nmi_handler(NMI_UNKNOWN, "kgdb");
        unregister_nmi_handler(NMI_LOCAL, "kgdb");
        unregister_die_notifier(&kgdb_notifier);
}

/**
 * kgdb_skipexception - Bail out of KGDB when we've been triggered.
 * @exception: Exception vector number
 * @regs: Current &struct pt_regs.
 *
 * On some architectures we need to skip a breakpoint exception when
 * it occurs after a breakpoint has been removed.
 *
 * Skip an int3 exception when it occurs after a breakpoint has been
 * removed. Backtrack eip by 1 since the int3 would have caused it to
 * increment by 1.
 */
int kgdb_skipexception(int exception, struct pt_regs *regs)
{
        if (exception == 3 && kgdb_isremovedbreak(regs->ip - 1)) {
                regs->ip -= 1;
                return 1;
        }
        return 0;
}
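
/*
 * Report the PC gdb should see: after an int3 trap the saved ip points
 * one byte past the breakpoint instruction, so back it up by one.
 */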
unsigned long kgdb_arch_pc(int exception, struct pt_regs *regs)
{
        if (exception == 3)
                return instruction_pointer(regs) - 1;
        return instruction_pointer(regs);
}

void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long ip)
{
        regs->ip = ip;
}

struct kgdb_arch arch_kgdb_ops = {
        /* Breakpoint instruction: */
        .gdb_bpt_instr = { 0xcc },
        .flags = KGDB_HW_BREAKPOINT,
        .set_hw_breakpoint = kgdb_set_hw_break,
        .remove_hw_breakpoint = kgdb_remove_hw_break,
        .disable_hw_break = kgdb_disable_hw_debug,
        .remove_all_hw_break = kgdb_remove_all_hw_break,
        .correct_hw_break = kgdb_correct_hw_break,
};