kgdb.c 10 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389
  1. /*
  2. * SuperH KGDB support
  3. *
  4. * Copyright (C) 2008 - 2012 Paul Mundt
  5. *
  6. * Single stepping taken from the old stub by Henry Bell and Jeremy Siegel.
  7. *
  8. * This file is subject to the terms and conditions of the GNU General Public
  9. * License. See the file "COPYING" in the main directory of this archive
  10. * for more details.
  11. */
  12. #include <linux/kgdb.h>
  13. #include <linux/kdebug.h>
  14. #include <linux/irq.h>
  15. #include <linux/io.h>
  16. #include <asm/cacheflush.h>
  17. #include <asm/traps.h>
/* Macros for single step instruction identification */
/* Conditional branches (BT/BF and their delay-slot BTS/BFS forms) */
#define OPCODE_BT(op)		(((op) & 0xff00) == 0x8900)
#define OPCODE_BF(op)		(((op) & 0xff00) == 0x8b00)
/* 8-bit signed displacement of BT/BF/BTS/BFS, sign-extended and *2 */
#define OPCODE_BTF_DISP(op)	(((op) & 0x80) ? (((op) | 0xffffff80) << 1) : \
				 (((op) & 0x7f ) << 1))
#define OPCODE_BFS(op)		(((op) & 0xff00) == 0x8f00)
#define OPCODE_BTS(op)		(((op) & 0xff00) == 0x8d00)
/* Unconditional PC-relative branches */
#define OPCODE_BRA(op)		(((op) & 0xf000) == 0xa000)
/* 12-bit signed displacement of BRA, sign-extended and *2 */
#define OPCODE_BRA_DISP(op)	(((op) & 0x800) ? (((op) | 0xfffff800) << 1) : \
				 (((op) & 0x7ff) << 1))
/* Register-relative branches; *_REG extracts the Rn field (bits 8-11) */
#define OPCODE_BRAF(op)		(((op) & 0xf0ff) == 0x0023)
#define OPCODE_BRAF_REG(op)	(((op) & 0x0f00) >> 8)
#define OPCODE_BSR(op)		(((op) & 0xf000) == 0xb000)
/* 12-bit signed displacement of BSR, sign-extended and *2 */
#define OPCODE_BSR_DISP(op)	(((op) & 0x800) ? (((op) | 0xfffff800) << 1) : \
				 (((op) & 0x7ff) << 1))
#define OPCODE_BSRF(op)		(((op) & 0xf0ff) == 0x0003)
#define OPCODE_BSRF_REG(op)	(((op) >> 8) & 0xf)
#define OPCODE_JMP(op)		(((op) & 0xf0ff) == 0x402b)
#define OPCODE_JMP_REG(op)	(((op) >> 8) & 0xf)
#define OPCODE_JSR(op)		(((op) & 0xf0ff) == 0x400b)
#define OPCODE_JSR_REG(op)	(((op) >> 8) & 0xf)
/* Returns: RTS goes to PR, RTE restores from the stack */
#define OPCODE_RTS(op)		((op) == 0xb)
#define OPCODE_RTE(op)		((op) == 0x2b)

/* SR.T — the condition bit tested by BT/BF */
#define SR_T_BIT_MASK		0x1

/* Single-step trap instruction planted by do_single_step() (trapa #0x3d) */
#define STEP_OPCODE		0xc33d
/*
 * Calculate the new address for after a step.
 *
 * Decodes the instruction at the current pc and returns the address of
 * the instruction that will execute next, taking branch conditions
 * (SR.T) and register-indirect targets into account.
 */
static short *get_step_address(struct pt_regs *linux_regs)
{
	insn_size_t op = __raw_readw(linux_regs->pc);
	long addr;

	/* BT: taken when SR.T set — target is pc + 4 + disp*2 */
	if (OPCODE_BT(op)) {
		if (linux_regs->sr & SR_T_BIT_MASK)
			addr = linux_regs->pc + 4 + OPCODE_BTF_DISP(op);
		else
			addr = linux_regs->pc + 2;	/* fall through to next insn */
	}

	/* BTS */
	else if (OPCODE_BTS(op)) {
		if (linux_regs->sr & SR_T_BIT_MASK)
			addr = linux_regs->pc + 4 + OPCODE_BTF_DISP(op);
		else
			addr = linux_regs->pc + 4;	/* Not in delay slot */
	}

	/* BF: taken when SR.T clear */
	else if (OPCODE_BF(op)) {
		if (!(linux_regs->sr & SR_T_BIT_MASK))
			addr = linux_regs->pc + 4 + OPCODE_BTF_DISP(op);
		else
			addr = linux_regs->pc + 2;
	}

	/* BFS */
	else if (OPCODE_BFS(op)) {
		if (!(linux_regs->sr & SR_T_BIT_MASK))
			addr = linux_regs->pc + 4 + OPCODE_BTF_DISP(op);
		else
			addr = linux_regs->pc + 4;	/* Not in delay slot */
	}

	/* BRA */
	else if (OPCODE_BRA(op))
		addr = linux_regs->pc + 4 + OPCODE_BRA_DISP(op);

	/* BRAF: pc-relative with register offset */
	else if (OPCODE_BRAF(op))
		addr = linux_regs->pc + 4
			+ linux_regs->regs[OPCODE_BRAF_REG(op)];

	/* BSR */
	else if (OPCODE_BSR(op))
		addr = linux_regs->pc + 4 + OPCODE_BSR_DISP(op);

	/* BSRF */
	else if (OPCODE_BSRF(op))
		addr = linux_regs->pc + 4
			+ linux_regs->regs[OPCODE_BSRF_REG(op)];

	/* JMP: absolute target in Rn */
	else if (OPCODE_JMP(op))
		addr = linux_regs->regs[OPCODE_JMP_REG(op)];

	/* JSR */
	else if (OPCODE_JSR(op))
		addr = linux_regs->regs[OPCODE_JSR_REG(op)];

	/* RTS: return target is the procedure register */
	else if (OPCODE_RTS(op))
		addr = linux_regs->pr;

	/* RTE: return address taken from the top of the stack (r15) */
	else if (OPCODE_RTE(op))
		addr = linux_regs->regs[15];

	/* Other: straight-line flow, advance by the instruction's size */
	else
		addr = linux_regs->pc + instruction_size(op);

	flush_icache_range(addr, addr + instruction_size(op));

	return (short *)addr;
}
  108. /*
  109. * Replace the instruction immediately after the current instruction
  110. * (i.e. next in the expected flow of control) with a trap instruction,
  111. * so that returning will cause only a single instruction to be executed.
  112. * Note that this model is slightly broken for instructions with delay
  113. * slots (e.g. B[TF]S, BSR, BRA etc), where both the branch and the
  114. * instruction in the delay slot will be executed.
  115. */
  116. static unsigned long stepped_address;
  117. static insn_size_t stepped_opcode;
  118. static void do_single_step(struct pt_regs *linux_regs)
  119. {
  120. /* Determine where the target instruction will send us to */
  121. unsigned short *addr = get_step_address(linux_regs);
  122. stepped_address = (int)addr;
  123. /* Replace it */
  124. stepped_opcode = __raw_readw((long)addr);
  125. *addr = STEP_OPCODE;
  126. /* Flush and return */
  127. flush_icache_range((long)addr, (long)addr +
  128. instruction_size(stepped_opcode));
  129. }
  130. /* Undo a single step */
  131. static void undo_single_step(struct pt_regs *linux_regs)
  132. {
  133. /* If we have stepped, put back the old instruction */
  134. /* Use stepped_address in case we stopped elsewhere */
  135. if (stepped_opcode != 0) {
  136. __raw_writew(stepped_opcode, stepped_address);
  137. flush_icache_range(stepped_address, stepped_address + 2);
  138. }
  139. stepped_opcode = 0;
  140. }
/*
 * gdb register descriptions, indexed by the GDB_* register numbers
 * (see dbg_get_reg()/dbg_set_reg()).  Each entry maps a gdb register
 * onto its offset within struct pt_regs.
 */
struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] = {
	{ "r0", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[0]) },
	{ "r1", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[1]) },
	{ "r2", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[2]) },
	{ "r3", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[3]) },
	{ "r4", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[4]) },
	{ "r5", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[5]) },
	{ "r6", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[6]) },
	{ "r7", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[7]) },
	{ "r8", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[8]) },
	{ "r9", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[9]) },
	{ "r10", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[10]) },
	{ "r11", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[11]) },
	{ "r12", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[12]) },
	{ "r13", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[13]) },
	{ "r14", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[14]) },
	{ "r15", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[15]) },
	{ "pc", GDB_SIZEOF_REG, offsetof(struct pt_regs, pc) },
	{ "pr", GDB_SIZEOF_REG, offsetof(struct pt_regs, pr) },
	{ "sr", GDB_SIZEOF_REG, offsetof(struct pt_regs, sr) },
	{ "gbr", GDB_SIZEOF_REG, offsetof(struct pt_regs, gbr) },
	{ "mach", GDB_SIZEOF_REG, offsetof(struct pt_regs, mach) },
	{ "macl", GDB_SIZEOF_REG, offsetof(struct pt_regs, macl) },
	/* vbr has no pt_regs slot; offset -1 marks it as special-cased */
	{ "vbr", GDB_SIZEOF_REG, -1 },
};
  166. int dbg_set_reg(int regno, void *mem, struct pt_regs *regs)
  167. {
  168. if (regno < 0 || regno >= DBG_MAX_REG_NUM)
  169. return -EINVAL;
  170. if (dbg_reg_def[regno].offset != -1)
  171. memcpy((void *)regs + dbg_reg_def[regno].offset, mem,
  172. dbg_reg_def[regno].size);
  173. return 0;
  174. }
  175. char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
  176. {
  177. if (regno >= DBG_MAX_REG_NUM || regno < 0)
  178. return NULL;
  179. if (dbg_reg_def[regno].size != -1)
  180. memcpy(mem, (void *)regs + dbg_reg_def[regno].offset,
  181. dbg_reg_def[regno].size);
  182. switch (regno) {
  183. case GDB_VBR:
  184. __asm__ __volatile__ ("stc vbr, %0" : "=r" (mem));
  185. break;
  186. }
  187. return dbg_reg_def[regno].name;
  188. }
  189. void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
  190. {
  191. struct pt_regs *thread_regs = task_pt_regs(p);
  192. int reg;
  193. /* Initialize to zero */
  194. for (reg = 0; reg < DBG_MAX_REG_NUM; reg++)
  195. gdb_regs[reg] = 0;
  196. /*
  197. * Copy out GP regs 8 to 14.
  198. *
  199. * switch_to() relies on SR.RB toggling, so regs 0->7 are banked
  200. * and need privileged instructions to get to. The r15 value we
  201. * fetch from the thread info directly.
  202. */
  203. for (reg = GDB_R8; reg < GDB_R15; reg++)
  204. gdb_regs[reg] = thread_regs->regs[reg];
  205. gdb_regs[GDB_R15] = p->thread.sp;
  206. gdb_regs[GDB_PC] = p->thread.pc;
  207. /*
  208. * Additional registers we have context for
  209. */
  210. gdb_regs[GDB_PR] = thread_regs->pr;
  211. gdb_regs[GDB_GBR] = thread_regs->gbr;
  212. }
/*
 * Handle the 'c' (continue), 's' (step), 'D' (detach) and 'k' (kill)
 * packets from the remote gdb.  Returns 0 when the packet was handled
 * and the handler should be exited, -1 to stay in the handler.
 */
int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
			       char *remcomInBuffer, char *remcomOutBuffer,
			       struct pt_regs *linux_regs)
{
	unsigned long addr;
	char *ptr;

	/* Undo any stepping we may have done */
	undo_single_step(linux_regs);

	switch (remcomInBuffer[0]) {
	case 'c':
	case 's':
		/* try to read optional parameter, pc unchanged if no parm */
		ptr = &remcomInBuffer[1];
		if (kgdb_hex2long(&ptr, &addr))
			linux_regs->pc = addr;
		/* fallthrough — 'c'/'s' share the resume path below */
	case 'D':
	case 'k':
		atomic_set(&kgdb_cpu_doing_single_step, -1);

		if (remcomInBuffer[0] == 's') {
			/* Arm a step trap and note which CPU is stepping */
			do_single_step(linux_regs);
			kgdb_single_step = 1;

			atomic_set(&kgdb_cpu_doing_single_step,
				   raw_smp_processor_id());
		}

		return 0;
	}

	/* this means that we do not want to exit from the handler: */
	return -1;
}
  242. unsigned long kgdb_arch_pc(int exception, struct pt_regs *regs)
  243. {
  244. if (exception == 60)
  245. return instruction_pointer(regs) - 2;
  246. return instruction_pointer(regs);
  247. }
/* Let the debugger rewrite the resume pc. */
void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long ip)
{
	regs->pc = ip;
}
/*
 * The primary entry points for the kgdb debug trap table entries.
 */
BUILD_TRAP_HANDLER(singlestep)
{
	unsigned long flags;
	TRAP_HANDLER_DECL;

	local_irq_save(flags);
	/*
	 * Rewind pc so the debugger sees the address of the stepped
	 * instruction rather than the address after the trap.
	 * NOTE(review): sizes the rewind from the opcode at pc - 4 —
	 * presumably accounting for how far the trap entry advanced pc;
	 * confirm against the SH trap entry code.
	 */
	regs->pc -= instruction_size(__raw_readw(regs->pc - 4));
	kgdb_handle_exception(0, SIGTRAP, 0, regs);
	local_irq_restore(flags);
}
/* Cross-call target: report this CPU into the debugger core. */
static void kgdb_call_nmi_hook(void *ignored)
{
	kgdb_nmicallback(raw_smp_processor_id(), get_irq_regs());
}
/*
 * Pull the other CPUs into the debugger.  Interrupts are enabled just
 * around the cross-call (wait=0, so we do not block on the handlers) —
 * presumably so the call IPI can be serviced; restored to disabled
 * before returning.
 */
void kgdb_roundup_cpus(unsigned long flags)
{
	local_irq_enable();
	smp_call_function(kgdb_call_nmi_hook, NULL, 0);
	local_irq_disable();
}
  274. static int __kgdb_notify(struct die_args *args, unsigned long cmd)
  275. {
  276. int ret;
  277. switch (cmd) {
  278. case DIE_BREAKPOINT:
  279. /*
  280. * This means a user thread is single stepping
  281. * a system call which should be ignored
  282. */
  283. if (test_thread_flag(TIF_SINGLESTEP))
  284. return NOTIFY_DONE;
  285. ret = kgdb_handle_exception(args->trapnr & 0xff, args->signr,
  286. args->err, args->regs);
  287. if (ret)
  288. return NOTIFY_DONE;
  289. break;
  290. }
  291. return NOTIFY_STOP;
  292. }
/* die-notifier entry point: run __kgdb_notify() with interrupts off. */
static int
kgdb_notify(struct notifier_block *self, unsigned long cmd, void *ptr)
{
	unsigned long flags;
	int ret;

	local_irq_save(flags);
	ret = __kgdb_notify(ptr, cmd);
	local_irq_restore(flags);

	return ret;
}
static struct notifier_block kgdb_notifier = {
	.notifier_call	= kgdb_notify,
	/*
	 * Lowest-prio notifier priority, we want to be notified last:
	 */
	.priority	= -INT_MAX,
};
/* Hook kgdb into the die notifier chain. */
int kgdb_arch_init(void)
{
	return register_die_notifier(&kgdb_notifier);
}
/* Unhook kgdb from the die notifier chain. */
void kgdb_arch_exit(void)
{
	unregister_die_notifier(&kgdb_notifier);
}
struct kgdb_arch arch_kgdb_ops = {
	/* Breakpoint instruction: trapa #0x3c (byte order per endianness) */
#ifdef CONFIG_CPU_LITTLE_ENDIAN
	.gdb_bpt_instr		= { 0x3c, 0xc3 },
#else
	.gdb_bpt_instr		= { 0xc3, 0x3c },
#endif
};