/*
 * arch/sh/kernel/hw_breakpoint.c
 *
 * Unified kernel/user-space hardware breakpoint facility for the on-chip UBC.
 *
 * Copyright (C) 2009 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>
#include <linux/percpu.h>
#include <linux/kallsyms.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/io.h>
#include <asm/hw_breakpoint.h>
#include <asm/mmu_context.h>
  23. struct ubc_context {
  24. unsigned long pc;
  25. unsigned long state;
  26. };
  27. /* Per cpu ubc channel state */
  28. static DEFINE_PER_CPU(struct ubc_context, ubc_ctx[HBP_NUM]);
  29. /*
  30. * Stores the breakpoints currently in use on each breakpoint address
  31. * register for each cpus
  32. */
  33. static DEFINE_PER_CPU(struct perf_event *, bp_per_reg[HBP_NUM]);
  34. static int __init ubc_init(void)
  35. {
  36. __raw_writel(0, UBC_CAMR0);
  37. __raw_writel(0, UBC_CBR0);
  38. __raw_writel(0, UBC_CBCR);
  39. __raw_writel(UBC_CRR_BIE | UBC_CRR_PCB, UBC_CRR0);
  40. /* dummy read for write posting */
  41. (void)__raw_readl(UBC_CRR0);
  42. return 0;
  43. }
  44. arch_initcall(ubc_init);
  45. /*
  46. * Install a perf counter breakpoint.
  47. *
  48. * We seek a free UBC channel and use it for this breakpoint.
  49. *
  50. * Atomic: we hold the counter->ctx->lock and we only handle variables
  51. * and registers local to this cpu.
  52. */
  53. int arch_install_hw_breakpoint(struct perf_event *bp)
  54. {
  55. struct arch_hw_breakpoint *info = counter_arch_bp(bp);
  56. struct ubc_context *ubc_ctx;
  57. int i;
  58. for (i = 0; i < HBP_NUM; i++) {
  59. struct perf_event **slot = &__get_cpu_var(bp_per_reg[i]);
  60. if (!*slot) {
  61. *slot = bp;
  62. break;
  63. }
  64. }
  65. if (WARN_ONCE(i == HBP_NUM, "Can't find any breakpoint slot"))
  66. return -EBUSY;
  67. ubc_ctx = &__get_cpu_var(ubc_ctx[i]);
  68. ubc_ctx->pc = info->address;
  69. ubc_ctx->state = info->len | info->type;
  70. __raw_writel(UBC_CBR_CE | ubc_ctx->state, UBC_CBR0);
  71. __raw_writel(ubc_ctx->pc, UBC_CAR0);
  72. return 0;
  73. }
  74. /*
  75. * Uninstall the breakpoint contained in the given counter.
  76. *
  77. * First we search the debug address register it uses and then we disable
  78. * it.
  79. *
  80. * Atomic: we hold the counter->ctx->lock and we only handle variables
  81. * and registers local to this cpu.
  82. */
  83. void arch_uninstall_hw_breakpoint(struct perf_event *bp)
  84. {
  85. struct arch_hw_breakpoint *info = counter_arch_bp(bp);
  86. struct ubc_context *ubc_ctx;
  87. int i;
  88. for (i = 0; i < HBP_NUM; i++) {
  89. struct perf_event **slot = &__get_cpu_var(bp_per_reg[i]);
  90. if (*slot == bp) {
  91. *slot = NULL;
  92. break;
  93. }
  94. }
  95. if (WARN_ONCE(i == HBP_NUM, "Can't find any breakpoint slot"))
  96. return;
  97. ubc_ctx = &__get_cpu_var(ubc_ctx[i]);
  98. ubc_ctx->pc = 0;
  99. ubc_ctx->state &= ~(info->len | info->type);
  100. __raw_writel(ubc_ctx->pc, UBC_CBR0);
  101. __raw_writel(ubc_ctx->state, UBC_CAR0);
  102. }
  103. static int get_hbp_len(u16 hbp_len)
  104. {
  105. unsigned int len_in_bytes = 0;
  106. switch (hbp_len) {
  107. case SH_BREAKPOINT_LEN_1:
  108. len_in_bytes = 1;
  109. break;
  110. case SH_BREAKPOINT_LEN_2:
  111. len_in_bytes = 2;
  112. break;
  113. case SH_BREAKPOINT_LEN_4:
  114. len_in_bytes = 4;
  115. break;
  116. case SH_BREAKPOINT_LEN_8:
  117. len_in_bytes = 8;
  118. break;
  119. }
  120. return len_in_bytes;
  121. }
  122. /*
  123. * Check for virtual address in user space.
  124. */
  125. int arch_check_va_in_userspace(unsigned long va, u16 hbp_len)
  126. {
  127. unsigned int len;
  128. len = get_hbp_len(hbp_len);
  129. return (va <= TASK_SIZE - len);
  130. }
  131. /*
  132. * Check for virtual address in kernel space.
  133. */
  134. static int arch_check_va_in_kernelspace(unsigned long va, u8 hbp_len)
  135. {
  136. unsigned int len;
  137. len = get_hbp_len(hbp_len);
  138. return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE);
  139. }
  140. /*
  141. * Store a breakpoint's encoded address, length, and type.
  142. */
  143. static int arch_store_info(struct perf_event *bp)
  144. {
  145. struct arch_hw_breakpoint *info = counter_arch_bp(bp);
  146. /*
  147. * User-space requests will always have the address field populated
  148. * For kernel-addresses, either the address or symbol name can be
  149. * specified.
  150. */
  151. if (info->name)
  152. info->address = (unsigned long)kallsyms_lookup_name(info->name);
  153. if (info->address) {
  154. info->asid = get_asid();
  155. return 0;
  156. }
  157. return -EINVAL;
  158. }
  159. int arch_bp_generic_fields(int sh_len, int sh_type,
  160. int *gen_len, int *gen_type)
  161. {
  162. /* Len */
  163. switch (sh_len) {
  164. case SH_BREAKPOINT_LEN_1:
  165. *gen_len = HW_BREAKPOINT_LEN_1;
  166. break;
  167. case SH_BREAKPOINT_LEN_2:
  168. *gen_len = HW_BREAKPOINT_LEN_2;
  169. break;
  170. case SH_BREAKPOINT_LEN_4:
  171. *gen_len = HW_BREAKPOINT_LEN_4;
  172. break;
  173. case SH_BREAKPOINT_LEN_8:
  174. *gen_len = HW_BREAKPOINT_LEN_8;
  175. break;
  176. default:
  177. return -EINVAL;
  178. }
  179. /* Type */
  180. switch (sh_type) {
  181. case SH_BREAKPOINT_READ:
  182. *gen_type = HW_BREAKPOINT_R;
  183. case SH_BREAKPOINT_WRITE:
  184. *gen_type = HW_BREAKPOINT_W;
  185. break;
  186. case SH_BREAKPOINT_RW:
  187. *gen_type = HW_BREAKPOINT_W | HW_BREAKPOINT_R;
  188. break;
  189. default:
  190. return -EINVAL;
  191. }
  192. return 0;
  193. }
  194. static int arch_build_bp_info(struct perf_event *bp)
  195. {
  196. struct arch_hw_breakpoint *info = counter_arch_bp(bp);
  197. info->address = bp->attr.bp_addr;
  198. /* Len */
  199. switch (bp->attr.bp_len) {
  200. case HW_BREAKPOINT_LEN_1:
  201. info->len = SH_BREAKPOINT_LEN_1;
  202. break;
  203. case HW_BREAKPOINT_LEN_2:
  204. info->len = SH_BREAKPOINT_LEN_2;
  205. break;
  206. case HW_BREAKPOINT_LEN_4:
  207. info->len = SH_BREAKPOINT_LEN_4;
  208. break;
  209. case HW_BREAKPOINT_LEN_8:
  210. info->len = SH_BREAKPOINT_LEN_8;
  211. break;
  212. default:
  213. return -EINVAL;
  214. }
  215. /* Type */
  216. switch (bp->attr.bp_type) {
  217. case HW_BREAKPOINT_R:
  218. info->type = SH_BREAKPOINT_READ;
  219. break;
  220. case HW_BREAKPOINT_W:
  221. info->type = SH_BREAKPOINT_WRITE;
  222. break;
  223. case HW_BREAKPOINT_W | HW_BREAKPOINT_R:
  224. info->type = SH_BREAKPOINT_RW;
  225. break;
  226. default:
  227. return -EINVAL;
  228. }
  229. return 0;
  230. }
  231. /*
  232. * Validate the arch-specific HW Breakpoint register settings
  233. */
  234. int arch_validate_hwbkpt_settings(struct perf_event *bp,
  235. struct task_struct *tsk)
  236. {
  237. struct arch_hw_breakpoint *info = counter_arch_bp(bp);
  238. unsigned int align;
  239. int ret;
  240. ret = arch_build_bp_info(bp);
  241. if (ret)
  242. return ret;
  243. ret = -EINVAL;
  244. switch (info->len) {
  245. case SH_BREAKPOINT_LEN_1:
  246. align = 0;
  247. break;
  248. case SH_BREAKPOINT_LEN_2:
  249. align = 1;
  250. break;
  251. case SH_BREAKPOINT_LEN_4:
  252. align = 3;
  253. break;
  254. case SH_BREAKPOINT_LEN_8:
  255. align = 7;
  256. break;
  257. default:
  258. return ret;
  259. }
  260. ret = arch_store_info(bp);
  261. if (ret < 0)
  262. return ret;
  263. /*
  264. * Check that the low-order bits of the address are appropriate
  265. * for the alignment implied by len.
  266. */
  267. if (info->address & align)
  268. return -EINVAL;
  269. /* Check that the virtual address is in the proper range */
  270. if (tsk) {
  271. if (!arch_check_va_in_userspace(info->address, info->len))
  272. return -EFAULT;
  273. } else {
  274. if (!arch_check_va_in_kernelspace(info->address, info->len))
  275. return -EFAULT;
  276. }
  277. return 0;
  278. }
  279. /*
  280. * Release the user breakpoints used by ptrace
  281. */
  282. void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
  283. {
  284. int i;
  285. struct thread_struct *t = &tsk->thread;
  286. for (i = 0; i < HBP_NUM; i++) {
  287. unregister_hw_breakpoint(t->ptrace_bps[i]);
  288. t->ptrace_bps[i] = NULL;
  289. }
  290. }
  291. static int __kprobes hw_breakpoint_handler(struct die_args *args)
  292. {
  293. int cpu, i, rc = NOTIFY_STOP;
  294. struct perf_event *bp;
  295. unsigned long val;
  296. val = __raw_readl(UBC_CBR0);
  297. __raw_writel(val & ~UBC_CBR_CE, UBC_CBR0);
  298. cpu = get_cpu();
  299. for (i = 0; i < HBP_NUM; i++) {
  300. /*
  301. * The counter may be concurrently released but that can only
  302. * occur from a call_rcu() path. We can then safely fetch
  303. * the breakpoint, use its callback, touch its counter
  304. * while we are in an rcu_read_lock() path.
  305. */
  306. rcu_read_lock();
  307. bp = per_cpu(bp_per_reg[i], cpu);
  308. if (bp) {
  309. rc = NOTIFY_DONE;
  310. } else {
  311. rcu_read_unlock();
  312. break;
  313. }
  314. perf_bp_event(bp, args->regs);
  315. rcu_read_unlock();
  316. }
  317. if (bp) {
  318. struct arch_hw_breakpoint *info = counter_arch_bp(bp);
  319. __raw_writel(UBC_CBR_CE | info->len | info->type, UBC_CBR0);
  320. __raw_writel(info->address, UBC_CAR0);
  321. }
  322. put_cpu();
  323. return rc;
  324. }
  325. BUILD_TRAP_HANDLER(breakpoint)
  326. {
  327. unsigned long ex = lookup_exception_vector();
  328. TRAP_HANDLER_DECL;
  329. notify_die(DIE_BREAKPOINT, "breakpoint", regs, 0, ex, SIGTRAP);
  330. }
  331. /*
  332. * Handle debug exception notifications.
  333. */
  334. int __kprobes hw_breakpoint_exceptions_notify(struct notifier_block *unused,
  335. unsigned long val, void *data)
  336. {
  337. struct die_args *args = data;
  338. if (val != DIE_BREAKPOINT)
  339. return NOTIFY_DONE;
  340. /*
  341. * If the breakpoint hasn't been triggered by the UBC, it's
  342. * probably from a debugger, so don't do anything more here.
  343. */
  344. if (args->trapnr != 0x1e0)
  345. return NOTIFY_DONE;
  346. return hw_breakpoint_handler(data);
  347. }
  348. void hw_breakpoint_pmu_read(struct perf_event *bp)
  349. {
  350. /* TODO */
  351. }
  352. void hw_breakpoint_pmu_unthrottle(struct perf_event *bp)
  353. {
  354. /* TODO */
  355. }