  1. /*
  2. * arch/sh/kernel/hw_breakpoint.c
  3. *
  4. * Unified kernel/user-space hardware breakpoint facility for the on-chip UBC.
  5. *
  6. * Copyright (C) 2009 - 2010 Paul Mundt
  7. *
  8. * This file is subject to the terms and conditions of the GNU General Public
  9. * License. See the file "COPYING" in the main directory of this archive
  10. * for more details.
  11. */
  12. #include <linux/init.h>
  13. #include <linux/perf_event.h>
  14. #include <linux/hw_breakpoint.h>
  15. #include <linux/percpu.h>
  16. #include <linux/kallsyms.h>
  17. #include <linux/notifier.h>
  18. #include <linux/kprobes.h>
  19. #include <linux/kdebug.h>
  20. #include <linux/io.h>
  21. #include <linux/clk.h>
  22. #include <asm/hw_breakpoint.h>
  23. #include <asm/mmu_context.h>
  24. #include <asm/ptrace.h>
/*
 * Stores the breakpoints currently in use on each breakpoint address
 * register for each cpus
 */
static DEFINE_PER_CPU(struct perf_event *, bp_per_reg[HBP_NUM]);

/*
 * A dummy placeholder for early accesses until the CPUs get a chance to
 * register their UBCs later in the boot process.
 *
 * num_events == 0 makes every channel loop below a no-op, so the dummy
 * is safe to use before a real backend shows up via register_sh_ubc().
 */
static struct sh_ubc ubc_dummy = { .num_events = 0 };

/* Active UBC backend; replaced exactly once by register_sh_ubc(). */
static struct sh_ubc *sh_ubc __read_mostly = &ubc_dummy;
  36. /*
  37. * Install a perf counter breakpoint.
  38. *
  39. * We seek a free UBC channel and use it for this breakpoint.
  40. *
  41. * Atomic: we hold the counter->ctx->lock and we only handle variables
  42. * and registers local to this cpu.
  43. */
  44. int arch_install_hw_breakpoint(struct perf_event *bp)
  45. {
  46. struct arch_hw_breakpoint *info = counter_arch_bp(bp);
  47. int i;
  48. for (i = 0; i < sh_ubc->num_events; i++) {
  49. struct perf_event **slot = &__get_cpu_var(bp_per_reg[i]);
  50. if (!*slot) {
  51. *slot = bp;
  52. break;
  53. }
  54. }
  55. if (WARN_ONCE(i == sh_ubc->num_events, "Can't find any breakpoint slot"))
  56. return -EBUSY;
  57. clk_enable(sh_ubc->clk);
  58. sh_ubc->enable(info, i);
  59. return 0;
  60. }
  61. /*
  62. * Uninstall the breakpoint contained in the given counter.
  63. *
  64. * First we search the debug address register it uses and then we disable
  65. * it.
  66. *
  67. * Atomic: we hold the counter->ctx->lock and we only handle variables
  68. * and registers local to this cpu.
  69. */
  70. void arch_uninstall_hw_breakpoint(struct perf_event *bp)
  71. {
  72. struct arch_hw_breakpoint *info = counter_arch_bp(bp);
  73. int i;
  74. for (i = 0; i < sh_ubc->num_events; i++) {
  75. struct perf_event **slot = &__get_cpu_var(bp_per_reg[i]);
  76. if (*slot == bp) {
  77. *slot = NULL;
  78. break;
  79. }
  80. }
  81. if (WARN_ONCE(i == sh_ubc->num_events, "Can't find any breakpoint slot"))
  82. return;
  83. sh_ubc->disable(info, i);
  84. clk_disable(sh_ubc->clk);
  85. }
  86. static int get_hbp_len(u16 hbp_len)
  87. {
  88. unsigned int len_in_bytes = 0;
  89. switch (hbp_len) {
  90. case SH_BREAKPOINT_LEN_1:
  91. len_in_bytes = 1;
  92. break;
  93. case SH_BREAKPOINT_LEN_2:
  94. len_in_bytes = 2;
  95. break;
  96. case SH_BREAKPOINT_LEN_4:
  97. len_in_bytes = 4;
  98. break;
  99. case SH_BREAKPOINT_LEN_8:
  100. len_in_bytes = 8;
  101. break;
  102. }
  103. return len_in_bytes;
  104. }
  105. /*
  106. * Check for virtual address in kernel space.
  107. */
  108. int arch_check_bp_in_kernelspace(struct perf_event *bp)
  109. {
  110. unsigned int len;
  111. unsigned long va;
  112. struct arch_hw_breakpoint *info = counter_arch_bp(bp);
  113. va = info->address;
  114. len = get_hbp_len(info->len);
  115. return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE);
  116. }
  117. int arch_bp_generic_fields(int sh_len, int sh_type,
  118. int *gen_len, int *gen_type)
  119. {
  120. /* Len */
  121. switch (sh_len) {
  122. case SH_BREAKPOINT_LEN_1:
  123. *gen_len = HW_BREAKPOINT_LEN_1;
  124. break;
  125. case SH_BREAKPOINT_LEN_2:
  126. *gen_len = HW_BREAKPOINT_LEN_2;
  127. break;
  128. case SH_BREAKPOINT_LEN_4:
  129. *gen_len = HW_BREAKPOINT_LEN_4;
  130. break;
  131. case SH_BREAKPOINT_LEN_8:
  132. *gen_len = HW_BREAKPOINT_LEN_8;
  133. break;
  134. default:
  135. return -EINVAL;
  136. }
  137. /* Type */
  138. switch (sh_type) {
  139. case SH_BREAKPOINT_READ:
  140. *gen_type = HW_BREAKPOINT_R;
  141. case SH_BREAKPOINT_WRITE:
  142. *gen_type = HW_BREAKPOINT_W;
  143. break;
  144. case SH_BREAKPOINT_RW:
  145. *gen_type = HW_BREAKPOINT_W | HW_BREAKPOINT_R;
  146. break;
  147. default:
  148. return -EINVAL;
  149. }
  150. return 0;
  151. }
  152. static int arch_build_bp_info(struct perf_event *bp)
  153. {
  154. struct arch_hw_breakpoint *info = counter_arch_bp(bp);
  155. info->address = bp->attr.bp_addr;
  156. /* Len */
  157. switch (bp->attr.bp_len) {
  158. case HW_BREAKPOINT_LEN_1:
  159. info->len = SH_BREAKPOINT_LEN_1;
  160. break;
  161. case HW_BREAKPOINT_LEN_2:
  162. info->len = SH_BREAKPOINT_LEN_2;
  163. break;
  164. case HW_BREAKPOINT_LEN_4:
  165. info->len = SH_BREAKPOINT_LEN_4;
  166. break;
  167. case HW_BREAKPOINT_LEN_8:
  168. info->len = SH_BREAKPOINT_LEN_8;
  169. break;
  170. default:
  171. return -EINVAL;
  172. }
  173. /* Type */
  174. switch (bp->attr.bp_type) {
  175. case HW_BREAKPOINT_R:
  176. info->type = SH_BREAKPOINT_READ;
  177. break;
  178. case HW_BREAKPOINT_W:
  179. info->type = SH_BREAKPOINT_WRITE;
  180. break;
  181. case HW_BREAKPOINT_W | HW_BREAKPOINT_R:
  182. info->type = SH_BREAKPOINT_RW;
  183. break;
  184. default:
  185. return -EINVAL;
  186. }
  187. return 0;
  188. }
  189. /*
  190. * Validate the arch-specific HW Breakpoint register settings
  191. */
  192. int arch_validate_hwbkpt_settings(struct perf_event *bp)
  193. {
  194. struct arch_hw_breakpoint *info = counter_arch_bp(bp);
  195. unsigned int align;
  196. int ret;
  197. ret = arch_build_bp_info(bp);
  198. if (ret)
  199. return ret;
  200. ret = -EINVAL;
  201. switch (info->len) {
  202. case SH_BREAKPOINT_LEN_1:
  203. align = 0;
  204. break;
  205. case SH_BREAKPOINT_LEN_2:
  206. align = 1;
  207. break;
  208. case SH_BREAKPOINT_LEN_4:
  209. align = 3;
  210. break;
  211. case SH_BREAKPOINT_LEN_8:
  212. align = 7;
  213. break;
  214. default:
  215. return ret;
  216. }
  217. /*
  218. * For kernel-addresses, either the address or symbol name can be
  219. * specified.
  220. */
  221. if (info->name)
  222. info->address = (unsigned long)kallsyms_lookup_name(info->name);
  223. /*
  224. * Check that the low-order bits of the address are appropriate
  225. * for the alignment implied by len.
  226. */
  227. if (info->address & align)
  228. return -EINVAL;
  229. return 0;
  230. }
  231. /*
  232. * Release the user breakpoints used by ptrace
  233. */
  234. void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
  235. {
  236. int i;
  237. struct thread_struct *t = &tsk->thread;
  238. for (i = 0; i < sh_ubc->num_events; i++) {
  239. unregister_hw_breakpoint(t->ptrace_bps[i]);
  240. t->ptrace_bps[i] = NULL;
  241. }
  242. }
/*
 * Core UBC exception handler, called via the die notifier chain.
 *
 * Walks every triggered channel, dispatches the perf event for it, and
 * re-arms the channels that should stay active. Returns NOTIFY_STOP when
 * the exception was fully consumed here, NOTIFY_DONE otherwise.
 */
static int __kprobes hw_breakpoint_handler(struct die_args *args)
{
	int cpu, i, rc = NOTIFY_STOP;
	struct perf_event *bp;
	unsigned int cmf, resume_mask;

	/*
	 * Do an early return if none of the channels triggered.
	 */
	cmf = sh_ubc->triggered_mask();
	if (unlikely(!cmf))
		return NOTIFY_DONE;

	/*
	 * By default, resume all of the active channels.
	 */
	resume_mask = sh_ubc->active_mask();

	/*
	 * Disable breakpoints during exception handling.
	 */
	sh_ubc->disable_all();

	/* Pin this CPU: bp_per_reg is per-cpu state. */
	cpu = get_cpu();
	for (i = 0; i < sh_ubc->num_events; i++) {
		unsigned long event_mask = (1 << i);

		/* Skip channels that did not fire. */
		if (likely(!(cmf & event_mask)))
			continue;

		/*
		 * The counter may be concurrently released but that can only
		 * occur from a call_rcu() path. We can then safely fetch
		 * the breakpoint, use its callback, touch its counter
		 * while we are in an rcu_read_lock() path.
		 */
		rcu_read_lock();

		bp = per_cpu(bp_per_reg[i], cpu);
		if (bp)
			rc = NOTIFY_DONE;

		/*
		 * Reset the condition match flag to denote completion of
		 * exception handling.
		 */
		sh_ubc->clear_triggered_mask(event_mask);

		/*
		 * bp can be NULL due to concurrent perf counter
		 * removing.
		 */
		if (!bp) {
			rcu_read_unlock();
			break;
		}

		/*
		 * Don't restore the channel if the breakpoint is from
		 * ptrace, as it always operates in one-shot mode.
		 */
		if (bp->overflow_handler == ptrace_triggered)
			resume_mask &= ~(1 << i);

		perf_bp_event(bp, args->regs);

		/* Deliver the signal to userspace */
		if (!arch_check_bp_in_kernelspace(bp)) {
			siginfo_t info;

			info.si_signo = args->signr;
			info.si_errno = notifier_to_errno(rc);
			info.si_code = TRAP_HWBKPT;

			force_sig_info(args->signr, &info, current);
		}

		rcu_read_unlock();
	}

	/*
	 * NOTE(review): cmf is not updated inside the loop, so this test
	 * only fires when the triggered mask was clear to begin with --
	 * confirm whether it was meant to re-read the hardware mask.
	 */
	if (cmf == 0)
		rc = NOTIFY_DONE;

	/* Re-arm everything except the one-shot (ptrace) channels. */
	sh_ubc->enable_all(resume_mask);

	put_cpu();

	return rc;
}
/*
 * Low-level trap entry for the UBC breakpoint exception: forwards the
 * event onto the die notifier chain, where
 * hw_breakpoint_exceptions_notify() picks it up.
 */
BUILD_TRAP_HANDLER(breakpoint)
{
	unsigned long ex = lookup_exception_vector();
	TRAP_HANDLER_DECL;

	notify_die(DIE_BREAKPOINT, "breakpoint", regs, 0, ex, SIGTRAP);
}
  319. /*
  320. * Handle debug exception notifications.
  321. */
  322. int __kprobes hw_breakpoint_exceptions_notify(struct notifier_block *unused,
  323. unsigned long val, void *data)
  324. {
  325. struct die_args *args = data;
  326. if (val != DIE_BREAKPOINT)
  327. return NOTIFY_DONE;
  328. /*
  329. * If the breakpoint hasn't been triggered by the UBC, it's
  330. * probably from a debugger, so don't do anything more here.
  331. *
  332. * This also permits the UBC interface clock to remain off for
  333. * non-UBC breakpoints, as we don't need to check the triggered
  334. * or active channel masks.
  335. */
  336. if (args->trapnr != sh_ubc->trap_nr)
  337. return NOTIFY_DONE;
  338. return hw_breakpoint_handler(data);
  339. }
/* Required pmu read hook for hw_breakpoint events; intentionally a stub. */
void hw_breakpoint_pmu_read(struct perf_event *bp)
{
	/* TODO */
}
/*
 * Register the CPU's UBC backend as the active one.
 *
 * Only a single backend can be registered; further attempts fail with
 * -EBUSY. A backend advertising more channels than the bp_per_reg slot
 * array (HBP_NUM) only draws a warning -- the excess channels would
 * overrun the per-cpu slot array if ever used.
 */
int register_sh_ubc(struct sh_ubc *ubc)
{
	/* Bail if it's already assigned */
	if (sh_ubc != &ubc_dummy)
		return -EBUSY;
	sh_ubc = ubc;

	pr_info("HW Breakpoints: %s UBC support registered\n", ubc->name);

	WARN_ON(ubc->num_events > HBP_NUM);

	return 0;
}