hw_breakpoint.c

/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) 2007 Alan Stern
 * Copyright (C) IBM Corporation, 2009
 * Copyright (C) 2009, Frederic Weisbecker <fweisbec@gmail.com>
 *
 * Thanks to Ingo Molnar for his many suggestions.
 *
 * Authors: Alan Stern <stern@rowland.harvard.edu>
 *          K.Prasad <prasad@linux.vnet.ibm.com>
 *          Frederic Weisbecker <fweisbec@gmail.com>
 */

/*
 * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility,
 * using the CPU's debug registers.
 * This file contains the arch-independent routines.
 */

#include <linux/irqflags.h>
#include <linux/kallsyms.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/smp.h>

#include <linux/hw_breakpoint.h>

/*
 * Constraints data
 */

/* Number of pinned cpu breakpoints in a cpu */
static DEFINE_PER_CPU(unsigned int, nr_cpu_bp_pinned[TYPE_MAX]);

/* Number of pinned task breakpoints in a cpu */
static DEFINE_PER_CPU(unsigned int *, nr_task_bp_pinned[TYPE_MAX]);

/* Number of non-pinned cpu/task breakpoints in a cpu */
static DEFINE_PER_CPU(unsigned int, nr_bp_flexible[TYPE_MAX]);

static int nr_slots[TYPE_MAX];

static int constraints_initialized;

/* Gather the number of total pinned and un-pinned bp in a cpuset */
struct bp_busy_slots {
        unsigned int pinned;
        unsigned int flexible;
};

/* Serialize accesses to the above constraints */
static DEFINE_MUTEX(nr_bp_mutex);

__weak int hw_breakpoint_weight(struct perf_event *bp)
{
        return 1;
}

static inline enum bp_type_idx find_slot_idx(struct perf_event *bp)
{
        if (bp->attr.bp_type & HW_BREAKPOINT_RW)
                return TYPE_DATA;

        return TYPE_INST;
}
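
/*
 * nr_task_bp_pinned is kept as a histogram: tsk_pinned[n] counts the
 * number of tasks that currently have n + 1 pinned breakpoints of the
 * given type on this cpu.  For example, if two tasks each own one such
 * breakpoint and a third task owns three, then tsk_pinned[0] == 2 and
 * tsk_pinned[2] == 1, so the per-task maximum reported below is 3.
 */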

/*
 * Report the maximum number of pinned breakpoints a task
 * can have on this cpu
 */
static unsigned int max_task_bp_pinned(int cpu, enum bp_type_idx type)
{
        int i;
        unsigned int *tsk_pinned = per_cpu(nr_task_bp_pinned[type], cpu);

        for (i = nr_slots[type] - 1; i >= 0; i--) {
                if (tsk_pinned[i] > 0)
                        return i + 1;
        }

        return 0;
}

static int task_bp_pinned(struct task_struct *tsk, enum bp_type_idx type)
{
        struct perf_event_context *ctx = tsk->perf_event_ctxp;
        struct list_head *list;
        struct perf_event *bp;
        unsigned long flags;
        int count = 0;

        if (WARN_ONCE(!ctx, "No perf context for this task"))
                return 0;

        list = &ctx->event_list;

        raw_spin_lock_irqsave(&ctx->lock, flags);

        /*
         * The current breakpoint counter is not included in the list
         * at the open() callback time
         */
        list_for_each_entry(bp, list, event_entry) {
                if (bp->attr.type == PERF_TYPE_BREAKPOINT)
                        if (find_slot_idx(bp) == type)
                                count += hw_breakpoint_weight(bp);
        }

        raw_spin_unlock_irqrestore(&ctx->lock, flags);

        return count;
}
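
/*
 * Note on the comment above: the breakpoint being created has not yet
 * been linked into the context's event list, so the count returned
 * reflects the task's constraints *before* the new breakpoint is
 * accounted for.
 */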

/*
 * Report the number of pinned/un-pinned breakpoints we have in
 * a given cpu (cpu > -1) or in all of them (cpu = -1).
 */
static void
fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp,
                    enum bp_type_idx type)
{
        int cpu = bp->cpu;
        struct task_struct *tsk = bp->ctx->task;

        if (cpu >= 0) {
                slots->pinned = per_cpu(nr_cpu_bp_pinned[type], cpu);
                if (!tsk)
                        slots->pinned += max_task_bp_pinned(cpu, type);
                else
                        slots->pinned += task_bp_pinned(tsk, type);
                slots->flexible = per_cpu(nr_bp_flexible[type], cpu);

                return;
        }

        for_each_online_cpu(cpu) {
                unsigned int nr;

                nr = per_cpu(nr_cpu_bp_pinned[type], cpu);
                if (!tsk)
                        nr += max_task_bp_pinned(cpu, type);
                else
                        nr += task_bp_pinned(tsk, type);

                if (nr > slots->pinned)
                        slots->pinned = nr;

                nr = per_cpu(nr_bp_flexible[type], cpu);

                if (nr > slots->flexible)
                        slots->flexible = nr;
        }
}
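
/*
 * Example for the cpu == -1 path above: with two online cpus where
 * cpu0 already has three pinned breakpoints of this type and cpu1 has
 * one, a cpu-wide breakpoint must fit on the busiest cpu, so
 * slots->pinned reports three.
 */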

/*
 * For now, continue to consider flexible as pinned, until we can
 * ensure no flexible event can ever be scheduled before a pinned event
 * on the same cpu.
 */
static void
fetch_this_slot(struct bp_busy_slots *slots, int weight)
{
        slots->pinned += weight;
}

/*
 * Add a pinned breakpoint for the given task in our constraint table
 */
static void toggle_bp_task_slot(struct task_struct *tsk, int cpu, bool enable,
                                enum bp_type_idx type, int weight)
{
        unsigned int *tsk_pinned;
        int old_count = 0;
        int old_idx = 0;
        int idx = 0;

        old_count = task_bp_pinned(tsk, type);
        old_idx = old_count - 1;
        idx = old_idx + weight;

        tsk_pinned = per_cpu(nr_task_bp_pinned[type], cpu);
        if (enable) {
                tsk_pinned[idx]++;
                if (old_count > 0)
                        tsk_pinned[old_idx]--;
        } else {
                tsk_pinned[idx]--;
                if (old_count > 0)
                        tsk_pinned[old_idx]++;
        }
}
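
/*
 * Example of the histogram update above: if the task already had two
 * pinned breakpoints of this type (old_count == 2) and one more of
 * weight 1 is enabled, tsk_pinned[2] gains a task owning three
 * breakpoints while tsk_pinned[1] loses the task that owned two.
 */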

/*
 * Add/remove the given breakpoint in our constraint table
 */
static void
toggle_bp_slot(struct perf_event *bp, bool enable, enum bp_type_idx type,
               int weight)
{
        int cpu = bp->cpu;
        struct task_struct *tsk = bp->ctx->task;

        /* Pinned counter task profiling */
        if (tsk) {
                if (cpu >= 0) {
                        toggle_bp_task_slot(tsk, cpu, enable, type, weight);
                        return;
                }

                for_each_online_cpu(cpu)
                        toggle_bp_task_slot(tsk, cpu, enable, type, weight);
                return;
        }

        /* Pinned counter cpu profiling */
        if (enable)
                per_cpu(nr_cpu_bp_pinned[type], bp->cpu) += weight;
        else
                per_cpu(nr_cpu_bp_pinned[type], bp->cpu) -= weight;
}

/*
 * Constraints to check before allowing this new breakpoint counter:
 *
 *  == Non-pinned counter == (Considered as pinned for now)
 *
 *   - If attached to a single cpu, check:
 *
 *       (per_cpu(nr_bp_flexible, cpu) || (per_cpu(nr_cpu_bp_pinned, cpu)
 *           + max(per_cpu(nr_task_bp_pinned, cpu)))) < HBP_NUM
 *
 *       -> If there are already non-pinned counters in this cpu, it means
 *          there is already a free slot for them.
 *          Otherwise, we check that the maximum number of per task
 *          breakpoints (for this cpu) plus the number of per cpu breakpoints
 *          (for this cpu) doesn't cover all registers.
 *
 *   - If attached to every cpu, check:
 *
 *       (per_cpu(nr_bp_flexible, *) || (max(per_cpu(nr_cpu_bp_pinned, *))
 *           + max(per_cpu(nr_task_bp_pinned, *)))) < HBP_NUM
 *
 *       -> This is roughly the same, except we check the number of per cpu
 *          bp for every cpu and we keep the max one. Same for the per task
 *          breakpoints.
 *
 *
 * == Pinned counter ==
 *
 *   - If attached to a single cpu, check:
 *
 *       ((per_cpu(nr_bp_flexible, cpu) > 1) + per_cpu(nr_cpu_bp_pinned, cpu)
 *            + max(per_cpu(nr_task_bp_pinned, cpu))) < HBP_NUM
 *
 *       -> Same checks as before. But now the nr_bp_flexible, if any, must keep
 *          at least one register (or they will never be fed).
 *
 *   - If attached to every cpu, check:
 *
 *       ((per_cpu(nr_bp_flexible, *) > 1) + max(per_cpu(nr_cpu_bp_pinned, *))
 *            + max(per_cpu(nr_task_bp_pinned, *))) < HBP_NUM
 */
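
/*
 * Concrete example, assuming four data slots (HBP_NUM == 4 on x86):
 * if a cpu already has two pinned cpu-wide breakpoints and its busiest
 * task pins one more, a new pinned cpu-wide breakpoint still fits
 * (2 + 1 + 1 <= 4), but only if no flexible counter needs the last slot.
 */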
static int __reserve_bp_slot(struct perf_event *bp)
{
        struct bp_busy_slots slots = {0};
        enum bp_type_idx type;
        int weight;

        /* We couldn't initialize breakpoint constraints on boot */
        if (!constraints_initialized)
                return -ENOMEM;

        /* Basic checks */
        if (bp->attr.bp_type == HW_BREAKPOINT_EMPTY ||
            bp->attr.bp_type == HW_BREAKPOINT_INVALID)
                return -EINVAL;

        type = find_slot_idx(bp);
        weight = hw_breakpoint_weight(bp);

        fetch_bp_busy_slots(&slots, bp, type);
        fetch_this_slot(&slots, weight);

        /* Flexible counters need to keep at least one slot */
        if (slots.pinned + (!!slots.flexible) > nr_slots[type])
                return -ENOSPC;

        toggle_bp_slot(bp, true, type, weight);

        return 0;
}

int reserve_bp_slot(struct perf_event *bp)
{
        int ret;

        mutex_lock(&nr_bp_mutex);

        ret = __reserve_bp_slot(bp);

        mutex_unlock(&nr_bp_mutex);

        return ret;
}

static void __release_bp_slot(struct perf_event *bp)
{
        enum bp_type_idx type;
        int weight;

        type = find_slot_idx(bp);
        weight = hw_breakpoint_weight(bp);
        toggle_bp_slot(bp, false, type, weight);
}

void release_bp_slot(struct perf_event *bp)
{
        mutex_lock(&nr_bp_mutex);

        __release_bp_slot(bp);

        mutex_unlock(&nr_bp_mutex);
}

/*
 * Allow the kernel debugger to reserve breakpoint slots without
 * taking a lock, using the dbg_* variants of the reserve and
 * release functions.  They fail with -1 if someone else holds the
 * constraints mutex, since blocking is not an option in that context.
 */
int dbg_reserve_bp_slot(struct perf_event *bp)
{
        if (mutex_is_locked(&nr_bp_mutex))
                return -1;

        return __reserve_bp_slot(bp);
}

int dbg_release_bp_slot(struct perf_event *bp)
{
        if (mutex_is_locked(&nr_bp_mutex))
                return -1;

        __release_bp_slot(bp);

        return 0;
}

static int validate_hw_breakpoint(struct perf_event *bp)
{
        int ret;

        ret = arch_validate_hwbkpt_settings(bp);
        if (ret)
                return ret;

        if (arch_check_bp_in_kernelspace(bp)) {
                if (bp->attr.exclude_kernel)
                        return -EINVAL;
                /*
                 * Don't let unprivileged users set a breakpoint in the trap
                 * path to avoid trap recursion attacks.
                 */
                if (!capable(CAP_SYS_ADMIN))
                        return -EPERM;
        }

        return 0;
}

int register_perf_hw_breakpoint(struct perf_event *bp)
{
        int ret;

        ret = reserve_bp_slot(bp);
        if (ret)
                return ret;

        ret = validate_hw_breakpoint(bp);

        /* if validate_hw_breakpoint() fails then release bp slot */
        if (ret)
                release_bp_slot(bp);

        return ret;
}

/**
 * register_user_hw_breakpoint - register a hardware breakpoint for user space
 * @attr: breakpoint attributes
 * @triggered: callback to trigger when we hit the breakpoint
 * @tsk: pointer to 'task_struct' of the process to which the address belongs
 */
struct perf_event *
register_user_hw_breakpoint(struct perf_event_attr *attr,
                            perf_overflow_handler_t triggered,
                            struct task_struct *tsk)
{
        return perf_event_create_kernel_counter(attr, -1, tsk->pid, triggered);
}
EXPORT_SYMBOL_GPL(register_user_hw_breakpoint);
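
/*
 * Minimal usage sketch for the function above (hypothetical caller;
 * 'addr', 'my_triggered' and 'tsk' are assumed to exist in the caller,
 * in the spirit of the ptrace breakpoint code):
 *
 *      struct perf_event_attr attr;
 *      struct perf_event *bp;
 *
 *      hw_breakpoint_init(&attr);
 *      attr.bp_addr = addr;
 *      attr.bp_len  = HW_BREAKPOINT_LEN_4;
 *      attr.bp_type = HW_BREAKPOINT_W;
 *
 *      bp = register_user_hw_breakpoint(&attr, my_triggered, tsk);
 *      if (IS_ERR(bp))
 *              return PTR_ERR(bp);
 */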

/**
 * modify_user_hw_breakpoint - modify a user-space hardware breakpoint
 * @bp: the breakpoint structure to modify
 * @attr: new breakpoint attributes
 */
int modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr)
{
        u64 old_addr = bp->attr.bp_addr;
        u64 old_len = bp->attr.bp_len;
        int old_type = bp->attr.bp_type;
        int err = 0;

        perf_event_disable(bp);

        bp->attr.bp_addr = attr->bp_addr;
        bp->attr.bp_type = attr->bp_type;
        bp->attr.bp_len = attr->bp_len;

        if (attr->disabled)
                goto end;

        err = validate_hw_breakpoint(bp);
        if (!err)
                perf_event_enable(bp);

        if (err) {
                bp->attr.bp_addr = old_addr;
                bp->attr.bp_type = old_type;
                bp->attr.bp_len = old_len;
                if (!bp->attr.disabled)
                        perf_event_enable(bp);

                return err;
        }

end:
        bp->attr.disabled = attr->disabled;

        return 0;
}
EXPORT_SYMBOL_GPL(modify_user_hw_breakpoint);

/**
 * unregister_hw_breakpoint - unregister a user-space hardware breakpoint
 * @bp: the breakpoint structure to unregister
 */
void unregister_hw_breakpoint(struct perf_event *bp)
{
        if (!bp)
                return;
        perf_event_release_kernel(bp);
}
EXPORT_SYMBOL_GPL(unregister_hw_breakpoint);

/**
 * register_wide_hw_breakpoint - register a wide breakpoint in the kernel
 * @attr: breakpoint attributes
 * @triggered: callback to trigger when we hit the breakpoint
 *
 * @return a set of per_cpu pointers to perf events
 */
struct perf_event * __percpu *
register_wide_hw_breakpoint(struct perf_event_attr *attr,
                            perf_overflow_handler_t triggered)
{
        struct perf_event * __percpu *cpu_events, **pevent, *bp;
        long err;
        int cpu;

        cpu_events = alloc_percpu(typeof(*cpu_events));
        if (!cpu_events)
                return (void __percpu __force *)ERR_PTR(-ENOMEM);

        get_online_cpus();
        for_each_online_cpu(cpu) {
                pevent = per_cpu_ptr(cpu_events, cpu);
                bp = perf_event_create_kernel_counter(attr, cpu, -1, triggered);

                *pevent = bp;

                if (IS_ERR(bp)) {
                        err = PTR_ERR(bp);
                        goto fail;
                }
        }
        put_online_cpus();

        return cpu_events;

fail:
        for_each_online_cpu(cpu) {
                pevent = per_cpu_ptr(cpu_events, cpu);
                if (IS_ERR(*pevent))
                        break;
                unregister_hw_breakpoint(*pevent);
        }
        put_online_cpus();

        free_percpu(cpu_events);
        return (void __percpu __force *)ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(register_wide_hw_breakpoint);

/**
 * unregister_wide_hw_breakpoint - unregister a wide breakpoint in the kernel
 * @cpu_events: the per cpu set of events to unregister
 */
void unregister_wide_hw_breakpoint(struct perf_event * __percpu *cpu_events)
{
        int cpu;
        struct perf_event **pevent;

        for_each_possible_cpu(cpu) {
                pevent = per_cpu_ptr(cpu_events, cpu);
                unregister_hw_breakpoint(*pevent);
        }
        free_percpu(cpu_events);
}
EXPORT_SYMBOL_GPL(unregister_wide_hw_breakpoint);
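
/*
 * Minimal kernel-side usage sketch for the pair above (modeled on
 * samples/hw_breakpoint/data_breakpoint.c; 'wide_handler' is a
 * hypothetical perf_overflow_handler_t supplied by the caller):
 *
 *      struct perf_event_attr attr;
 *      struct perf_event * __percpu *wide_bp;
 *
 *      hw_breakpoint_init(&attr);
 *      attr.bp_addr = kallsyms_lookup_name("pid_max");
 *      attr.bp_len  = HW_BREAKPOINT_LEN_4;
 *      attr.bp_type = HW_BREAKPOINT_W | HW_BREAKPOINT_R;
 *
 *      wide_bp = register_wide_hw_breakpoint(&attr, wide_handler);
 *      if (IS_ERR((void __force *)wide_bp))
 *              return PTR_ERR((void __force *)wide_bp);
 *      ...
 *      unregister_wide_hw_breakpoint(wide_bp);
 */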

static struct notifier_block hw_breakpoint_exceptions_nb = {
        .notifier_call = hw_breakpoint_exceptions_notify,
        /* we need to be notified first */
        .priority = 0x7fffffff
};

static int __init init_hw_breakpoint(void)
{
        unsigned int **task_bp_pinned;
        int cpu, err_cpu;
        int i;

        for (i = 0; i < TYPE_MAX; i++)
                nr_slots[i] = hw_breakpoint_slots(i);

        for_each_possible_cpu(cpu) {
                for (i = 0; i < TYPE_MAX; i++) {
                        task_bp_pinned = &per_cpu(nr_task_bp_pinned[i], cpu);
                        *task_bp_pinned = kzalloc(sizeof(int) * nr_slots[i],
                                                  GFP_KERNEL);
                        if (!*task_bp_pinned)
                                goto err_alloc;
                }
        }

        constraints_initialized = 1;

        return register_die_notifier(&hw_breakpoint_exceptions_nb);

err_alloc:
        /* Free each err_cpu's arrays, up to and including the failing cpu */
        for_each_possible_cpu(err_cpu) {
                for (i = 0; i < TYPE_MAX; i++)
                        kfree(per_cpu(nr_task_bp_pinned[i], err_cpu));
                if (err_cpu == cpu)
                        break;
        }

        return -ENOMEM;
}
core_initcall(init_hw_breakpoint);
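
/*
 * The breakpoint pmu: perf core uses these callbacks to install the
 * breakpoint in the cpu debug registers when the event is scheduled
 * in, and to remove it when the event is scheduled out.
 */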
struct pmu perf_ops_bp = {
        .enable         = arch_install_hw_breakpoint,
        .disable        = arch_uninstall_hw_breakpoint,
        .read           = hw_breakpoint_pmu_read,
};