hw_breakpoint.c

/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) 2009, 2010 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

/*
 * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility,
 * using the CPU's debug registers.
 */
#define pr_fmt(fmt) "hw-breakpoint: " fmt

#include <linux/errno.h>
#include <linux/hardirq.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>
#include <linux/smp.h>

#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/current.h>
#include <asm/hw_breakpoint.h>
#include <asm/kdebug.h>
#include <asm/system.h>
#include <asm/traps.h>

/* Breakpoint currently in use for each BRP. */
static DEFINE_PER_CPU(struct perf_event *, bp_on_reg[ARM_MAX_BRP]);

/* Watchpoint currently in use for each WRP. */
static DEFINE_PER_CPU(struct perf_event *, wp_on_reg[ARM_MAX_WRP]);

/* Number of BRP/WRP registers on this CPU. */
static int core_num_brps;
static int core_num_reserved_brps;
static int core_num_wrps;

/* Debug architecture version. */
static u8 debug_arch;

/* Maximum supported watchpoint length. */
static u8 max_watchpoint_len;
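
/*
 * The breakpoint/watchpoint value and control registers are CP14 debug
 * registers selected by their (op2, CRm) encoding. The macros below expand
 * to one switch case per register so that read_wb_reg()/write_wb_reg() can
 * address BVR/BCR/WVR/WCR n with a single integer index, (op2 << 4) + n.
 */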
#define READ_WB_REG_CASE(OP2, M, VAL)		\
	case ((OP2 << 4) + M):			\
		ARM_DBG_READ(c ## M, OP2, VAL); \
		break

#define WRITE_WB_REG_CASE(OP2, M, VAL)		\
	case ((OP2 << 4) + M):			\
		ARM_DBG_WRITE(c ## M, OP2, VAL);\
		break

#define GEN_READ_WB_REG_CASES(OP2, VAL)		\
	READ_WB_REG_CASE(OP2, 0, VAL);		\
	READ_WB_REG_CASE(OP2, 1, VAL);		\
	READ_WB_REG_CASE(OP2, 2, VAL);		\
	READ_WB_REG_CASE(OP2, 3, VAL);		\
	READ_WB_REG_CASE(OP2, 4, VAL);		\
	READ_WB_REG_CASE(OP2, 5, VAL);		\
	READ_WB_REG_CASE(OP2, 6, VAL);		\
	READ_WB_REG_CASE(OP2, 7, VAL);		\
	READ_WB_REG_CASE(OP2, 8, VAL);		\
	READ_WB_REG_CASE(OP2, 9, VAL);		\
	READ_WB_REG_CASE(OP2, 10, VAL);		\
	READ_WB_REG_CASE(OP2, 11, VAL);		\
	READ_WB_REG_CASE(OP2, 12, VAL);		\
	READ_WB_REG_CASE(OP2, 13, VAL);		\
	READ_WB_REG_CASE(OP2, 14, VAL);		\
	READ_WB_REG_CASE(OP2, 15, VAL)

#define GEN_WRITE_WB_REG_CASES(OP2, VAL)	\
	WRITE_WB_REG_CASE(OP2, 0, VAL);		\
	WRITE_WB_REG_CASE(OP2, 1, VAL);		\
	WRITE_WB_REG_CASE(OP2, 2, VAL);		\
	WRITE_WB_REG_CASE(OP2, 3, VAL);		\
	WRITE_WB_REG_CASE(OP2, 4, VAL);		\
	WRITE_WB_REG_CASE(OP2, 5, VAL);		\
	WRITE_WB_REG_CASE(OP2, 6, VAL);		\
	WRITE_WB_REG_CASE(OP2, 7, VAL);		\
	WRITE_WB_REG_CASE(OP2, 8, VAL);		\
	WRITE_WB_REG_CASE(OP2, 9, VAL);		\
	WRITE_WB_REG_CASE(OP2, 10, VAL);	\
	WRITE_WB_REG_CASE(OP2, 11, VAL);	\
	WRITE_WB_REG_CASE(OP2, 12, VAL);	\
	WRITE_WB_REG_CASE(OP2, 13, VAL);	\
	WRITE_WB_REG_CASE(OP2, 14, VAL);	\
	WRITE_WB_REG_CASE(OP2, 15, VAL)

static u32 read_wb_reg(int n)
{
	u32 val = 0;

	switch (n) {
	GEN_READ_WB_REG_CASES(ARM_OP2_BVR, val);
	GEN_READ_WB_REG_CASES(ARM_OP2_BCR, val);
	GEN_READ_WB_REG_CASES(ARM_OP2_WVR, val);
	GEN_READ_WB_REG_CASES(ARM_OP2_WCR, val);
	default:
		pr_warning("attempt to read from unknown breakpoint "
			   "register %d\n", n);
	}

	return val;
}

static void write_wb_reg(int n, u32 val)
{
	switch (n) {
	GEN_WRITE_WB_REG_CASES(ARM_OP2_BVR, val);
	GEN_WRITE_WB_REG_CASES(ARM_OP2_BCR, val);
	GEN_WRITE_WB_REG_CASES(ARM_OP2_WVR, val);
	GEN_WRITE_WB_REG_CASES(ARM_OP2_WCR, val);
	default:
		pr_warning("attempt to write to unknown breakpoint "
			   "register %d\n", n);
	}
	isb();
}

/* Determine debug architecture. */
static u8 get_debug_arch(void)
{
	u32 didr;

	/* Do we implement the extended CPUID interface? */
	if (((read_cpuid_id() >> 16) & 0xf) != 0xf) {
		pr_warning("CPUID feature registers not supported. "
			   "Assuming v6 debug is present.\n");
		return ARM_DEBUG_ARCH_V6;
	}

	ARM_DBG_READ(c0, 0, didr);
	return (didr >> 16) & 0xf;
}

u8 arch_get_debug_arch(void)
{
	return debug_arch;
}

/* Determine number of BRP registers available. */
static int get_num_brp_resources(void)
{
	u32 didr;
	ARM_DBG_READ(c0, 0, didr);
	return ((didr >> 24) & 0xf) + 1;
}

/* Does this core support mismatch breakpoints? */
static int core_has_mismatch_brps(void)
{
	return (get_debug_arch() >= ARM_DEBUG_ARCH_V7_ECP14 &&
		get_num_brp_resources() > 1);
}

/* Determine number of usable WRPs available. */
static int get_num_wrps(void)
{
	/*
	 * FIXME: When a watchpoint fires, the only way to work out which
	 * watchpoint it was is by disassembling the faulting instruction
	 * and working out the address of the memory access.
	 *
	 * Furthermore, we can only do this if the watchpoint was precise
	 * since imprecise watchpoints prevent us from calculating register
	 * based addresses.
	 *
	 * Providing we have more than 1 breakpoint register, we only report
	 * a single watchpoint register for the time being. This way, we always
	 * know which watchpoint fired. In the future we can either add a
	 * disassembler and address generation emulator, or we can insert a
	 * check to see if the DFAR is set on watchpoint exception entry
	 * [the ARM ARM states that the DFAR is UNKNOWN, but experience shows
	 * that it is set on some implementations].
	 */
#if 0
	int wrps;
	u32 didr;
	ARM_DBG_READ(c0, 0, didr);
	wrps = ((didr >> 28) & 0xf) + 1;
#endif
	int wrps = 1;

	if (core_has_mismatch_brps() && wrps >= get_num_brp_resources())
		wrps = get_num_brp_resources() - 1;

	return wrps;
}

/* We reserve one breakpoint for each watchpoint. */
static int get_num_reserved_brps(void)
{
	if (core_has_mismatch_brps())
		return get_num_wrps();
	return 0;
}

/* Determine number of usable BRPs available. */
static int get_num_brps(void)
{
	int brps = get_num_brp_resources();
	if (core_has_mismatch_brps())
		brps -= get_num_reserved_brps();
	return brps;
}

/*
 * In order to access the breakpoint/watchpoint control registers,
 * we must be running in debug monitor mode. Unfortunately, we can
 * be put into halting debug mode at any time by an external debugger
 * but there is nothing we can do to prevent that.
 */
static int enable_monitor_mode(void)
{
	u32 dscr;
	int ret = 0;

	ARM_DBG_READ(c1, 0, dscr);

	/* Ensure that halting mode is disabled. */
	if (WARN_ONCE(dscr & ARM_DSCR_HDBGEN, "halting debug mode enabled. "
		      "Unable to access hardware resources.")) {
		ret = -EPERM;
		goto out;
	}

	/* If monitor mode is already enabled, just return. */
	if (dscr & ARM_DSCR_MDBGEN)
		goto out;

	/* Write to the corresponding DSCR. */
	switch (get_debug_arch()) {
	case ARM_DEBUG_ARCH_V6:
	case ARM_DEBUG_ARCH_V6_1:
		ARM_DBG_WRITE(c1, 0, (dscr | ARM_DSCR_MDBGEN));
		break;
	case ARM_DEBUG_ARCH_V7_ECP14:
		ARM_DBG_WRITE(c2, 2, (dscr | ARM_DSCR_MDBGEN));
		break;
	default:
		ret = -ENODEV;
		goto out;
	}

	/* Check that the write made it through. */
	ARM_DBG_READ(c1, 0, dscr);
	if (!(dscr & ARM_DSCR_MDBGEN))
		ret = -EPERM;

out:
	return ret;
}

int hw_breakpoint_slots(int type)
{
	/*
	 * We can be called early, so don't rely on
	 * our static variables being initialised.
	 */
	switch (type) {
	case TYPE_INST:
		return get_num_brps();
	case TYPE_DATA:
		return get_num_wrps();
	default:
		pr_warning("unknown slot type: %d\n", type);
		return 0;
	}
}

/*
 * Check if 8-bit byte-address select is available.
 * This clobbers WRP 0.
 */
static u8 get_max_wp_len(void)
{
	u32 ctrl_reg;
	struct arch_hw_breakpoint_ctrl ctrl;
	u8 size = 4;

	if (debug_arch < ARM_DEBUG_ARCH_V7_ECP14)
		goto out;

	memset(&ctrl, 0, sizeof(ctrl));
	ctrl.len = ARM_BREAKPOINT_LEN_8;
	ctrl_reg = encode_ctrl_reg(ctrl);

	write_wb_reg(ARM_BASE_WVR, 0);
	write_wb_reg(ARM_BASE_WCR, ctrl_reg);
	if ((read_wb_reg(ARM_BASE_WCR) & ctrl_reg) == ctrl_reg)
		size = 8;

out:
	return size;
}

u8 arch_get_max_wp_len(void)
{
	return max_watchpoint_len;
}

/*
 * Install a perf counter breakpoint.
 */
int arch_install_hw_breakpoint(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	struct perf_event **slot, **slots;
	int i, max_slots, ctrl_base, val_base, ret = 0;
	u32 addr, ctrl;

	/* Ensure that we are in monitor mode and halting mode is disabled. */
	ret = enable_monitor_mode();
	if (ret)
		goto out;

	addr = info->address;
	ctrl = encode_ctrl_reg(info->ctrl) | 0x1;

	if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) {
		/* Breakpoint */
		ctrl_base = ARM_BASE_BCR;
		val_base = ARM_BASE_BVR;
		slots = (struct perf_event **)__get_cpu_var(bp_on_reg);
		max_slots = core_num_brps;

		if (info->step_ctrl.enabled) {
			/* Override the breakpoint data with the step data. */
			addr = info->trigger & ~0x3;
			ctrl = encode_ctrl_reg(info->step_ctrl);
		}
	} else {
		/* Watchpoint */
		if (info->step_ctrl.enabled) {
			/* Install into the reserved breakpoint region. */
			ctrl_base = ARM_BASE_BCR + core_num_brps;
			val_base = ARM_BASE_BVR + core_num_brps;
			/* Override the watchpoint data with the step data. */
			addr = info->trigger & ~0x3;
			ctrl = encode_ctrl_reg(info->step_ctrl);
		} else {
			ctrl_base = ARM_BASE_WCR;
			val_base = ARM_BASE_WVR;
		}
		slots = (struct perf_event **)__get_cpu_var(wp_on_reg);
		max_slots = core_num_wrps;
	}

	for (i = 0; i < max_slots; ++i) {
		slot = &slots[i];

		if (!*slot) {
			*slot = bp;
			break;
		}
	}

	if (WARN_ONCE(i == max_slots, "Can't find any breakpoint slot")) {
		ret = -EBUSY;
		goto out;
	}

	/* Setup the address register. */
	write_wb_reg(val_base + i, addr);

	/* Setup the control register. */
	write_wb_reg(ctrl_base + i, ctrl);

out:
	return ret;
}

void arch_uninstall_hw_breakpoint(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	struct perf_event **slot, **slots;
	int i, max_slots, base;

	if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) {
		/* Breakpoint */
		base = ARM_BASE_BCR;
		slots = (struct perf_event **)__get_cpu_var(bp_on_reg);
		max_slots = core_num_brps;
	} else {
		/* Watchpoint */
		if (info->step_ctrl.enabled)
			base = ARM_BASE_BCR + core_num_brps;
		else
			base = ARM_BASE_WCR;
		slots = (struct perf_event **)__get_cpu_var(wp_on_reg);
		max_slots = core_num_wrps;
	}

	/* Remove the breakpoint. */
	for (i = 0; i < max_slots; ++i) {
		slot = &slots[i];

		if (*slot == bp) {
			*slot = NULL;
			break;
		}
	}

	if (WARN_ONCE(i == max_slots, "Can't find any breakpoint slot"))
		return;

	/* Reset the control register. */
	write_wb_reg(base + i, 0);
}
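
/* Translate an arch_hw_breakpoint length encoding into a size in bytes. */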
static int get_hbp_len(u8 hbp_len)
{
	unsigned int len_in_bytes = 0;

	switch (hbp_len) {
	case ARM_BREAKPOINT_LEN_1:
		len_in_bytes = 1;
		break;
	case ARM_BREAKPOINT_LEN_2:
		len_in_bytes = 2;
		break;
	case ARM_BREAKPOINT_LEN_4:
		len_in_bytes = 4;
		break;
	case ARM_BREAKPOINT_LEN_8:
		len_in_bytes = 8;
		break;
	}

	return len_in_bytes;
}

/*
 * Check whether bp virtual address is in kernel space.
 */
int arch_check_bp_in_kernelspace(struct perf_event *bp)
{
	unsigned int len;
	unsigned long va;
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);

	va = info->address;
	len = get_hbp_len(info->ctrl.len);

	return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE);
}

/*
 * Extract generic type and length encodings from an arch_hw_breakpoint_ctrl.
 * Hopefully this will disappear when ptrace can bypass the conversion
 * to generic breakpoint descriptions.
 */
int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl,
			   int *gen_len, int *gen_type)
{
	/* Type */
	switch (ctrl.type) {
	case ARM_BREAKPOINT_EXECUTE:
		*gen_type = HW_BREAKPOINT_X;
		break;
	case ARM_BREAKPOINT_LOAD:
		*gen_type = HW_BREAKPOINT_R;
		break;
	case ARM_BREAKPOINT_STORE:
		*gen_type = HW_BREAKPOINT_W;
		break;
	case ARM_BREAKPOINT_LOAD | ARM_BREAKPOINT_STORE:
		*gen_type = HW_BREAKPOINT_RW;
		break;
	default:
		return -EINVAL;
	}

	/* Len */
	switch (ctrl.len) {
	case ARM_BREAKPOINT_LEN_1:
		*gen_len = HW_BREAKPOINT_LEN_1;
		break;
	case ARM_BREAKPOINT_LEN_2:
		*gen_len = HW_BREAKPOINT_LEN_2;
		break;
	case ARM_BREAKPOINT_LEN_4:
		*gen_len = HW_BREAKPOINT_LEN_4;
		break;
	case ARM_BREAKPOINT_LEN_8:
		*gen_len = HW_BREAKPOINT_LEN_8;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/*
 * Construct an arch_hw_breakpoint from a perf_event.
 */
static int arch_build_bp_info(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);

	/* Type */
	switch (bp->attr.bp_type) {
	case HW_BREAKPOINT_X:
		info->ctrl.type = ARM_BREAKPOINT_EXECUTE;
		break;
	case HW_BREAKPOINT_R:
		info->ctrl.type = ARM_BREAKPOINT_LOAD;
		break;
	case HW_BREAKPOINT_W:
		info->ctrl.type = ARM_BREAKPOINT_STORE;
		break;
	case HW_BREAKPOINT_RW:
		info->ctrl.type = ARM_BREAKPOINT_LOAD | ARM_BREAKPOINT_STORE;
		break;
	default:
		return -EINVAL;
	}

	/* Len */
	switch (bp->attr.bp_len) {
	case HW_BREAKPOINT_LEN_1:
		info->ctrl.len = ARM_BREAKPOINT_LEN_1;
		break;
	case HW_BREAKPOINT_LEN_2:
		info->ctrl.len = ARM_BREAKPOINT_LEN_2;
		break;
	case HW_BREAKPOINT_LEN_4:
		info->ctrl.len = ARM_BREAKPOINT_LEN_4;
		break;
	case HW_BREAKPOINT_LEN_8:
		info->ctrl.len = ARM_BREAKPOINT_LEN_8;
		if ((info->ctrl.type != ARM_BREAKPOINT_EXECUTE)
			&& max_watchpoint_len >= 8)
			break;
	default:
		return -EINVAL;
	}

	/*
	 * Breakpoints must be of length 2 (thumb) or 4 (ARM) bytes.
	 * Watchpoints can be of length 1, 2, 4 or 8 bytes if supported
	 * by the hardware and must be aligned to the appropriate number of
	 * bytes.
	 */
	if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE &&
	    info->ctrl.len != ARM_BREAKPOINT_LEN_2 &&
	    info->ctrl.len != ARM_BREAKPOINT_LEN_4)
		return -EINVAL;

	/* Address */
	info->address = bp->attr.bp_addr;

	/* Privilege */
	info->ctrl.privilege = ARM_BREAKPOINT_USER;
	if (arch_check_bp_in_kernelspace(bp))
		info->ctrl.privilege |= ARM_BREAKPOINT_PRIV;

	/* Enabled? */
	info->ctrl.enabled = !bp->attr.disabled;

	/* Mismatch */
	info->ctrl.mismatch = 0;

	return 0;
}

/*
 * Validate the arch-specific HW Breakpoint register settings.
 */
int arch_validate_hwbkpt_settings(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	int ret = 0;
	u32 offset, alignment_mask = 0x3;

	/* Build the arch_hw_breakpoint. */
	ret = arch_build_bp_info(bp);
	if (ret)
		goto out;

	/* Check address alignment. */
	if (info->ctrl.len == ARM_BREAKPOINT_LEN_8)
		alignment_mask = 0x7;
	offset = info->address & alignment_mask;
	switch (offset) {
	case 0:
		/* Aligned */
		break;
	case 1:
		/* Allow single byte watchpoint. */
		if (info->ctrl.len == ARM_BREAKPOINT_LEN_1)
			break;
	case 2:
		/* Allow halfword watchpoints and breakpoints. */
		if (info->ctrl.len == ARM_BREAKPOINT_LEN_2)
			break;
	default:
		ret = -EINVAL;
		goto out;
	}
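
	/*
	 * Program the aligned address and shift the byte-address-select
	 * mask so that it still covers the bytes actually being watched.
	 */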
	info->address &= ~alignment_mask;
	info->ctrl.len <<= offset;

	/*
	 * Currently we rely on an overflow handler to take
	 * care of single-stepping the breakpoint when it fires.
	 * In the case of userspace breakpoints on a core with V7 debug,
	 * we can use the mismatch feature as a poor-man's hardware
	 * single-step, but this only works for per-task breakpoints.
	 */
	if (WARN_ONCE(!bp->overflow_handler &&
		(arch_check_bp_in_kernelspace(bp) || !core_has_mismatch_brps()
		 || !bp->hw.bp_target),
			"overflow handler required but none found")) {
		ret = -EINVAL;
	}
out:
	return ret;
}

/*
 * Enable/disable single-stepping over the breakpoint bp at address addr.
 */
static void enable_single_step(struct perf_event *bp, u32 addr)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);

	arch_uninstall_hw_breakpoint(bp);
	info->step_ctrl.mismatch = 1;
	info->step_ctrl.len = ARM_BREAKPOINT_LEN_4;
	info->step_ctrl.type = ARM_BREAKPOINT_EXECUTE;
	info->step_ctrl.privilege = info->ctrl.privilege;
	info->step_ctrl.enabled = 1;
	info->trigger = addr;
	arch_install_hw_breakpoint(bp);
}

static void disable_single_step(struct perf_event *bp)
{
	arch_uninstall_hw_breakpoint(bp);
	counter_arch_bp(bp)->step_ctrl.enabled = 0;
	arch_install_hw_breakpoint(bp);
}
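
/*
 * Called on a watchpoint debug exception (data abort): report the event
 * to perf and, when no overflow handler is registered, set up a mismatch
 * breakpoint to step over the access.
 */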
static void watchpoint_handler(unsigned long unknown, struct pt_regs *regs)
{
	int i;
	struct perf_event *wp, **slots;
	struct arch_hw_breakpoint *info;

	slots = (struct perf_event **)__get_cpu_var(wp_on_reg);

	/* Without a disassembler, we can only handle 1 watchpoint. */
	BUG_ON(core_num_wrps > 1);

	for (i = 0; i < core_num_wrps; ++i) {
		rcu_read_lock();

		wp = slots[i];

		if (wp == NULL) {
			rcu_read_unlock();
			continue;
		}

		/*
		 * The DFAR is an unknown value. Since we only allow a
		 * single watchpoint, we can set the trigger to the lowest
		 * possible faulting address.
		 */
		info = counter_arch_bp(wp);
		info->trigger = wp->attr.bp_addr;
		pr_debug("watchpoint fired: address = 0x%x\n", info->trigger);
		perf_bp_event(wp, regs);

		/*
		 * If no overflow handler is present, insert a temporary
		 * mismatch breakpoint so we can single-step over the
		 * watchpoint trigger.
		 */
		if (!wp->overflow_handler)
			enable_single_step(wp, instruction_pointer(regs));

		rcu_read_unlock();
	}
}
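
/*
 * Called from the breakpoint handler; restores the original watchpoint
 * once the single-step over the watchpointed access has completed.
 */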
static void watchpoint_single_step_handler(unsigned long pc)
{
	int i;
	struct perf_event *wp, **slots;
	struct arch_hw_breakpoint *info;

	slots = (struct perf_event **)__get_cpu_var(wp_on_reg);

	for (i = 0; i < core_num_reserved_brps; ++i) {
		rcu_read_lock();

		wp = slots[i];

		if (wp == NULL)
			goto unlock;

		info = counter_arch_bp(wp);
		if (!info->step_ctrl.enabled)
			goto unlock;

		/*
		 * Restore the original watchpoint if we've completed the
		 * single-step.
		 */
		if (info->trigger != pc)
			disable_single_step(wp);

unlock:
		rcu_read_unlock();
	}
}
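
/*
 * Called on a breakpoint debug exception (prefetch abort): match the
 * faulting address against the installed breakpoints and report hits
 * to perf.
 */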
static void breakpoint_handler(unsigned long unknown, struct pt_regs *regs)
{
	int i;
	u32 ctrl_reg, val, addr;
	struct perf_event *bp, **slots;
	struct arch_hw_breakpoint *info;
	struct arch_hw_breakpoint_ctrl ctrl;

	slots = (struct perf_event **)__get_cpu_var(bp_on_reg);

	/* The exception entry code places the amended lr in the PC. */
	addr = regs->ARM_pc;

	/* Check the currently installed breakpoints first. */
	for (i = 0; i < core_num_brps; ++i) {
		rcu_read_lock();

		bp = slots[i];

		if (bp == NULL)
			goto unlock;

		info = counter_arch_bp(bp);

		/* Check if the breakpoint value matches. */
		val = read_wb_reg(ARM_BASE_BVR + i);
		if (val != (addr & ~0x3))
			goto mismatch;

		/* Possible match, check the byte address select to confirm. */
		ctrl_reg = read_wb_reg(ARM_BASE_BCR + i);
		decode_ctrl_reg(ctrl_reg, &ctrl);
		if ((1 << (addr & 0x3)) & ctrl.len) {
			info->trigger = addr;
			pr_debug("breakpoint fired: address = 0x%x\n", addr);
			perf_bp_event(bp, regs);
			if (!bp->overflow_handler)
				enable_single_step(bp, addr);
			goto unlock;
		}

mismatch:
		/* If we're stepping a breakpoint, it can now be restored. */
		if (info->step_ctrl.enabled)
			disable_single_step(bp);
unlock:
		rcu_read_unlock();
	}

	/* Handle any pending watchpoint single-step breakpoints. */
	watchpoint_single_step_handler(addr);
}

/*
 * Called from either the Data Abort Handler [watchpoint] or the
 * Prefetch Abort Handler [breakpoint] with preemption disabled.
 */
static int hw_breakpoint_pending(unsigned long addr, unsigned int fsr,
				 struct pt_regs *regs)
{
	int ret = 0;
	u32 dscr;

	/* We must be called with preemption disabled. */
	WARN_ON(preemptible());

	/* We only handle watchpoints and hardware breakpoints. */
	ARM_DBG_READ(c1, 0, dscr);

	/* Perform perf callbacks. */
	switch (ARM_DSCR_MOE(dscr)) {
	case ARM_ENTRY_BREAKPOINT:
		breakpoint_handler(addr, regs);
		break;
	case ARM_ENTRY_ASYNC_WATCHPOINT:
		WARN(1, "Asynchronous watchpoint exception taken. Debugging results may be unreliable\n");
	case ARM_ENTRY_SYNC_WATCHPOINT:
		watchpoint_handler(addr, regs);
		break;
	default:
		ret = 1; /* Unhandled fault. */
	}

	/*
	 * Re-enable preemption after it was disabled in the
	 * low-level exception handling code.
	 */
	preempt_enable();

	return ret;
}

/*
 * One-time initialisation.
 */
static void reset_ctrl_regs(void *unused)
{
	int i;

	/*
	 * v7 debug contains save and restore registers so that debug state
	 * can be maintained across low-power modes without leaving
	 * the debug logic powered up. It is IMPLEMENTATION DEFINED whether
	 * we can write to the debug registers out of reset, so we must
	 * unlock the OS Lock Access Register to avoid taking undefined
	 * instruction exceptions later on.
	 */
	if (debug_arch >= ARM_DEBUG_ARCH_V7_ECP14) {
		/*
		 * Unconditionally clear the lock by writing a value
		 * other than 0xC5ACCE55 to the access register.
		 */
		asm volatile("mcr p14, 0, %0, c1, c0, 4" : : "r" (0));
		isb();
	}

	if (enable_monitor_mode())
		return;

	/* We must also reset any reserved registers. */
	for (i = 0; i < core_num_brps + core_num_reserved_brps; ++i) {
		write_wb_reg(ARM_BASE_BCR + i, 0UL);
		write_wb_reg(ARM_BASE_BVR + i, 0UL);
	}

	for (i = 0; i < core_num_wrps; ++i) {
		write_wb_reg(ARM_BASE_WCR + i, 0UL);
		write_wb_reg(ARM_BASE_WVR + i, 0UL);
	}
}
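
/* Re-initialise the debug registers on any CPU that comes online. */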
static int __cpuinit dbg_reset_notify(struct notifier_block *self,
				      unsigned long action, void *cpu)
{
	if (action == CPU_ONLINE)
		smp_call_function_single((int)cpu, reset_ctrl_regs, NULL, 1);
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata dbg_reset_nb = {
	.notifier_call = dbg_reset_notify,
};

static int __init arch_hw_breakpoint_init(void)
{
	u32 dscr;

	debug_arch = get_debug_arch();

	if (debug_arch > ARM_DEBUG_ARCH_V7_ECP14) {
		pr_info("debug architecture 0x%x unsupported.\n", debug_arch);
		return 0;
	}

	/* Determine how many BRPs/WRPs are available. */
	core_num_brps = get_num_brps();
	core_num_reserved_brps = get_num_reserved_brps();
	core_num_wrps = get_num_wrps();

	pr_info("found %d breakpoint and %d watchpoint registers.\n",
		core_num_brps + core_num_reserved_brps, core_num_wrps);

	if (core_num_reserved_brps)
		pr_info("%d breakpoint(s) reserved for watchpoint "
			"single-step.\n", core_num_reserved_brps);

	ARM_DBG_READ(c1, 0, dscr);
	if (dscr & ARM_DSCR_HDBGEN) {
		pr_warning("halting debug mode enabled. Assuming maximum "
			   "watchpoint size of 4 bytes.");
	} else {
		/*
		 * Reset the breakpoint resources. We assume that a halting
		 * debugger will leave the world in a nice state for us.
		 */
		smp_call_function(reset_ctrl_regs, NULL, 1);
		reset_ctrl_regs(NULL);

		/* Work out the maximum supported watchpoint length. */
		max_watchpoint_len = get_max_wp_len();
		pr_info("maximum watchpoint size is %u bytes.\n",
			max_watchpoint_len);
	}

	/* Register debug fault handler. */
	hook_fault_code(2, hw_breakpoint_pending, SIGTRAP, TRAP_HWBKPT,
			"watchpoint debug exception");
	hook_ifault_code(2, hw_breakpoint_pending, SIGTRAP, TRAP_HWBKPT,
			 "breakpoint debug exception");

	/* Register hotplug notifier. */
	register_cpu_notifier(&dbg_reset_nb);
	return 0;
}
arch_initcall(arch_hw_breakpoint_init);
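
/* There is nothing to do when the perf core reads a breakpoint event. */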
void hw_breakpoint_pmu_read(struct perf_event *bp)
{
}

/*
 * Dummy function to register with die_notifier.
 */
int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
				    unsigned long val, void *data)
{
	return NOTIFY_DONE;
}