/*
 * Kernel Probes (KProbes)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes contributions from
 *		Rusty Russell).
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2004-Nov	Ananth N Mavinakayanahalli <ananth@in.ibm.com> kprobes port
 *		for PPC64
 */
#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/preempt.h>
#include <linux/module.h>
#include <linux/kdebug.h>
#include <asm/cacheflush.h>
#include <asm/sstep.h>
#include <asm/uaccess.h>
#include <asm/system.h>
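
/*
 * BookE parts have no trace bit in the MSR: single-stepping there goes
 * through the debug facility (MSR_DE, plus the DBCR0 setup done in
 * prepare_singlestep() below), while classic parts use the MSR trace
 * bit (MSR_SE).
 */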
#ifdef CONFIG_BOOKE
#define MSR_SINGLESTEP	(MSR_DE)
#else
#define MSR_SINGLESTEP	(MSR_SE)
#endif

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

struct kretprobe_blackpoint kretprobe_blacklist[] = {{NULL, NULL}};

int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
	int ret = 0;
	kprobe_opcode_t insn = *p->addr;

	if ((unsigned long)p->addr & 0x03) {
		printk("Attempt to register kprobe at an unaligned address\n");
		ret = -EINVAL;
	} else if (IS_MTMSRD(insn) || IS_RFID(insn) || IS_RFI(insn)) {
		printk("Cannot register a kprobe on rfi/rfid or mtmsr[d]\n");
		ret = -EINVAL;
	}

	/* insn must be on a special executable page on ppc64.  This is
	 * not explicitly required on ppc32 (right now), but it doesn't hurt */
	if (!ret) {
		p->ainsn.insn = get_insn_slot();
		if (!p->ainsn.insn)
			ret = -ENOMEM;
	}

	if (!ret) {
		memcpy(p->ainsn.insn, p->addr,
				MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
		p->opcode = *p->addr;
		flush_icache_range((unsigned long)p->ainsn.insn,
			(unsigned long)p->ainsn.insn + sizeof(kprobe_opcode_t));
	}

	p->ainsn.boostable = 0;
	return ret;
}
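
/*
 * For reference, a typical user of this code registers a probe roughly
 * like so -- a minimal, illustrative sketch, where "do_fork" and
 * my_pre_handler() are placeholder names:
 *
 *	static struct kprobe kp = {
 *		.symbol_name	= "do_fork",
 *		.pre_handler	= my_pre_handler,
 *	};
 *	...
 *	ret = register_kprobe(&kp);
 *
 * register_kprobe() calls back into arch_prepare_kprobe() above to
 * validate the address and copy the probed instruction, and into
 * arch_arm_kprobe() below to plant BREAKPOINT_INSTRUCTION.
 */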

void __kprobes arch_arm_kprobe(struct kprobe *p)
{
	*p->addr = BREAKPOINT_INSTRUCTION;
	flush_icache_range((unsigned long) p->addr,
			   (unsigned long) p->addr + sizeof(kprobe_opcode_t));
}

void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
	*p->addr = p->opcode;
	flush_icache_range((unsigned long) p->addr,
			   (unsigned long) p->addr + sizeof(kprobe_opcode_t));
}

void __kprobes arch_remove_kprobe(struct kprobe *p)
{
	if (p->ainsn.insn) {
		free_insn_slot(p->ainsn.insn, 0);
		p->ainsn.insn = NULL;
	}
}

static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
{
	/* We turn off async exceptions to ensure that the single step will
	 * be for the instruction we have the kprobe on; if we don't, it's
	 * possible we'd get the single step reported for an exception handler
	 * like Decrementer or External Interrupt */
	regs->msr &= ~MSR_EE;
	regs->msr |= MSR_SINGLESTEP;
#ifdef CONFIG_BOOKE
	regs->msr &= ~MSR_CE;
	mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) | DBCR0_IC | DBCR0_IDM);
#endif

	/*
	 * On powerpc we should single step on the original
	 * instruction even if the probed insn is a trap
	 * variant as values in regs could play a part in
	 * if the trap is taken or not
	 */
	regs->nip = (unsigned long)p->ainsn.insn;
}

static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	kcb->prev_kprobe.kp = kprobe_running();
	kcb->prev_kprobe.status = kcb->kprobe_status;
	kcb->prev_kprobe.saved_msr = kcb->kprobe_saved_msr;
}

static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	__get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
	kcb->kprobe_status = kcb->prev_kprobe.status;
	kcb->kprobe_saved_msr = kcb->prev_kprobe.saved_msr;
}

static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
				struct kprobe_ctlblk *kcb)
{
	__get_cpu_var(current_kprobe) = p;
	kcb->kprobe_saved_msr = regs->msr;
}
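
/*
 * Function-return probes work by hijacking the link register: on entry
 * to the probed function, the saved return address (regs->link) is
 * stashed in the kretprobe_instance and replaced with the address of
 * kretprobe_trampoline below, so the function "returns" into the
 * trampoline and trampoline_probe_handler() runs.
 */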
void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
				      struct pt_regs *regs)
{
	ri->ret_addr = (kprobe_opcode_t *)regs->link;

	/* Replace the return addr with trampoline addr */
	regs->link = (unsigned long)kretprobe_trampoline;
}
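
/*
 * Entered via the DIE_BPT notifier (see kprobe_exceptions_notify()
 * below) whenever a trap instruction planted by arch_arm_kprobe()
 * fires.  Returns 1 if the trap was ours and has been handled.
 */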
static int __kprobes kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *p;
	int ret = 0;
	unsigned int *addr = (unsigned int *)regs->nip;
	struct kprobe_ctlblk *kcb;

	/*
	 * We don't want to be preempted for the entire
	 * duration of kprobe processing
	 */
	preempt_disable();
	kcb = get_kprobe_ctlblk();

	/* Check we're not actually recursing */
	if (kprobe_running()) {
		p = get_kprobe(addr);
		if (p) {
			kprobe_opcode_t insn = *p->ainsn.insn;
			if (kcb->kprobe_status == KPROBE_HIT_SS &&
					is_trap(insn)) {
				/* Turn off 'trace' bits */
				regs->msr &= ~MSR_SINGLESTEP;
				regs->msr |= kcb->kprobe_saved_msr;
				goto no_kprobe;
			}
			/* We have reentered the kprobe_handler(), since
			 * another probe was hit while within the handler.
			 * We here save the original kprobes variables and
			 * just single step on the instruction of the new probe
			 * without calling any user handlers.
			 */
			save_previous_kprobe(kcb);
			set_current_kprobe(p, regs, kcb);
			kcb->kprobe_saved_msr = regs->msr;
			kprobes_inc_nmissed_count(p);
			prepare_singlestep(p, regs);
			kcb->kprobe_status = KPROBE_REENTER;
			return 1;
		} else {
			if (*addr != BREAKPOINT_INSTRUCTION) {
				/* If it is a trap variant, it does not
				 * belong to us */
				kprobe_opcode_t cur_insn = *addr;
				if (is_trap(cur_insn))
					goto no_kprobe;
				/* The breakpoint instruction was removed by
				 * another cpu right after we hit it; no
				 * further handling of this interrupt is
				 * appropriate
				 */
				ret = 1;
				goto no_kprobe;
			}
			p = __get_cpu_var(current_kprobe);
			if (p->break_handler && p->break_handler(p, regs)) {
				goto ss_probe;
			}
		}
		goto no_kprobe;
	}

	p = get_kprobe(addr);
	if (!p) {
		if (*addr != BREAKPOINT_INSTRUCTION) {
			/*
			 * PowerPC has multiple variants of the "trap"
			 * instruction. If the current instruction is a
			 * trap variant, it could belong to someone else.
			 */
			kprobe_opcode_t cur_insn = *addr;
			if (is_trap(cur_insn))
				goto no_kprobe;
			/*
			 * The breakpoint instruction was removed right
			 * after we hit it. Another cpu has removed
			 * either a probepoint or a debugger breakpoint
			 * at this address. In either case, no further
			 * handling of this interrupt is appropriate.
			 */
			ret = 1;
		}
		/* Not one of ours: let the kernel handle it */
		goto no_kprobe;
	}

	kcb->kprobe_status = KPROBE_HIT_ACTIVE;
	set_current_kprobe(p, regs, kcb);
	if (p->pre_handler && p->pre_handler(p, regs))
		/* handler has already set things up, so skip ss setup */
		return 1;

ss_probe:
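	/*
	 * "Boosting": if emulate_step() can emulate the instruction
	 * directly, we skip the hardware single-step round trip
	 * entirely; otherwise we fall back to stepping the copy in
	 * p->ainsn.insn.
	 */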
	if (p->ainsn.boostable >= 0) {
		unsigned int insn = *p->ainsn.insn;

		/* regs->nip is also adjusted if emulate_step returns 1 */
		ret = emulate_step(regs, insn);
		if (ret > 0) {
			/*
			 * Once this instruction has been boosted
			 * successfully, set the boostable flag
			 */
			if (unlikely(p->ainsn.boostable == 0))
				p->ainsn.boostable = 1;

			if (p->post_handler)
				p->post_handler(p, regs, 0);

			kcb->kprobe_status = KPROBE_HIT_SSDONE;
			reset_current_kprobe();
			preempt_enable_no_resched();
			return 1;
		} else if (ret < 0) {
			/*
			 * We don't allow kprobes on mtmsr(d)/rfi(d), etc.
			 * So, we should never get here... but, it's still
			 * good to catch them, just in case...
			 */
			printk("Can't step on instruction %x\n", insn);
			BUG();
		} else if (ret == 0)
			/* This instruction can't be boosted */
			p->ainsn.boostable = -1;
	}
	prepare_singlestep(p, regs);
	kcb->kprobe_status = KPROBE_HIT_SS;
	return 1;

no_kprobe:
	preempt_enable_no_resched();
	return ret;
}

/*
 * Function return probe trampoline:
 *	- init_kprobes() establishes a probepoint here
 *	- When the probed function returns, this probe
 *	  causes the handlers to fire
 */
static void __used kretprobe_trampoline_holder(void)
{
	asm volatile(".global kretprobe_trampoline\n"
			"kretprobe_trampoline:\n"
			"nop\n");
}

/*
 * Called when the probe at kretprobe trampoline is hit
 */
static int __kprobes trampoline_probe_handler(struct kprobe *p,
						struct pt_regs *regs)
{
	struct kretprobe_instance *ri = NULL;
	struct hlist_head *head, empty_rp;
	struct hlist_node *node, *tmp;
	unsigned long flags, orig_ret_address = 0;
	unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;

	INIT_HLIST_HEAD(&empty_rp);
	kretprobe_hash_lock(current, &head, &flags);

	/*
	 * It is possible to have multiple instances associated with a given
	 * task either because multiple functions in the call path have
	 * a return probe installed on them, and/or more than one return
	 * probe was registered for a target function.
	 *
	 * We can handle this because:
	 *	- instances are always inserted at the head of the list
	 *	- when multiple return probes are registered for the same
	 *	  function, the first instance's ret_addr will point to the
	 *	  real return address, and all the rest will point to
	 *	  kretprobe_trampoline
	 */
	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		if (ri->rp && ri->rp->handler)
			ri->rp->handler(ri, regs);

		orig_ret_address = (unsigned long)ri->ret_addr;
		recycle_rp_inst(ri, &empty_rp);

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack
			 */
			break;
	}

	kretprobe_assert(ri, orig_ret_address, trampoline_address);
	regs->nip = orig_ret_address;

	reset_current_kprobe();
	kretprobe_hash_unlock(current, &flags);
	preempt_enable_no_resched();

	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
	/*
	 * By returning a non-zero value, we are telling
	 * kprobe_handler() that we don't want the post_handler
	 * to run (and have re-enabled preemption)
	 */
	return 1;
}
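
/*
 * Illustrative only -- a minimal kretprobe user sketch (the probed
 * symbol and handler names here are placeholders):
 *
 *	static struct kretprobe rp = {
 *		.kp.symbol_name	= "do_fork",
 *		.handler	= my_ret_handler,
 *	};
 *	...
 *	ret = register_kretprobe(&rp);
 *
 * Each entry into the probed function queues a kretprobe_instance
 * whose ret_addr is consumed by trampoline_probe_handler() above.
 */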

/*
 * Called after single-stepping.  p->addr is the address of the
 * instruction that has been replaced by the "breakpoint" instruction.
 * To avoid the SMP problems that can occur when we temporarily put
 * back the original opcode to single-step, we single-stepped a copy
 * of the instruction.  The address of this copy is p->ainsn.insn.
 */
static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
{
	int ret;
	unsigned int insn = *p->ainsn.insn;

	regs->nip = (unsigned long)p->addr;
	ret = emulate_step(regs, insn);
	if (ret == 0)
		regs->nip = (unsigned long)p->addr + 4;
}
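
/*
 * Entered via the DIE_SSTEP notifier once the single step of the
 * instruction copy completes.  Returns 0 ("not ours") unless the step
 * landed exactly at ainsn.insn + 4, since somebody else may be
 * single-stepping across a probe point.
 */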
static int __kprobes post_kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (!cur)
		return 0;

	/* make sure we got here for instruction we have a kprobe on */
	if (((unsigned long)cur->ainsn.insn + 4) != regs->nip)
		return 0;

	if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
		kcb->kprobe_status = KPROBE_HIT_SSDONE;
		cur->post_handler(cur, regs, 0);
	}

	resume_execution(cur, regs);
	regs->msr |= kcb->kprobe_saved_msr;

	/* Restore the original saved kprobes variables and continue. */
	if (kcb->kprobe_status == KPROBE_REENTER) {
		restore_previous_kprobe(kcb);
		goto out;
	}
	reset_current_kprobe();
out:
	preempt_enable_no_resched();

	/*
	 * If somebody else is single-stepping across a probe point, msr
	 * will have DE/SE set, in which case, continue the remaining
	 * processing of do_debug, as if this is not a probe hit.
	 */
	if (regs->msr & MSR_SINGLESTEP)
		return 0;

	return 1;
}

int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	const struct exception_table_entry *entry;

	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		/*
		 * We are here because the instruction being single
		 * stepped caused a page fault. We reset the current
		 * kprobe and the nip points back to the probe address
		 * and allow the page fault handler to continue as a
		 * normal page fault.
		 */
		regs->nip = (unsigned long)cur->addr;
		regs->msr &= ~MSR_SINGLESTEP; /* Turn off 'trace' bits */
		regs->msr |= kcb->kprobe_saved_msr;
		if (kcb->kprobe_status == KPROBE_REENTER)
			restore_previous_kprobe(kcb);
		else
			reset_current_kprobe();
		preempt_enable_no_resched();
		break;
	case KPROBE_HIT_ACTIVE:
	case KPROBE_HIT_SSDONE:
		/*
		 * We increment the nmissed count for accounting;
		 * we can also use the npre/npostfault counts for
		 * accounting these specific fault cases.
		 */
		kprobes_inc_nmissed_count(cur);

		/*
		 * We come here because instructions in the pre/post
		 * handler caused the page fault. This could happen
		 * if a handler tries to access user space via
		 * copy_from_user(), get_user() etc. Let the
		 * user-specified handler try to fix it first.
		 */
		if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
			return 1;

		/*
		 * In case the user-specified fault handler returned
		 * zero, try to fix up.
		 */
		if ((entry = search_exception_tables(regs->nip)) != NULL) {
			regs->nip = entry->fixup;
			return 1;
		}

		/*
		 * fixup_exception() could not handle it,
		 * Let do_page_fault() fix it.
		 */
		break;
	default:
		break;
	}
	return 0;
}

/*
 * Wrapper routine for handling exceptions.
 */
int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
				       unsigned long val, void *data)
{
	struct die_args *args = (struct die_args *)data;
	int ret = NOTIFY_DONE;

	if (args->regs && user_mode(args->regs))
		return ret;

	switch (val) {
	case DIE_BPT:
		if (kprobe_handler(args->regs))
			ret = NOTIFY_STOP;
		break;
	case DIE_SSTEP:
		if (post_kprobe_handler(args->regs))
			ret = NOTIFY_STOP;
		break;
	default:
		break;
	}
	return ret;
}
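
/*
 * On ppc64, a function pointer points at a function descriptor
 * ({entry, toc, env}) rather than at the first instruction, so the
 * actual entry point must be read out of the descriptor.
 */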
#ifdef CONFIG_PPC64
unsigned long arch_deref_entry_point(void *entry)
{
	return ((func_descr_t *)entry)->entry;
}
#endif
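
/*
 * Jprobe flow: setjmp_pre_handler() saves the register state and
 * redirects nip to the user's handler, which runs with the probed
 * function's arguments and finishes in jprobe_return().  The "trap"
 * there brings us into longjmp_break_handler(), which restores the
 * saved registers so execution resumes as if nothing happened.
 */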
int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct jprobe *jp = container_of(p, struct jprobe, kp);
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	memcpy(&kcb->jprobe_saved_regs, regs, sizeof(struct pt_regs));

	/* setup return addr to the jprobe handler routine */
	regs->nip = arch_deref_entry_point(jp->entry);
#ifdef CONFIG_PPC64
	regs->gpr[2] = (unsigned long)(((func_descr_t *)jp->entry)->toc);
#endif

	return 1;
}

void __used __kprobes jprobe_return(void)
{
	asm volatile("trap" ::: "memory");
}

static void __used __kprobes jprobe_return_end(void)
{
}

int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	/*
	 * FIXME - we should ideally be validating that we got here 'cos
	 * of the "trap" in jprobe_return() above, before restoring the
	 * saved regs...
	 */
	memcpy(regs, &kcb->jprobe_saved_regs, sizeof(struct pt_regs));
	preempt_enable_no_resched();
	return 1;
}

static struct kprobe trampoline_p = {
	.addr = (kprobe_opcode_t *)&kretprobe_trampoline,
	.pre_handler = trampoline_probe_handler
};

int __init arch_init_kprobes(void)
{
	return register_kprobe(&trampoline_p);
}
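
/*
 * Report to the generic kprobes code whether a given probe sits on the
 * kretprobe trampoline, which is an implementation detail of this file
 * rather than an ordinary probe point.
 */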
int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
	if (p->addr == (kprobe_opcode_t *)&kretprobe_trampoline)
		return 1;

	return 0;
}