kprobes.c

/*
 *  Kernel Probes (KProbes)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes contributions from
 *		Rusty Russell).
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2004-Nov	Ananth N Mavinakayanahalli <ananth@in.ibm.com> kprobes port
 *		for PPC64
 */
#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/preempt.h>
#include <linux/module.h>
#include <linux/kdebug.h>
#include <linux/slab.h>
#include <asm/cacheflush.h>
#include <asm/sstep.h>
#include <asm/uaccess.h>
#include <asm/system.h>

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
#define MSR_SINGLESTEP	(MSR_DE)
#else
#define MSR_SINGLESTEP	(MSR_SE)
#endif

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

struct kretprobe_blackpoint kretprobe_blacklist[] = {{NULL, NULL}};
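
/*
 * Validate and set up a probe: the probed address must be word-aligned,
 * and instructions that switch MSR context (mtmsr[d], rfi[d]) cannot be
 * probed. On success, the original instruction is copied into an
 * out-of-line insn slot, where it will later be single-stepped.
 */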
int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
	int ret = 0;
	kprobe_opcode_t insn = *p->addr;

	if ((unsigned long)p->addr & 0x03) {
		printk("Attempt to register kprobe at an unaligned address\n");
		ret = -EINVAL;
	} else if (IS_MTMSRD(insn) || IS_RFID(insn) || IS_RFI(insn)) {
		printk("Cannot register a kprobe on rfi/rfid or mtmsr[d]\n");
		ret = -EINVAL;
	}

	/* insn must be on a special executable page on ppc64.  This is
	 * not explicitly required on ppc32 (right now), but it doesn't hurt */
	if (!ret) {
		p->ainsn.insn = get_insn_slot();
		if (!p->ainsn.insn)
			ret = -ENOMEM;
	}

	if (!ret) {
		memcpy(p->ainsn.insn, p->addr,
				MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
		p->opcode = *p->addr;
		flush_icache_range((unsigned long)p->ainsn.insn,
			(unsigned long)p->ainsn.insn + sizeof(kprobe_opcode_t));
	}

	p->ainsn.boostable = 0;
	return ret;
}
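
/*
 * Arming patches the breakpoint (trap) instruction over the probed
 * address; disarming writes the saved original opcode back. Both must
 * flush the icache so the patched word is what actually executes.
 */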
void __kprobes arch_arm_kprobe(struct kprobe *p)
{
	*p->addr = BREAKPOINT_INSTRUCTION;
	flush_icache_range((unsigned long) p->addr,
			   (unsigned long) p->addr + sizeof(kprobe_opcode_t));
}

void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
	*p->addr = p->opcode;
	flush_icache_range((unsigned long) p->addr,
			   (unsigned long) p->addr + sizeof(kprobe_opcode_t));
}

void __kprobes arch_remove_kprobe(struct kprobe *p)
{
	if (p->ainsn.insn) {
		free_insn_slot(p->ainsn.insn, 0);
		p->ainsn.insn = NULL;
	}
}
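
/*
 * Set the CPU up to single-step the copied instruction in the insn slot:
 * external interrupts are masked and the single-step MSR bit (or DBCR0 on
 * BookE-style debug hardware) is enabled before pointing nip at the
 * out-of-line copy.
 */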
static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
{
	/* We turn off async exceptions to ensure that the single step will
	 * be for the instruction we have the kprobe on; if we don't, it's
	 * possible we'd get the single step reported for an exception handler
	 * like Decrementer or External Interrupt */
	regs->msr &= ~MSR_EE;
	regs->msr |= MSR_SINGLESTEP;
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	regs->msr &= ~MSR_CE;
	mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) | DBCR0_IC | DBCR0_IDM);
#endif

	/*
	 * On powerpc we should single step on the original
	 * instruction even if the probed insn is a trap
	 * variant, as values in regs could play a part in
	 * whether the trap is taken or not
	 */
	regs->nip = (unsigned long)p->ainsn.insn;
}
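
/*
 * Helpers for tracking the per-cpu "current" kprobe: the previous probe
 * state is stashed/restored across reentrant hits, and set_current_kprobe()
 * also snapshots the MSR so it can be restored after the single step.
 */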
static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	kcb->prev_kprobe.kp = kprobe_running();
	kcb->prev_kprobe.status = kcb->kprobe_status;
	kcb->prev_kprobe.saved_msr = kcb->kprobe_saved_msr;
}

static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	__get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
	kcb->kprobe_status = kcb->prev_kprobe.status;
	kcb->kprobe_saved_msr = kcb->prev_kprobe.saved_msr;
}

static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
				struct kprobe_ctlblk *kcb)
{
	__get_cpu_var(current_kprobe) = p;
	kcb->kprobe_saved_msr = regs->msr;
}
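
/*
 * Hijack the function's return path: remember the real return address
 * from the link register, then point LR at kretprobe_trampoline so the
 * trampoline probe fires when the probed function returns.
 */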
void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
				      struct pt_regs *regs)
{
	ri->ret_addr = (kprobe_opcode_t *)regs->link;

	/* Replace the return addr with trampoline addr */
	regs->link = (unsigned long)kretprobe_trampoline;
}
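
/*
 * Main breakpoint dispatcher, reached from the trap path via the notifier
 * below. Handles reentrant hits, stray trap variants, and breakpoints
 * removed by another cpu, then either boosts (emulates) the probed
 * instruction or sets up a hardware single step of the out-of-line copy.
 */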
static int __kprobes kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *p;
	int ret = 0;
	unsigned int *addr = (unsigned int *)regs->nip;
	struct kprobe_ctlblk *kcb;

	/*
	 * We don't want to be preempted for the entire
	 * duration of kprobe processing
	 */
	preempt_disable();
	kcb = get_kprobe_ctlblk();

	/* Check we're not actually recursing */
	if (kprobe_running()) {
		p = get_kprobe(addr);
		if (p) {
			kprobe_opcode_t insn = *p->ainsn.insn;
			if (kcb->kprobe_status == KPROBE_HIT_SS &&
					is_trap(insn)) {
				/* Turn off 'trace' bits */
				regs->msr &= ~MSR_SINGLESTEP;
				regs->msr |= kcb->kprobe_saved_msr;
				goto no_kprobe;
			}
			/* We have reentered the kprobe_handler(), since
			 * another probe was hit while within the handler.
			 * We here save the original kprobes variables and
			 * just single step on the instruction of the new probe
			 * without calling any user handlers.
			 */
			save_previous_kprobe(kcb);
			set_current_kprobe(p, regs, kcb);
			kcb->kprobe_saved_msr = regs->msr;
			kprobes_inc_nmissed_count(p);
			prepare_singlestep(p, regs);
			kcb->kprobe_status = KPROBE_REENTER;
			return 1;
		} else {
			if (*addr != BREAKPOINT_INSTRUCTION) {
				/* If trap variant, it doesn't belong to us */
				kprobe_opcode_t cur_insn = *addr;
				if (is_trap(cur_insn))
					goto no_kprobe;
				/* The breakpoint instruction was removed by
				 * another cpu right after we hit it; no further
				 * handling of this interrupt is appropriate
				 */
				ret = 1;
				goto no_kprobe;
			}
			p = __get_cpu_var(current_kprobe);
			if (p->break_handler && p->break_handler(p, regs)) {
				goto ss_probe;
			}
		}
		goto no_kprobe;
	}

	p = get_kprobe(addr);
	if (!p) {
		if (*addr != BREAKPOINT_INSTRUCTION) {
			/*
			 * PowerPC has multiple variants of the "trap"
			 * instruction. If the current instruction is a
			 * trap variant, it could belong to someone else
			 */
			kprobe_opcode_t cur_insn = *addr;
			if (is_trap(cur_insn))
				goto no_kprobe;
			/*
			 * The breakpoint instruction was removed right
			 * after we hit it. Another cpu has removed
			 * either a probepoint or a debugger breakpoint
			 * at this address. In either case, no further
			 * handling of this interrupt is appropriate.
			 */
			ret = 1;
		}
		/* Not one of ours: let kernel handle it */
		goto no_kprobe;
	}

	kcb->kprobe_status = KPROBE_HIT_ACTIVE;
	set_current_kprobe(p, regs, kcb);
	if (p->pre_handler && p->pre_handler(p, regs))
		/* handler has already set things up, so skip ss setup */
		return 1;

ss_probe:
	if (p->ainsn.boostable >= 0) {
		unsigned int insn = *p->ainsn.insn;

		/* regs->nip is also adjusted if emulate_step returns 1 */
		ret = emulate_step(regs, insn);
		if (ret > 0) {
			/*
			 * Once this instruction has been boosted
			 * successfully, set the boostable flag
			 */
			if (unlikely(p->ainsn.boostable == 0))
				p->ainsn.boostable = 1;

			if (p->post_handler)
				p->post_handler(p, regs, 0);

			kcb->kprobe_status = KPROBE_HIT_SSDONE;
			reset_current_kprobe();
			preempt_enable_no_resched();
			return 1;
		} else if (ret < 0) {
			/*
			 * We don't allow kprobes on mtmsr(d)/rfi(d), etc.
			 * So, we should never get here... but, it's still
			 * good to catch them, just in case...
			 */
			printk("Can't step on instruction %x\n", insn);
			BUG();
		} else if (ret == 0)
			/* This instruction can't be boosted */
			p->ainsn.boostable = -1;
	}
	prepare_singlestep(p, regs);
	kcb->kprobe_status = KPROBE_HIT_SS;
	return 1;

no_kprobe:
	preempt_enable_no_resched();
	return ret;
}
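
/*
 * For reference, a minimal sketch (not part of this file, not compiled)
 * of how a module would consume this arch support through the generic
 * kprobes API; the probed symbol "do_fork" and all example_* names are
 * illustrative only.
 */
#if 0
#include <linux/module.h>
#include <linux/kprobes.h>

static int example_pre(struct kprobe *p, struct pt_regs *regs)
{
	printk(KERN_INFO "pre_handler: nip = 0x%lx\n", regs->nip);
	return 0;	/* let the single step proceed */
}

static struct kprobe example_kp = {
	.symbol_name	= "do_fork",	/* hypothetical target */
	.pre_handler	= example_pre,
};

static int __init example_init(void)
{
	return register_kprobe(&example_kp);
}

static void __exit example_exit(void)
{
	unregister_kprobe(&example_kp);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
#endif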
/*
 * Function return probe trampoline:
 *	- init_kprobes() establishes a probepoint here
 *	- When the probed function returns, this probe
 *	  causes the handlers to fire
 */
static void __used kretprobe_trampoline_holder(void)
{
	asm volatile(".global kretprobe_trampoline\n"
			"kretprobe_trampoline:\n"
			"nop\n");
}
/*
 * Called when the probe at kretprobe trampoline is hit
 */
static int __kprobes trampoline_probe_handler(struct kprobe *p,
						struct pt_regs *regs)
{
	struct kretprobe_instance *ri = NULL;
	struct hlist_head *head, empty_rp;
	struct hlist_node *node, *tmp;
	unsigned long flags, orig_ret_address = 0;
	unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;

	INIT_HLIST_HEAD(&empty_rp);
	kretprobe_hash_lock(current, &head, &flags);

	/*
	 * It is possible to have multiple instances associated with a given
	 * task either because multiple functions in the call path have
	 * a return probe installed on them, and/or more than one return
	 * probe was registered for a target function.
	 *
	 * We can handle this because:
	 *     - instances are always inserted at the head of the list
	 *     - when multiple return probes are registered for the same
	 *       function, the first instance's ret_addr will point to the
	 *       real return address, and all the rest will point to
	 *       kretprobe_trampoline
	 */
	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		if (ri->rp && ri->rp->handler)
			ri->rp->handler(ri, regs);

		orig_ret_address = (unsigned long)ri->ret_addr;
		recycle_rp_inst(ri, &empty_rp);

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack
			 */
			break;
	}

	kretprobe_assert(ri, orig_ret_address, trampoline_address);
	regs->nip = orig_ret_address;

	reset_current_kprobe();
	kretprobe_hash_unlock(current, &flags);
	preempt_enable_no_resched();

	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
	/*
	 * By returning a non-zero value, we are telling
	 * kprobe_handler() that we don't want the post_handler
	 * to run (and have re-enabled preemption)
	 */
	return 1;
}
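
/*
 * Likewise, a minimal sketch (not compiled here) of a kretprobe consumer
 * that would exercise the trampoline above; the target symbol and the
 * example_* names are illustrative only. On powerpc the return value is
 * in gpr[3].
 */
#if 0
#include <linux/module.h>
#include <linux/kprobes.h>

static int example_ret(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	printk(KERN_INFO "returned 0x%lx\n", regs->gpr[3]);
	return 0;
}

static struct kretprobe example_rp = {
	.handler		= example_ret,
	.kp.symbol_name		= "do_fork",	/* hypothetical target */
	.maxactive		= 16,		/* concurrent instances */
};

static int __init example_init(void)
{
	return register_kretprobe(&example_rp);
}

static void __exit example_exit(void)
{
	unregister_kretprobe(&example_rp);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
#endif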
/*
 * Called after single-stepping. p->addr is the address of the
 * instruction whose first byte has been replaced by the "breakpoint"
 * instruction. To avoid the SMP problems that can occur when we
 * temporarily put back the original opcode to single-step, we
 * single-stepped a copy of the instruction. The address of this
 * copy is p->ainsn.insn.
 */
static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
{
	int ret;
	unsigned int insn = *p->ainsn.insn;

	regs->nip = (unsigned long)p->addr;
	ret = emulate_step(regs, insn);
	if (ret == 0)
		regs->nip = (unsigned long)p->addr + 4;
}
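
/*
 * Runs off the single-step exception once the out-of-line copy has
 * executed: fix up nip via resume_execution(), restore the saved MSR,
 * and pop any saved probe state if this was a reentrant hit.
 */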
static int __kprobes post_kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (!cur)
		return 0;

	/* make sure we got here for instruction we have a kprobe on */
	if (((unsigned long)cur->ainsn.insn + 4) != regs->nip)
		return 0;

	if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
		kcb->kprobe_status = KPROBE_HIT_SSDONE;
		cur->post_handler(cur, regs, 0);
	}

	resume_execution(cur, regs);
	regs->msr |= kcb->kprobe_saved_msr;

	/* Restore the original saved kprobes variables and continue. */
	if (kcb->kprobe_status == KPROBE_REENTER) {
		restore_previous_kprobe(kcb);
		goto out;
	}
	reset_current_kprobe();
out:
	preempt_enable_no_resched();

	/*
	 * if somebody else is singlestepping across a probe point, msr
	 * will have DE/SE set, in which case, continue the remaining processing
	 * of do_debug, as if this is not a probe hit.
	 */
	if (regs->msr & MSR_SINGLESTEP)
		return 0;

	return 1;
}
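
/*
 * Invoked when a fault is taken while a probe is active; decides whether
 * the fault happened during the single step, in a user handler, or is
 * covered by an exception-table fixup, and unwinds accordingly.
 */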
int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	const struct exception_table_entry *entry;

	switch(kcb->kprobe_status) {
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		/*
		 * We are here because the instruction being single
		 * stepped caused a page fault. We reset the current
		 * kprobe so that the nip points back to the probe
		 * address and allow the page fault handler to continue
		 * as a normal page fault.
		 */
		regs->nip = (unsigned long)cur->addr;
		regs->msr &= ~MSR_SINGLESTEP; /* Turn off 'trace' bits */
		regs->msr |= kcb->kprobe_saved_msr;
		if (kcb->kprobe_status == KPROBE_REENTER)
			restore_previous_kprobe(kcb);
		else
			reset_current_kprobe();
		preempt_enable_no_resched();
		break;
	case KPROBE_HIT_ACTIVE:
	case KPROBE_HIT_SSDONE:
		/*
		 * We increment the nmissed count for accounting;
		 * we can also use the npre/npostfault counts for
		 * accounting these specific fault cases.
		 */
		kprobes_inc_nmissed_count(cur);

		/*
		 * We come here because instructions in the pre/post
		 * handler caused the page fault; this could happen
		 * if the handler tries to access user space via
		 * copy_from_user(), get_user() etc. Let the
		 * user-specified handler try to fix it first.
		 */
		if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
			return 1;

		/*
		 * In case the user-specified fault handler returned
		 * zero, try to fix up.
		 */
		if ((entry = search_exception_tables(regs->nip)) != NULL) {
			regs->nip = entry->fixup;
			return 1;
		}

		/*
		 * fixup_exception() could not handle it;
		 * let do_page_fault() fix it.
		 */
		break;
	default:
		break;
	}
	return 0;
}
/*
 * Wrapper routine for handling exceptions.
 */
int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
				       unsigned long val, void *data)
{
	struct die_args *args = (struct die_args *)data;
	int ret = NOTIFY_DONE;

	if (args->regs && user_mode(args->regs))
		return ret;

	switch (val) {
	case DIE_BPT:
		if (kprobe_handler(args->regs))
			ret = NOTIFY_STOP;
		break;
	case DIE_SSTEP:
		if (post_kprobe_handler(args->regs))
			ret = NOTIFY_STOP;
		break;
	default:
		break;
	}
	return ret;
}
#ifdef CONFIG_PPC64
unsigned long arch_deref_entry_point(void *entry)
{
	return ((func_descr_t *)entry)->entry;
}
#endif
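
/*
 * Jprobe support: setjmp_pre_handler() redirects nip (and, on ppc64, the
 * TOC pointer) at the jprobe's entry stub; the stub must end by calling
 * jprobe_return(), whose trap lands in longjmp_break_handler() to restore
 * the saved registers. A minimal consumer sketch (not compiled here; the
 * target symbol "my_func" and its signature are purely hypothetical):
 */
#if 0
#include <linux/module.h>
#include <linux/kprobes.h>

/* the entry stub must mirror the probed function's signature */
static long example_jentry(unsigned long arg)
{
	printk(KERN_INFO "first argument: 0x%lx\n", arg);
	jprobe_return();	/* mandatory; never returns normally */
	return 0;		/* unreached */
}

static struct jprobe example_jp = {
	.entry			= example_jentry,
	.kp.symbol_name		= "my_func",	/* hypothetical target */
};

static int __init example_init(void)
{
	return register_jprobe(&example_jp);
}

static void __exit example_exit(void)
{
	unregister_jprobe(&example_jp);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
#endif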
int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct jprobe *jp = container_of(p, struct jprobe, kp);
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	memcpy(&kcb->jprobe_saved_regs, regs, sizeof(struct pt_regs));

	/* setup return addr to the jprobe handler routine */
	regs->nip = arch_deref_entry_point(jp->entry);
#ifdef CONFIG_PPC64
	regs->gpr[2] = (unsigned long)(((func_descr_t *)jp->entry)->toc);
#endif

	return 1;
}

void __used __kprobes jprobe_return(void)
{
	asm volatile("trap" ::: "memory");
}

static void __used __kprobes jprobe_return_end(void)
{
};
int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	/*
	 * FIXME - we should ideally be validating that we got here because
	 * of the "trap" in jprobe_return() above, before restoring the
	 * saved regs...
	 */
	memcpy(regs, &kcb->jprobe_saved_regs, sizeof(struct pt_regs));
	preempt_enable_no_resched();
	return 1;
}
static struct kprobe trampoline_p = {
	.addr = (kprobe_opcode_t *) &kretprobe_trampoline,
	.pre_handler = trampoline_probe_handler
};

int __init arch_init_kprobes(void)
{
	return register_kprobe(&trampoline_p);
}
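
/*
 * Identify the probe installed on kretprobe_trampoline so the generic
 * kprobes layer can treat it as an implementation detail rather than a
 * user-registered probe.
 */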
int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
	if (p->addr == (kprobe_opcode_t *)&kretprobe_trampoline)
		return 1;

	return 0;
}