kprobes.c 13 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525
  1. /*
  2. * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
  3. *
  4. * This program is free software; you can redistribute it and/or modify
  5. * it under the terms of the GNU General Public License version 2 as
  6. * published by the Free Software Foundation.
  7. */
  8. #include <linux/types.h>
  9. #include <linux/kprobes.h>
  10. #include <linux/slab.h>
  11. #include <linux/module.h>
  12. #include <linux/kprobes.h>
  13. #include <linux/kdebug.h>
  14. #include <linux/sched.h>
  15. #include <linux/uaccess.h>
  16. #include <asm/cacheflush.h>
  17. #include <asm/current.h>
  18. #include <asm/disasm.h>
/*
 * Bound for the jprobe stack save/restore: at most MAX_STACK_SIZE bytes,
 * but never past the end of the current task's kernel stack (addr is the
 * current SP; current_thread_info() + THREAD_SIZE is the stack's end).
 */
#define MIN_STACK_SIZE(addr) min((unsigned long)MAX_STACK_SIZE, \
(unsigned long)current_thread_info() + THREAD_SIZE - (addr))

/* Per-CPU kprobe state: the probe currently being serviced + control block */
DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
  23. int __kprobes arch_prepare_kprobe(struct kprobe *p)
  24. {
  25. /* Attempt to probe at unaligned address */
  26. if ((unsigned long)p->addr & 0x01)
  27. return -EINVAL;
  28. /* Address should not be in exception handling code */
  29. p->ainsn.is_short = is_short_instr((unsigned long)p->addr);
  30. p->opcode = *p->addr;
  31. return 0;
  32. }
  33. void __kprobes arch_arm_kprobe(struct kprobe *p)
  34. {
  35. *p->addr = UNIMP_S_INSTRUCTION;
  36. flush_icache_range((unsigned long)p->addr,
  37. (unsigned long)p->addr + sizeof(kprobe_opcode_t));
  38. }
  39. void __kprobes arch_disarm_kprobe(struct kprobe *p)
  40. {
  41. *p->addr = p->opcode;
  42. flush_icache_range((unsigned long)p->addr,
  43. (unsigned long)p->addr + sizeof(kprobe_opcode_t));
  44. }
  45. void __kprobes arch_remove_kprobe(struct kprobe *p)
  46. {
  47. arch_disarm_kprobe(p);
  48. /* Can we remove the kprobe in the middle of kprobe handling? */
  49. if (p->ainsn.t1_addr) {
  50. *(p->ainsn.t1_addr) = p->ainsn.t1_opcode;
  51. flush_icache_range((unsigned long)p->ainsn.t1_addr,
  52. (unsigned long)p->ainsn.t1_addr +
  53. sizeof(kprobe_opcode_t));
  54. p->ainsn.t1_addr = NULL;
  55. }
  56. if (p->ainsn.t2_addr) {
  57. *(p->ainsn.t2_addr) = p->ainsn.t2_opcode;
  58. flush_icache_range((unsigned long)p->ainsn.t2_addr,
  59. (unsigned long)p->ainsn.t2_addr +
  60. sizeof(kprobe_opcode_t));
  61. p->ainsn.t2_addr = NULL;
  62. }
  63. }
  64. static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
  65. {
  66. kcb->prev_kprobe.kp = kprobe_running();
  67. kcb->prev_kprobe.status = kcb->kprobe_status;
  68. }
  69. static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
  70. {
  71. __get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
  72. kcb->kprobe_status = kcb->prev_kprobe.status;
  73. }
  74. static inline void __kprobes set_current_kprobe(struct kprobe *p)
  75. {
  76. __get_cpu_var(current_kprobe) = p;
  77. }
  78. static void __kprobes resume_execution(struct kprobe *p, unsigned long addr,
  79. struct pt_regs *regs)
  80. {
  81. /* Remove the trap instructions inserted for single step and
  82. * restore the original instructions
  83. */
  84. if (p->ainsn.t1_addr) {
  85. *(p->ainsn.t1_addr) = p->ainsn.t1_opcode;
  86. flush_icache_range((unsigned long)p->ainsn.t1_addr,
  87. (unsigned long)p->ainsn.t1_addr +
  88. sizeof(kprobe_opcode_t));
  89. p->ainsn.t1_addr = NULL;
  90. }
  91. if (p->ainsn.t2_addr) {
  92. *(p->ainsn.t2_addr) = p->ainsn.t2_opcode;
  93. flush_icache_range((unsigned long)p->ainsn.t2_addr,
  94. (unsigned long)p->ainsn.t2_addr +
  95. sizeof(kprobe_opcode_t));
  96. p->ainsn.t2_addr = NULL;
  97. }
  98. return;
  99. }
/*
 * Single step "in place": restore the probed instruction, then plant trap
 * instructions at every address execution could reach next — the
 * fall-through PC (t1) and, for a branch, the branch target (t2).  The
 * trap lands in arc_post_kprobe_handler() which undoes all of this.
 */
static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs)
{
	unsigned long next_pc;
	unsigned long tgt_if_br = 0;
	int is_branch;
	unsigned long bta;

	/* Copy the opcode back to the kprobe location and execute the
	 * instruction. Because of this we will not be able to get into the
	 * same kprobe until this kprobe is done
	 */
	*(p->addr) = p->opcode;
	flush_icache_range((unsigned long)p->addr,
			   (unsigned long)p->addr + sizeof(kprobe_opcode_t));

	/* Now we insert the trap at the next location after this instruction to
	 * single step. If it is a branch we insert the trap at possible branch
	 * targets
	 */
	bta = regs->bta;

	/* NOTE(review): 0x40 presumed to be the STATUS32 delay-slot (DE)
	 * bit per the comment below — confirm against the ARC PRM */
	if (regs->status32 & 0x40) {
		/* We are in a delay slot with the branch taken */
		next_pc = bta & ~0x01;

		if (!p->ainsn.is_short) {
			if (bta & 0x01)
				regs->blink += 2;
			else {
				/* Branch not taken */
				next_pc += 2;

				/* next pc is taken from bta after executing the
				 * delay slot instruction
				 */
				regs->bta += 2;
			}
		}

		is_branch = 0;
	} else
		/* Decode the instruction to find the next PC and, for a
		 * branch, the taken-path target */
		is_branch =
		    disasm_next_pc((unsigned long)p->addr, regs,
			(struct callee_regs *) current->thread.callee_reg,
			&next_pc, &tgt_if_br);

	/* Plant the trap at the fall-through address, saving the original */
	p->ainsn.t1_addr = (kprobe_opcode_t *) next_pc;
	p->ainsn.t1_opcode = *(p->ainsn.t1_addr);
	*(p->ainsn.t1_addr) = TRAP_S_2_INSTRUCTION;
	flush_icache_range((unsigned long)p->ainsn.t1_addr,
			   (unsigned long)p->ainsn.t1_addr +
			   sizeof(kprobe_opcode_t));

	/* And at the branch target, if execution could go there instead */
	if (is_branch) {
		p->ainsn.t2_addr = (kprobe_opcode_t *) tgt_if_br;
		p->ainsn.t2_opcode = *(p->ainsn.t2_addr);
		*(p->ainsn.t2_addr) = TRAP_S_2_INSTRUCTION;
		flush_icache_range((unsigned long)p->ainsn.t2_addr,
				   (unsigned long)p->ainsn.t2_addr +
				   sizeof(kprobe_opcode_t));
	}
}
/*
 * Breakpoint (DIE_IERR) entry point: dispatch the exception at @addr to
 * the kprobe registered there, if any.  Returns 1 when kprobes consumed
 * the exception, 0 to let normal exception handling proceed.  Disables
 * preemption; it is re-enabled in arc_post_kprobe_handler() (or in the
 * break/fault paths) once the probe is fully serviced.
 */
int __kprobes arc_kprobe_handler(unsigned long addr, struct pt_regs *regs)
{
	struct kprobe *p;
	struct kprobe_ctlblk *kcb;

	preempt_disable();

	kcb = get_kprobe_ctlblk();
	p = get_kprobe((unsigned long *)addr);

	if (p) {
		/*
		 * We have reentered the kprobe_handler, since another kprobe
		 * was hit while within the handler, we save the original
		 * kprobes and single step on the instruction of the new probe
		 * without calling any user handlers to avoid recursive
		 * kprobes.
		 */
		if (kprobe_running()) {
			save_previous_kprobe(kcb);
			set_current_kprobe(p);
			kprobes_inc_nmissed_count(p);
			setup_singlestep(p, regs);
			kcb->kprobe_status = KPROBE_REENTER;
			return 1;
		}

		set_current_kprobe(p);
		kcb->kprobe_status = KPROBE_HIT_ACTIVE;

		/* If we have no pre-handler or it returned 0, we continue with
		 * normal processing. If we have a pre-handler and it returned
		 * non-zero - which is expected from setjmp_pre_handler for
		 * jprobe, we return without single stepping and leave that to
		 * the break-handler which is invoked by a kprobe from
		 * jprobe_return
		 */
		if (!p->pre_handler || !p->pre_handler(p, regs)) {
			setup_singlestep(p, regs);
			kcb->kprobe_status = KPROBE_HIT_SS;
		}

		return 1;
	} else if (kprobe_running()) {
		/* No probe here, but one is in flight: this is the unimp_s
		 * from jprobe_return() — hand off to the break handler */
		p = __get_cpu_var(current_kprobe);
		if (p->break_handler && p->break_handler(p, regs)) {
			setup_singlestep(p, regs);
			kcb->kprobe_status = KPROBE_HIT_SS;
			return 1;
		}
	}

	/* no_kprobe: not ours — rebalance preemption and pass it on */
	preempt_enable_no_resched();
	return 0;
}
/*
 * Trap (DIE_TRAP) entry point for the single-step trap planted by
 * setup_singlestep(): restore the trap site(s), re-arm the probe, run the
 * user post-handler and re-enable preemption.  Returns 1 when the trap
 * belonged to a kprobe in flight, 0 otherwise.
 */
static int __kprobes arc_post_kprobe_handler(unsigned long addr,
					     struct pt_regs *regs)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (!cur)
		return 0;

	resume_execution(cur, addr, regs);

	/* Rearm the kprobe */
	arch_arm_kprobe(cur);

	/*
	 * When we return from trap instruction we go to the next instruction
	 * We restored the actual instruction in resume_execution and we want
	 * to return to the same address and execute it
	 */
	regs->ret = addr;

	if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
		kcb->kprobe_status = KPROBE_HIT_SSDONE;
		cur->post_handler(cur, regs, 0);
	}

	/* A nested probe finished: make the interrupted probe current again */
	if (kcb->kprobe_status == KPROBE_REENTER) {
		restore_previous_kprobe(kcb);
		goto out;
	}

	reset_current_kprobe();

out:
	/* Balances the preempt_disable() in arc_kprobe_handler() */
	preempt_enable_no_resched();
	return 1;
}
/*
 * Fault can be for the instruction being single stepped or for the
 * pre/post handlers in the module.
 * This is applicable for applications like user probes, where we have the
 * probe in user space and the handlers in the kernel
 *
 * Returns 1 if the fault was handled here, 0 to let the normal fault
 * handling (do_page_fault) take over.
 */
int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned long trapnr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		/*
		 * We are here because the instruction being single stepped
		 * caused the fault. We reset the current kprobe and allow the
		 * exception handler as if it is regular exception. In our
		 * case it doesn't matter because the system will be halted
		 */
		resume_execution(cur, (unsigned long)cur->addr, regs);

		if (kcb->kprobe_status == KPROBE_REENTER)
			restore_previous_kprobe(kcb);
		else
			reset_current_kprobe();

		/* Balances the preempt_disable() in arc_kprobe_handler() */
		preempt_enable_no_resched();
		break;

	case KPROBE_HIT_ACTIVE:
	case KPROBE_HIT_SSDONE:
		/*
		 * We are here because the instructions in the pre/post handler
		 * caused the fault.
		 */

		/* We increment the nmissed count for accounting,
		 * we can also use npre/npostfault count for accounting
		 * these specific fault cases.
		 */
		kprobes_inc_nmissed_count(cur);

		/*
		 * We come here because instructions in the pre/post
		 * handler caused the page_fault, this could happen
		 * if handler tries to access user space by
		 * copy_from_user(), get_user() etc. Let the
		 * user-specified handler try to fix it first.
		 */
		if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
			return 1;

		/*
		 * In case the user-specified fault handler returned zero,
		 * try to fix up.
		 */
		if (fixup_exception(regs))
			return 1;

		/*
		 * fixup_exception() could not handle it,
		 * Let do_page_fault() fix it.
		 */
		break;

	default:
		break;
	}
	return 0;
}
  294. int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
  295. unsigned long val, void *data)
  296. {
  297. struct die_args *args = data;
  298. unsigned long addr = args->err;
  299. int ret = NOTIFY_DONE;
  300. switch (val) {
  301. case DIE_IERR:
  302. if (arc_kprobe_handler(addr, args->regs))
  303. return NOTIFY_STOP;
  304. break;
  305. case DIE_TRAP:
  306. if (arc_post_kprobe_handler(addr, args->regs))
  307. return NOTIFY_STOP;
  308. break;
  309. default:
  310. break;
  311. }
  312. return ret;
  313. }
/*
 * jprobe pre-handler: snapshot the register file and the top of the kernel
 * stack (bounded by MIN_STACK_SIZE), then divert execution to the jprobe's
 * entry function.  Returns 1 so arc_kprobe_handler() skips single stepping
 * (see the comment there); longjmp_break_handler() undoes the snapshot.
 */
int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct jprobe *jp = container_of(p, struct jprobe, kp);
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	unsigned long sp_addr = regs->sp;

	kcb->jprobe_saved_regs = *regs;
	/* Save the stack region the entry function may clobber */
	memcpy(kcb->jprobes_stack, (void *)sp_addr, MIN_STACK_SIZE(sp_addr));
	regs->ret = (unsigned long)(jp->entry);

	return 1;
}
/*
 * Called at the end of a jprobe entry function.  Executes "unimp_s" — the
 * same instruction arch_arm_kprobe() plants — so the resulting exception
 * reaches arc_kprobe_handler(), which (with no probe at this address but
 * one in flight) invokes the break_handler to restore the saved context.
 */
void __kprobes jprobe_return(void)
{
	__asm__ __volatile__("unimp_s");
	return;
}
/*
 * break_handler counterpart of setjmp_pre_handler(): reached via the
 * unimp_s executed by jprobe_return().  Restores the saved registers and
 * stack so execution continues as if the probed function ran normally.
 */
int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	unsigned long sp_addr;

	*regs = kcb->jprobe_saved_regs;
	sp_addr = regs->sp;
	/* Put back the stack bytes saved in setjmp_pre_handler() */
	memcpy((void *)sp_addr, kcb->jprobes_stack, MIN_STACK_SIZE(sp_addr));
	/* Balances the preempt_disable() in arc_kprobe_handler() */
	preempt_enable_no_resched();

	return 1;
}
/*
 * Emits the global kretprobe_trampoline symbol: a single "nop" that
 * diverted function returns land on.  The kprobe planted on it
 * (trampoline_p) does the actual kretprobe dispatch.
 */
static void __used kretprobe_trampoline_holder(void)
{
	__asm__ __volatile__(".global kretprobe_trampoline\n"
			     "kretprobe_trampoline:\n" "nop\n");
}
  344. void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
  345. struct pt_regs *regs)
  346. {
  347. ri->ret_addr = (kprobe_opcode_t *) regs->blink;
  348. /* Replace the return addr with trampoline addr */
  349. regs->blink = (unsigned long)&kretprobe_trampoline;
  350. }
/*
 * pre_handler of the kprobe planted on kretprobe_trampoline: walk this
 * task's kretprobe instances, run the user handlers, recover the real
 * return address and resume there.  Returns 1 so the generic kprobe code
 * skips single-stepping/post-handling of the trampoline itself.
 */
static int __kprobes trampoline_probe_handler(struct kprobe *p,
					      struct pt_regs *regs)
{
	struct kretprobe_instance *ri = NULL;
	struct hlist_head *head, empty_rp;
	struct hlist_node *tmp;
	unsigned long flags, orig_ret_address = 0;
	unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;

	INIT_HLIST_HEAD(&empty_rp);
	kretprobe_hash_lock(current, &head, &flags);

	/*
	 * It is possible to have multiple instances associated with a given
	 * task either because an multiple functions in the call path
	 * have a return probe installed on them, and/or more than one return
	 * return probe was registered for a target function.
	 *
	 * We can handle this because:
	 *     - instances are always inserted at the head of the list
	 *     - when multiple return probes are registered for the same
	 *       function, the first instance's ret_addr will point to the
	 *       real return address, and all the rest will point to
	 *       kretprobe_trampoline
	 */
	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		if (ri->rp && ri->rp->handler)
			ri->rp->handler(ri, regs);

		orig_ret_address = (unsigned long)ri->ret_addr;
		recycle_rp_inst(ri, &empty_rp);

		if (orig_ret_address != trampoline_address) {
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack
			 */
			break;
		}
	}

	kretprobe_assert(ri, orig_ret_address, trampoline_address);
	/* Resume at the address the probed function would have returned to */
	regs->ret = orig_ret_address;

	reset_current_kprobe();
	kretprobe_hash_unlock(current, &flags);
	preempt_enable_no_resched();

	/* Free the recycled instances outside the hash lock */
	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}

	/* By returning a non zero value, we are telling the kprobe handler
	 * that we don't want the post_handler to run
	 */
	return 1;
}
/* The probe planted on the trampoline so diverted returns are intercepted */
static struct kprobe trampoline_p = {
	.addr = (kprobe_opcode_t *) &kretprobe_trampoline,
	.pre_handler = trampoline_probe_handler
};
  409. int __init arch_init_kprobes(void)
  410. {
  411. /* Registering the trampoline code for the kret probe */
  412. return register_kprobe(&trampoline_p);
  413. }
  414. int __kprobes arch_trampoline_kprobe(struct kprobe *p)
  415. {
  416. if (p->addr == (kprobe_opcode_t *) &kretprobe_trampoline)
  417. return 1;
  418. return 0;
  419. }
  420. void trap_is_kprobe(unsigned long cause, unsigned long address,
  421. struct pt_regs *regs)
  422. {
  423. notify_die(DIE_TRAP, "kprobe_trap", regs, address, cause, SIGTRAP);
  424. }