/*
 *  Kernel Probes (KProbes)
 *  kernel/kprobes.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes suggestions from
 *		Rusty Russell).
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2004-Aug	Updated by Prasanna S Panchamukhi <prasanna@in.ibm.com> with
 *		hlists and exceptions notifier as suggested by Andi Kleen.
 * 2004-Sep	Prasanna S Panchamukhi <prasanna@in.ibm.com> Changed Kprobes
 *		exceptions notifier to be first on the priority list.
 * 2005-May	Hien Nguyen <hien@us.ibm.com>, Jim Keniston
 *		<jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> added function-return probes.
 */
#include <linux/kprobes.h>
#include <linux/spinlock.h>
#include <linux/hash.h>
#include <linux/init.h>
#include <linux/module.h>
#include <asm/cacheflush.h>
#include <asm/errno.h>
#include <asm/kdebug.h>

#define KPROBE_HASH_BITS 6
#define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)

static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];

unsigned int kprobe_cpu = NR_CPUS;
static DEFINE_SPINLOCK(kprobe_lock);
static struct kprobe *curr_kprobe;

/* Locks kprobe: irqs must be disabled */
void lock_kprobes(void)
{
	spin_lock(&kprobe_lock);
	kprobe_cpu = smp_processor_id();
}
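
/* Unlocks kprobes: clears kprobe_cpu before dropping the lock */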
void unlock_kprobes(void)
{
	kprobe_cpu = NR_CPUS;
	spin_unlock(&kprobe_lock);
}

/* You have to be holding the kprobe_lock */
struct kprobe *get_kprobe(void *addr)
{
	struct hlist_head *head;
	struct hlist_node *node;

	head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
	hlist_for_each(node, head) {
		struct kprobe *p = hlist_entry(node, struct kprobe, hlist);
		if (p->addr == addr)
			return p;
	}
	return NULL;
}

/*
 * Aggregate handlers for multiple kprobes support - these handlers
 * take care of invoking the individual kprobe handlers on p->list
 */
static int aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *kp;

	list_for_each_entry(kp, &p->list, list) {
		if (kp->pre_handler) {
			curr_kprobe = kp;
			if (kp->pre_handler(kp, regs))
				return 1;
		}
		curr_kprobe = NULL;
	}
	return 0;
}

static void aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
			      unsigned long flags)
{
	struct kprobe *kp;

	list_for_each_entry(kp, &p->list, list) {
		if (kp->post_handler) {
			curr_kprobe = kp;
			kp->post_handler(kp, regs, flags);
			curr_kprobe = NULL;
		}
	}
	return;
}

static int aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
			      int trapnr)
{
	/*
	 * if we faulted "during" the execution of a user specified
	 * probe handler, invoke just that probe's fault handler
	 */
	if (curr_kprobe && curr_kprobe->fault_handler) {
		if (curr_kprobe->fault_handler(curr_kprobe, regs, trapnr))
			return 1;
	}
	return 0;
}

static int aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *kp = curr_kprobe;

	if (curr_kprobe && kp->break_handler) {
		if (kp->break_handler(kp, regs)) {
			curr_kprobe = NULL;
			return 1;
		}
	}
	curr_kprobe = NULL;
	return 0;
}
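
/*
 * Kprobe planted on the kretprobe trampoline; its handlers run when a
 * probed function returns through the trampoline.
 */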
struct kprobe trampoline_p = {
	.addr = (kprobe_opcode_t *) &kretprobe_trampoline,
	.pre_handler = trampoline_probe_handler,
	.post_handler = trampoline_post_handler
};
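
/*
 * Return the first instance on the kretprobe's free (resp. used) list,
 * or NULL if the list is empty.
 */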
struct kretprobe_instance *get_free_rp_inst(struct kretprobe *rp)
{
	struct hlist_node *node;
	struct kretprobe_instance *ri;

	hlist_for_each_entry(ri, node, &rp->free_instances, uflist)
		return ri;
	return NULL;
}

static struct kretprobe_instance *get_used_rp_inst(struct kretprobe *rp)
{
	struct hlist_node *node;
	struct kretprobe_instance *ri;

	hlist_for_each_entry(ri, node, &rp->used_instances, uflist)
		return ri;
	return NULL;
}
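
/*
 * Look up the kretprobe instance planted for the invocation whose
 * return address lives at stack address "sara": hash the owning task
 * and scan its chain for a matching stack_addr.
 */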
struct kretprobe_instance *get_rp_inst(void *sara)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct task_struct *tsk;
	struct kretprobe_instance *ri;

	tsk = arch_get_kprobe_task(sara);
	head = &kretprobe_inst_table[hash_ptr(tsk, KPROBE_HASH_BITS)];
	hlist_for_each_entry(ri, node, head, hlist) {
		if (ri->stack_addr == sara)
			return ri;
	}
	return NULL;
}

void add_rp_inst(struct kretprobe_instance *ri)
{
	struct task_struct *tsk;
	/*
	 * Remove rp inst off the free list -
	 * Add it back when probed function returns
	 */
	hlist_del(&ri->uflist);
	tsk = arch_get_kprobe_task(ri->stack_addr);

	/* Add rp inst onto table */
	INIT_HLIST_NODE(&ri->hlist);
	hlist_add_head(&ri->hlist,
			&kretprobe_inst_table[hash_ptr(tsk, KPROBE_HASH_BITS)]);

	/* Also add this rp inst to the used list. */
	INIT_HLIST_NODE(&ri->uflist);
	hlist_add_head(&ri->uflist, &ri->rp->used_instances);
}

void recycle_rp_inst(struct kretprobe_instance *ri)
{
	/* remove rp inst off the rprobe_inst_table */
	hlist_del(&ri->hlist);
	if (ri->rp) {
		/* remove rp inst off the used list */
		hlist_del(&ri->uflist);
		/* put rp inst back onto the free list */
		INIT_HLIST_NODE(&ri->uflist);
		hlist_add_head(&ri->uflist, &ri->rp->free_instances);
	} else
		/* Unregistering */
		kfree(ri);
}
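
/* Return the head of the kretprobe-instance hash chain for task tsk */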
struct hlist_head * kretprobe_inst_table_head(struct task_struct *tsk)
{
	return &kretprobe_inst_table[hash_ptr(tsk, KPROBE_HASH_BITS)];
}
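
/* Return the first kretprobe instance belonging to task tk, if any */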
struct kretprobe_instance *get_rp_inst_tsk(struct task_struct *tk)
{
	struct task_struct *tsk;
	struct hlist_head *head;
	struct hlist_node *node;
	struct kretprobe_instance *ri;

	head = &kretprobe_inst_table[hash_ptr(tk, KPROBE_HASH_BITS)];
	hlist_for_each_entry(ri, node, head, hlist) {
		tsk = arch_get_kprobe_task(ri->stack_addr);
		if (tsk == tk)
			return ri;
	}
	return NULL;
}

/*
 * This function is called from do_exit or do_execve when task tk's stack is
 * about to be recycled. Recycle any function-return probe instances
 * associated with this task. These represent probed functions that have
 * been called but may never return.
 */
void kprobe_flush_task(struct task_struct *tk)
{
	unsigned long flags = 0;

	spin_lock_irqsave(&kprobe_lock, flags);
	arch_kprobe_flush_task(tk);
	spin_unlock_irqrestore(&kprobe_lock, flags);
}

/*
 * This kprobe pre_handler is registered with every kretprobe. When a
 * probe hits, it sets up the function-return probe.
 */
static int pre_handler_kretprobe(struct kprobe *p, struct pt_regs *regs)
{
	struct kretprobe *rp = container_of(p, struct kretprobe, kp);

	/* TODO: consider swapping the RA only after the last pre_handler fires */
	arch_prepare_kretprobe(rp, regs);
	return 0;
}
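
/* Free every instance left on the kretprobe's free list */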
static inline void free_rp_inst(struct kretprobe *rp)
{
	struct kretprobe_instance *ri;

	while ((ri = get_free_rp_inst(rp)) != NULL) {
		hlist_del(&ri->uflist);
		kfree(ri);
	}
}

/*
 * Keep all fields in the kprobe consistent
 */
static inline void copy_kprobe(struct kprobe *old_p, struct kprobe *p)
{
	memcpy(&p->opcode, &old_p->opcode, sizeof(kprobe_opcode_t));
	memcpy(&p->ainsn, &old_p->ainsn, sizeof(struct arch_specific_insn));
}

/*
 * Add the new probe to old_p->list. Fail if this is the
 * second jprobe at the address - two jprobes can't coexist
 */
static int add_new_kprobe(struct kprobe *old_p, struct kprobe *p)
{
	struct kprobe *kp;

	if (p->break_handler) {
		list_for_each_entry(kp, &old_p->list, list) {
			if (kp->break_handler)
				return -EEXIST;
		}
		list_add_tail(&p->list, &old_p->list);
	} else
		list_add(&p->list, &old_p->list);
	return 0;
}

/*
 * Fill in the required fields of the "manager kprobe". Replace the
 * earlier kprobe in the hlist with the manager kprobe
 */
static inline void add_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
{
	copy_kprobe(p, ap);
	ap->addr = p->addr;
	ap->pre_handler = aggr_pre_handler;
	ap->post_handler = aggr_post_handler;
	ap->fault_handler = aggr_fault_handler;
	ap->break_handler = aggr_break_handler;

	INIT_LIST_HEAD(&ap->list);
	list_add(&p->list, &ap->list);

	INIT_HLIST_NODE(&ap->hlist);
	hlist_del(&p->hlist);
	hlist_add_head(&ap->hlist,
		       &kprobe_table[hash_ptr(ap->addr, KPROBE_HASH_BITS)]);
}

/*
 * This is the second or subsequent kprobe at the address - handle
 * the intricacies
 * TODO: Move kcalloc outside the spinlock
 */
static int register_aggr_kprobe(struct kprobe *old_p, struct kprobe *p)
{
	int ret = 0;
	struct kprobe *ap;

	if (old_p->pre_handler == aggr_pre_handler) {
		copy_kprobe(old_p, p);
		ret = add_new_kprobe(old_p, p);
	} else {
		ap = kcalloc(1, sizeof(struct kprobe), GFP_ATOMIC);
		if (!ap)
			return -ENOMEM;
		add_aggr_kprobe(ap, old_p);
		copy_kprobe(ap, p);
		ret = add_new_kprobe(ap, p);
	}
	return ret;
}

/* kprobe removal house-keeping routines */
static inline void cleanup_kprobe(struct kprobe *p, unsigned long flags)
{
	arch_disarm_kprobe(p);
	hlist_del(&p->hlist);
	spin_unlock_irqrestore(&kprobe_lock, flags);
	arch_remove_kprobe(p);
}

static inline void cleanup_aggr_kprobe(struct kprobe *old_p,
		struct kprobe *p, unsigned long flags)
{
	list_del(&p->list);
	if (list_empty(&old_p->list)) {
		cleanup_kprobe(old_p, flags);
		kfree(old_p);
	} else
		spin_unlock_irqrestore(&kprobe_lock, flags);
}
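
/*
 * Register a kprobe at p->addr. If a probe already exists there, the
 * new one is chained onto an aggregate probe; otherwise the probe is
 * copied, added to the hash table and armed.
 */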
int register_kprobe(struct kprobe *p)
{
	int ret = 0;
	unsigned long flags = 0;
	struct kprobe *old_p;

	if ((ret = arch_prepare_kprobe(p)) != 0) {
		goto rm_kprobe;
	}
	spin_lock_irqsave(&kprobe_lock, flags);
	old_p = get_kprobe(p->addr);
	p->nmissed = 0;
	if (old_p) {
		ret = register_aggr_kprobe(old_p, p);
		goto out;
	}

	arch_copy_kprobe(p);
	INIT_HLIST_NODE(&p->hlist);
	hlist_add_head(&p->hlist,
		       &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);
	arch_arm_kprobe(p);

out:
	spin_unlock_irqrestore(&kprobe_lock, flags);
rm_kprobe:
	if (ret == -EEXIST)
		arch_remove_kprobe(p);
	return ret;
}
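
/*
 * Example usage (an illustrative sketch, not part of this file): a
 * client module fills in a struct kprobe and registers it. The handler
 * name, the probed symbol (do_fork) and the printk text below are
 * assumptions for illustration only.
 *
 *	static int my_pre_handler(struct kprobe *p, struct pt_regs *regs)
 *	{
 *		printk("kprobe hit at %p\n", p->addr);
 *		return 0;	// let the probed instruction execute
 *	}
 *
 *	static struct kprobe my_kp = {
 *		.addr = (kprobe_opcode_t *) do_fork,
 *		.pre_handler = my_pre_handler,
 *	};
 *
 *	// in the module's init function:
 *	if (register_kprobe(&my_kp) != 0)
 *		return -1;
 *	// ... and unregister_kprobe(&my_kp) in the module's exit path.
 */

/* Remove the kprobe at p->addr, unchaining it from any aggregate probe */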
void unregister_kprobe(struct kprobe *p)
{
	unsigned long flags;
	struct kprobe *old_p;

	spin_lock_irqsave(&kprobe_lock, flags);
	old_p = get_kprobe(p->addr);
	if (old_p) {
		if (old_p->pre_handler == aggr_pre_handler)
			cleanup_aggr_kprobe(old_p, p, flags);
		else
			cleanup_kprobe(p, flags);
	} else
		spin_unlock_irqrestore(&kprobe_lock, flags);
}

static struct notifier_block kprobe_exceptions_nb = {
	.notifier_call = kprobe_exceptions_notify,
	.priority = 0x7fffffff /* we need to be notified first */
};

int register_jprobe(struct jprobe *jp)
{
	/* Todo: Verify probepoint is a function entry point */
	jp->kp.pre_handler = setjmp_pre_handler;
	jp->kp.break_handler = longjmp_break_handler;

	return register_kprobe(&jp->kp);
}

void unregister_jprobe(struct jprobe *jp)
{
	unregister_kprobe(&jp->kp);
}

#ifdef ARCH_SUPPORTS_KRETPROBES
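/*
 * Register a function-return probe: pre-allocate maxactive instances
 * for the worst-case concurrency, then plant a kprobe at the function
 * entry whose pre_handler sets up the return probe.
 */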
int register_kretprobe(struct kretprobe *rp)
{
	int ret = 0;
	struct kretprobe_instance *inst;
	int i;

	rp->kp.pre_handler = pre_handler_kretprobe;

	/* Pre-allocate memory for max kretprobe instances */
	if (rp->maxactive <= 0) {
#ifdef CONFIG_PREEMPT
		rp->maxactive = max(10, 2 * NR_CPUS);
#else
		rp->maxactive = NR_CPUS;
#endif
	}
	INIT_HLIST_HEAD(&rp->used_instances);
	INIT_HLIST_HEAD(&rp->free_instances);
	for (i = 0; i < rp->maxactive; i++) {
		inst = kmalloc(sizeof(struct kretprobe_instance), GFP_KERNEL);
		if (inst == NULL) {
			free_rp_inst(rp);
			return -ENOMEM;
		}
		INIT_HLIST_NODE(&inst->uflist);
		hlist_add_head(&inst->uflist, &rp->free_instances);
	}

	rp->nmissed = 0;
	/* Establish function entry probe point */
	if ((ret = register_kprobe(&rp->kp)) != 0)
		free_rp_inst(rp);
	return ret;
}

#else /* ARCH_SUPPORTS_KRETPROBES */

int register_kretprobe(struct kretprobe *rp)
{
	return -ENOSYS;
}

#endif /* ARCH_SUPPORTS_KRETPROBES */
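
/*
 * Unregister the entry kprobe, then reclaim the instances: free ones
 * are freed now; in-flight ones are orphaned (ri->rp = NULL) and freed
 * by recycle_rp_inst() when the probed function finally returns.
 */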
void unregister_kretprobe(struct kretprobe *rp)
{
	unsigned long flags;
	struct kretprobe_instance *ri;

	unregister_kprobe(&rp->kp);
	/* No race here */
	spin_lock_irqsave(&kprobe_lock, flags);
	free_rp_inst(rp);
	while ((ri = get_used_rp_inst(rp)) != NULL) {
		ri->rp = NULL;
		hlist_del(&ri->uflist);
	}
	spin_unlock_irqrestore(&kprobe_lock, flags);
}

static int __init init_kprobes(void)
{
	int i, err = 0;

	/* FIXME allocate the probe table, currently defined statically */
	/* initialize all list heads */
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		INIT_HLIST_HEAD(&kprobe_table[i]);
		INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
	}

	err = register_die_notifier(&kprobe_exceptions_nb);
	/* Register the trampoline probe for return probe */
	register_kprobe(&trampoline_p);
	return err;
}

__initcall(init_kprobes);

EXPORT_SYMBOL_GPL(register_kprobe);
EXPORT_SYMBOL_GPL(unregister_kprobe);
EXPORT_SYMBOL_GPL(register_jprobe);
EXPORT_SYMBOL_GPL(unregister_jprobe);
EXPORT_SYMBOL_GPL(jprobe_return);
EXPORT_SYMBOL_GPL(register_kretprobe);
EXPORT_SYMBOL_GPL(unregister_kretprobe);