/*
 *  Kernel Probes (KProbes)
 *  kernel/kprobes.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes suggestions from
 *		Rusty Russell).
 * 2004-Aug	Updated by Prasanna S Panchamukhi <prasanna@in.ibm.com> with
 *		hlists and exceptions notifier as suggested by Andi Kleen.
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2004-Sep	Prasanna S Panchamukhi <prasanna@in.ibm.com> Changed Kprobes
 *		exceptions notifier to be first on the priority list.
 * 2005-May	Hien Nguyen <hien@us.ibm.com>, Jim Keniston
 *		<jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> added function-return probes.
 */
#include <linux/kprobes.h>
#include <linux/hash.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/moduleloader.h>
#include <linux/kallsyms.h>
#include <asm-generic/sections.h>
#include <asm/cacheflush.h>
#include <asm/errno.h>
#include <asm/kdebug.h>
#define KPROBE_HASH_BITS 6
#define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)

/*
 * Some oddball architectures like 64-bit powerpc have function descriptors,
 * so this must be overridable.
 */
#ifndef kprobe_lookup_name
#define kprobe_lookup_name(name, addr) \
	addr = ((kprobe_opcode_t *)(kallsyms_lookup_name(name)))
#endif

static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];
static atomic_t kprobe_count;

DEFINE_MUTEX(kprobe_mutex);		/* Protects kprobe_table */
DEFINE_SPINLOCK(kretprobe_lock);	/* Protects kretprobe_inst_table */
static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;

static struct notifier_block kprobe_page_fault_nb = {
	.notifier_call = kprobe_exceptions_notify,
	.priority = 0x7fffffff /* we need to be notified first */
};
#ifdef __ARCH_WANT_KPROBES_INSN_SLOT
/*
 * kprobe->ainsn.insn points to the copy of the instruction to be
 * single-stepped. x86_64, POWER4 and above have no-exec support and
 * stepping on the instruction on a vmalloced/kmalloced/data page
 * is a recipe for disaster
 */
#define INSNS_PER_PAGE	(PAGE_SIZE/(MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))

struct kprobe_insn_page {
	struct hlist_node hlist;
	kprobe_opcode_t *insns;		/* Page of instruction slots */
	char slot_used[INSNS_PER_PAGE];
	int nused;
};

static struct hlist_head kprobe_insn_pages;

/**
 * get_insn_slot() - Find a slot on an executable page for an instruction.
 * We allocate an executable page if there's no room on existing ones.
 */
kprobe_opcode_t __kprobes *get_insn_slot(void)
{
	struct kprobe_insn_page *kip;
	struct hlist_node *pos;

	hlist_for_each(pos, &kprobe_insn_pages) {
		kip = hlist_entry(pos, struct kprobe_insn_page, hlist);
		if (kip->nused < INSNS_PER_PAGE) {
			int i;
			for (i = 0; i < INSNS_PER_PAGE; i++) {
				if (!kip->slot_used[i]) {
					kip->slot_used[i] = 1;
					kip->nused++;
					return kip->insns + (i * MAX_INSN_SIZE);
				}
			}
			/* Surprise!  No unused slots.  Fix kip->nused. */
			kip->nused = INSNS_PER_PAGE;
		}
	}

	/* All out of space.  Need to allocate a new page.  Use slot 0. */
	kip = kmalloc(sizeof(struct kprobe_insn_page), GFP_KERNEL);
	if (!kip) {
		return NULL;
	}

	/*
	 * Use module_alloc so this page is within +/- 2GB of where the
	 * kernel image and loaded module images reside. This is required
	 * so x86_64 can correctly handle the %rip-relative fixups.
	 */
	kip->insns = module_alloc(PAGE_SIZE);
	if (!kip->insns) {
		kfree(kip);
		return NULL;
	}
	INIT_HLIST_NODE(&kip->hlist);
	hlist_add_head(&kip->hlist, &kprobe_insn_pages);
	memset(kip->slot_used, 0, INSNS_PER_PAGE);
	kip->slot_used[0] = 1;
	kip->nused = 1;
	return kip->insns;
}

void __kprobes free_insn_slot(kprobe_opcode_t *slot)
{
	struct kprobe_insn_page *kip;
	struct hlist_node *pos;

	hlist_for_each(pos, &kprobe_insn_pages) {
		kip = hlist_entry(pos, struct kprobe_insn_page, hlist);
		if (kip->insns <= slot &&
		    slot < kip->insns + (INSNS_PER_PAGE * MAX_INSN_SIZE)) {
			int i = (slot - kip->insns) / MAX_INSN_SIZE;
			kip->slot_used[i] = 0;
			kip->nused--;
			if (kip->nused == 0) {
				/*
				 * Page is no longer in use.  Free it unless
				 * it's the last one.  We keep the last one
				 * so as not to have to set it up again the
				 * next time somebody inserts a probe.
				 */
				hlist_del(&kip->hlist);
				if (hlist_empty(&kprobe_insn_pages)) {
					INIT_HLIST_NODE(&kip->hlist);
					hlist_add_head(&kip->hlist,
						       &kprobe_insn_pages);
				} else {
					module_free(NULL, kip->insns);
					kfree(kip);
				}
			}
			return;
		}
	}
}
#endif
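
/*
 * For illustration: a minimal sketch of how an architecture that defines
 * __ARCH_WANT_KPROBES_INSN_SLOT consumes these slots.  This mirrors the
 * shape of the x86_64 arch_prepare_kprobe(); arch_copy_kprobe() is assumed
 * to exist in that architecture's code and is not part of this file:
 *
 *	int __kprobes arch_prepare_kprobe(struct kprobe *p)
 *	{
 *		// The copy of the probed insn must live on an executable
 *		// page within reach of %rip-relative addressing.
 *		p->ainsn.insn = get_insn_slot();
 *		if (!p->ainsn.insn)
 *			return -ENOMEM;
 *		arch_copy_kprobe(p);	// copy insn and apply fixups
 *		return 0;
 *	}
 *
 * free_insn_slot(p->ainsn.insn) is then the matching call in that
 * architecture's arch_remove_kprobe().
 */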
/* We have preemption disabled, so it is safe to use the __ versions */
static inline void set_kprobe_instance(struct kprobe *kp)
{
	__get_cpu_var(kprobe_instance) = kp;
}

static inline void reset_kprobe_instance(void)
{
	__get_cpu_var(kprobe_instance) = NULL;
}

/*
 * This routine is called either:
 *	- under the kprobe_mutex - during kprobe_[un]register()
 *				OR
 *	- with preemption disabled - from arch/xxx/kernel/kprobes.c
 */
struct kprobe __kprobes *get_kprobe(void *addr)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;

	head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
	hlist_for_each_entry_rcu(p, node, head, hlist) {
		if (p->addr == addr)
			return p;
	}
	return NULL;
}
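
/*
 * For illustration, the typical preemption-disabled caller is an
 * architecture's breakpoint trap handler.  A hypothetical sketch modelled
 * on the i386 one (the address arithmetic is arch-specific and not part
 * of this file):
 *
 *	kprobe_opcode_t *addr;
 *	struct kprobe *p;
 *
 *	addr = (kprobe_opcode_t *)(regs->eip - sizeof(kprobe_opcode_t));
 *	preempt_disable();
 *	p = get_kprobe(addr);
 *	if (p) {
 *		... run pre_handler, single-step the copied insn ...
 *	}
 */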
/*
 * Aggregate handlers for multiple kprobes support - these handlers
 * take care of invoking the individual kprobe handlers on p->list
 */
static int __kprobes aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->pre_handler) {
			set_kprobe_instance(kp);
			if (kp->pre_handler(kp, regs))
				return 1;
		}
		reset_kprobe_instance();
	}
	return 0;
}

static void __kprobes aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
					unsigned long flags)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->post_handler) {
			set_kprobe_instance(kp);
			kp->post_handler(kp, regs, flags);
			reset_kprobe_instance();
		}
	}
	return;
}

static int __kprobes aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
					int trapnr)
{
	struct kprobe *cur = __get_cpu_var(kprobe_instance);

	/*
	 * If we faulted "during" the execution of a user-specified
	 * probe handler, invoke just that probe's fault handler
	 */
	if (cur && cur->fault_handler) {
		if (cur->fault_handler(cur, regs, trapnr))
			return 1;
	}
	return 0;
}

static int __kprobes aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *cur = __get_cpu_var(kprobe_instance);
	int ret = 0;

	if (cur && cur->break_handler) {
		if (cur->break_handler(cur, regs))
			ret = 1;
	}
	reset_kprobe_instance();
	return ret;
}
/* Walks the list and increments nmissed count for multiprobe case */
void __kprobes kprobes_inc_nmissed_count(struct kprobe *p)
{
	struct kprobe *kp;

	if (p->pre_handler != aggr_pre_handler) {
		p->nmissed++;
	} else {
		list_for_each_entry_rcu(kp, &p->list, list)
			kp->nmissed++;
	}
	return;
}

/* Called with kretprobe_lock held */
struct kretprobe_instance __kprobes *get_free_rp_inst(struct kretprobe *rp)
{
	struct hlist_node *node;
	struct kretprobe_instance *ri;

	hlist_for_each_entry(ri, node, &rp->free_instances, uflist)
		return ri;
	return NULL;
}

/* Called with kretprobe_lock held */
static struct kretprobe_instance __kprobes *get_used_rp_inst(struct kretprobe
							      *rp)
{
	struct hlist_node *node;
	struct kretprobe_instance *ri;

	hlist_for_each_entry(ri, node, &rp->used_instances, uflist)
		return ri;
	return NULL;
}
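
/*
 * For orientation (an explanatory note, not code from this file): each
 * kretprobe_instance cycles between two per-kretprobe lists.
 * register_kretprobe() pre-allocates maxactive instances on
 * rp->free_instances; on function entry, add_rp_inst() moves one to
 * rp->used_instances and hashes it by task in kretprobe_inst_table; on
 * function return (or task exit), recycle_rp_inst() returns it:
 *
 *	free_instances --(entry: add_rp_inst)--------> used_instances
 *	used_instances --(return: recycle_rp_inst)---> free_instances
 */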
/* Called with kretprobe_lock held */
void __kprobes add_rp_inst(struct kretprobe_instance *ri)
{
	/*
	 * Remove rp inst off the free list -
	 * Add it back when probed function returns
	 */
	hlist_del(&ri->uflist);

	/* Add rp inst onto table */
	INIT_HLIST_NODE(&ri->hlist);
	hlist_add_head(&ri->hlist,
		       &kretprobe_inst_table[hash_ptr(ri->task, KPROBE_HASH_BITS)]);

	/* Also add this rp inst to the used list. */
	INIT_HLIST_NODE(&ri->uflist);
	hlist_add_head(&ri->uflist, &ri->rp->used_instances);
}

/* Called with kretprobe_lock held */
void __kprobes recycle_rp_inst(struct kretprobe_instance *ri)
{
	/* remove rp inst off the kretprobe_inst_table */
	hlist_del(&ri->hlist);
	if (ri->rp) {
		/* remove rp inst off the used list */
		hlist_del(&ri->uflist);
		/* put rp inst back onto the free list */
		INIT_HLIST_NODE(&ri->uflist);
		hlist_add_head(&ri->uflist, &ri->rp->free_instances);
	} else
		/* Unregistering */
		kfree(ri);
}
struct hlist_head __kprobes *kretprobe_inst_table_head(struct task_struct *tsk)
{
	return &kretprobe_inst_table[hash_ptr(tsk, KPROBE_HASH_BITS)];
}

/*
 * This function is called from finish_task_switch when task tk becomes dead,
 * so that we can recycle any function-return probe instances associated
 * with this task. These left-over instances represent probed functions
 * that have been called but will never return.
 */
void __kprobes kprobe_flush_task(struct task_struct *tk)
{
	struct kretprobe_instance *ri;
	struct hlist_head *head;
	struct hlist_node *node, *tmp;
	unsigned long flags = 0;

	spin_lock_irqsave(&kretprobe_lock, flags);
	head = kretprobe_inst_table_head(tk);
	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
		if (ri->task == tk)
			recycle_rp_inst(ri);
	}
	spin_unlock_irqrestore(&kretprobe_lock, flags);
}

static inline void free_rp_inst(struct kretprobe *rp)
{
	struct kretprobe_instance *ri;

	while ((ri = get_free_rp_inst(rp)) != NULL) {
		hlist_del(&ri->uflist);
		kfree(ri);
	}
}
/*
 * Keep all fields in the kprobe consistent
 */
static inline void copy_kprobe(struct kprobe *old_p, struct kprobe *p)
{
	memcpy(&p->opcode, &old_p->opcode, sizeof(kprobe_opcode_t));
	memcpy(&p->ainsn, &old_p->ainsn, sizeof(struct arch_specific_insn));
}

/*
 * Add the new probe to old_p->list. Fail if this is the
 * second jprobe at the address - two jprobes can't coexist
 */
static int __kprobes add_new_kprobe(struct kprobe *old_p, struct kprobe *p)
{
	if (p->break_handler) {
		if (old_p->break_handler)
			return -EEXIST;
		list_add_tail_rcu(&p->list, &old_p->list);
		old_p->break_handler = aggr_break_handler;
	} else
		list_add_rcu(&p->list, &old_p->list);
	if (p->post_handler && !old_p->post_handler)
		old_p->post_handler = aggr_post_handler;
	return 0;
}

/*
 * Fill in the required fields of the "manager kprobe". Replace the
 * earlier kprobe in the hlist with the manager kprobe
 */
static inline void add_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
{
	copy_kprobe(p, ap);
	flush_insn_slot(ap);
	ap->addr = p->addr;
	ap->pre_handler = aggr_pre_handler;
	ap->fault_handler = aggr_fault_handler;
	if (p->post_handler)
		ap->post_handler = aggr_post_handler;
	if (p->break_handler)
		ap->break_handler = aggr_break_handler;

	INIT_LIST_HEAD(&ap->list);
	list_add_rcu(&p->list, &ap->list);

	hlist_replace_rcu(&p->hlist, &ap->hlist);
}

/*
 * This is the second or subsequent kprobe at the address - handle
 * the intricacies
 */
static int __kprobes register_aggr_kprobe(struct kprobe *old_p,
					  struct kprobe *p)
{
	int ret = 0;
	struct kprobe *ap;

	if (old_p->pre_handler == aggr_pre_handler) {
		copy_kprobe(old_p, p);
		ret = add_new_kprobe(old_p, p);
	} else {
		ap = kzalloc(sizeof(struct kprobe), GFP_KERNEL);
		if (!ap)
			return -ENOMEM;
		add_aggr_kprobe(ap, old_p);
		copy_kprobe(ap, p);
		ret = add_new_kprobe(ap, p);
	}
	return ret;
}
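
/*
 * For illustration: when two callers probe the same address, the second
 * registration goes through register_aggr_kprobe() above.  A hypothetical
 * sketch (handler and symbol names are illustrative only):
 *
 *	static struct kprobe kp1 = { .symbol_name = "do_fork",
 *				     .pre_handler = trace_entry };
 *	static struct kprobe kp2 = { .symbol_name = "do_fork",
 *				     .post_handler = trace_exit };
 *
 *	register_kprobe(&kp1);	// first probe: armed directly
 *	register_kprobe(&kp2);	// same addr: a manager kprobe now sits
 *				// on the hash list and dispatches to
 *				// kp1 and kp2 via the aggr_* handlers
 */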
static int __kprobes in_kprobes_functions(unsigned long addr)
{
	if (addr >= (unsigned long)__kprobes_text_start
	    && addr < (unsigned long)__kprobes_text_end)
		return -EINVAL;
	return 0;
}

static int __kprobes __register_kprobe(struct kprobe *p,
				       unsigned long called_from)
{
	int ret = 0;
	struct kprobe *old_p;
	struct module *probed_mod;

	/*
	 * If we have a symbol_name argument, look it up and add the offset
	 * to the resulting address.  That way the addr field can either be
	 * global or relative to a symbol.
	 */
	if (p->symbol_name) {
		if (p->addr)
			return -EINVAL;
		kprobe_lookup_name(p->symbol_name, p->addr);
	}

	if (!p->addr)
		return -EINVAL;
	p->addr = (kprobe_opcode_t *)(((char *)p->addr) + p->offset);

	if ((!kernel_text_address((unsigned long) p->addr)) ||
	    in_kprobes_functions((unsigned long) p->addr))
		return -EINVAL;

	p->mod_refcounted = 0;
	/* Check if we are probing a module */
	if ((probed_mod = module_text_address((unsigned long) p->addr))) {
		struct module *calling_mod = module_text_address(called_from);
		/*
		 * We must allow modules to probe themselves and in this
		 * case avoid incrementing the module refcount, so as to
		 * allow unloading of self-probing modules.
		 */
		if (calling_mod && (calling_mod != probed_mod)) {
			if (unlikely(!try_module_get(probed_mod)))
				return -EINVAL;
			p->mod_refcounted = 1;
		} else
			probed_mod = NULL;
	}

	p->nmissed = 0;
	mutex_lock(&kprobe_mutex);
	old_p = get_kprobe(p->addr);
	if (old_p) {
		ret = register_aggr_kprobe(old_p, p);
		if (!ret)
			atomic_inc(&kprobe_count);
		goto out;
	}

	if ((ret = arch_prepare_kprobe(p)) != 0)
		goto out;

	INIT_HLIST_NODE(&p->hlist);
	hlist_add_head_rcu(&p->hlist,
		       &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);

	if (atomic_add_return(1, &kprobe_count) ==
	    (ARCH_INACTIVE_KPROBE_COUNT + 1))
		register_page_fault_notifier(&kprobe_page_fault_nb);

	arch_arm_kprobe(p);

out:
	mutex_unlock(&kprobe_mutex);

	if (ret && probed_mod)
		module_put(probed_mod);
	return ret;
}
int __kprobes register_kprobe(struct kprobe *p)
{
	return __register_kprobe(p,
		(unsigned long)__builtin_return_address(0));
}
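
/*
 * Example usage (a minimal sketch of a client module; the handler and
 * symbol names are illustrative, not part of this file):
 *
 *	static int handler_pre(struct kprobe *p, struct pt_regs *regs)
 *	{
 *		printk("kprobe hit at %p\n", p->addr);
 *		return 0;	// 0 => go on to single-step the original insn
 *	}
 *
 *	static struct kprobe kp = {
 *		.symbol_name	= "do_fork",
 *		.pre_handler	= handler_pre,
 *	};
 *
 *	// in the module's init function:
 *	if (register_kprobe(&kp) < 0)
 *		return -1;
 *	// ... and unregister_kprobe(&kp) in the exit function.
 */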
void __kprobes unregister_kprobe(struct kprobe *p)
{
	struct module *mod;
	struct kprobe *old_p, *list_p;
	int cleanup_p;

	mutex_lock(&kprobe_mutex);
	old_p = get_kprobe(p->addr);
	if (unlikely(!old_p)) {
		mutex_unlock(&kprobe_mutex);
		return;
	}
	if (p != old_p) {
		list_for_each_entry_rcu(list_p, &old_p->list, list)
			if (list_p == p)
				/* kprobe p is a valid probe */
				goto valid_p;
		mutex_unlock(&kprobe_mutex);
		return;
	}
valid_p:
	if ((old_p == p) || ((old_p->pre_handler == aggr_pre_handler) &&
		(p->list.next == &old_p->list) &&
		(p->list.prev == &old_p->list))) {
		/* Only probe on the hash list */
		arch_disarm_kprobe(p);
		hlist_del_rcu(&old_p->hlist);
		cleanup_p = 1;
	} else {
		list_del_rcu(&p->list);
		cleanup_p = 0;
	}

	mutex_unlock(&kprobe_mutex);

	synchronize_sched();
	if (p->mod_refcounted &&
	    (mod = module_text_address((unsigned long)p->addr)))
		module_put(mod);

	if (cleanup_p) {
		if (p != old_p) {
			list_del_rcu(&p->list);
			kfree(old_p);
		}
		arch_remove_kprobe(p);
	} else {
		mutex_lock(&kprobe_mutex);
		if (p->break_handler)
			old_p->break_handler = NULL;
		if (p->post_handler) {
			list_for_each_entry_rcu(list_p, &old_p->list, list) {
				if (list_p->post_handler) {
					cleanup_p = 2;
					break;
				}
			}
			if (cleanup_p == 0)
				old_p->post_handler = NULL;
		}
		mutex_unlock(&kprobe_mutex);
	}

	/*
	 * Call unregister_page_fault_notifier() if no probes are active
	 */
	mutex_lock(&kprobe_mutex);
	if (atomic_add_return(-1, &kprobe_count) ==
	    ARCH_INACTIVE_KPROBE_COUNT)
		unregister_page_fault_notifier(&kprobe_page_fault_nb);
	mutex_unlock(&kprobe_mutex);
	return;
}
static struct notifier_block kprobe_exceptions_nb = {
	.notifier_call = kprobe_exceptions_notify,
	.priority = 0x7fffffff /* we need to be notified first */
};

int __kprobes register_jprobe(struct jprobe *jp)
{
	/* Todo: Verify probepoint is a function entry point */
	jp->kp.pre_handler = setjmp_pre_handler;
	jp->kp.break_handler = longjmp_break_handler;

	return __register_kprobe(&jp->kp,
		(unsigned long)__builtin_return_address(0));
}

void __kprobes unregister_jprobe(struct jprobe *jp)
{
	unregister_kprobe(&jp->kp);
}
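
/*
 * Example usage (a minimal sketch; probing do_fork() with its 2.6-era
 * signature is illustrative only).  A jprobe handler must mirror the
 * probed function's signature, so it sees the real arguments, and must
 * end with jprobe_return():
 *
 *	static long jdo_fork(unsigned long clone_flags,
 *			     unsigned long stack_start, struct pt_regs *regs,
 *			     unsigned long stack_size,
 *			     int __user *parent_tidptr,
 *			     int __user *child_tidptr)
 *	{
 *		printk("do_fork: clone_flags = 0x%lx\n", clone_flags);
 *		jprobe_return();	// control never actually passes this
 *		return 0;
 *	}
 *
 *	static struct jprobe my_jprobe = {
 *		.entry = (kprobe_opcode_t *) jdo_fork,
 *	};
 *
 *	my_jprobe.kp.symbol_name = "do_fork";
 *	register_jprobe(&my_jprobe);
 */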
#ifdef ARCH_SUPPORTS_KRETPROBES

/*
 * This kprobe pre_handler is registered with every kretprobe. When a
 * probe hits, it sets up the return probe.
 */
static int __kprobes pre_handler_kretprobe(struct kprobe *p,
					   struct pt_regs *regs)
{
	struct kretprobe *rp = container_of(p, struct kretprobe, kp);
	unsigned long flags = 0;

	/* TODO: consider swapping the RA only after the last pre_handler has fired */
	spin_lock_irqsave(&kretprobe_lock, flags);
	arch_prepare_kretprobe(rp, regs);
	spin_unlock_irqrestore(&kretprobe_lock, flags);
	return 0;
}

int __kprobes register_kretprobe(struct kretprobe *rp)
{
	int ret = 0;
	struct kretprobe_instance *inst;
	int i;

	rp->kp.pre_handler = pre_handler_kretprobe;
	rp->kp.post_handler = NULL;
	rp->kp.fault_handler = NULL;
	rp->kp.break_handler = NULL;

	/* Pre-allocate memory for max kretprobe instances */
	if (rp->maxactive <= 0) {
#ifdef CONFIG_PREEMPT
		rp->maxactive = max(10, 2 * NR_CPUS);
#else
		rp->maxactive = NR_CPUS;
#endif
	}
	INIT_HLIST_HEAD(&rp->used_instances);
	INIT_HLIST_HEAD(&rp->free_instances);
	for (i = 0; i < rp->maxactive; i++) {
		inst = kmalloc(sizeof(struct kretprobe_instance), GFP_KERNEL);
		if (inst == NULL) {
			free_rp_inst(rp);
			return -ENOMEM;
		}
		INIT_HLIST_NODE(&inst->uflist);
		hlist_add_head(&inst->uflist, &rp->free_instances);
	}

	rp->nmissed = 0;
	/* Establish function entry probe point */
	if ((ret = __register_kprobe(&rp->kp,
		(unsigned long)__builtin_return_address(0))) != 0)
		free_rp_inst(rp);
	return ret;
}

#else /* ARCH_SUPPORTS_KRETPROBES */

int __kprobes register_kretprobe(struct kretprobe *rp)
{
	return -ENOSYS;
}

#endif /* ARCH_SUPPORTS_KRETPROBES */
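
/*
 * Example usage (a minimal sketch; the handler and symbol names are
 * illustrative, not part of this file).  The return handler runs when
 * the probed function returns:
 *
 *	static int ret_handler(struct kretprobe_instance *ri,
 *			       struct pt_regs *regs)
 *	{
 *		// ri->rp points back to the kretprobe; ri->ret_addr
 *		// holds the original return address.
 *		printk("probed function returned\n");
 *		return 0;
 *	}
 *
 *	static struct kretprobe my_kretprobe = {
 *		.handler	= ret_handler,
 *		.maxactive	= 20,	// tolerate ~20 concurrent calls
 *	};
 *
 *	my_kretprobe.kp.symbol_name = "do_fork";
 *	register_kretprobe(&my_kretprobe);
 */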
void __kprobes unregister_kretprobe(struct kretprobe *rp)
{
	unsigned long flags;
	struct kretprobe_instance *ri;

	unregister_kprobe(&rp->kp);
	/* No race here */
	spin_lock_irqsave(&kretprobe_lock, flags);
	while ((ri = get_used_rp_inst(rp)) != NULL) {
		ri->rp = NULL;
		hlist_del(&ri->uflist);
	}
	spin_unlock_irqrestore(&kretprobe_lock, flags);
	free_rp_inst(rp);
}
static int __init init_kprobes(void)
{
	int i, err = 0;

	/* FIXME allocate the probe table, currently defined statically */
	/* initialize all list heads */
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		INIT_HLIST_HEAD(&kprobe_table[i]);
		INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
	}
	atomic_set(&kprobe_count, 0);

	err = arch_init_kprobes();
	if (!err)
		err = register_die_notifier(&kprobe_exceptions_nb);

	return err;
}

__initcall(init_kprobes);

EXPORT_SYMBOL_GPL(register_kprobe);
EXPORT_SYMBOL_GPL(unregister_kprobe);
EXPORT_SYMBOL_GPL(register_jprobe);
EXPORT_SYMBOL_GPL(unregister_jprobe);
EXPORT_SYMBOL_GPL(jprobe_return);
EXPORT_SYMBOL_GPL(register_kretprobe);
EXPORT_SYMBOL_GPL(unregister_kretprobe);