/*
 * Kernel Probes (KProbes)
 * kernel/kprobes.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct   Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *            Probes initial implementation (includes suggestions from
 *            Rusty Russell).
 * 2004-Aug   Updated by Prasanna S Panchamukhi <prasanna@in.ibm.com> with
 *            hlists and exceptions notifier as suggested by Andi Kleen.
 * 2004-July  Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *            interface to access function arguments.
 * 2004-Sep   Prasanna S Panchamukhi <prasanna@in.ibm.com> Changed Kprobes
 *            exceptions notifier to be first on the priority list.
 * 2005-May   Hien Nguyen <hien@us.ibm.com>, Jim Keniston
 *            <jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *            <prasanna@in.ibm.com> added function-return probes.
 */
#include <linux/kprobes.h>
#include <linux/hash.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/moduleloader.h>
#include <asm-generic/sections.h>
#include <asm/cacheflush.h>
#include <asm/errno.h>
#include <asm/kdebug.h>

#define KPROBE_HASH_BITS 6
#define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)

static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];

static DEFINE_SPINLOCK(kprobe_lock);    /* Protects kprobe_table */
DEFINE_SPINLOCK(kretprobe_lock);        /* Protects kretprobe_inst_table */
static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
/*
 * kprobe->ainsn.insn points to the copy of the instruction to be
 * single-stepped. x86_64, POWER4 and above have no-exec support and
 * stepping on the instruction on a vmalloced/kmalloced/data page
 * is a recipe for disaster
 */
#define INSNS_PER_PAGE  (PAGE_SIZE/(MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))
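
/*
 * Each kprobe_insn_page manages one executable page carved into
 * INSNS_PER_PAGE fixed-size slots of MAX_INSN_SIZE opcodes each.
 * slot_used[] marks which slots hold a live instruction copy, and
 * nused caches the count so a full page can be skipped quickly.
 */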
struct kprobe_insn_page {
        struct hlist_node hlist;
        kprobe_opcode_t *insns;         /* Page of instruction slots */
        char slot_used[INSNS_PER_PAGE];
        int nused;
};

static struct hlist_head kprobe_insn_pages;

/**
 * get_insn_slot() - Find a slot on an executable page for an instruction.
 * We allocate an executable page if there's no room on existing ones.
 */
kprobe_opcode_t __kprobes *get_insn_slot(void)
{
        struct kprobe_insn_page *kip;
        struct hlist_node *pos;

        hlist_for_each(pos, &kprobe_insn_pages) {
                kip = hlist_entry(pos, struct kprobe_insn_page, hlist);
                if (kip->nused < INSNS_PER_PAGE) {
                        int i;
                        for (i = 0; i < INSNS_PER_PAGE; i++) {
                                if (!kip->slot_used[i]) {
                                        kip->slot_used[i] = 1;
                                        kip->nused++;
                                        return kip->insns + (i * MAX_INSN_SIZE);
                                }
                        }
                        /* Surprise!  No unused slots.  Fix kip->nused. */
                        kip->nused = INSNS_PER_PAGE;
                }
        }

        /* All out of space.  Need to allocate a new page.  Use slot 0. */
        kip = kmalloc(sizeof(struct kprobe_insn_page), GFP_KERNEL);
        if (!kip) {
                return NULL;
        }

        /*
         * Use module_alloc so this page is within +/- 2GB of where the
         * kernel image and loaded module images reside. This is required
         * so x86_64 can correctly handle the %rip-relative fixups.
         */
        kip->insns = module_alloc(PAGE_SIZE);
        if (!kip->insns) {
                kfree(kip);
                return NULL;
        }
        INIT_HLIST_NODE(&kip->hlist);
        hlist_add_head(&kip->hlist, &kprobe_insn_pages);
        memset(kip->slot_used, 0, INSNS_PER_PAGE);
        kip->slot_used[0] = 1;
        kip->nused = 1;
        return kip->insns;
}
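
/*
 * free_insn_slot() - Give back a slot handed out by get_insn_slot().
 * An empty page is freed, except for the last remaining one, which is
 * kept cached so the next probe insertion can reuse it.
 */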
void __kprobes free_insn_slot(kprobe_opcode_t *slot)
{
        struct kprobe_insn_page *kip;
        struct hlist_node *pos;

        hlist_for_each(pos, &kprobe_insn_pages) {
                kip = hlist_entry(pos, struct kprobe_insn_page, hlist);
                if (kip->insns <= slot &&
                    slot < kip->insns + (INSNS_PER_PAGE * MAX_INSN_SIZE)) {
                        int i = (slot - kip->insns) / MAX_INSN_SIZE;
                        kip->slot_used[i] = 0;
                        kip->nused--;
                        if (kip->nused == 0) {
                                /*
                                 * Page is no longer in use.  Free it unless
                                 * it's the last one.  We keep the last one
                                 * so as not to have to set it up again the
                                 * next time somebody inserts a probe.
                                 */
                                hlist_del(&kip->hlist);
                                if (hlist_empty(&kprobe_insn_pages)) {
                                        INIT_HLIST_NODE(&kip->hlist);
                                        hlist_add_head(&kip->hlist,
                                                       &kprobe_insn_pages);
                                } else {
                                        module_free(NULL, kip->insns);
                                        kfree(kip);
                                }
                        }
                        return;
                }
        }
}
/* We have preemption disabled.. so it is safe to use __ versions */
static inline void set_kprobe_instance(struct kprobe *kp)
{
        __get_cpu_var(kprobe_instance) = kp;
}

static inline void reset_kprobe_instance(void)
{
        __get_cpu_var(kprobe_instance) = NULL;
}

/*
 * This routine is called either:
 *      - under the kprobe_lock spinlock - during kprobe_[un]register()
 *                              OR
 *      - with preemption disabled - from arch/xxx/kernel/kprobes.c
 */
struct kprobe __kprobes *get_kprobe(void *addr)
{
        struct hlist_head *head;
        struct hlist_node *node;
        struct kprobe *p;

        head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
        hlist_for_each_entry_rcu(p, node, head, hlist) {
                if (p->addr == addr)
                        return p;
        }
        return NULL;
}
/*
 * Aggregate handlers for multiple kprobes support - these handlers
 * take care of invoking the individual kprobe handlers on p->list
 */
static int __kprobes aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
        struct kprobe *kp;

        list_for_each_entry_rcu(kp, &p->list, list) {
                if (kp->pre_handler) {
                        set_kprobe_instance(kp);
                        if (kp->pre_handler(kp, regs))
                                return 1;
                }
                reset_kprobe_instance();
        }
        return 0;
}

static void __kprobes aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
                                        unsigned long flags)
{
        struct kprobe *kp;

        list_for_each_entry_rcu(kp, &p->list, list) {
                if (kp->post_handler) {
                        set_kprobe_instance(kp);
                        kp->post_handler(kp, regs, flags);
                        reset_kprobe_instance();
                }
        }
        return;
}

static int __kprobes aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
                                        int trapnr)
{
        struct kprobe *cur = __get_cpu_var(kprobe_instance);

        /*
         * if we faulted "during" the execution of a user specified
         * probe handler, invoke just that probe's fault handler
         */
        if (cur && cur->fault_handler) {
                if (cur->fault_handler(cur, regs, trapnr))
                        return 1;
        }
        return 0;
}

static int __kprobes aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
{
        struct kprobe *cur = __get_cpu_var(kprobe_instance);
        int ret = 0;

        if (cur && cur->break_handler) {
                if (cur->break_handler(cur, regs))
                        ret = 1;
        }
        reset_kprobe_instance();
        return ret;
}
/* Walks the list and increments nmissed count for multiprobe case */
void __kprobes kprobes_inc_nmissed_count(struct kprobe *p)
{
        struct kprobe *kp;

        if (p->pre_handler != aggr_pre_handler) {
                p->nmissed++;
        } else {
                list_for_each_entry_rcu(kp, &p->list, list)
                        kp->nmissed++;
        }
        return;
}

/* Called with kretprobe_lock held */
struct kretprobe_instance __kprobes *get_free_rp_inst(struct kretprobe *rp)
{
        struct hlist_node *node;
        struct kretprobe_instance *ri;

        hlist_for_each_entry(ri, node, &rp->free_instances, uflist)
                return ri;
        return NULL;
}
/* Called with kretprobe_lock held */
static struct kretprobe_instance __kprobes *get_used_rp_inst(struct kretprobe
                                                             *rp)
{
        struct hlist_node *node;
        struct kretprobe_instance *ri;

        hlist_for_each_entry(ri, node, &rp->used_instances, uflist)
                return ri;
        return NULL;
}

/* Called with kretprobe_lock held */
void __kprobes add_rp_inst(struct kretprobe_instance *ri)
{
        /*
         * Remove rp inst off the free list -
         * Add it back when probed function returns
         */
        hlist_del(&ri->uflist);

        /* Add rp inst onto table */
        INIT_HLIST_NODE(&ri->hlist);
        hlist_add_head(&ri->hlist,
                       &kretprobe_inst_table[hash_ptr(ri->task, KPROBE_HASH_BITS)]);

        /* Also add this rp inst to the used list. */
        INIT_HLIST_NODE(&ri->uflist);
        hlist_add_head(&ri->uflist, &ri->rp->used_instances);
}

/* Called with kretprobe_lock held */
void __kprobes recycle_rp_inst(struct kretprobe_instance *ri)
{
        /* remove rp inst off the rprobe_inst_table */
        hlist_del(&ri->hlist);
        if (ri->rp) {
                /* remove rp inst off the used list */
                hlist_del(&ri->uflist);
                /* put rp inst back onto the free list */
                INIT_HLIST_NODE(&ri->uflist);
                hlist_add_head(&ri->uflist, &ri->rp->free_instances);
        } else
                /* Unregistering */
                kfree(ri);
}
struct hlist_head __kprobes *kretprobe_inst_table_head(struct task_struct *tsk)
{
        return &kretprobe_inst_table[hash_ptr(tsk, KPROBE_HASH_BITS)];
}

/*
 * This function is called from exit_thread or flush_thread when task tk's
 * stack is being recycled so that we can recycle any function-return probe
 * instances associated with this task. These left over instances represent
 * probed functions that have been called but will never return.
 */
void __kprobes kprobe_flush_task(struct task_struct *tk)
{
        struct kretprobe_instance *ri;
        struct hlist_head *head;
        struct hlist_node *node, *tmp;
        unsigned long flags = 0;

        spin_lock_irqsave(&kretprobe_lock, flags);
        head = kretprobe_inst_table_head(current);
        hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
                if (ri->task == tk)
                        recycle_rp_inst(ri);
        }
        spin_unlock_irqrestore(&kretprobe_lock, flags);
}

/*
 * This kprobe pre_handler is registered with every kretprobe. When probe
 * hits it will set up the return probe.
 */
static int __kprobes pre_handler_kretprobe(struct kprobe *p,
                                           struct pt_regs *regs)
{
        struct kretprobe *rp = container_of(p, struct kretprobe, kp);
        unsigned long flags = 0;

        /* TODO: consider swapping the RA only after the last pre_handler fires */
        spin_lock_irqsave(&kretprobe_lock, flags);
        arch_prepare_kretprobe(rp, regs);
        spin_unlock_irqrestore(&kretprobe_lock, flags);
        return 0;
}
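
/*
 * Free every instance still sitting on the free list. Instances on the
 * used list stay behind; unregister_kretprobe() detaches those, and they
 * are then kfree()d by recycle_rp_inst() when the probed function returns.
 */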
static inline void free_rp_inst(struct kretprobe *rp)
{
        struct kretprobe_instance *ri;

        while ((ri = get_free_rp_inst(rp)) != NULL) {
                hlist_del(&ri->uflist);
                kfree(ri);
        }
}

/*
 * Keep all fields in the kprobe consistent
 */
static inline void copy_kprobe(struct kprobe *old_p, struct kprobe *p)
{
        memcpy(&p->opcode, &old_p->opcode, sizeof(kprobe_opcode_t));
        memcpy(&p->ainsn, &old_p->ainsn, sizeof(struct arch_specific_insn));
}

/*
 * Add the new probe to old_p->list. Fail if this is the
 * second jprobe at the address - two jprobes can't coexist
 */
static int __kprobes add_new_kprobe(struct kprobe *old_p, struct kprobe *p)
{
        struct kprobe *kp;

        if (p->break_handler) {
                list_for_each_entry_rcu(kp, &old_p->list, list) {
                        if (kp->break_handler)
                                return -EEXIST;
                }
                list_add_tail_rcu(&p->list, &old_p->list);
        } else
                list_add_rcu(&p->list, &old_p->list);
        return 0;
}

/*
 * Fill in the required fields of the "manager kprobe". Replace the
 * earlier kprobe in the hlist with the manager kprobe
 */
static inline void add_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
{
        copy_kprobe(p, ap);
        ap->addr = p->addr;
        ap->pre_handler = aggr_pre_handler;
        ap->post_handler = aggr_post_handler;
        ap->fault_handler = aggr_fault_handler;
        ap->break_handler = aggr_break_handler;

        INIT_LIST_HEAD(&ap->list);
        list_add_rcu(&p->list, &ap->list);

        hlist_replace_rcu(&p->hlist, &ap->hlist);
}

/*
 * This is the second or subsequent kprobe at the address - handle
 * the intricacies
 * TODO: Move kcalloc outside the spin_lock
 */
static int __kprobes register_aggr_kprobe(struct kprobe *old_p,
                                          struct kprobe *p)
{
        int ret = 0;
        struct kprobe *ap;

        if (old_p->pre_handler == aggr_pre_handler) {
                copy_kprobe(old_p, p);
                ret = add_new_kprobe(old_p, p);
        } else {
                ap = kcalloc(1, sizeof(struct kprobe), GFP_ATOMIC);
                if (!ap)
                        return -ENOMEM;
                add_aggr_kprobe(ap, old_p);
                copy_kprobe(ap, p);
                ret = add_new_kprobe(ap, p);
        }
        return ret;
}

/* kprobe removal house-keeping routines */
static inline void cleanup_kprobe(struct kprobe *p, unsigned long flags)
{
        arch_disarm_kprobe(p);
        hlist_del_rcu(&p->hlist);
        spin_unlock_irqrestore(&kprobe_lock, flags);
        arch_remove_kprobe(p);
}

static inline void cleanup_aggr_kprobe(struct kprobe *old_p,
                                       struct kprobe *p, unsigned long flags)
{
        list_del_rcu(&p->list);
        if (list_empty(&old_p->list))
                cleanup_kprobe(old_p, flags);
        else
                spin_unlock_irqrestore(&kprobe_lock, flags);
}
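
/*
 * Functions marked __kprobes live in the .kprobes.text section; planting
 * a breakpoint there would let a probe hit recurse into the kprobe
 * machinery itself, so such addresses are refused at registration.
 */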
static int __kprobes in_kprobes_functions(unsigned long addr)
{
        if (addr >= (unsigned long)__kprobes_text_start
            && addr < (unsigned long)__kprobes_text_end)
                return -EINVAL;
        return 0;
}
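
/*
 * Minimal usage sketch for the API below (hypothetical caller, not part
 * of this file; my_pre and some_kernel_function are placeholder names):
 *
 *      static int my_pre(struct kprobe *kp, struct pt_regs *regs)
 *      {
 *              return 0;       // 0 = proceed to single-step the insn
 *      }
 *
 *      static struct kprobe my_kp = {
 *              .addr = (kprobe_opcode_t *) some_kernel_function,
 *              .pre_handler = my_pre,
 *      };
 *
 *      ret = register_kprobe(&my_kp);
 */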
int __kprobes register_kprobe(struct kprobe *p)
{
        int ret = 0;
        unsigned long flags = 0;
        struct kprobe *old_p;
        struct module *mod;

        if ((!kernel_text_address((unsigned long) p->addr)) ||
            in_kprobes_functions((unsigned long) p->addr))
                return -EINVAL;

        if ((mod = module_text_address((unsigned long) p->addr)) &&
            (unlikely(!try_module_get(mod))))
                return -EINVAL;

        if ((ret = arch_prepare_kprobe(p)) != 0)
                goto rm_kprobe;

        p->nmissed = 0;
        spin_lock_irqsave(&kprobe_lock, flags);
        old_p = get_kprobe(p->addr);
        if (old_p) {
                ret = register_aggr_kprobe(old_p, p);
                goto out;
        }

        arch_copy_kprobe(p);
        INIT_HLIST_NODE(&p->hlist);
        hlist_add_head_rcu(&p->hlist,
                           &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);
        arch_arm_kprobe(p);

out:
        spin_unlock_irqrestore(&kprobe_lock, flags);
rm_kprobe:
        if (ret == -EEXIST)
                arch_remove_kprobe(p);
        if (ret && mod)
                module_put(mod);
        return ret;
}
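
/*
 * Removal mirrors registration: the probe is unhooked under kprobe_lock,
 * then synchronize_sched() waits out any handler still running with
 * preemption disabled before the probe's memory can safely be reused.
 */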
void __kprobes unregister_kprobe(struct kprobe *p)
{
        unsigned long flags;
        struct kprobe *old_p;
        struct module *mod;

        spin_lock_irqsave(&kprobe_lock, flags);
        old_p = get_kprobe(p->addr);
        if (old_p) {
                /* cleanup_*_kprobe() does the spin_unlock_irqrestore */
                if (old_p->pre_handler == aggr_pre_handler)
                        cleanup_aggr_kprobe(old_p, p, flags);
                else
                        cleanup_kprobe(p, flags);

                synchronize_sched();

                if ((mod = module_text_address((unsigned long)p->addr)))
                        module_put(mod);

                if (old_p->pre_handler == aggr_pre_handler &&
                    list_empty(&old_p->list))
                        kfree(old_p);
        } else
                spin_unlock_irqrestore(&kprobe_lock, flags);
}
static struct notifier_block kprobe_exceptions_nb = {
        .notifier_call = kprobe_exceptions_notify,
        .priority = 0x7fffffff /* we need to be notified first */
};

int __kprobes register_jprobe(struct jprobe *jp)
{
        /* Todo: Verify probepoint is a function entry point */
        jp->kp.pre_handler = setjmp_pre_handler;
        jp->kp.break_handler = longjmp_break_handler;

        return register_kprobe(&jp->kp);
}

void __kprobes unregister_jprobe(struct jprobe *jp)
{
        unregister_kprobe(&jp->kp);
}
#ifdef ARCH_SUPPORTS_KRETPROBES
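
/*
 * rp->maxactive bounds how many returns of the probed function can be
 * tracked concurrently; when no hint is given it defaults to a per-CPU
 * estimate below. Hits arriving with no free instance are recorded in
 * rp->nmissed (by the arch code, which is not shown in this file).
 */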
int __kprobes register_kretprobe(struct kretprobe *rp)
{
        int ret = 0;
        struct kretprobe_instance *inst;
        int i;

        rp->kp.pre_handler = pre_handler_kretprobe;

        /* Pre-allocate memory for max kretprobe instances */
        if (rp->maxactive <= 0) {
#ifdef CONFIG_PREEMPT
                rp->maxactive = max(10, 2 * NR_CPUS);
#else
                rp->maxactive = NR_CPUS;
#endif
        }
        INIT_HLIST_HEAD(&rp->used_instances);
        INIT_HLIST_HEAD(&rp->free_instances);
        for (i = 0; i < rp->maxactive; i++) {
                inst = kmalloc(sizeof(struct kretprobe_instance), GFP_KERNEL);
                if (inst == NULL) {
                        free_rp_inst(rp);
                        return -ENOMEM;
                }
                INIT_HLIST_NODE(&inst->uflist);
                hlist_add_head(&inst->uflist, &rp->free_instances);
        }

        rp->nmissed = 0;
        /* Establish function entry probe point */
        if ((ret = register_kprobe(&rp->kp)) != 0)
                free_rp_inst(rp);
        return ret;
}

#else /* ARCH_SUPPORTS_KRETPROBES */

int __kprobes register_kretprobe(struct kretprobe *rp)
{
        return -ENOSYS;
}

#endif /* ARCH_SUPPORTS_KRETPROBES */
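
/*
 * The underlying kprobe is torn down first, so no new instances can be
 * taken. Instances still out on the used list are orphaned by clearing
 * ri->rp; recycle_rp_inst() then kfree()s them at function return.
 */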
void __kprobes unregister_kretprobe(struct kretprobe *rp)
{
        unsigned long flags;
        struct kretprobe_instance *ri;

        unregister_kprobe(&rp->kp);
        /* No race here */
        spin_lock_irqsave(&kretprobe_lock, flags);
        free_rp_inst(rp);
        while ((ri = get_used_rp_inst(rp)) != NULL) {
                ri->rp = NULL;
                hlist_del(&ri->uflist);
        }
        spin_unlock_irqrestore(&kretprobe_lock, flags);
}
static int __init init_kprobes(void)
{
        int i, err = 0;

        /* FIXME allocate the probe table, currently defined statically */
        /* initialize all list heads */
        for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
                INIT_HLIST_HEAD(&kprobe_table[i]);
                INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
        }

        err = arch_init_kprobes();
        if (!err)
                err = register_die_notifier(&kprobe_exceptions_nb);

        return err;
}

__initcall(init_kprobes);

EXPORT_SYMBOL_GPL(register_kprobe);
EXPORT_SYMBOL_GPL(unregister_kprobe);
EXPORT_SYMBOL_GPL(register_jprobe);
EXPORT_SYMBOL_GPL(unregister_jprobe);
EXPORT_SYMBOL_GPL(jprobe_return);
EXPORT_SYMBOL_GPL(register_kretprobe);
EXPORT_SYMBOL_GPL(unregister_kretprobe);