/*
 *  Kernel Probes (KProbes)
 *  kernel/kprobes.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes suggestions from
 *		Rusty Russell).
 * 2004-Aug	Updated by Prasanna S Panchamukhi <prasanna@in.ibm.com> with
 *		hlists and exceptions notifier as suggested by Andi Kleen.
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2004-Sep	Prasanna S Panchamukhi <prasanna@in.ibm.com> changed Kprobes
 *		exceptions notifier to be first on the priority list.
 * 2005-May	Hien Nguyen <hien@us.ibm.com>, Jim Keniston
 *		<jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> added function-return probes.
 */
#include <linux/kprobes.h>
#include <linux/hash.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/moduleloader.h>
#include <linux/kallsyms.h>
#include <linux/freezer.h>
#include <asm-generic/sections.h>
#include <asm/cacheflush.h>
#include <asm/errno.h>
#include <asm/kdebug.h>

#define KPROBE_HASH_BITS 6
#define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)

/*
 * Some oddball architectures like 64-bit powerpc have function descriptors,
 * so this must be overridable.
 */
#ifndef kprobe_lookup_name
#define kprobe_lookup_name(name, addr) \
	addr = ((kprobe_opcode_t *)(kallsyms_lookup_name(name)))
#endif
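
/*
 * For illustration only: on an architecture with function descriptors
 * (e.g. 64-bit powerpc), an override of this macro in <asm/kprobes.h>
 * would have to dereference the descriptor returned by
 * kallsyms_lookup_name() to obtain the actual entry-point address.
 * A minimal, hedged sketch of such an override (not the actual ppc64
 * definition, which also deals with dot-symbols):
 *
 *	#define kprobe_lookup_name(name, addr)				\
 *	{								\
 *		addr = (kprobe_opcode_t *)kallsyms_lookup_name(name);	\
 *		if (addr)						\
 *			addr = *(kprobe_opcode_t **)addr;		\
 *	}
 */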

static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];
static atomic_t kprobe_count;

DEFINE_MUTEX(kprobe_mutex);		/* Protects kprobe_table */
DEFINE_SPINLOCK(kretprobe_lock);	/* Protects kretprobe_inst_table */
static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;

static struct notifier_block kprobe_page_fault_nb = {
	.notifier_call = kprobe_exceptions_notify,
	.priority = 0x7fffffff /* we need to be notified first */
};

#ifdef __ARCH_WANT_KPROBES_INSN_SLOT
/*
 * kprobe->ainsn.insn points to the copy of the instruction to be
 * single-stepped. x86_64, POWER4 and above have no-exec support and
 * stepping on the instruction on a vmalloced/kmalloced/data page
 * is a recipe for disaster.
 */
#define INSNS_PER_PAGE	(PAGE_SIZE/(MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))

struct kprobe_insn_page {
	struct hlist_node hlist;
	kprobe_opcode_t *insns;		/* Page of instruction slots */
	char slot_used[INSNS_PER_PAGE];
	int nused;
	int ngarbage;
};

enum kprobe_slot_state {
	SLOT_CLEAN = 0,
	SLOT_DIRTY = 1,
	SLOT_USED = 2,
};

static struct hlist_head kprobe_insn_pages;
static int kprobe_garbage_slots;
static int collect_garbage_slots(void);

static int __kprobes check_safety(void)
{
	int ret = 0;
#if defined(CONFIG_PREEMPT) && defined(CONFIG_PM)
	ret = freeze_processes();
	if (ret == 0) {
		struct task_struct *p, *q;
		do_each_thread(p, q) {
			if (p != current && p->state == TASK_RUNNING &&
			    p->pid != 0) {
				printk("Check failed: %s is running\n", p->comm);
				ret = -1;
				goto loop_end;
			}
		} while_each_thread(p, q);
	}
loop_end:
	thaw_processes();
#else
	synchronize_sched();
#endif
	return ret;
}

/**
 * get_insn_slot() - Find a slot on an executable page for an instruction.
 * We allocate an executable page if there's no room on existing ones.
 */
kprobe_opcode_t __kprobes *get_insn_slot(void)
{
	struct kprobe_insn_page *kip;
	struct hlist_node *pos;

 retry:
	hlist_for_each(pos, &kprobe_insn_pages) {
		kip = hlist_entry(pos, struct kprobe_insn_page, hlist);
		if (kip->nused < INSNS_PER_PAGE) {
			int i;
			for (i = 0; i < INSNS_PER_PAGE; i++) {
				if (kip->slot_used[i] == SLOT_CLEAN) {
					kip->slot_used[i] = SLOT_USED;
					kip->nused++;
					return kip->insns + (i * MAX_INSN_SIZE);
				}
			}
			/* Surprise!  No unused slots.  Fix kip->nused. */
			kip->nused = INSNS_PER_PAGE;
		}
	}

	/* If there are any garbage slots, collect them and try again. */
	if (kprobe_garbage_slots && collect_garbage_slots() == 0)
		goto retry;

	/* All out of space.  Need to allocate a new page.  Use slot 0. */
	kip = kmalloc(sizeof(struct kprobe_insn_page), GFP_KERNEL);
	if (!kip)
		return NULL;

	/*
	 * Use module_alloc so this page is within +/- 2GB of where the
	 * kernel image and loaded module images reside. This is required
	 * so x86_64 can correctly handle the %rip-relative fixups.
	 */
	kip->insns = module_alloc(PAGE_SIZE);
	if (!kip->insns) {
		kfree(kip);
		return NULL;
	}
	INIT_HLIST_NODE(&kip->hlist);
	hlist_add_head(&kip->hlist, &kprobe_insn_pages);
	memset(kip->slot_used, SLOT_CLEAN, INSNS_PER_PAGE);
	kip->slot_used[0] = SLOT_USED;
	kip->nused = 1;
	kip->ngarbage = 0;
	return kip->insns;
}
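
/*
 * For illustration only (not code from this file): arch code typically
 * consumes a slot in its arch_prepare_kprobe() by copying the probed
 * instruction into it.  The exact layout and fixups are
 * architecture-specific; this is a hedged sketch, not any particular
 * architecture's actual implementation:
 *
 *	int __kprobes arch_prepare_kprobe(struct kprobe *p)
 *	{
 *		p->ainsn.insn = get_insn_slot();
 *		if (!p->ainsn.insn)
 *			return -ENOMEM;
 *		memcpy(p->ainsn.insn, p->addr,
 *		       MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
 *		p->opcode = *p->addr;	// keep a copy of the original opcode
 *		return 0;
 *	}
 *
 * The slot is handed back via free_insn_slot() from
 * arch_remove_kprobe(), marking it dirty if it may still be in use.
 */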

/* Return 1 if freeing this slot emptied its page (and the page was
 * freed or recycled), otherwise 0. */
static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
{
	kip->slot_used[idx] = SLOT_CLEAN;
	kip->nused--;
	if (kip->nused == 0) {
		/*
		 * Page is no longer in use.  Free it unless
		 * it's the last one.  We keep the last one
		 * so as not to have to set it up again the
		 * next time somebody inserts a probe.
		 */
		hlist_del(&kip->hlist);
		if (hlist_empty(&kprobe_insn_pages)) {
			INIT_HLIST_NODE(&kip->hlist);
			hlist_add_head(&kip->hlist,
				       &kprobe_insn_pages);
		} else {
			module_free(NULL, kip->insns);
			kfree(kip);
		}
		return 1;
	}
	return 0;
}

static int __kprobes collect_garbage_slots(void)
{
	struct kprobe_insn_page *kip;
	struct hlist_node *pos, *next;

	/* Ensure no-one is preempted on the garbage slots */
	if (check_safety() != 0)
		return -EAGAIN;

	hlist_for_each_safe(pos, next, &kprobe_insn_pages) {
		int i;
		kip = hlist_entry(pos, struct kprobe_insn_page, hlist);
		if (kip->ngarbage == 0)
			continue;
		kip->ngarbage = 0;	/* we will collect all garbage slots */
		for (i = 0; i < INSNS_PER_PAGE; i++) {
			/*
			 * collect_one_slot() returning 1 means the page
			 * was freed, so kip must not be touched again.
			 */
			if (kip->slot_used[i] == SLOT_DIRTY &&
			    collect_one_slot(kip, i))
				break;
		}
	}
	kprobe_garbage_slots = 0;
	return 0;
}

void __kprobes free_insn_slot(kprobe_opcode_t *slot, int dirty)
{
	struct kprobe_insn_page *kip;
	struct hlist_node *pos;

	hlist_for_each(pos, &kprobe_insn_pages) {
		kip = hlist_entry(pos, struct kprobe_insn_page, hlist);
		if (kip->insns <= slot &&
		    slot < kip->insns + (INSNS_PER_PAGE * MAX_INSN_SIZE)) {
			int i = (slot - kip->insns) / MAX_INSN_SIZE;
			if (dirty) {
				kip->slot_used[i] = SLOT_DIRTY;
				kip->ngarbage++;
			} else {
				collect_one_slot(kip, i);
			}
			break;
		}
	}

	if (dirty && ++kprobe_garbage_slots > INSNS_PER_PAGE)
		collect_garbage_slots();
}
#endif

/* We have preemption disabled, so it is safe to use __ versions */
static inline void set_kprobe_instance(struct kprobe *kp)
{
	__get_cpu_var(kprobe_instance) = kp;
}

static inline void reset_kprobe_instance(void)
{
	__get_cpu_var(kprobe_instance) = NULL;
}

/*
 * This routine is called either:
 *	- under the kprobe_mutex - during kprobe_[un]register()
 *				OR
 *	- with preemption disabled - from arch/xxx/kernel/kprobes.c
 */
struct kprobe __kprobes *get_kprobe(void *addr)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;

	head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
	hlist_for_each_entry_rcu(p, node, head, hlist) {
		if (p->addr == addr)
			return p;
	}
	return NULL;
}

/*
 * Aggregate handlers for multiple kprobes support - these handlers
 * take care of invoking the individual kprobe handlers on p->list
 */
static int __kprobes aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->pre_handler) {
			set_kprobe_instance(kp);
			if (kp->pre_handler(kp, regs))
				return 1;
		}
		reset_kprobe_instance();
	}
	return 0;
}

static void __kprobes aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
					unsigned long flags)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->post_handler) {
			set_kprobe_instance(kp);
			kp->post_handler(kp, regs, flags);
			reset_kprobe_instance();
		}
	}
}

static int __kprobes aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
					int trapnr)
{
	struct kprobe *cur = __get_cpu_var(kprobe_instance);

	/*
	 * If we faulted "during" the execution of a user-specified
	 * probe handler, invoke just that probe's fault handler.
	 */
	if (cur && cur->fault_handler) {
		if (cur->fault_handler(cur, regs, trapnr))
			return 1;
	}
	return 0;
}

static int __kprobes aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *cur = __get_cpu_var(kprobe_instance);
	int ret = 0;

	if (cur && cur->break_handler) {
		if (cur->break_handler(cur, regs))
			ret = 1;
	}
	reset_kprobe_instance();
	return ret;
}

/* Walks the list and increments the nmissed count for the multiprobe case */
void __kprobes kprobes_inc_nmissed_count(struct kprobe *p)
{
	struct kprobe *kp;

	if (p->pre_handler != aggr_pre_handler) {
		p->nmissed++;
	} else {
		list_for_each_entry_rcu(kp, &p->list, list)
			kp->nmissed++;
	}
}

/* Called with kretprobe_lock held */
struct kretprobe_instance __kprobes *get_free_rp_inst(struct kretprobe *rp)
{
	struct hlist_node *node;
	struct kretprobe_instance *ri;

	hlist_for_each_entry(ri, node, &rp->free_instances, uflist)
		return ri;
	return NULL;
}

/* Called with kretprobe_lock held */
static struct kretprobe_instance __kprobes *get_used_rp_inst(struct kretprobe
							      *rp)
{
	struct hlist_node *node;
	struct kretprobe_instance *ri;

	hlist_for_each_entry(ri, node, &rp->used_instances, uflist)
		return ri;
	return NULL;
}

/* Called with kretprobe_lock held */
void __kprobes add_rp_inst(struct kretprobe_instance *ri)
{
	/*
	 * Remove rp inst off the free list -
	 * Add it back when probed function returns
	 */
	hlist_del(&ri->uflist);

	/* Add rp inst onto table */
	INIT_HLIST_NODE(&ri->hlist);
	hlist_add_head(&ri->hlist,
		       &kretprobe_inst_table[hash_ptr(ri->task, KPROBE_HASH_BITS)]);

	/* Also add this rp inst to the used list. */
	INIT_HLIST_NODE(&ri->uflist);
	hlist_add_head(&ri->uflist, &ri->rp->used_instances);
}

/* Called with kretprobe_lock held */
void __kprobes recycle_rp_inst(struct kretprobe_instance *ri,
			       struct hlist_head *head)
{
	/* remove rp inst off the rprobe_inst_table */
	hlist_del(&ri->hlist);
	if (ri->rp) {
		/* remove rp inst off the used list */
		hlist_del(&ri->uflist);
		/* put rp inst back onto the free list */
		INIT_HLIST_NODE(&ri->uflist);
		hlist_add_head(&ri->uflist, &ri->rp->free_instances);
	} else
		/* Unregistering */
		hlist_add_head(&ri->hlist, head);
}

struct hlist_head __kprobes *kretprobe_inst_table_head(struct task_struct *tsk)
{
	return &kretprobe_inst_table[hash_ptr(tsk, KPROBE_HASH_BITS)];
}

/*
 * This function is called from finish_task_switch() when task tk becomes
 * dead, so that we can recycle any function-return probe instances
 * associated with this task. These left-over instances represent probed
 * functions that have been called but will never return.
 */
void __kprobes kprobe_flush_task(struct task_struct *tk)
{
	struct kretprobe_instance *ri;
	struct hlist_head *head, empty_rp;
	struct hlist_node *node, *tmp;
	unsigned long flags = 0;

	INIT_HLIST_HEAD(&empty_rp);
	spin_lock_irqsave(&kretprobe_lock, flags);
	head = kretprobe_inst_table_head(tk);
	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
		if (ri->task == tk)
			recycle_rp_inst(ri, &empty_rp);
	}
	spin_unlock_irqrestore(&kretprobe_lock, flags);

	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
}

static inline void free_rp_inst(struct kretprobe *rp)
{
	struct kretprobe_instance *ri;

	while ((ri = get_free_rp_inst(rp)) != NULL) {
		hlist_del(&ri->uflist);
		kfree(ri);
	}
}

/*
 * Keep all fields in the kprobe consistent
 */
static inline void copy_kprobe(struct kprobe *old_p, struct kprobe *p)
{
	memcpy(&p->opcode, &old_p->opcode, sizeof(kprobe_opcode_t));
	memcpy(&p->ainsn, &old_p->ainsn, sizeof(struct arch_specific_insn));
}

/*
 * Add the new probe to old_p->list. Fail if this is the
 * second jprobe at the address - two jprobes can't coexist
 */
static int __kprobes add_new_kprobe(struct kprobe *old_p, struct kprobe *p)
{
	if (p->break_handler) {
		if (old_p->break_handler)
			return -EEXIST;
		list_add_tail_rcu(&p->list, &old_p->list);
		old_p->break_handler = aggr_break_handler;
	} else
		list_add_rcu(&p->list, &old_p->list);
	if (p->post_handler && !old_p->post_handler)
		old_p->post_handler = aggr_post_handler;
	return 0;
}

/*
 * Fill in the required fields of the "manager kprobe". Replace the
 * earlier kprobe in the hlist with the manager kprobe
 */
static inline void add_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
{
	copy_kprobe(p, ap);
	flush_insn_slot(ap);
	ap->addr = p->addr;
	ap->pre_handler = aggr_pre_handler;
	ap->fault_handler = aggr_fault_handler;
	if (p->post_handler)
		ap->post_handler = aggr_post_handler;
	if (p->break_handler)
		ap->break_handler = aggr_break_handler;

	INIT_LIST_HEAD(&ap->list);
	list_add_rcu(&p->list, &ap->list);

	hlist_replace_rcu(&p->hlist, &ap->hlist);
}

/*
 * This is the second or subsequent kprobe at the address - handle
 * the intricacies
 */
static int __kprobes register_aggr_kprobe(struct kprobe *old_p,
					  struct kprobe *p)
{
	int ret = 0;
	struct kprobe *ap;

	if (old_p->pre_handler == aggr_pre_handler) {
		copy_kprobe(old_p, p);
		ret = add_new_kprobe(old_p, p);
	} else {
		ap = kzalloc(sizeof(struct kprobe), GFP_KERNEL);
		if (!ap)
			return -ENOMEM;
		add_aggr_kprobe(ap, old_p);
		copy_kprobe(ap, p);
		ret = add_new_kprobe(ap, p);
	}
	return ret;
}

static int __kprobes in_kprobes_functions(unsigned long addr)
{
	if (addr >= (unsigned long)__kprobes_text_start
	    && addr < (unsigned long)__kprobes_text_end)
		return -EINVAL;
	return 0;
}
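
/*
 * Illustration (a hedged sketch, not code from this file): if two
 * independent kprobes are registered at the same address, the second
 * registration goes through register_aggr_kprobe().  A "manager"
 * kprobe using aggr_pre_handler and friends replaces the first probe
 * in the hash table, and both user probes end up on the manager's
 * ->list.  some_addr, h1 and h2 are placeholders:
 *
 *	static struct kprobe kp1 = { .addr = some_addr, .pre_handler = h1 };
 *	static struct kprobe kp2 = { .addr = some_addr, .pre_handler = h2 };
 *
 *	register_kprobe(&kp1);	// kp1 goes on the hash list directly
 *	register_kprobe(&kp2);	// a manager kprobe now heads the hash
 *				// list; kp1 and kp2 hang off its ->list,
 *				// so h1 and h2 both run via
 *				// aggr_pre_handler on a hit
 */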

static int __kprobes __register_kprobe(struct kprobe *p,
				       unsigned long called_from)
{
	int ret = 0;
	struct kprobe *old_p;
	struct module *probed_mod;

	/*
	 * If we have a symbol_name argument, look it up and add the
	 * offset to the address.  That way the addr field can either
	 * be global or relative to a symbol.
	 */
	if (p->symbol_name) {
		if (p->addr)
			return -EINVAL;
		kprobe_lookup_name(p->symbol_name, p->addr);
	}

	if (!p->addr)
		return -EINVAL;
	p->addr = (kprobe_opcode_t *)(((char *)p->addr) + p->offset);

	if (!kernel_text_address((unsigned long)p->addr) ||
	    in_kprobes_functions((unsigned long)p->addr))
		return -EINVAL;

	p->mod_refcounted = 0;

	/* Check if we are probing a module */
	if ((probed_mod = module_text_address((unsigned long)p->addr))) {
		struct module *calling_mod = module_text_address(called_from);
		/*
		 * We must allow modules to probe themselves and in this
		 * case avoid incrementing the module refcount, so as to
		 * allow unloading of self-probing modules.
		 */
		if (calling_mod && (calling_mod != probed_mod)) {
			if (unlikely(!try_module_get(probed_mod)))
				return -EINVAL;
			p->mod_refcounted = 1;
		} else
			probed_mod = NULL;
	}

	p->nmissed = 0;
	mutex_lock(&kprobe_mutex);
	old_p = get_kprobe(p->addr);
	if (old_p) {
		ret = register_aggr_kprobe(old_p, p);
		if (!ret)
			atomic_inc(&kprobe_count);
		goto out;
	}

	if ((ret = arch_prepare_kprobe(p)) != 0)
		goto out;

	INIT_HLIST_NODE(&p->hlist);
	hlist_add_head_rcu(&p->hlist,
			   &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);

	if (atomic_add_return(1, &kprobe_count) ==
	    (ARCH_INACTIVE_KPROBE_COUNT + 1))
		register_page_fault_notifier(&kprobe_page_fault_nb);

	arch_arm_kprobe(p);

out:
	mutex_unlock(&kprobe_mutex);

	if (ret && probed_mod)
		module_put(probed_mod);
	return ret;
}

int __kprobes register_kprobe(struct kprobe *p)
{
	return __register_kprobe(p,
				 (unsigned long)__builtin_return_address(0));
}
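
/*
 * Example usage (a minimal, hedged sketch, not part of this file): a
 * module can place a probe either by symbol name or by raw address.
 * handler_pre and the probed symbol below are placeholders for this
 * illustration:
 *
 *	static int handler_pre(struct kprobe *p, struct pt_regs *regs)
 *	{
 *		printk("pre_handler: p->addr = 0x%p\n", p->addr);
 *		return 0;	// 0 = let the probed instruction run
 *	}
 *
 *	static struct kprobe kp = {
 *		.symbol_name = "do_fork",  // resolved via kprobe_lookup_name()
 *		.pre_handler = handler_pre,
 *	};
 *
 *	static int __init probe_init(void)
 *	{
 *		return register_kprobe(&kp);
 *	}
 *
 *	static void __exit probe_exit(void)
 *	{
 *		unregister_kprobe(&kp);
 *	}
 */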

void __kprobes unregister_kprobe(struct kprobe *p)
{
	struct module *mod;
	struct kprobe *old_p, *list_p;
	int cleanup_p;

	mutex_lock(&kprobe_mutex);
	old_p = get_kprobe(p->addr);
	if (unlikely(!old_p)) {
		mutex_unlock(&kprobe_mutex);
		return;
	}
	if (p != old_p) {
		list_for_each_entry_rcu(list_p, &old_p->list, list)
			if (list_p == p)
				/* kprobe p is a valid probe */
				goto valid_p;
		mutex_unlock(&kprobe_mutex);
		return;
	}
valid_p:
	if ((old_p == p) || ((old_p->pre_handler == aggr_pre_handler) &&
	    (p->list.next == &old_p->list) &&
	    (p->list.prev == &old_p->list))) {
		/* Only probe on the hash list */
		arch_disarm_kprobe(p);
		hlist_del_rcu(&old_p->hlist);
		cleanup_p = 1;
	} else {
		list_del_rcu(&p->list);
		cleanup_p = 0;
	}

	mutex_unlock(&kprobe_mutex);

	synchronize_sched();
	if (p->mod_refcounted &&
	    (mod = module_text_address((unsigned long)p->addr)))
		module_put(mod);

	if (cleanup_p) {
		if (p != old_p) {
			list_del_rcu(&p->list);
			kfree(old_p);
		}
		arch_remove_kprobe(p);
	} else {
		mutex_lock(&kprobe_mutex);
		if (p->break_handler)
			old_p->break_handler = NULL;
		if (p->post_handler) {
			list_for_each_entry_rcu(list_p, &old_p->list, list) {
				if (list_p->post_handler) {
					cleanup_p = 2;
					break;
				}
			}
			if (cleanup_p == 0)
				old_p->post_handler = NULL;
		}
		mutex_unlock(&kprobe_mutex);
	}

	/*
	 * Call unregister_page_fault_notifier() if no probes are active.
	 */
	mutex_lock(&kprobe_mutex);
	if (atomic_add_return(-1, &kprobe_count) ==
	    ARCH_INACTIVE_KPROBE_COUNT)
		unregister_page_fault_notifier(&kprobe_page_fault_nb);
	mutex_unlock(&kprobe_mutex);
}

static struct notifier_block kprobe_exceptions_nb = {
	.notifier_call = kprobe_exceptions_notify,
	.priority = 0x7fffffff /* we need to be notified first */
};

int __kprobes register_jprobe(struct jprobe *jp)
{
	/* TODO: verify that the probepoint is a function entry point */
	jp->kp.pre_handler = setjmp_pre_handler;
	jp->kp.break_handler = longjmp_break_handler;

	return __register_kprobe(&jp->kp,
				 (unsigned long)__builtin_return_address(0));
}

void __kprobes unregister_jprobe(struct jprobe *jp)
{
	unregister_kprobe(&jp->kp);
}
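
/*
 * Example usage (a hedged sketch, not part of this file): a jprobe's
 * entry handler mirrors the probed function's signature, so it sees
 * the function's arguments, and must end with jprobe_return().  The
 * probed function and handler below are placeholders:
 *
 *	static long jdo_fork(unsigned long clone_flags,
 *			     unsigned long stack_start, struct pt_regs *regs,
 *			     unsigned long stack_size,
 *			     int __user *parent_tidptr,
 *			     int __user *child_tidptr)
 *	{
 *		printk("jprobe: clone_flags = 0x%lx\n", clone_flags);
 *		jprobe_return();	// never returns normally
 *		return 0;
 *	}
 *
 *	static struct jprobe jp = {
 *		.entry = JPROBE_ENTRY(jdo_fork),
 *		.kp.symbol_name = "do_fork",
 *	};
 *
 *	// register_jprobe(&jp); ... unregister_jprobe(&jp);
 */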

#ifdef ARCH_SUPPORTS_KRETPROBES

/*
 * This kprobe pre_handler is registered with every kretprobe. When
 * the probe hits, it will set up the return probe.
 */
static int __kprobes pre_handler_kretprobe(struct kprobe *p,
					   struct pt_regs *regs)
{
	struct kretprobe *rp = container_of(p, struct kretprobe, kp);
	unsigned long flags = 0;

	/*
	 * TODO: consider swapping the return address only after the
	 * last pre_handler has fired.
	 */
	spin_lock_irqsave(&kretprobe_lock, flags);
	arch_prepare_kretprobe(rp, regs);
	spin_unlock_irqrestore(&kretprobe_lock, flags);
	return 0;
}

int __kprobes register_kretprobe(struct kretprobe *rp)
{
	int ret = 0;
	struct kretprobe_instance *inst;
	int i;

	rp->kp.pre_handler = pre_handler_kretprobe;
	rp->kp.post_handler = NULL;
	rp->kp.fault_handler = NULL;
	rp->kp.break_handler = NULL;

	/* Pre-allocate memory for max kretprobe instances */
	if (rp->maxactive <= 0) {
#ifdef CONFIG_PREEMPT
		rp->maxactive = max(10, 2 * NR_CPUS);
#else
		rp->maxactive = NR_CPUS;
#endif
	}
	INIT_HLIST_HEAD(&rp->used_instances);
	INIT_HLIST_HEAD(&rp->free_instances);
	for (i = 0; i < rp->maxactive; i++) {
		inst = kmalloc(sizeof(struct kretprobe_instance), GFP_KERNEL);
		if (inst == NULL) {
			free_rp_inst(rp);
			return -ENOMEM;
		}
		INIT_HLIST_NODE(&inst->uflist);
		hlist_add_head(&inst->uflist, &rp->free_instances);
	}

	rp->nmissed = 0;
	/* Establish function entry probe point */
	if ((ret = __register_kprobe(&rp->kp,
		(unsigned long)__builtin_return_address(0))) != 0)
		free_rp_inst(rp);
	return ret;
}

#else /* ARCH_SUPPORTS_KRETPROBES */

int __kprobes register_kretprobe(struct kretprobe *rp)
{
	return -ENOSYS;
}

#endif /* ARCH_SUPPORTS_KRETPROBES */

void __kprobes unregister_kretprobe(struct kretprobe *rp)
{
	unsigned long flags;
	struct kretprobe_instance *ri;

	unregister_kprobe(&rp->kp);
	/* No race here */
	spin_lock_irqsave(&kretprobe_lock, flags);
	while ((ri = get_used_rp_inst(rp)) != NULL) {
		ri->rp = NULL;
		hlist_del(&ri->uflist);
	}
	spin_unlock_irqrestore(&kretprobe_lock, flags);
	free_rp_inst(rp);
}
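
/*
 * Example usage (a hedged sketch, not part of this file): a kretprobe
 * fires when the probed function returns, so its handler can inspect
 * the saved registers (e.g. the arch's return-value register).
 * maxactive bounds how many activations may be outstanding at once;
 * the handler and probed symbol below are placeholders:
 *
 *	static int ret_handler(struct kretprobe_instance *ri,
 *			       struct pt_regs *regs)
 *	{
 *		printk("do_fork returned\n");
 *		return 0;
 *	}
 *
 *	static struct kretprobe krp = {
 *		.handler = ret_handler,
 *		.kp.symbol_name = "do_fork",
 *		.maxactive = 20,	// bound concurrent activations
 *	};
 *
 *	// register_kretprobe(&krp); ... unregister_kretprobe(&krp);
 */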

static int __init init_kprobes(void)
{
	int i, err = 0;

	/* FIXME: allocate the probe table, currently defined statically */
	/* initialize all list heads */
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		INIT_HLIST_HEAD(&kprobe_table[i]);
		INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
	}
	atomic_set(&kprobe_count, 0);

	err = arch_init_kprobes();
	if (!err)
		err = register_die_notifier(&kprobe_exceptions_nb);

	return err;
}

__initcall(init_kprobes);

EXPORT_SYMBOL_GPL(register_kprobe);
EXPORT_SYMBOL_GPL(unregister_kprobe);
EXPORT_SYMBOL_GPL(register_jprobe);
EXPORT_SYMBOL_GPL(unregister_jprobe);
EXPORT_SYMBOL_GPL(jprobe_return);
EXPORT_SYMBOL_GPL(register_kretprobe);
EXPORT_SYMBOL_GPL(unregister_kretprobe);