/*
 *  Kernel Probes (KProbes)
 *  kernel/kprobes.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes suggestions from
 *		Rusty Russell).
 * 2004-Aug	Updated by Prasanna S Panchamukhi <prasanna@in.ibm.com> with
 *		hlists and exceptions notifier as suggested by Andi Kleen.
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2004-Sep	Prasanna S Panchamukhi <prasanna@in.ibm.com> Changed Kprobes
 *		exceptions notifier to be first on the priority list.
 * 2005-May	Hien Nguyen <hien@us.ibm.com>, Jim Keniston
 *		<jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> added function-return probes.
 */
#include <linux/kprobes.h>
#include <linux/hash.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/module.h>
#include <linux/moduleloader.h>
#include <linux/kallsyms.h>
#include <linux/freezer.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <asm-generic/sections.h>
#include <asm/cacheflush.h>
#include <asm/errno.h>
#include <asm/kdebug.h>

#define KPROBE_HASH_BITS 6
#define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)
/*
 * Some oddball architectures like 64-bit powerpc have function descriptors,
 * so this must be overridable.
 */
#ifndef kprobe_lookup_name
#define kprobe_lookup_name(name, addr) \
	addr = ((kprobe_opcode_t *)(kallsyms_lookup_name(name)))
#endif
static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];
static atomic_t kprobe_count;

DEFINE_MUTEX(kprobe_mutex);		/* Protects kprobe_table */
DEFINE_SPINLOCK(kretprobe_lock);	/* Protects kretprobe_inst_table */
static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;

static struct notifier_block kprobe_page_fault_nb = {
	.notifier_call = kprobe_exceptions_notify,
	.priority = 0x7fffffff /* we need to be notified first */
};
#ifdef __ARCH_WANT_KPROBES_INSN_SLOT
/*
 * kprobe->ainsn.insn points to the copy of the instruction to be
 * single-stepped. x86_64, POWER4 and above have no-exec support and
 * stepping on the instruction on a vmalloced/kmalloced/data page
 * is a recipe for disaster.
 */
#define INSNS_PER_PAGE	(PAGE_SIZE/(MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))

struct kprobe_insn_page {
	struct hlist_node hlist;
	kprobe_opcode_t *insns;		/* Page of instruction slots */
	char slot_used[INSNS_PER_PAGE];
	int nused;
	int ngarbage;
};

enum kprobe_slot_state {
	SLOT_CLEAN = 0,
	SLOT_DIRTY = 1,
	SLOT_USED = 2,
};

static struct hlist_head kprobe_insn_pages;
static int kprobe_garbage_slots;
static int collect_garbage_slots(void);
static int __kprobes check_safety(void)
{
	int ret = 0;
#if defined(CONFIG_PREEMPT) && defined(CONFIG_PM)
	ret = freeze_processes();
	if (ret == 0) {
		struct task_struct *p, *q;
		do_each_thread(p, q) {
			if (p != current && p->state == TASK_RUNNING &&
			    p->pid != 0) {
				printk("Check failed: %s is running\n", p->comm);
				ret = -1;
				goto loop_end;
			}
		} while_each_thread(p, q);
	}
loop_end:
	thaw_processes();
#else
	synchronize_sched();
#endif
	return ret;
}
/**
 * get_insn_slot() - Find a slot on an executable page for an instruction.
 * We allocate an executable page if there's no room on existing ones.
 */
kprobe_opcode_t __kprobes *get_insn_slot(void)
{
	struct kprobe_insn_page *kip;
	struct hlist_node *pos;

retry:
	hlist_for_each(pos, &kprobe_insn_pages) {
		kip = hlist_entry(pos, struct kprobe_insn_page, hlist);
		if (kip->nused < INSNS_PER_PAGE) {
			int i;
			for (i = 0; i < INSNS_PER_PAGE; i++) {
				if (kip->slot_used[i] == SLOT_CLEAN) {
					kip->slot_used[i] = SLOT_USED;
					kip->nused++;
					return kip->insns + (i * MAX_INSN_SIZE);
				}
			}
			/* Surprise!  No unused slots.  Fix kip->nused. */
			kip->nused = INSNS_PER_PAGE;
		}
	}

	/* If there are any garbage slots, collect them and try again. */
	if (kprobe_garbage_slots && collect_garbage_slots() == 0)
		goto retry;

	/* All out of space.  Need to allocate a new page.  Use slot 0. */
	kip = kmalloc(sizeof(struct kprobe_insn_page), GFP_KERNEL);
	if (!kip)
		return NULL;

	/*
	 * Use module_alloc so this page is within +/- 2GB of where the
	 * kernel image and loaded module images reside. This is required
	 * so x86_64 can correctly handle the %rip-relative fixups.
	 */
	kip->insns = module_alloc(PAGE_SIZE);
	if (!kip->insns) {
		kfree(kip);
		return NULL;
	}
	INIT_HLIST_NODE(&kip->hlist);
	hlist_add_head(&kip->hlist, &kprobe_insn_pages);
	memset(kip->slot_used, SLOT_CLEAN, INSNS_PER_PAGE);
	kip->slot_used[0] = SLOT_USED;
	kip->nused = 1;
	kip->ngarbage = 0;
	return kip->insns;
}
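
/*
 * Illustrative sketch, not part of this file: an architecture that
 * defines __ARCH_WANT_KPROBES_INSN_SLOT would typically pair
 * get_insn_slot() with free_insn_slot() in its arch hooks.  The
 * function bodies below are assumptions for illustration; real arch
 * code differs per architecture.
 */
#if 0
static int __kprobes example_arch_prepare_kprobe(struct kprobe *p)
{
	/* Reserve an executable slot and copy the probed instruction. */
	p->ainsn.insn = get_insn_slot();
	if (!p->ainsn.insn)
		return -ENOMEM;
	memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
	return 0;
}

static void __kprobes example_arch_remove_kprobe(struct kprobe *p)
{
	/* Mark the slot dirty; collect_garbage_slots() reclaims it lazily. */
	free_insn_slot(p->ainsn.insn, 1);
}
#endif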
/* Return 1 if all garbage is collected, otherwise 0. */
static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
{
	kip->slot_used[idx] = SLOT_CLEAN;
	kip->nused--;
	if (kip->nused == 0) {
		/*
		 * Page is no longer in use.  Free it unless
		 * it's the last one.  We keep the last one
		 * so as not to have to set it up again the
		 * next time somebody inserts a probe.
		 */
		hlist_del(&kip->hlist);
		if (hlist_empty(&kprobe_insn_pages)) {
			INIT_HLIST_NODE(&kip->hlist);
			hlist_add_head(&kip->hlist,
				       &kprobe_insn_pages);
		} else {
			module_free(NULL, kip->insns);
			kfree(kip);
		}
		return 1;
	}
	return 0;
}
static int __kprobes collect_garbage_slots(void)
{
	struct kprobe_insn_page *kip;
	struct hlist_node *pos, *next;

	/* Ensure no-one is preempted on the garbage slots */
	if (check_safety() != 0)
		return -EAGAIN;

	hlist_for_each_safe(pos, next, &kprobe_insn_pages) {
		int i;
		kip = hlist_entry(pos, struct kprobe_insn_page, hlist);
		if (kip->ngarbage == 0)
			continue;
		kip->ngarbage = 0;	/* we will collect all garbage slots */
		for (i = 0; i < INSNS_PER_PAGE; i++) {
			if (kip->slot_used[i] == SLOT_DIRTY &&
			    collect_one_slot(kip, i))
				break;
		}
	}
	kprobe_garbage_slots = 0;
	return 0;
}
void __kprobes free_insn_slot(kprobe_opcode_t *slot, int dirty)
{
	struct kprobe_insn_page *kip;
	struct hlist_node *pos;

	hlist_for_each(pos, &kprobe_insn_pages) {
		kip = hlist_entry(pos, struct kprobe_insn_page, hlist);
		if (kip->insns <= slot &&
		    slot < kip->insns + (INSNS_PER_PAGE * MAX_INSN_SIZE)) {
			int i = (slot - kip->insns) / MAX_INSN_SIZE;
			if (dirty) {
				kip->slot_used[i] = SLOT_DIRTY;
				kip->ngarbage++;
			} else {
				collect_one_slot(kip, i);
			}
			break;
		}
	}

	if (dirty && (++kprobe_garbage_slots > INSNS_PER_PAGE))
		collect_garbage_slots();
}
#endif
/* We have preemption disabled, so it is safe to use the __ versions */
static inline void set_kprobe_instance(struct kprobe *kp)
{
	__get_cpu_var(kprobe_instance) = kp;
}

static inline void reset_kprobe_instance(void)
{
	__get_cpu_var(kprobe_instance) = NULL;
}
/*
 * This routine is called either:
 *	- under the kprobe_mutex - during kprobe_[un]register()
 *				OR
 *	- with preemption disabled - from arch/xxx/kernel/kprobes.c
 */
struct kprobe __kprobes *get_kprobe(void *addr)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;

	head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
	hlist_for_each_entry_rcu(p, node, head, hlist) {
		if (p->addr == addr)
			return p;
	}
	return NULL;
}
/*
 * Aggregate handlers for multiple kprobes support - these handlers
 * take care of invoking the individual kprobe handlers on p->list
 */
static int __kprobes aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->pre_handler) {
			set_kprobe_instance(kp);
			if (kp->pre_handler(kp, regs))
				return 1;
		}
		reset_kprobe_instance();
	}
	return 0;
}

static void __kprobes aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
					unsigned long flags)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->post_handler) {
			set_kprobe_instance(kp);
			kp->post_handler(kp, regs, flags);
			reset_kprobe_instance();
		}
	}
}

static int __kprobes aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
					int trapnr)
{
	struct kprobe *cur = __get_cpu_var(kprobe_instance);

	/*
	 * If we faulted "during" the execution of a user-specified
	 * probe handler, invoke just that probe's fault handler.
	 */
	if (cur && cur->fault_handler) {
		if (cur->fault_handler(cur, regs, trapnr))
			return 1;
	}
	return 0;
}

static int __kprobes aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *cur = __get_cpu_var(kprobe_instance);
	int ret = 0;

	if (cur && cur->break_handler) {
		if (cur->break_handler(cur, regs))
			ret = 1;
	}
	reset_kprobe_instance();
	return ret;
}
/* Walks the list and increments nmissed count for multiprobe case */
void __kprobes kprobes_inc_nmissed_count(struct kprobe *p)
{
	struct kprobe *kp;

	if (p->pre_handler != aggr_pre_handler) {
		p->nmissed++;
	} else {
		list_for_each_entry_rcu(kp, &p->list, list)
			kp->nmissed++;
	}
}
/* Called with kretprobe_lock held */
struct kretprobe_instance __kprobes *get_free_rp_inst(struct kretprobe *rp)
{
	struct hlist_node *node;
	struct kretprobe_instance *ri;

	hlist_for_each_entry(ri, node, &rp->free_instances, uflist)
		return ri;
	return NULL;
}

/* Called with kretprobe_lock held */
static struct kretprobe_instance __kprobes *get_used_rp_inst(struct kretprobe
							      *rp)
{
	struct hlist_node *node;
	struct kretprobe_instance *ri;

	hlist_for_each_entry(ri, node, &rp->used_instances, uflist)
		return ri;
	return NULL;
}
/* Called with kretprobe_lock held */
void __kprobes add_rp_inst(struct kretprobe_instance *ri)
{
	/*
	 * Remove rp inst off the free list -
	 * Add it back when probed function returns
	 */
	hlist_del(&ri->uflist);

	/* Add rp inst onto table */
	INIT_HLIST_NODE(&ri->hlist);
	hlist_add_head(&ri->hlist,
		       &kretprobe_inst_table[hash_ptr(ri->task, KPROBE_HASH_BITS)]);

	/* Also add this rp inst to the used list. */
	INIT_HLIST_NODE(&ri->uflist);
	hlist_add_head(&ri->uflist, &ri->rp->used_instances);
}

/* Called with kretprobe_lock held */
void __kprobes recycle_rp_inst(struct kretprobe_instance *ri,
			       struct hlist_head *head)
{
	/* remove rp inst off the kretprobe_inst_table */
	hlist_del(&ri->hlist);
	if (ri->rp) {
		/* remove rp inst off the used list */
		hlist_del(&ri->uflist);
		/* put rp inst back onto the free list */
		INIT_HLIST_NODE(&ri->uflist);
		hlist_add_head(&ri->uflist, &ri->rp->free_instances);
	} else
		/* Unregistering */
		hlist_add_head(&ri->hlist, head);
}

struct hlist_head __kprobes *kretprobe_inst_table_head(struct task_struct *tsk)
{
	return &kretprobe_inst_table[hash_ptr(tsk, KPROBE_HASH_BITS)];
}
/*
 * This function is called from finish_task_switch() when task tk becomes dead,
 * so that we can recycle any function-return probe instances associated
 * with this task.  These left-over instances represent probed functions
 * that have been called but will never return.
 */
void __kprobes kprobe_flush_task(struct task_struct *tk)
{
	struct kretprobe_instance *ri;
	struct hlist_head *head, empty_rp;
	struct hlist_node *node, *tmp;
	unsigned long flags = 0;

	INIT_HLIST_HEAD(&empty_rp);
	spin_lock_irqsave(&kretprobe_lock, flags);
	head = kretprobe_inst_table_head(tk);
	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
		if (ri->task == tk)
			recycle_rp_inst(ri, &empty_rp);
	}
	spin_unlock_irqrestore(&kretprobe_lock, flags);

	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
}

static inline void free_rp_inst(struct kretprobe *rp)
{
	struct kretprobe_instance *ri;

	while ((ri = get_free_rp_inst(rp)) != NULL) {
		hlist_del(&ri->uflist);
		kfree(ri);
	}
}
/*
 * Keep all fields in the kprobe consistent.
 */
static inline void copy_kprobe(struct kprobe *old_p, struct kprobe *p)
{
	memcpy(&p->opcode, &old_p->opcode, sizeof(kprobe_opcode_t));
	memcpy(&p->ainsn, &old_p->ainsn, sizeof(struct arch_specific_insn));
}

/*
 * Add the new probe to old_p->list.  Fail if this is the
 * second jprobe at the address - two jprobes can't coexist.
 */
static int __kprobes add_new_kprobe(struct kprobe *old_p, struct kprobe *p)
{
	if (p->break_handler) {
		if (old_p->break_handler)
			return -EEXIST;
		list_add_tail_rcu(&p->list, &old_p->list);
		old_p->break_handler = aggr_break_handler;
	} else
		list_add_rcu(&p->list, &old_p->list);
	if (p->post_handler && !old_p->post_handler)
		old_p->post_handler = aggr_post_handler;
	return 0;
}
/*
 * Fill in the required fields of the "manager kprobe".  Replace the
 * earlier kprobe in the hlist with the manager kprobe.
 */
static inline void add_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
{
	copy_kprobe(p, ap);
	flush_insn_slot(ap);
	ap->addr = p->addr;
	ap->pre_handler = aggr_pre_handler;
	ap->fault_handler = aggr_fault_handler;
	if (p->post_handler)
		ap->post_handler = aggr_post_handler;
	if (p->break_handler)
		ap->break_handler = aggr_break_handler;

	INIT_LIST_HEAD(&ap->list);
	list_add_rcu(&p->list, &ap->list);

	hlist_replace_rcu(&p->hlist, &ap->hlist);
}

/*
 * This is the second or subsequent kprobe at the address - handle
 * the intricacies.
 */
static int __kprobes register_aggr_kprobe(struct kprobe *old_p,
					  struct kprobe *p)
{
	int ret = 0;
	struct kprobe *ap;

	if (old_p->pre_handler == aggr_pre_handler) {
		copy_kprobe(old_p, p);
		ret = add_new_kprobe(old_p, p);
	} else {
		ap = kzalloc(sizeof(struct kprobe), GFP_KERNEL);
		if (!ap)
			return -ENOMEM;
		add_aggr_kprobe(ap, old_p);
		copy_kprobe(ap, p);
		ret = add_new_kprobe(ap, p);
	}
	return ret;
}
static int __kprobes in_kprobes_functions(unsigned long addr)
{
	if (addr >= (unsigned long)__kprobes_text_start
	    && addr < (unsigned long)__kprobes_text_end)
		return -EINVAL;
	return 0;
}
static int __kprobes __register_kprobe(struct kprobe *p,
				       unsigned long called_from)
{
	int ret = 0;
	struct kprobe *old_p;
	struct module *probed_mod;

	/*
	 * If we have a symbol_name argument, look it up and add the
	 * offset to the address.  That way the addr field can either
	 * be global or relative to a symbol.
	 */
	if (p->symbol_name) {
		if (p->addr)
			return -EINVAL;
		kprobe_lookup_name(p->symbol_name, p->addr);
	}

	if (!p->addr)
		return -EINVAL;
	p->addr = (kprobe_opcode_t *)(((char *)p->addr) + p->offset);

	if (!kernel_text_address((unsigned long) p->addr) ||
	    in_kprobes_functions((unsigned long) p->addr))
		return -EINVAL;

	p->mod_refcounted = 0;
	/* Check whether we are probing a module */
	if ((probed_mod = module_text_address((unsigned long) p->addr))) {
		struct module *calling_mod = module_text_address(called_from);
		/*
		 * We must allow modules to probe themselves and in this
		 * case avoid incrementing the module refcount, so as to
		 * allow unloading of self-probing modules.
		 */
		if (calling_mod && (calling_mod != probed_mod)) {
			if (unlikely(!try_module_get(probed_mod)))
				return -EINVAL;
			p->mod_refcounted = 1;
		} else
			probed_mod = NULL;
	}

	p->nmissed = 0;
	mutex_lock(&kprobe_mutex);
	old_p = get_kprobe(p->addr);
	if (old_p) {
		ret = register_aggr_kprobe(old_p, p);
		if (!ret)
			atomic_inc(&kprobe_count);
		goto out;
	}

	if ((ret = arch_prepare_kprobe(p)) != 0)
		goto out;

	INIT_HLIST_NODE(&p->hlist);
	hlist_add_head_rcu(&p->hlist,
			   &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);

	if (atomic_add_return(1, &kprobe_count) ==
	    (ARCH_INACTIVE_KPROBE_COUNT + 1))
		register_page_fault_notifier(&kprobe_page_fault_nb);

	arch_arm_kprobe(p);

out:
	mutex_unlock(&kprobe_mutex);

	if (ret && probed_mod)
		module_put(probed_mod);
	return ret;
}
int __kprobes register_kprobe(struct kprobe *p)
{
	return __register_kprobe(p,
				 (unsigned long)__builtin_return_address(0));
}
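
/*
 * Illustrative usage sketch, not part of this file: a minimal module
 * registering a kprobe by symbol name.  "do_fork" and the example_*
 * names below are placeholders for illustration only.
 */
#if 0
static int example_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	printk(KERN_INFO "kprobe hit at %p\n", p->addr);
	return 0;	/* 0 == proceed with single-stepping the insn copy */
}

static struct kprobe example_kp = {
	.symbol_name	= "do_fork",	/* resolved via kprobe_lookup_name() */
	.pre_handler	= example_pre_handler,
};

static int __init example_init(void)
{
	return register_kprobe(&example_kp);
}

static void __exit example_exit(void)
{
	unregister_kprobe(&example_kp);
}

module_init(example_init);
module_exit(example_exit);
#endif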
void __kprobes unregister_kprobe(struct kprobe *p)
{
	struct module *mod;
	struct kprobe *old_p, *list_p;
	int cleanup_p;

	mutex_lock(&kprobe_mutex);
	old_p = get_kprobe(p->addr);
	if (unlikely(!old_p)) {
		mutex_unlock(&kprobe_mutex);
		return;
	}
	if (p != old_p) {
		list_for_each_entry_rcu(list_p, &old_p->list, list)
			if (list_p == p)
				/* kprobe p is a valid probe */
				goto valid_p;
		mutex_unlock(&kprobe_mutex);
		return;
	}
valid_p:
	if ((old_p == p) || ((old_p->pre_handler == aggr_pre_handler) &&
	    (p->list.next == &old_p->list) && (p->list.prev == &old_p->list))) {
		/* Only probe on the hash list */
		arch_disarm_kprobe(p);
		hlist_del_rcu(&old_p->hlist);
		cleanup_p = 1;
	} else {
		list_del_rcu(&p->list);
		cleanup_p = 0;
	}

	mutex_unlock(&kprobe_mutex);

	synchronize_sched();
	if (p->mod_refcounted &&
	    (mod = module_text_address((unsigned long)p->addr)))
		module_put(mod);

	if (cleanup_p) {
		if (p != old_p) {
			list_del_rcu(&p->list);
			kfree(old_p);
		}
		arch_remove_kprobe(p);
	} else {
		mutex_lock(&kprobe_mutex);
		if (p->break_handler)
			old_p->break_handler = NULL;
		if (p->post_handler) {
			list_for_each_entry_rcu(list_p, &old_p->list, list) {
				if (list_p->post_handler) {
					cleanup_p = 2;
					break;
				}
			}
			if (cleanup_p == 0)
				old_p->post_handler = NULL;
		}
		mutex_unlock(&kprobe_mutex);
	}

	/* Call unregister_page_fault_notifier() if no probes are active */
	mutex_lock(&kprobe_mutex);
	if (atomic_add_return(-1, &kprobe_count) ==
	    ARCH_INACTIVE_KPROBE_COUNT)
		unregister_page_fault_notifier(&kprobe_page_fault_nb);
	mutex_unlock(&kprobe_mutex);
}
static struct notifier_block kprobe_exceptions_nb = {
	.notifier_call = kprobe_exceptions_notify,
	.priority = 0x7fffffff /* we need to be notified first */
};

int __kprobes register_jprobe(struct jprobe *jp)
{
	/* Todo: Verify probepoint is a function entry point */
	jp->kp.pre_handler = setjmp_pre_handler;
	jp->kp.break_handler = longjmp_break_handler;

	return __register_kprobe(&jp->kp,
				 (unsigned long)__builtin_return_address(0));
}

void __kprobes unregister_jprobe(struct jprobe *jp)
{
	unregister_kprobe(&jp->kp);
}
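
/*
 * Illustrative jprobe sketch, not part of this file.  The entry mirror
 * must share the probed function's exact signature and end with
 * jprobe_return().  The do_fork() prototype shown here is an assumption
 * for illustration; check the running kernel's actual signature.
 */
#if 0
static long jexample_do_fork(unsigned long clone_flags,
			     unsigned long stack_start, struct pt_regs *regs,
			     unsigned long stack_size,
			     int __user *parent_tidptr,
			     int __user *child_tidptr)
{
	printk(KERN_INFO "jprobe: clone_flags = 0x%lx\n", clone_flags);
	jprobe_return();	/* mandatory; control never falls through */
	return 0;		/* unreachable, satisfies the compiler */
}

static struct jprobe example_jp = {
	.entry	= JPROBE_ENTRY(jexample_do_fork),
	.kp	= { .symbol_name = "do_fork" },
};

/* register_jprobe(&example_jp) at init, unregister_jprobe() at exit */
#endif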
#ifdef ARCH_SUPPORTS_KRETPROBES

/*
 * This kprobe pre_handler is registered with every kretprobe.  When the
 * probe hits, it will set up the return probe.
 */
static int __kprobes pre_handler_kretprobe(struct kprobe *p,
					   struct pt_regs *regs)
{
	struct kretprobe *rp = container_of(p, struct kretprobe, kp);
	unsigned long flags = 0;

	/* TODO: consider swapping the RA only after the last pre_handler has fired */
	spin_lock_irqsave(&kretprobe_lock, flags);
	arch_prepare_kretprobe(rp, regs);
	spin_unlock_irqrestore(&kretprobe_lock, flags);
	return 0;
}

int __kprobes register_kretprobe(struct kretprobe *rp)
{
	int ret = 0;
	struct kretprobe_instance *inst;
	int i;

	rp->kp.pre_handler = pre_handler_kretprobe;
	rp->kp.post_handler = NULL;
	rp->kp.fault_handler = NULL;
	rp->kp.break_handler = NULL;

	/* Pre-allocate memory for max kretprobe instances */
	if (rp->maxactive <= 0) {
#ifdef CONFIG_PREEMPT
		rp->maxactive = max(10, 2 * NR_CPUS);
#else
		rp->maxactive = NR_CPUS;
#endif
	}
	INIT_HLIST_HEAD(&rp->used_instances);
	INIT_HLIST_HEAD(&rp->free_instances);
	for (i = 0; i < rp->maxactive; i++) {
		inst = kmalloc(sizeof(struct kretprobe_instance), GFP_KERNEL);
		if (inst == NULL) {
			free_rp_inst(rp);
			return -ENOMEM;
		}
		INIT_HLIST_NODE(&inst->uflist);
		hlist_add_head(&inst->uflist, &rp->free_instances);
	}

	rp->nmissed = 0;
	/* Establish function entry probe point */
	if ((ret = __register_kprobe(&rp->kp,
				     (unsigned long)__builtin_return_address(0))) != 0)
		free_rp_inst(rp);
	return ret;
}
#else /* ARCH_SUPPORTS_KRETPROBES */

int __kprobes register_kretprobe(struct kretprobe *rp)
{
	return -ENOSYS;
}

static int __kprobes pre_handler_kretprobe(struct kprobe *p,
					   struct pt_regs *regs)
{
	return 0;
}

#endif /* ARCH_SUPPORTS_KRETPROBES */

void __kprobes unregister_kretprobe(struct kretprobe *rp)
{
	unsigned long flags;
	struct kretprobe_instance *ri;

	unregister_kprobe(&rp->kp);
	/* No race here */
	spin_lock_irqsave(&kretprobe_lock, flags);
	while ((ri = get_used_rp_inst(rp)) != NULL) {
		ri->rp = NULL;
		hlist_del(&ri->uflist);
	}
	spin_unlock_irqrestore(&kretprobe_lock, flags);
	free_rp_inst(rp);
}
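
/*
 * Illustrative kretprobe sketch, not part of this file; the example_*
 * names and the probed symbol are placeholders for illustration.
 */
#if 0
static int example_ret_handler(struct kretprobe_instance *ri,
			       struct pt_regs *regs)
{
	/* ri->ret_addr holds the original return address we trampolined */
	printk(KERN_INFO "%s returned to %p\n",
	       ri->rp->kp.symbol_name, ri->ret_addr);
	return 0;
}

static struct kretprobe example_rp = {
	.handler	= example_ret_handler,
	.maxactive	= 20,	/* tolerate up to 20 concurrent activations */
	.kp		= { .symbol_name = "do_fork" },
};

/* register_kretprobe(&example_rp) at init, unregister_kretprobe() at exit */
#endif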
static int __init init_kprobes(void)
{
	int i, err = 0;

	/* FIXME: allocate the probe table, currently defined statically */
	/* initialize all list heads */
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		INIT_HLIST_HEAD(&kprobe_table[i]);
		INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
	}
	atomic_set(&kprobe_count, 0);

	err = arch_init_kprobes();
	if (!err)
		err = register_die_notifier(&kprobe_exceptions_nb);

	return err;
}
#ifdef CONFIG_DEBUG_FS

static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p,
				   const char *sym, int offset, char *modname)
{
	char *kprobe_type;

	if (p->pre_handler == pre_handler_kretprobe)
		kprobe_type = "r";
	else if (p->pre_handler == setjmp_pre_handler)
		kprobe_type = "j";
	else
		kprobe_type = "k";
	if (sym)
		seq_printf(pi, "%p %s %s+0x%x %s\n", p->addr, kprobe_type,
			   sym, offset, (modname ? modname : " "));
	else
		seq_printf(pi, "%p %s %p\n", p->addr, kprobe_type, p->addr);
}
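
/*
 * Given the format strings above, each line of the resulting debugfs
 * "list" file reads as below (addresses are illustrative):
 *
 *	c015d71a k vfs_read+0x0
 *	c011a316 j do_fork+0x0
 *	c03dedc5 r tcp_v4_rcv+0x0
 *
 * The second column is the probe type: k = kprobe, j = jprobe,
 * r = kretprobe.
 */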
static void __kprobes *kprobe_seq_start(struct seq_file *f, loff_t *pos)
{
	return (*pos < KPROBE_TABLE_SIZE) ? pos : NULL;
}

static void __kprobes *kprobe_seq_next(struct seq_file *f, void *v, loff_t *pos)
{
	(*pos)++;
	if (*pos >= KPROBE_TABLE_SIZE)
		return NULL;
	return pos;
}

static void __kprobes kprobe_seq_stop(struct seq_file *f, void *v)
{
	/* Nothing to do */
}

static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p, *kp;
	const char *sym = NULL;
	unsigned int i = *(loff_t *) v;
	unsigned long size, offset = 0;
	char *modname, namebuf[128];

	head = &kprobe_table[i];
	preempt_disable();
	hlist_for_each_entry_rcu(p, node, head, hlist) {
		sym = kallsyms_lookup((unsigned long)p->addr, &size,
				      &offset, &modname, namebuf);
		if (p->pre_handler == aggr_pre_handler) {
			list_for_each_entry_rcu(kp, &p->list, list)
				report_probe(pi, kp, sym, offset, modname);
		} else
			report_probe(pi, p, sym, offset, modname);
	}
	preempt_enable();
	return 0;
}

static struct seq_operations kprobes_seq_ops = {
	.start = kprobe_seq_start,
	.next  = kprobe_seq_next,
	.stop  = kprobe_seq_stop,
	.show  = show_kprobe_addr
};

static int __kprobes kprobes_open(struct inode *inode, struct file *filp)
{
	return seq_open(filp, &kprobes_seq_ops);
}

static struct file_operations debugfs_kprobes_operations = {
	.open    = kprobes_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
};
static int __kprobes debugfs_kprobe_init(void)
{
	struct dentry *dir, *file;

	dir = debugfs_create_dir("kprobes", NULL);
	if (!dir)
		return -ENOMEM;

	file = debugfs_create_file("list", 0444, dir, NULL,
				   &debugfs_kprobes_operations);
	if (!file) {
		debugfs_remove(dir);
		return -ENOMEM;
	}
	return 0;
}

late_initcall(debugfs_kprobe_init);
#endif /* CONFIG_DEBUG_FS */

module_init(init_kprobes);

EXPORT_SYMBOL_GPL(register_kprobe);
EXPORT_SYMBOL_GPL(unregister_kprobe);
EXPORT_SYMBOL_GPL(register_jprobe);
EXPORT_SYMBOL_GPL(unregister_jprobe);
EXPORT_SYMBOL_GPL(jprobe_return);
EXPORT_SYMBOL_GPL(register_kretprobe);
EXPORT_SYMBOL_GPL(unregister_kretprobe);