
/*
 *  Kernel Probes (KProbes)
 *  kernel/kprobes.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes suggestions from
 *		Rusty Russell).
 * 2004-Aug	Updated by Prasanna S Panchamukhi <prasanna@in.ibm.com> with
 *		hlists and exceptions notifier as suggested by Andi Kleen.
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2004-Sep	Prasanna S Panchamukhi <prasanna@in.ibm.com> Changed Kprobes
 *		exceptions notifier to be first on the priority list.
 * 2005-May	Hien Nguyen <hien@us.ibm.com>, Jim Keniston
 *		<jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> added function-return probes.
 */
#include <linux/kprobes.h>
#include <linux/hash.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/module.h>
#include <linux/moduleloader.h>
#include <linux/kallsyms.h>
#include <linux/freezer.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kdebug.h>

#include <asm-generic/sections.h>
#include <asm/cacheflush.h>
#include <asm/errno.h>
#include <asm/uaccess.h>

#define KPROBE_HASH_BITS 6
#define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)

/*
 * Some oddball architectures like 64bit powerpc have function descriptors
 * so this must be overridable.
 */
#ifndef kprobe_lookup_name
#define kprobe_lookup_name(name, addr) \
	addr = ((kprobe_opcode_t *)(kallsyms_lookup_name(name)))
#endif
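
/*
 * Illustrative sketch (not from any particular arch tree): an
 * architecture whose ABI uses function descriptors could override the
 * default above in its asm/kprobes.h, dereferencing the descriptor to
 * reach the real entry point. The descriptor layout assumed here
 * (entry address in the first word) is an assumption for illustration.
 *
 *	#define kprobe_lookup_name(name, addr)				\
 *	do {								\
 *		addr = (kprobe_opcode_t *)kallsyms_lookup_name(name);	\
 *		if (addr)						\
 *			addr = *(kprobe_opcode_t **)addr;		\
 *	} while (0)
 */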
static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];

/* NOTE: change this value only with kprobe_mutex held */
static bool kprobe_enabled;

DEFINE_MUTEX(kprobe_mutex);		/* Protects kprobe_table */
DEFINE_SPINLOCK(kretprobe_lock);	/* Protects kretprobe_inst_table */
static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;

#ifdef __ARCH_WANT_KPROBES_INSN_SLOT
/*
 * kprobe->ainsn.insn points to the copy of the instruction to be
 * single-stepped. x86_64, POWER4 and above have no-exec support and
 * stepping on the instruction on a vmalloced/kmalloced/data page
 * is a recipe for disaster
 */
#define INSNS_PER_PAGE	(PAGE_SIZE/(MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))

struct kprobe_insn_page {
	struct hlist_node hlist;
	kprobe_opcode_t *insns;		/* Page of instruction slots */
	char slot_used[INSNS_PER_PAGE];
	int nused;
	int ngarbage;
};

enum kprobe_slot_state {
	SLOT_CLEAN = 0,
	SLOT_DIRTY = 1,
	SLOT_USED = 2,
};

static struct hlist_head kprobe_insn_pages;
static int kprobe_garbage_slots;
static int collect_garbage_slots(void);

static int __kprobes check_safety(void)
{
	int ret = 0;
#if defined(CONFIG_PREEMPT) && defined(CONFIG_PM)
	ret = freeze_processes();
	if (ret == 0) {
		struct task_struct *p, *q;
		do_each_thread(p, q) {
			if (p != current && p->state == TASK_RUNNING &&
			    p->pid != 0) {
				printk("Check failed: %s is running\n", p->comm);
				ret = -1;
				goto loop_end;
			}
		} while_each_thread(p, q);
	}
loop_end:
	thaw_processes();
#else
	synchronize_sched();
#endif
	return ret;
}
/**
 * get_insn_slot() - Find a slot on an executable page for an instruction.
 * We allocate an executable page if there's no room on existing ones.
 */
kprobe_opcode_t __kprobes *get_insn_slot(void)
{
	struct kprobe_insn_page *kip;
	struct hlist_node *pos;

retry:
	hlist_for_each_entry(kip, pos, &kprobe_insn_pages, hlist) {
		if (kip->nused < INSNS_PER_PAGE) {
			int i;
			for (i = 0; i < INSNS_PER_PAGE; i++) {
				if (kip->slot_used[i] == SLOT_CLEAN) {
					kip->slot_used[i] = SLOT_USED;
					kip->nused++;
					return kip->insns + (i * MAX_INSN_SIZE);
				}
			}
			/* Surprise!  No unused slots.  Fix kip->nused. */
			kip->nused = INSNS_PER_PAGE;
		}
	}

	/* If there are any garbage slots, collect them and try again. */
	if (kprobe_garbage_slots && collect_garbage_slots() == 0) {
		goto retry;
	}
	/* All out of space.  Need to allocate a new page.  Use slot 0. */
	kip = kmalloc(sizeof(struct kprobe_insn_page), GFP_KERNEL);
	if (!kip)
		return NULL;

	/*
	 * Use module_alloc so this page is within +/- 2GB of where the
	 * kernel image and loaded module images reside. This is required
	 * so x86_64 can correctly handle the %rip-relative fixups.
	 */
	kip->insns = module_alloc(PAGE_SIZE);
	if (!kip->insns) {
		kfree(kip);
		return NULL;
	}
	INIT_HLIST_NODE(&kip->hlist);
	hlist_add_head(&kip->hlist, &kprobe_insn_pages);
	memset(kip->slot_used, SLOT_CLEAN, INSNS_PER_PAGE);
	kip->slot_used[0] = SLOT_USED;
	kip->nused = 1;
	kip->ngarbage = 0;
	return kip->insns;
}
/* Return 1 if all garbage slots on the page were collected, otherwise 0. */
static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
{
	kip->slot_used[idx] = SLOT_CLEAN;
	kip->nused--;
	if (kip->nused == 0) {
		/*
		 * Page is no longer in use.  Free it unless
		 * it's the last one.  We keep the last one
		 * so as not to have to set it up again the
		 * next time somebody inserts a probe.
		 */
		hlist_del(&kip->hlist);
		if (hlist_empty(&kprobe_insn_pages)) {
			INIT_HLIST_NODE(&kip->hlist);
			hlist_add_head(&kip->hlist,
				       &kprobe_insn_pages);
		} else {
			module_free(NULL, kip->insns);
			kfree(kip);
		}
		return 1;
	}
	return 0;
}
static int __kprobes collect_garbage_slots(void)
{
	struct kprobe_insn_page *kip;
	struct hlist_node *pos, *next;

	/* Ensure no one is preempted on a garbage slot */
	if (check_safety() != 0)
		return -EAGAIN;

	hlist_for_each_entry_safe(kip, pos, next, &kprobe_insn_pages, hlist) {
		int i;
		if (kip->ngarbage == 0)
			continue;
		kip->ngarbage = 0;	/* we will collect all garbage slots */
		for (i = 0; i < INSNS_PER_PAGE; i++) {
			if (kip->slot_used[i] == SLOT_DIRTY &&
			    collect_one_slot(kip, i))
				break;
		}
	}
	kprobe_garbage_slots = 0;
	return 0;
}
void __kprobes free_insn_slot(kprobe_opcode_t *slot, int dirty)
{
	struct kprobe_insn_page *kip;
	struct hlist_node *pos;

	hlist_for_each_entry(kip, pos, &kprobe_insn_pages, hlist) {
		if (kip->insns <= slot &&
		    slot < kip->insns + (INSNS_PER_PAGE * MAX_INSN_SIZE)) {
			int i = (slot - kip->insns) / MAX_INSN_SIZE;
			if (dirty) {
				kip->slot_used[i] = SLOT_DIRTY;
				kip->ngarbage++;
			} else {
				collect_one_slot(kip, i);
			}
			break;
		}
	}

	if (dirty && ++kprobe_garbage_slots > INSNS_PER_PAGE)
		collect_garbage_slots();
}
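
/*
 * Slot lifecycle, for reference: arch code takes a slot with
 * get_insn_slot() and copies the probed instruction into it; on
 * removal it hands the slot back through free_insn_slot(). A nonzero
 * 'dirty' defers the release: the slot is only marked SLOT_DIRTY and
 * is reclaimed in bulk by collect_garbage_slots() once enough garbage
 * accumulates, because reclaiming must first pass check_safety() to
 * rule out a preempted task still mid-single-step on the page.
 */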
#endif

/* We have preemption disabled, so it is safe to use __ versions */
static inline void set_kprobe_instance(struct kprobe *kp)
{
	__get_cpu_var(kprobe_instance) = kp;
}

static inline void reset_kprobe_instance(void)
{
	__get_cpu_var(kprobe_instance) = NULL;
}

/*
 * This routine is called either:
 *	- under the kprobe_mutex - during kprobe_[un]register()
 *				OR
 *	- with preemption disabled - from arch/xxx/kernel/kprobes.c
 */
struct kprobe __kprobes *get_kprobe(void *addr)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;

	head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
	hlist_for_each_entry_rcu(p, node, head, hlist) {
		if (p->addr == addr)
			return p;
	}
	return NULL;
}
/*
 * Aggregate handlers for multiple kprobes support - these handlers
 * take care of invoking the individual kprobe handlers on p->list
 */
static int __kprobes aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->pre_handler) {
			set_kprobe_instance(kp);
			if (kp->pre_handler(kp, regs))
				return 1;
		}
		reset_kprobe_instance();
	}
	return 0;
}

static void __kprobes aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
					unsigned long flags)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->post_handler) {
			set_kprobe_instance(kp);
			kp->post_handler(kp, regs, flags);
			reset_kprobe_instance();
		}
	}
}

static int __kprobes aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
					int trapnr)
{
	struct kprobe *cur = __get_cpu_var(kprobe_instance);

	/*
	 * if we faulted "during" the execution of a user specified
	 * probe handler, invoke just that probe's fault handler
	 */
	if (cur && cur->fault_handler) {
		if (cur->fault_handler(cur, regs, trapnr))
			return 1;
	}
	return 0;
}

static int __kprobes aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *cur = __get_cpu_var(kprobe_instance);
	int ret = 0;

	if (cur && cur->break_handler) {
		if (cur->break_handler(cur, regs))
			ret = 1;
	}
	reset_kprobe_instance();
	return ret;
}

/* Walks the list and increments nmissed count for multiprobe case */
void __kprobes kprobes_inc_nmissed_count(struct kprobe *p)
{
	struct kprobe *kp;

	if (p->pre_handler != aggr_pre_handler) {
		p->nmissed++;
	} else {
		list_for_each_entry_rcu(kp, &p->list, list)
			kp->nmissed++;
	}
	return;
}
/* Called with kretprobe_lock held */
void __kprobes recycle_rp_inst(struct kretprobe_instance *ri,
			       struct hlist_head *head)
{
	/* remove rp inst off the kretprobe_inst_table */
	hlist_del(&ri->hlist);
	if (ri->rp) {
		/* remove rp inst off the used list */
		hlist_del(&ri->uflist);
		/* put rp inst back onto the free list */
		INIT_HLIST_NODE(&ri->uflist);
		hlist_add_head(&ri->uflist, &ri->rp->free_instances);
	} else
		/* Unregistering */
		hlist_add_head(&ri->hlist, head);
}

struct hlist_head __kprobes *kretprobe_inst_table_head(struct task_struct *tsk)
{
	return &kretprobe_inst_table[hash_ptr(tsk, KPROBE_HASH_BITS)];
}

/*
 * This function is called from finish_task_switch when task tk becomes dead,
 * so that we can recycle any function-return probe instances associated
 * with this task. These left-over instances represent probed functions
 * that have been called but will never return.
 */
void __kprobes kprobe_flush_task(struct task_struct *tk)
{
	struct kretprobe_instance *ri;
	struct hlist_head *head, empty_rp;
	struct hlist_node *node, *tmp;
	unsigned long flags = 0;

	INIT_HLIST_HEAD(&empty_rp);
	spin_lock_irqsave(&kretprobe_lock, flags);
	head = kretprobe_inst_table_head(tk);
	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
		if (ri->task == tk)
			recycle_rp_inst(ri, &empty_rp);
	}
	spin_unlock_irqrestore(&kretprobe_lock, flags);

	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
}

static inline void free_rp_inst(struct kretprobe *rp)
{
	struct kretprobe_instance *ri;
	struct hlist_node *pos, *next;

	hlist_for_each_entry_safe(ri, pos, next, &rp->free_instances, uflist) {
		hlist_del(&ri->uflist);
		kfree(ri);
	}
}

/*
 * Keep all fields in the kprobe consistent
 */
static inline void copy_kprobe(struct kprobe *old_p, struct kprobe *p)
{
	memcpy(&p->opcode, &old_p->opcode, sizeof(kprobe_opcode_t));
	memcpy(&p->ainsn, &old_p->ainsn, sizeof(struct arch_specific_insn));
}
/*
 * Add the new probe to old_p->list. Fail if this is the
 * second jprobe at the address - two jprobes can't coexist
 */
static int __kprobes add_new_kprobe(struct kprobe *old_p, struct kprobe *p)
{
	if (p->break_handler) {
		if (old_p->break_handler)
			return -EEXIST;
		list_add_tail_rcu(&p->list, &old_p->list);
		old_p->break_handler = aggr_break_handler;
	} else
		list_add_rcu(&p->list, &old_p->list);
	if (p->post_handler && !old_p->post_handler)
		old_p->post_handler = aggr_post_handler;
	return 0;
}

/*
 * Fill in the required fields of the "manager kprobe". Replace the
 * earlier kprobe in the hlist with the manager kprobe
 */
static inline void add_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
{
	copy_kprobe(p, ap);
	flush_insn_slot(ap);
	ap->addr = p->addr;
	ap->pre_handler = aggr_pre_handler;
	ap->fault_handler = aggr_fault_handler;
	if (p->post_handler)
		ap->post_handler = aggr_post_handler;
	if (p->break_handler)
		ap->break_handler = aggr_break_handler;

	INIT_LIST_HEAD(&ap->list);
	list_add_rcu(&p->list, &ap->list);

	hlist_replace_rcu(&p->hlist, &ap->hlist);
}

/*
 * This is the second or subsequent kprobe at the address - handle
 * the intricacies
 */
static int __kprobes register_aggr_kprobe(struct kprobe *old_p,
					  struct kprobe *p)
{
	int ret = 0;
	struct kprobe *ap;

	if (old_p->pre_handler == aggr_pre_handler) {
		copy_kprobe(old_p, p);
		ret = add_new_kprobe(old_p, p);
	} else {
		ap = kzalloc(sizeof(struct kprobe), GFP_KERNEL);
		if (!ap)
			return -ENOMEM;
		add_aggr_kprobe(ap, old_p);
		copy_kprobe(ap, p);
		ret = add_new_kprobe(ap, p);
	}
	return ret;
}

static int __kprobes in_kprobes_functions(unsigned long addr)
{
	if (addr >= (unsigned long)__kprobes_text_start &&
	    addr < (unsigned long)__kprobes_text_end)
		return -EINVAL;
	return 0;
}
static int __kprobes __register_kprobe(struct kprobe *p,
				       unsigned long called_from)
{
	int ret = 0;
	struct kprobe *old_p;
	struct module *probed_mod;

	/*
	 * If we have a symbol_name argument look it up,
	 * and add it to the address.  That way the addr
	 * field can either be global or relative to a symbol.
	 */
	if (p->symbol_name) {
		if (p->addr)
			return -EINVAL;
		kprobe_lookup_name(p->symbol_name, p->addr);
	}

	if (!p->addr)
		return -EINVAL;
	p->addr = (kprobe_opcode_t *)(((char *)p->addr) + p->offset);

	if (!kernel_text_address((unsigned long) p->addr) ||
	    in_kprobes_functions((unsigned long) p->addr))
		return -EINVAL;

	p->mod_refcounted = 0;

	/*
	 * Check if we are probing a module.
	 */
	probed_mod = module_text_address((unsigned long) p->addr);
	if (probed_mod) {
		struct module *calling_mod = module_text_address(called_from);
		/*
		 * We must allow modules to probe themselves and in this case
		 * avoid incrementing the module refcount, so as to allow
		 * unloading of self probing modules.
		 */
		if (calling_mod && calling_mod != probed_mod) {
			if (unlikely(!try_module_get(probed_mod)))
				return -EINVAL;
			p->mod_refcounted = 1;
		} else
			probed_mod = NULL;
	}

	p->nmissed = 0;
	mutex_lock(&kprobe_mutex);
	old_p = get_kprobe(p->addr);
	if (old_p) {
		ret = register_aggr_kprobe(old_p, p);
		goto out;
	}

	ret = arch_prepare_kprobe(p);
	if (ret)
		goto out;

	INIT_HLIST_NODE(&p->hlist);
	hlist_add_head_rcu(&p->hlist,
			   &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);

	if (kprobe_enabled)
		arch_arm_kprobe(p);

out:
	mutex_unlock(&kprobe_mutex);

	if (ret && probed_mod)
		module_put(probed_mod);
	return ret;
}

int __kprobes register_kprobe(struct kprobe *p)
{
	return __register_kprobe(p, (unsigned long)__builtin_return_address(0));
}
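
/*
 * Minimal usage sketch (illustrative only, not part of this file):
 * a module registers a probe by symbol name and supplies a
 * pre_handler that runs before the probed instruction executes.
 * "target_func" is a hypothetical probed symbol.
 *
 *	static int my_pre_handler(struct kprobe *kp, struct pt_regs *regs)
 *	{
 *		printk(KERN_INFO "kprobe hit at %p\n", kp->addr);
 *		return 0;	(0 means: single-step the original insn as usual)
 *	}
 *
 *	static struct kprobe my_kp = {
 *		.symbol_name	= "target_func",
 *		.pre_handler	= my_pre_handler,
 *	};
 *
 *	module init:	register_kprobe(&my_kp);
 *	module exit:	unregister_kprobe(&my_kp);
 */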
void __kprobes unregister_kprobe(struct kprobe *p)
{
	struct module *mod;
	struct kprobe *old_p, *list_p;
	int cleanup_p;

	mutex_lock(&kprobe_mutex);
	old_p = get_kprobe(p->addr);
	if (unlikely(!old_p)) {
		mutex_unlock(&kprobe_mutex);
		return;
	}
	if (p != old_p) {
		list_for_each_entry_rcu(list_p, &old_p->list, list)
			if (list_p == p)
				/* kprobe p is a valid probe */
				goto valid_p;
		mutex_unlock(&kprobe_mutex);
		return;
	}
valid_p:
	if (old_p == p ||
	    (old_p->pre_handler == aggr_pre_handler &&
	     p->list.next == &old_p->list && p->list.prev == &old_p->list)) {
		/*
		 * p is the only probe on the hash list.  Disarm only if
		 * kprobes are enabled - otherwise, the breakpoint would
		 * already have been removed.  We save on flushing icache.
		 */
		if (kprobe_enabled)
			arch_disarm_kprobe(p);
		hlist_del_rcu(&old_p->hlist);
		cleanup_p = 1;
	} else {
		list_del_rcu(&p->list);
		cleanup_p = 0;
	}

	mutex_unlock(&kprobe_mutex);

	synchronize_sched();
	if (p->mod_refcounted) {
		mod = module_text_address((unsigned long)p->addr);
		if (mod)
			module_put(mod);
	}

	if (cleanup_p) {
		if (p != old_p) {
			list_del_rcu(&p->list);
			kfree(old_p);
		}
		arch_remove_kprobe(p);
	} else {
		mutex_lock(&kprobe_mutex);
		if (p->break_handler)
			old_p->break_handler = NULL;
		if (p->post_handler) {
			list_for_each_entry_rcu(list_p, &old_p->list, list) {
				if (list_p->post_handler) {
					cleanup_p = 2;
					break;
				}
			}
			if (cleanup_p == 0)
				old_p->post_handler = NULL;
		}
		mutex_unlock(&kprobe_mutex);
	}
}
static struct notifier_block kprobe_exceptions_nb = {
	.notifier_call = kprobe_exceptions_notify,
	.priority = 0x7fffffff /* we need to be notified first */
};

unsigned long __weak arch_deref_entry_point(void *entry)
{
	return (unsigned long)entry;
}

int __kprobes register_jprobe(struct jprobe *jp)
{
	unsigned long addr = arch_deref_entry_point(jp->entry);

	if (!kernel_text_address(addr))
		return -EINVAL;

	/* Todo: Verify probepoint is a function entry point */
	jp->kp.pre_handler = setjmp_pre_handler;
	jp->kp.break_handler = longjmp_break_handler;

	return __register_kprobe(&jp->kp,
				 (unsigned long)__builtin_return_address(0));
}

void __kprobes unregister_jprobe(struct jprobe *jp)
{
	unregister_kprobe(&jp->kp);
}
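
/*
 * Jprobe usage sketch (illustrative only): the entry function must
 * mirror the probed function's signature so it sees the same
 * arguments, and it must end with jprobe_return(), whose breakpoint
 * hands control back through longjmp_break_handler above.
 * "target_func" and its int/int signature are assumptions for the
 * example; some kernel versions wrap the entry in JPROBE_ENTRY().
 *
 *	static int my_jprobe_entry(int arg0, int arg1)
 *	{
 *		printk(KERN_INFO "args: %d %d\n", arg0, arg1);
 *		jprobe_return();	(control never passes this call)
 *		return 0;
 *	}
 *
 *	static struct jprobe my_jp = {
 *		.entry		= my_jprobe_entry,
 *		.kp.symbol_name	= "target_func",
 *	};
 *
 *	register_jprobe(&my_jp);  ...  unregister_jprobe(&my_jp);
 */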
#ifdef ARCH_SUPPORTS_KRETPROBES

/*
 * This kprobe pre_handler is registered with every kretprobe. When the
 * probe hits, it will set up the return probe.
 */
static int __kprobes pre_handler_kretprobe(struct kprobe *p,
					   struct pt_regs *regs)
{
	struct kretprobe *rp = container_of(p, struct kretprobe, kp);
	unsigned long flags = 0;

	/* TODO: consider swapping the RA only after the last pre_handler has fired */
	spin_lock_irqsave(&kretprobe_lock, flags);
	if (!hlist_empty(&rp->free_instances)) {
		struct kretprobe_instance *ri;

		ri = hlist_entry(rp->free_instances.first,
				 struct kretprobe_instance, uflist);
		ri->rp = rp;
		ri->task = current;
		arch_prepare_kretprobe(ri, regs);

		/* XXX(hch): why is there no hlist_move_head? */
		hlist_del(&ri->uflist);
		hlist_add_head(&ri->uflist, &ri->rp->used_instances);
		hlist_add_head(&ri->hlist, kretprobe_inst_table_head(ri->task));
	} else
		rp->nmissed++;
	spin_unlock_irqrestore(&kretprobe_lock, flags);
	return 0;
}
int __kprobes register_kretprobe(struct kretprobe *rp)
{
	int ret = 0;
	struct kretprobe_instance *inst;
	int i;
	void *addr = rp->kp.addr;

	if (kretprobe_blacklist_size) {
		if (addr == NULL)
			kprobe_lookup_name(rp->kp.symbol_name, addr);
		addr += rp->kp.offset;

		for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
			if (kretprobe_blacklist[i].addr == addr)
				return -EINVAL;
		}
	}

	rp->kp.pre_handler = pre_handler_kretprobe;
	rp->kp.post_handler = NULL;
	rp->kp.fault_handler = NULL;
	rp->kp.break_handler = NULL;

	/* Pre-allocate memory for max kretprobe instances */
	if (rp->maxactive <= 0) {
#ifdef CONFIG_PREEMPT
		rp->maxactive = max(10, 2 * NR_CPUS);
#else
		rp->maxactive = NR_CPUS;
#endif
	}
	INIT_HLIST_HEAD(&rp->used_instances);
	INIT_HLIST_HEAD(&rp->free_instances);
	for (i = 0; i < rp->maxactive; i++) {
		inst = kmalloc(sizeof(struct kretprobe_instance), GFP_KERNEL);
		if (inst == NULL) {
			free_rp_inst(rp);
			return -ENOMEM;
		}
		INIT_HLIST_NODE(&inst->uflist);
		hlist_add_head(&inst->uflist, &rp->free_instances);
	}

	rp->nmissed = 0;
	/* Establish function entry probe point */
	if ((ret = __register_kprobe(&rp->kp,
		(unsigned long)__builtin_return_address(0))) != 0)
		free_rp_inst(rp);
	return ret;
}

#else /* ARCH_SUPPORTS_KRETPROBES */

int __kprobes register_kretprobe(struct kretprobe *rp)
{
	return -ENOSYS;
}

static int __kprobes pre_handler_kretprobe(struct kprobe *p,
					   struct pt_regs *regs)
{
	return 0;
}

#endif /* ARCH_SUPPORTS_KRETPROBES */

void __kprobes unregister_kretprobe(struct kretprobe *rp)
{
	unsigned long flags;
	struct kretprobe_instance *ri;
	struct hlist_node *pos, *next;

	unregister_kprobe(&rp->kp);

	/* No race here */
	spin_lock_irqsave(&kretprobe_lock, flags);
	hlist_for_each_entry_safe(ri, pos, next, &rp->used_instances, uflist) {
		ri->rp = NULL;
		hlist_del(&ri->uflist);
	}
	spin_unlock_irqrestore(&kretprobe_lock, flags);
	free_rp_inst(rp);
}
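
/*
 * Kretprobe usage sketch (illustrative only): the handler fires when
 * the probed function returns. "target_func" is a hypothetical
 * symbol, and regs_return_value() stands in for whatever arch helper
 * (or direct register access) yields the return value - an assumption
 * here, not something this file provides.
 *
 *	static int my_ret_handler(struct kretprobe_instance *ri,
 *				  struct pt_regs *regs)
 *	{
 *		printk(KERN_INFO "target_func returned %ld\n",
 *		       regs_return_value(regs));
 *		return 0;
 *	}
 *
 *	static struct kretprobe my_rp = {
 *		.handler	= my_ret_handler,
 *		.kp.symbol_name	= "target_func",
 *		.maxactive	= 20,	(concurrent activations to allow)
 *	};
 *
 *	register_kretprobe(&my_rp);  ...  unregister_kretprobe(&my_rp);
 */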
static int __init init_kprobes(void)
{
	int i, err = 0;

	/* FIXME allocate the probe table, currently defined statically */
	/* initialize all list heads */
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		INIT_HLIST_HEAD(&kprobe_table[i]);
		INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
	}

	if (kretprobe_blacklist_size) {
		/* lookup the function address from its name */
		for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
			kprobe_lookup_name(kretprobe_blacklist[i].name,
					   kretprobe_blacklist[i].addr);
			if (!kretprobe_blacklist[i].addr)
				printk("kretprobe: lookup failed: %s\n",
				       kretprobe_blacklist[i].name);
		}
	}

	/* By default, kprobes are enabled */
	kprobe_enabled = true;

	err = arch_init_kprobes();
	if (!err)
		err = register_die_notifier(&kprobe_exceptions_nb);

	return err;
}
#ifdef CONFIG_DEBUG_FS
static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p,
				   const char *sym, int offset, char *modname)
{
	char *kprobe_type;

	if (p->pre_handler == pre_handler_kretprobe)
		kprobe_type = "r";
	else if (p->pre_handler == setjmp_pre_handler)
		kprobe_type = "j";
	else
		kprobe_type = "k";
	if (sym)
		seq_printf(pi, "%p  %s  %s+0x%x  %s\n", p->addr, kprobe_type,
			   sym, offset, (modname ? modname : " "));
	else
		seq_printf(pi, "%p  %s  %p\n", p->addr, kprobe_type, p->addr);
}

static void __kprobes *kprobe_seq_start(struct seq_file *f, loff_t *pos)
{
	return (*pos < KPROBE_TABLE_SIZE) ? pos : NULL;
}

static void __kprobes *kprobe_seq_next(struct seq_file *f, void *v, loff_t *pos)
{
	(*pos)++;
	if (*pos >= KPROBE_TABLE_SIZE)
		return NULL;
	return pos;
}

static void __kprobes kprobe_seq_stop(struct seq_file *f, void *v)
{
	/* Nothing to do */
}

static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p, *kp;
	const char *sym = NULL;
	unsigned int i = *(loff_t *) v;
	unsigned long offset = 0;
	char *modname, namebuf[128];

	head = &kprobe_table[i];
	preempt_disable();
	hlist_for_each_entry_rcu(p, node, head, hlist) {
		sym = kallsyms_lookup((unsigned long)p->addr, NULL,
				      &offset, &modname, namebuf);
		if (p->pre_handler == aggr_pre_handler) {
			list_for_each_entry_rcu(kp, &p->list, list)
				report_probe(pi, kp, sym, offset, modname);
		} else
			report_probe(pi, p, sym, offset, modname);
	}
	preempt_enable();
	return 0;
}

static struct seq_operations kprobes_seq_ops = {
	.start = kprobe_seq_start,
	.next  = kprobe_seq_next,
	.stop  = kprobe_seq_stop,
	.show  = show_kprobe_addr
};

static int __kprobes kprobes_open(struct inode *inode, struct file *filp)
{
	return seq_open(filp, &kprobes_seq_ops);
}

static struct file_operations debugfs_kprobes_operations = {
	.open    = kprobes_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
};
static void __kprobes enable_all_kprobes(void)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;
	unsigned int i;

	mutex_lock(&kprobe_mutex);

	/* If kprobes are already enabled, just return */
	if (kprobe_enabled)
		goto already_enabled;

	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, node, head, hlist)
			arch_arm_kprobe(p);
	}

	kprobe_enabled = true;
	printk(KERN_INFO "Kprobes globally enabled\n");

already_enabled:
	mutex_unlock(&kprobe_mutex);
	return;
}

static void __kprobes disable_all_kprobes(void)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;
	unsigned int i;

	mutex_lock(&kprobe_mutex);

	/* If kprobes are already disabled, just return */
	if (!kprobe_enabled)
		goto already_disabled;

	kprobe_enabled = false;
	printk(KERN_INFO "Kprobes globally disabled\n");
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, node, head, hlist) {
			if (!arch_trampoline_kprobe(p))
				arch_disarm_kprobe(p);
		}
	}

	mutex_unlock(&kprobe_mutex);
	/* Allow all currently running kprobes to complete */
	synchronize_sched();
	return;

already_disabled:
	mutex_unlock(&kprobe_mutex);
	return;
}
/*
 * XXX: The debugfs bool file interface doesn't allow for callbacks
 * when the bool state is switched. We can reuse that facility when
 * available
 */
static ssize_t read_enabled_file_bool(struct file *file,
	       char __user *user_buf, size_t count, loff_t *ppos)
{
	char buf[3];

	if (kprobe_enabled)
		buf[0] = '1';
	else
		buf[0] = '0';
	buf[1] = '\n';
	buf[2] = 0x00;
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t write_enabled_file_bool(struct file *file,
	       const char __user *user_buf, size_t count, loff_t *ppos)
{
	char buf[32];
	int buf_size;

	buf_size = min(count, (sizeof(buf) - 1));
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	switch (buf[0]) {
	case 'y':
	case 'Y':
	case '1':
		enable_all_kprobes();
		break;
	case 'n':
	case 'N':
	case '0':
		disable_all_kprobes();
		break;
	}

	return count;
}
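
/*
 * Usage note: with CONFIG_DEBUG_FS this switch is exposed as the
 * "enabled" file created below in debugfs_kprobe_init(). Writing
 * 'y'/'Y'/'1' calls enable_all_kprobes() and 'n'/'N'/'0' calls
 * disable_all_kprobes(); with debugfs at its usual mount point:
 *
 *	echo 0 > /sys/kernel/debug/kprobes/enabled
 *	echo 1 > /sys/kernel/debug/kprobes/enabled
 */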
static struct file_operations fops_kp = {
	.read =  read_enabled_file_bool,
	.write = write_enabled_file_bool,
};

static int __kprobes debugfs_kprobe_init(void)
{
	struct dentry *dir, *file;
	unsigned int value = 1;

	dir = debugfs_create_dir("kprobes", NULL);
	if (!dir)
		return -ENOMEM;

	file = debugfs_create_file("list", 0444, dir, NULL,
				   &debugfs_kprobes_operations);
	if (!file) {
		debugfs_remove(dir);
		return -ENOMEM;
	}

	file = debugfs_create_file("enabled", 0600, dir,
				   &value, &fops_kp);
	if (!file) {
		debugfs_remove(dir);
		return -ENOMEM;
	}

	return 0;
}

late_initcall(debugfs_kprobe_init);
#endif /* CONFIG_DEBUG_FS */

module_init(init_kprobes);

EXPORT_SYMBOL_GPL(register_kprobe);
EXPORT_SYMBOL_GPL(unregister_kprobe);
EXPORT_SYMBOL_GPL(register_jprobe);
EXPORT_SYMBOL_GPL(unregister_jprobe);
#ifdef CONFIG_KPROBES
EXPORT_SYMBOL_GPL(jprobe_return);
#endif

#ifdef CONFIG_KPROBES
EXPORT_SYMBOL_GPL(register_kretprobe);
EXPORT_SYMBOL_GPL(unregister_kretprobe);
#endif