kprobes.c

/*
 * Kernel Probes (KProbes)
 * kernel/kprobes.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes suggestions from
 *		Rusty Russell).
 * 2004-Aug	Updated by Prasanna S Panchamukhi <prasanna@in.ibm.com> with
 *		hlists and exceptions notifier as suggested by Andi Kleen.
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2004-Sep	Prasanna S Panchamukhi <prasanna@in.ibm.com> Changed Kprobes
 *		exceptions notifier to be first on the priority list.
 * 2005-May	Hien Nguyen <hien@us.ibm.com>, Jim Keniston
 *		<jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> added function-return probes.
 */
#include <linux/kprobes.h>
#include <linux/hash.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/module.h>
#include <linux/moduleloader.h>
#include <linux/kallsyms.h>
#include <linux/freezer.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kdebug.h>

#include <asm-generic/sections.h>
#include <asm/cacheflush.h>
#include <asm/errno.h>
#include <asm/uaccess.h>

#define KPROBE_HASH_BITS 6
#define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)

/*
 * Some oddball architectures like 64-bit powerpc have function descriptors
 * so this must be overridable.
 */
#ifndef kprobe_lookup_name
#define kprobe_lookup_name(name, addr) \
	addr = ((kprobe_opcode_t *)(kallsyms_lookup_name(name)))
#endif
static int kprobes_initialized;
static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];

/* NOTE: change this value only with kprobe_mutex held */
static bool kprobe_enabled;

static DEFINE_MUTEX(kprobe_mutex);	/* Protects kprobe_table */
static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
static struct {
	spinlock_t lock ____cacheline_aligned_in_smp;
} kretprobe_table_locks[KPROBE_TABLE_SIZE];

static spinlock_t *kretprobe_table_lock_ptr(unsigned long hash)
{
	return &(kretprobe_table_locks[hash].lock);
}

/*
 * Normally, functions that we'd want to prohibit kprobes in are marked
 * __kprobes. But there are cases where such functions already belong to
 * a different section (__sched for preempt_schedule).
 *
 * For such cases, we now have a blacklist.
 */
static struct kprobe_blackpoint kprobe_blacklist[] = {
	{"preempt_schedule",},
	{NULL}    /* Terminator */
};
#ifdef __ARCH_WANT_KPROBES_INSN_SLOT
/*
 * kprobe->ainsn.insn points to the copy of the instruction to be
 * single-stepped. x86_64, POWER4 and above have no-exec support and
 * stepping on the instruction on a vmalloced/kmalloced/data page
 * is a recipe for disaster
 */
#define INSNS_PER_PAGE	(PAGE_SIZE/(MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))

struct kprobe_insn_page {
	struct hlist_node hlist;
	kprobe_opcode_t *insns;		/* Page of instruction slots */
	char slot_used[INSNS_PER_PAGE];
	int nused;
	int ngarbage;
};

enum kprobe_slot_state {
	SLOT_CLEAN = 0,
	SLOT_DIRTY = 1,
	SLOT_USED = 2,
};

static DEFINE_MUTEX(kprobe_insn_mutex);	/* Protects kprobe_insn_pages */
static struct hlist_head kprobe_insn_pages;
static int kprobe_garbage_slots;
static int collect_garbage_slots(void);

static int __kprobes check_safety(void)
{
	int ret = 0;
#if defined(CONFIG_PREEMPT) && defined(CONFIG_PM)
	ret = freeze_processes();
	if (ret == 0) {
		struct task_struct *p, *q;
		do_each_thread(p, q) {
			if (p != current && p->state == TASK_RUNNING &&
			    p->pid != 0) {
				printk("Check failed: %s is running\n", p->comm);
				ret = -1;
				goto loop_end;
			}
		} while_each_thread(p, q);
	}
loop_end:
	thaw_processes();
#else
	synchronize_sched();
#endif
	return ret;
}
/**
 * __get_insn_slot() - Find a slot on an executable page for an instruction.
 * We allocate an executable page if there's no room on existing ones.
 */
static kprobe_opcode_t __kprobes *__get_insn_slot(void)
{
	struct kprobe_insn_page *kip;
	struct hlist_node *pos;

retry:
	hlist_for_each_entry(kip, pos, &kprobe_insn_pages, hlist) {
		if (kip->nused < INSNS_PER_PAGE) {
			int i;
			for (i = 0; i < INSNS_PER_PAGE; i++) {
				if (kip->slot_used[i] == SLOT_CLEAN) {
					kip->slot_used[i] = SLOT_USED;
					kip->nused++;
					return kip->insns + (i * MAX_INSN_SIZE);
				}
			}
			/* Surprise!  No unused slots.  Fix kip->nused. */
			kip->nused = INSNS_PER_PAGE;
		}
	}

	/* If there are any garbage slots, collect them and try again. */
	if (kprobe_garbage_slots && collect_garbage_slots() == 0) {
		goto retry;
	}

	/* All out of space.  Need to allocate a new page.  Use slot 0. */
	kip = kmalloc(sizeof(struct kprobe_insn_page), GFP_KERNEL);
	if (!kip)
		return NULL;

	/*
	 * Use module_alloc so this page is within +/- 2GB of where the
	 * kernel image and loaded module images reside. This is required
	 * so x86_64 can correctly handle the %rip-relative fixups.
	 */
	kip->insns = module_alloc(PAGE_SIZE);
	if (!kip->insns) {
		kfree(kip);
		return NULL;
	}
	INIT_HLIST_NODE(&kip->hlist);
	hlist_add_head(&kip->hlist, &kprobe_insn_pages);
	memset(kip->slot_used, SLOT_CLEAN, INSNS_PER_PAGE);
	kip->slot_used[0] = SLOT_USED;
	kip->nused = 1;
	kip->ngarbage = 0;
	return kip->insns;
}

kprobe_opcode_t __kprobes *get_insn_slot(void)
{
	kprobe_opcode_t *ret;
	mutex_lock(&kprobe_insn_mutex);
	ret = __get_insn_slot();
	mutex_unlock(&kprobe_insn_mutex);
	return ret;
}
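/*
 * A minimal sketch (not part of this file) of how architecture code
 * typically pairs these helpers, assuming an arch_specific_insn with an
 * "insn" slot pointer as on x86:
 *
 *	int __kprobes arch_prepare_kprobe(struct kprobe *p)
 *	{
 *		p->ainsn.insn = get_insn_slot();
 *		if (!p->ainsn.insn)
 *			return -ENOMEM;
 *		memcpy(p->ainsn.insn, p->addr,
 *		       MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
 *		return 0;
 *	}
 *
 * and on teardown, arch_remove_kprobe() hands the slot back with
 * free_insn_slot(p->ainsn.insn, 0).
 */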
/* Return 1 if all garbage slots are collected, otherwise 0. */
static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
{
	kip->slot_used[idx] = SLOT_CLEAN;
	kip->nused--;
	if (kip->nused == 0) {
		/*
		 * Page is no longer in use.  Free it unless
		 * it's the last one.  We keep the last one
		 * so as not to have to set it up again the
		 * next time somebody inserts a probe.
		 */
		hlist_del(&kip->hlist);
		if (hlist_empty(&kprobe_insn_pages)) {
			INIT_HLIST_NODE(&kip->hlist);
			hlist_add_head(&kip->hlist,
				       &kprobe_insn_pages);
		} else {
			module_free(NULL, kip->insns);
			kfree(kip);
		}
		return 1;
	}
	return 0;
}

static int __kprobes collect_garbage_slots(void)
{
	struct kprobe_insn_page *kip;
	struct hlist_node *pos, *next;
	int safety;

	/* Ensure no-one is preempted on the garbage slots */
	mutex_unlock(&kprobe_insn_mutex);
	safety = check_safety();
	mutex_lock(&kprobe_insn_mutex);
	if (safety != 0)
		return -EAGAIN;

	hlist_for_each_entry_safe(kip, pos, next, &kprobe_insn_pages, hlist) {
		int i;
		if (kip->ngarbage == 0)
			continue;
		kip->ngarbage = 0;	/* we will collect all garbage slots */
		for (i = 0; i < INSNS_PER_PAGE; i++) {
			if (kip->slot_used[i] == SLOT_DIRTY &&
			    collect_one_slot(kip, i))
				break;
		}
	}
	kprobe_garbage_slots = 0;
	return 0;
}

void __kprobes free_insn_slot(kprobe_opcode_t *slot, int dirty)
{
	struct kprobe_insn_page *kip;
	struct hlist_node *pos;

	mutex_lock(&kprobe_insn_mutex);
	hlist_for_each_entry(kip, pos, &kprobe_insn_pages, hlist) {
		if (kip->insns <= slot &&
		    slot < kip->insns + (INSNS_PER_PAGE * MAX_INSN_SIZE)) {
			int i = (slot - kip->insns) / MAX_INSN_SIZE;
			if (dirty) {
				kip->slot_used[i] = SLOT_DIRTY;
				kip->ngarbage++;
			} else {
				collect_one_slot(kip, i);
			}
			break;
		}
	}

	if (dirty && ++kprobe_garbage_slots > INSNS_PER_PAGE)
		collect_garbage_slots();

	mutex_unlock(&kprobe_insn_mutex);
}
#endif
/* We have preemption disabled, so it is safe to use __ versions */
static inline void set_kprobe_instance(struct kprobe *kp)
{
	__get_cpu_var(kprobe_instance) = kp;
}

static inline void reset_kprobe_instance(void)
{
	__get_cpu_var(kprobe_instance) = NULL;
}

/*
 * This routine is called either:
 *	- under the kprobe_mutex - during kprobe_[un]register()
 *				OR
 *	- with preemption disabled - from arch/xxx/kernel/kprobes.c
 */
struct kprobe __kprobes *get_kprobe(void *addr)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;

	head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
	hlist_for_each_entry_rcu(p, node, head, hlist) {
		if (p->addr == addr)
			return p;
	}
	return NULL;
}
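/*
 * A hedged illustration of the second calling context above: on a
 * breakpoint trap, architecture code disables preemption and asks
 * whether the trapping address is one of ours (sketch modeled on the
 * x86 flow; the dispatch step is elided):
 *
 *	preempt_disable();
 *	p = get_kprobe((void *)instruction_pointer(regs));
 *	if (p)
 *		... dispatch pre_handler, single-step, post_handler ...
 *	else
 *		preempt_enable_no_resched();
 */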
/*
 * Aggregate handlers for multiple kprobes support - these handlers
 * take care of invoking the individual kprobe handlers on p->list
 */
static int __kprobes aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->pre_handler) {
			set_kprobe_instance(kp);
			if (kp->pre_handler(kp, regs))
				return 1;
		}
		reset_kprobe_instance();
	}
	return 0;
}

static void __kprobes aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
					unsigned long flags)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->post_handler) {
			set_kprobe_instance(kp);
			kp->post_handler(kp, regs, flags);
			reset_kprobe_instance();
		}
	}
}

static int __kprobes aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
					int trapnr)
{
	struct kprobe *cur = __get_cpu_var(kprobe_instance);

	/*
	 * if we faulted "during" the execution of a user specified
	 * probe handler, invoke just that probe's fault handler
	 */
	if (cur && cur->fault_handler) {
		if (cur->fault_handler(cur, regs, trapnr))
			return 1;
	}
	return 0;
}

static int __kprobes aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *cur = __get_cpu_var(kprobe_instance);
	int ret = 0;

	if (cur && cur->break_handler) {
		if (cur->break_handler(cur, regs))
			ret = 1;
	}
	reset_kprobe_instance();
	return ret;
}
/* Walks the list and increments nmissed count for multiprobe case */
void __kprobes kprobes_inc_nmissed_count(struct kprobe *p)
{
	struct kprobe *kp;
	if (p->pre_handler != aggr_pre_handler) {
		p->nmissed++;
	} else {
		list_for_each_entry_rcu(kp, &p->list, list)
			kp->nmissed++;
	}
	return;
}

void __kprobes recycle_rp_inst(struct kretprobe_instance *ri,
			       struct hlist_head *head)
{
	struct kretprobe *rp = ri->rp;

	/* remove rp inst off the kretprobe_inst_table */
	hlist_del(&ri->hlist);
	INIT_HLIST_NODE(&ri->hlist);
	if (likely(rp)) {
		spin_lock(&rp->lock);
		hlist_add_head(&ri->hlist, &rp->free_instances);
		spin_unlock(&rp->lock);
	} else
		/* Unregistering */
		hlist_add_head(&ri->hlist, head);
}

void kretprobe_hash_lock(struct task_struct *tsk,
			 struct hlist_head **head, unsigned long *flags)
{
	unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
	spinlock_t *hlist_lock;

	*head = &kretprobe_inst_table[hash];
	hlist_lock = kretprobe_table_lock_ptr(hash);
	spin_lock_irqsave(hlist_lock, *flags);
}

static void kretprobe_table_lock(unsigned long hash, unsigned long *flags)
{
	spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
	spin_lock_irqsave(hlist_lock, *flags);
}

void kretprobe_hash_unlock(struct task_struct *tsk, unsigned long *flags)
{
	unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
	spinlock_t *hlist_lock;

	hlist_lock = kretprobe_table_lock_ptr(hash);
	spin_unlock_irqrestore(hlist_lock, *flags);
}

void kretprobe_table_unlock(unsigned long hash, unsigned long *flags)
{
	spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
	spin_unlock_irqrestore(hlist_lock, *flags);
}
/*
 * This function is called from finish_task_switch when task tk becomes dead,
 * so that we can recycle any function-return probe instances associated
 * with this task. These left over instances represent probed functions
 * that have been called but will never return.
 */
void __kprobes kprobe_flush_task(struct task_struct *tk)
{
	struct kretprobe_instance *ri;
	struct hlist_head *head, empty_rp;
	struct hlist_node *node, *tmp;
	unsigned long hash, flags = 0;

	if (unlikely(!kprobes_initialized))
		/* Early boot.  kretprobe_table_locks not yet initialized. */
		return;

	/*
	 * empty_rp must be initialized before recycle_rp_inst() can
	 * park orphaned instances on it.
	 */
	INIT_HLIST_HEAD(&empty_rp);
	hash = hash_ptr(tk, KPROBE_HASH_BITS);
	head = &kretprobe_inst_table[hash];
	kretprobe_table_lock(hash, &flags);
	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
		if (ri->task == tk)
			recycle_rp_inst(ri, &empty_rp);
	}
	kretprobe_table_unlock(hash, &flags);
	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
}
static inline void free_rp_inst(struct kretprobe *rp)
{
	struct kretprobe_instance *ri;
	struct hlist_node *pos, *next;

	hlist_for_each_entry_safe(ri, pos, next, &rp->free_instances, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
}

static void __kprobes cleanup_rp_inst(struct kretprobe *rp)
{
	unsigned long flags, hash;
	struct kretprobe_instance *ri;
	struct hlist_node *pos, *next;
	struct hlist_head *head;

	/* No race here */
	for (hash = 0; hash < KPROBE_TABLE_SIZE; hash++) {
		kretprobe_table_lock(hash, &flags);
		head = &kretprobe_inst_table[hash];
		hlist_for_each_entry_safe(ri, pos, next, head, hlist) {
			if (ri->rp == rp)
				ri->rp = NULL;
		}
		kretprobe_table_unlock(hash, &flags);
	}
	free_rp_inst(rp);
}
/*
 * Keep all fields in the kprobe consistent
 */
static inline void copy_kprobe(struct kprobe *old_p, struct kprobe *p)
{
	memcpy(&p->opcode, &old_p->opcode, sizeof(kprobe_opcode_t));
	memcpy(&p->ainsn, &old_p->ainsn, sizeof(struct arch_specific_insn));
}

/*
 * Add the new probe to old_p->list. Fail if this is the
 * second jprobe at the address - two jprobes can't coexist
 */
static int __kprobes add_new_kprobe(struct kprobe *old_p, struct kprobe *p)
{
	if (p->break_handler) {
		if (old_p->break_handler)
			return -EEXIST;
		list_add_tail_rcu(&p->list, &old_p->list);
		old_p->break_handler = aggr_break_handler;
	} else
		list_add_rcu(&p->list, &old_p->list);
	if (p->post_handler && !old_p->post_handler)
		old_p->post_handler = aggr_post_handler;
	return 0;
}

/*
 * Fill in the required fields of the "manager kprobe". Replace the
 * earlier kprobe in the hlist with the manager kprobe
 */
static inline void add_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
{
	copy_kprobe(p, ap);
	flush_insn_slot(ap);
	ap->addr = p->addr;
	ap->pre_handler = aggr_pre_handler;
	ap->fault_handler = aggr_fault_handler;
	if (p->post_handler)
		ap->post_handler = aggr_post_handler;
	if (p->break_handler)
		ap->break_handler = aggr_break_handler;

	INIT_LIST_HEAD(&ap->list);
	list_add_rcu(&p->list, &ap->list);

	hlist_replace_rcu(&p->hlist, &ap->hlist);
}

/*
 * This is the second or subsequent kprobe at the address - handle
 * the intricacies
 */
static int __kprobes register_aggr_kprobe(struct kprobe *old_p,
					  struct kprobe *p)
{
	int ret = 0;
	struct kprobe *ap;

	if (old_p->pre_handler == aggr_pre_handler) {
		copy_kprobe(old_p, p);
		ret = add_new_kprobe(old_p, p);
	} else {
		ap = kzalloc(sizeof(struct kprobe), GFP_KERNEL);
		if (!ap)
			return -ENOMEM;
		add_aggr_kprobe(ap, old_p);
		copy_kprobe(ap, p);
		ret = add_new_kprobe(ap, p);
	}
	return ret;
}
static int __kprobes in_kprobes_functions(unsigned long addr)
{
	struct kprobe_blackpoint *kb;

	if (addr >= (unsigned long)__kprobes_text_start &&
	    addr < (unsigned long)__kprobes_text_end)
		return -EINVAL;
	/*
	 * If there exists a kprobe_blacklist, verify and
	 * fail any probe registration in the prohibited area
	 */
	for (kb = kprobe_blacklist; kb->name != NULL; kb++) {
		if (kb->start_addr) {
			if (addr >= kb->start_addr &&
			    addr < (kb->start_addr + kb->range))
				return -EINVAL;
		}
	}
	return 0;
}
/*
 * If we have a symbol_name argument, look it up and add the offset field
 * to it. This way, we can specify a relative address to a symbol.
 */
static kprobe_opcode_t __kprobes *kprobe_addr(struct kprobe *p)
{
	kprobe_opcode_t *addr = p->addr;
	if (p->symbol_name) {
		if (addr)
			return NULL;
		kprobe_lookup_name(p->symbol_name, addr);
	}

	if (!addr)
		return NULL;
	return (kprobe_opcode_t *)(((char *)addr) + p->offset);
}
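/*
 * In other words, a caller may specify the probe point either way, but
 * not both at once.  A hedged sketch of the two equivalent forms,
 * assuming "do_fork" exists in the running kernel's symbol table:
 *
 *	struct kprobe kp1 = {
 *		.symbol_name	= "do_fork",
 *		.offset		= 0,
 *	};
 *	struct kprobe kp2 = {
 *		.addr = (kprobe_opcode_t *)kallsyms_lookup_name("do_fork"),
 *	};
 *
 * Setting both .symbol_name and .addr makes kprobe_addr() return NULL,
 * so registration fails with -EINVAL.
 */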
static int __kprobes __register_kprobe(struct kprobe *p,
				       unsigned long called_from)
{
	int ret = 0;
	struct kprobe *old_p;
	struct module *probed_mod;
	kprobe_opcode_t *addr;

	addr = kprobe_addr(p);
	if (!addr)
		return -EINVAL;
	p->addr = addr;

	preempt_disable();
	if (!__kernel_text_address((unsigned long) p->addr) ||
	    in_kprobes_functions((unsigned long) p->addr)) {
		preempt_enable();
		return -EINVAL;
	}

	p->mod_refcounted = 0;

	/*
	 * Check if we are probing a module.
	 */
	probed_mod = __module_text_address((unsigned long) p->addr);
	if (probed_mod) {
		struct module *calling_mod;
		calling_mod = __module_text_address(called_from);
		/*
		 * We must allow modules to probe themselves and in this case
		 * avoid incrementing the module refcount, so as to allow
		 * unloading of self-probing modules.
		 */
		if (calling_mod != probed_mod) {
			if (unlikely(!try_module_get(probed_mod))) {
				preempt_enable();
				return -EINVAL;
			}
			p->mod_refcounted = 1;
		} else
			probed_mod = NULL;
	}
	preempt_enable();

	p->nmissed = 0;
	INIT_LIST_HEAD(&p->list);
	mutex_lock(&kprobe_mutex);
	old_p = get_kprobe(p->addr);
	if (old_p) {
		ret = register_aggr_kprobe(old_p, p);
		goto out;
	}

	ret = arch_prepare_kprobe(p);
	if (ret)
		goto out;

	INIT_HLIST_NODE(&p->hlist);
	hlist_add_head_rcu(&p->hlist,
			   &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);

	if (kprobe_enabled)
		arch_arm_kprobe(p);

out:
	mutex_unlock(&kprobe_mutex);

	if (ret && probed_mod)
		module_put(probed_mod);
	return ret;
}
/*
 * Unregister a kprobe without a scheduler synchronization.
 */
static int __kprobes __unregister_kprobe_top(struct kprobe *p)
{
	struct kprobe *old_p, *list_p;

	old_p = get_kprobe(p->addr);
	if (unlikely(!old_p))
		return -EINVAL;

	if (p != old_p) {
		list_for_each_entry_rcu(list_p, &old_p->list, list)
			if (list_p == p)
				/* kprobe p is a valid probe */
				goto valid_p;
		return -EINVAL;
	}
valid_p:
	if (old_p == p ||
	    (old_p->pre_handler == aggr_pre_handler &&
	     list_is_singular(&old_p->list))) {
		/*
		 * This is the only probe on the hash list. Disarm it only
		 * if kprobes are enabled - otherwise, the breakpoint would
		 * already have been removed. We save on flushing icache.
		 */
		if (kprobe_enabled)
			arch_disarm_kprobe(p);
		hlist_del_rcu(&old_p->hlist);
	} else {
		if (p->break_handler)
			old_p->break_handler = NULL;
		if (p->post_handler) {
			list_for_each_entry_rcu(list_p, &old_p->list, list) {
				if ((list_p != p) && (list_p->post_handler))
					goto noclean;
			}
			old_p->post_handler = NULL;
		}
noclean:
		list_del_rcu(&p->list);
	}
	return 0;
}

static void __kprobes __unregister_kprobe_bottom(struct kprobe *p)
{
	struct module *mod;
	struct kprobe *old_p;

	if (p->mod_refcounted) {
		/*
		 * Since we've already incremented refcount,
		 * we don't need to disable preemption.
		 */
		mod = module_text_address((unsigned long)p->addr);
		if (mod)
			module_put(mod);
	}

	if (list_empty(&p->list) || list_is_singular(&p->list)) {
		if (!list_empty(&p->list)) {
			/* "p" is the last child of an aggr_kprobe */
			old_p = list_entry(p->list.next, struct kprobe, list);
			list_del(&p->list);
			kfree(old_p);
		}
		arch_remove_kprobe(p);
	}
}
static int __register_kprobes(struct kprobe **kps, int num,
			      unsigned long called_from)
{
	int i, ret = 0;

	if (num <= 0)
		return -EINVAL;
	for (i = 0; i < num; i++) {
		ret = __register_kprobe(kps[i], called_from);
		if (ret < 0) {
			if (i > 0)
				unregister_kprobes(kps, i);
			break;
		}
	}
	return ret;
}

/*
 * Registration and unregistration functions for kprobe.
 */
int __kprobes register_kprobe(struct kprobe *p)
{
	return __register_kprobes(&p, 1,
				  (unsigned long)__builtin_return_address(0));
}
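/*
 * A minimal usage sketch (the handler below is hypothetical, not part
 * of this file), in the style of Documentation/kprobes.txt:
 *
 *	static int my_pre(struct kprobe *p, struct pt_regs *regs)
 *	{
 *		printk(KERN_INFO "kprobe hit at %p\n", p->addr);
 *		return 0;
 *	}
 *
 *	static struct kprobe my_kp = {
 *		.symbol_name	= "do_fork",
 *		.pre_handler	= my_pre,
 *	};
 *
 *	ret = register_kprobe(&my_kp);
 *	...
 *	unregister_kprobe(&my_kp);
 */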
void __kprobes unregister_kprobe(struct kprobe *p)
{
	unregister_kprobes(&p, 1);
}

int __kprobes register_kprobes(struct kprobe **kps, int num)
{
	return __register_kprobes(kps, num,
				  (unsigned long)__builtin_return_address(0));
}

void __kprobes unregister_kprobes(struct kprobe **kps, int num)
{
	int i;

	if (num <= 0)
		return;
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < num; i++)
		if (__unregister_kprobe_top(kps[i]) < 0)
			kps[i]->addr = NULL;
	mutex_unlock(&kprobe_mutex);

	synchronize_sched();
	for (i = 0; i < num; i++)
		if (kps[i]->addr)
			__unregister_kprobe_bottom(kps[i]);
}
static struct notifier_block kprobe_exceptions_nb = {
	.notifier_call = kprobe_exceptions_notify,
	.priority = 0x7fffffff /* we need to be notified first */
};

unsigned long __weak arch_deref_entry_point(void *entry)
{
	return (unsigned long)entry;
}

static int __register_jprobes(struct jprobe **jps, int num,
			      unsigned long called_from)
{
	struct jprobe *jp;
	int ret = 0, i;

	if (num <= 0)
		return -EINVAL;
	for (i = 0; i < num; i++) {
		unsigned long addr;
		jp = jps[i];
		addr = arch_deref_entry_point(jp->entry);

		if (!kernel_text_address(addr))
			ret = -EINVAL;
		else {
			/* Todo: Verify probepoint is a function entry point */
			jp->kp.pre_handler = setjmp_pre_handler;
			jp->kp.break_handler = longjmp_break_handler;
			ret = __register_kprobe(&jp->kp, called_from);
		}
		if (ret < 0) {
			if (i > 0)
				unregister_jprobes(jps, i);
			break;
		}
	}
	return ret;
}

int __kprobes register_jprobe(struct jprobe *jp)
{
	return __register_jprobes(&jp, 1,
				  (unsigned long)__builtin_return_address(0));
}

void __kprobes unregister_jprobe(struct jprobe *jp)
{
	unregister_jprobes(&jp, 1);
}

int __kprobes register_jprobes(struct jprobe **jps, int num)
{
	return __register_jprobes(jps, num,
				  (unsigned long)__builtin_return_address(0));
}

void __kprobes unregister_jprobes(struct jprobe **jps, int num)
{
	int i;

	if (num <= 0)
		return;
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < num; i++)
		if (__unregister_kprobe_top(&jps[i]->kp) < 0)
			jps[i]->kp.addr = NULL;
	mutex_unlock(&kprobe_mutex);

	synchronize_sched();
	for (i = 0; i < num; i++) {
		if (jps[i]->kp.addr)
			__unregister_kprobe_bottom(&jps[i]->kp);
	}
}
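/*
 * A hedged jprobe usage sketch (names are illustrative, after
 * Documentation/kprobes.txt): the entry function must mirror the
 * probed function's signature and must end in jprobe_return().
 *
 *	static long my_do_fork(unsigned long clone_flags,
 *			       unsigned long stack_start,
 *			       struct pt_regs *regs,
 *			       unsigned long stack_size,
 *			       int __user *parent_tidptr,
 *			       int __user *child_tidptr)
 *	{
 *		printk(KERN_INFO "do_fork: flags=%lx\n", clone_flags);
 *		jprobe_return();
 *		return 0;	(never reached)
 *	}
 *
 *	static struct jprobe my_jp = {
 *		.entry		= my_do_fork,
 *		.kp.symbol_name	= "do_fork",
 *	};
 *
 *	ret = register_jprobe(&my_jp);
 */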
#ifdef CONFIG_KRETPROBES
/*
 * This kprobe pre_handler is registered with every kretprobe. When the
 * probe hits, it will set up the return probe.
 */
static int __kprobes pre_handler_kretprobe(struct kprobe *p,
					   struct pt_regs *regs)
{
	struct kretprobe *rp = container_of(p, struct kretprobe, kp);
	unsigned long hash, flags = 0;
	struct kretprobe_instance *ri;

	/* TODO: consider swapping the return address only after the last
	   pre_handler has fired */
	hash = hash_ptr(current, KPROBE_HASH_BITS);
	spin_lock_irqsave(&rp->lock, flags);
	if (!hlist_empty(&rp->free_instances)) {
		ri = hlist_entry(rp->free_instances.first,
				 struct kretprobe_instance, hlist);
		hlist_del(&ri->hlist);
		spin_unlock_irqrestore(&rp->lock, flags);

		ri->rp = rp;
		ri->task = current;

		if (rp->entry_handler && rp->entry_handler(ri, regs)) {
			/*
			 * The entry_handler declined this hit; the lock was
			 * already dropped above, so re-take it just long
			 * enough to return ri to the free list.
			 */
			spin_lock_irqsave(&rp->lock, flags);
			hlist_add_head(&ri->hlist, &rp->free_instances);
			spin_unlock_irqrestore(&rp->lock, flags);
			return 0;
		}

		arch_prepare_kretprobe(ri, regs);

		/* XXX(hch): why is there no hlist_move_head? */
		INIT_HLIST_NODE(&ri->hlist);
		kretprobe_table_lock(hash, &flags);
		hlist_add_head(&ri->hlist, &kretprobe_inst_table[hash]);
		kretprobe_table_unlock(hash, &flags);
	} else {
		rp->nmissed++;
		spin_unlock_irqrestore(&rp->lock, flags);
	}
	return 0;
}
static int __kprobes __register_kretprobe(struct kretprobe *rp,
					  unsigned long called_from)
{
	int ret = 0;
	struct kretprobe_instance *inst;
	int i;
	void *addr;

	if (kretprobe_blacklist_size) {
		addr = kprobe_addr(&rp->kp);
		if (!addr)
			return -EINVAL;

		for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
			if (kretprobe_blacklist[i].addr == addr)
				return -EINVAL;
		}
	}

	rp->kp.pre_handler = pre_handler_kretprobe;
	rp->kp.post_handler = NULL;
	rp->kp.fault_handler = NULL;
	rp->kp.break_handler = NULL;

	/* Pre-allocate memory for max kretprobe instances */
	if (rp->maxactive <= 0) {
#ifdef CONFIG_PREEMPT
		rp->maxactive = max(10, 2 * NR_CPUS);
#else
		rp->maxactive = NR_CPUS;
#endif
	}
	spin_lock_init(&rp->lock);
	INIT_HLIST_HEAD(&rp->free_instances);
	for (i = 0; i < rp->maxactive; i++) {
		inst = kmalloc(sizeof(struct kretprobe_instance) +
			       rp->data_size, GFP_KERNEL);
		if (inst == NULL) {
			free_rp_inst(rp);
			return -ENOMEM;
		}
		INIT_HLIST_NODE(&inst->hlist);
		hlist_add_head(&inst->hlist, &rp->free_instances);
	}

	rp->nmissed = 0;
	/* Establish function entry probe point */
	ret = __register_kprobe(&rp->kp, called_from);
	if (ret != 0)
		free_rp_inst(rp);
	return ret;
}
static int __register_kretprobes(struct kretprobe **rps, int num,
				 unsigned long called_from)
{
	int ret = 0, i;

	if (num <= 0)
		return -EINVAL;
	for (i = 0; i < num; i++) {
		ret = __register_kretprobe(rps[i], called_from);
		if (ret < 0) {
			if (i > 0)
				unregister_kretprobes(rps, i);
			break;
		}
	}
	return ret;
}

int __kprobes register_kretprobe(struct kretprobe *rp)
{
	return __register_kretprobes(&rp, 1,
				     (unsigned long)__builtin_return_address(0));
}

void __kprobes unregister_kretprobe(struct kretprobe *rp)
{
	unregister_kretprobes(&rp, 1);
}

int __kprobes register_kretprobes(struct kretprobe **rps, int num)
{
	return __register_kretprobes(rps, num,
				     (unsigned long)__builtin_return_address(0));
}

void __kprobes unregister_kretprobes(struct kretprobe **rps, int num)
{
	int i;

	if (num <= 0)
		return;
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < num; i++)
		if (__unregister_kprobe_top(&rps[i]->kp) < 0)
			rps[i]->kp.addr = NULL;
	mutex_unlock(&kprobe_mutex);

	synchronize_sched();
	for (i = 0; i < num; i++) {
		if (rps[i]->kp.addr) {
			__unregister_kprobe_bottom(&rps[i]->kp);
			cleanup_rp_inst(rps[i]);
		}
	}
}
#else /* CONFIG_KRETPROBES */
int __kprobes register_kretprobe(struct kretprobe *rp)
{
	return -ENOSYS;
}

int __kprobes register_kretprobes(struct kretprobe **rps, int num)
{
	return -ENOSYS;
}

void __kprobes unregister_kretprobe(struct kretprobe *rp)
{
}

void __kprobes unregister_kretprobes(struct kretprobe **rps, int num)
{
}

static int __kprobes pre_handler_kretprobe(struct kprobe *p,
					   struct pt_regs *regs)
{
	return 0;
}
#endif /* CONFIG_KRETPROBES */
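/*
 * A hedged kretprobe usage sketch (handler name is illustrative): the
 * handler runs when the probed function returns, with regs reflecting
 * the return state.
 *
 *	static int my_ret_handler(struct kretprobe_instance *ri,
 *				  struct pt_regs *regs)
 *	{
 *		printk(KERN_INFO "%s returned %ld\n",
 *		       ri->rp->kp.symbol_name, regs_return_value(regs));
 *		return 0;
 *	}
 *
 *	static struct kretprobe my_rp = {
 *		.handler	= my_ret_handler,
 *		.kp.symbol_name	= "do_fork",
 *		.maxactive	= 20,
 *	};
 *
 *	ret = register_kretprobe(&my_rp);
 */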
static int __init init_kprobes(void)
{
	int i, err = 0;
	unsigned long offset = 0, size = 0;
	char *modname, namebuf[128];
	const char *symbol_name;
	void *addr;
	struct kprobe_blackpoint *kb;

	/* FIXME allocate the probe table, currently defined statically */
	/* initialize all list heads */
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		INIT_HLIST_HEAD(&kprobe_table[i]);
		INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
		spin_lock_init(&(kretprobe_table_locks[i].lock));
	}

	/*
	 * Lookup and populate the kprobe_blacklist.
	 *
	 * Unlike the kretprobe blacklist, we'll need to determine
	 * the range of addresses that belong to the said functions,
	 * since a kprobe need not necessarily be at the beginning
	 * of a function.
	 */
	for (kb = kprobe_blacklist; kb->name != NULL; kb++) {
		kprobe_lookup_name(kb->name, addr);
		if (!addr)
			continue;

		kb->start_addr = (unsigned long)addr;
		symbol_name = kallsyms_lookup(kb->start_addr,
				&size, &offset, &modname, namebuf);
		if (!symbol_name)
			kb->range = 0;
		else
			kb->range = size;
	}

	if (kretprobe_blacklist_size) {
		/* lookup the function address from its name */
		for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
			kprobe_lookup_name(kretprobe_blacklist[i].name,
					   kretprobe_blacklist[i].addr);
			if (!kretprobe_blacklist[i].addr)
				printk("kretprobe: lookup failed: %s\n",
				       kretprobe_blacklist[i].name);
		}
	}

	/* By default, kprobes are enabled */
	kprobe_enabled = true;

	err = arch_init_kprobes();
	if (!err)
		err = register_die_notifier(&kprobe_exceptions_nb);
	kprobes_initialized = (err == 0);

	if (!err)
		init_test_probes();
	return err;
}
#ifdef CONFIG_DEBUG_FS
static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p,
				   const char *sym, int offset, char *modname)
{
	char *kprobe_type;

	if (p->pre_handler == pre_handler_kretprobe)
		kprobe_type = "r";
	else if (p->pre_handler == setjmp_pre_handler)
		kprobe_type = "j";
	else
		kprobe_type = "k";
	if (sym)
		seq_printf(pi, "%p %s %s+0x%x %s\n", p->addr, kprobe_type,
			   sym, offset, (modname ? modname : " "));
	else
		seq_printf(pi, "%p %s %p\n", p->addr, kprobe_type, p->addr);
}
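/*
 * Each line of /sys/kernel/debug/kprobes/list thus reads, for example
 * (addresses illustrative):
 *
 *	c015d71a k vfs_read+0x0
 *	c011a316 j do_fork+0x0
 *	c03dedc5 r tcp_v4_rcv+0x0
 *
 * where the second column is k (kprobe), j (jprobe) or r (kretprobe).
 */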
static void __kprobes *kprobe_seq_start(struct seq_file *f, loff_t *pos)
{
	return (*pos < KPROBE_TABLE_SIZE) ? pos : NULL;
}

static void __kprobes *kprobe_seq_next(struct seq_file *f, void *v, loff_t *pos)
{
	(*pos)++;
	if (*pos >= KPROBE_TABLE_SIZE)
		return NULL;
	return pos;
}

static void __kprobes kprobe_seq_stop(struct seq_file *f, void *v)
{
	/* Nothing to do */
}

static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p, *kp;
	const char *sym = NULL;
	unsigned int i = *(loff_t *) v;
	unsigned long offset = 0;
	char *modname, namebuf[128];

	head = &kprobe_table[i];
	preempt_disable();
	hlist_for_each_entry_rcu(p, node, head, hlist) {
		sym = kallsyms_lookup((unsigned long)p->addr, NULL,
				      &offset, &modname, namebuf);
		if (p->pre_handler == aggr_pre_handler) {
			list_for_each_entry_rcu(kp, &p->list, list)
				report_probe(pi, kp, sym, offset, modname);
		} else
			report_probe(pi, p, sym, offset, modname);
	}
	preempt_enable();
	return 0;
}

static struct seq_operations kprobes_seq_ops = {
	.start = kprobe_seq_start,
	.next  = kprobe_seq_next,
	.stop  = kprobe_seq_stop,
	.show  = show_kprobe_addr
};

static int __kprobes kprobes_open(struct inode *inode, struct file *filp)
{
	return seq_open(filp, &kprobes_seq_ops);
}

static struct file_operations debugfs_kprobes_operations = {
	.open		= kprobes_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
static void __kprobes enable_all_kprobes(void)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;
	unsigned int i;

	mutex_lock(&kprobe_mutex);

	/* If kprobes are already enabled, just return */
	if (kprobe_enabled)
		goto already_enabled;

	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, node, head, hlist)
			arch_arm_kprobe(p);
	}

	kprobe_enabled = true;
	printk(KERN_INFO "Kprobes globally enabled\n");

already_enabled:
	mutex_unlock(&kprobe_mutex);
	return;
}

static void __kprobes disable_all_kprobes(void)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;
	unsigned int i;

	mutex_lock(&kprobe_mutex);

	/* If kprobes are already disabled, just return */
	if (!kprobe_enabled)
		goto already_disabled;

	kprobe_enabled = false;
	printk(KERN_INFO "Kprobes globally disabled\n");
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, node, head, hlist) {
			if (!arch_trampoline_kprobe(p))
				arch_disarm_kprobe(p);
		}
	}

	mutex_unlock(&kprobe_mutex);
	/* Allow all currently running kprobes to complete */
	synchronize_sched();
	return;

already_disabled:
	mutex_unlock(&kprobe_mutex);
	return;
}
/*
 * XXX: The debugfs bool file interface doesn't allow for callbacks
 * when the bool state is switched. We can reuse that facility when
 * available
 */
static ssize_t read_enabled_file_bool(struct file *file,
	       char __user *user_buf, size_t count, loff_t *ppos)
{
	char buf[3];

	if (kprobe_enabled)
		buf[0] = '1';
	else
		buf[0] = '0';
	buf[1] = '\n';
	buf[2] = 0x00;
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t write_enabled_file_bool(struct file *file,
	       const char __user *user_buf, size_t count, loff_t *ppos)
{
	char buf[32];
	int buf_size;

	buf_size = min(count, (sizeof(buf)-1));
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	switch (buf[0]) {
	case 'y':
	case 'Y':
	case '1':
		enable_all_kprobes();
		break;
	case 'n':
	case 'N':
	case '0':
		disable_all_kprobes();
		break;
	}

	return count;
}
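/*
 * From userspace the switch therefore looks like this (paths assume
 * debugfs is mounted at /sys/kernel/debug):
 *
 *	# echo 0 > /sys/kernel/debug/kprobes/enabled	(disarm all probes)
 *	# echo 1 > /sys/kernel/debug/kprobes/enabled	(re-arm them)
 *	# cat /sys/kernel/debug/kprobes/enabled
 *	1
 */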
static struct file_operations fops_kp = {
	.read	= read_enabled_file_bool,
	.write	= write_enabled_file_bool,
};
static int __kprobes debugfs_kprobe_init(void)
{
	struct dentry *dir, *file;
	unsigned int value = 1;

	dir = debugfs_create_dir("kprobes", NULL);
	if (!dir)
		return -ENOMEM;

	file = debugfs_create_file("list", 0444, dir, NULL,
				   &debugfs_kprobes_operations);
	if (!file) {
		debugfs_remove(dir);
		return -ENOMEM;
	}

	file = debugfs_create_file("enabled", 0600, dir,
				   &value, &fops_kp);
	if (!file) {
		debugfs_remove(dir);
		return -ENOMEM;
	}

	return 0;
}

late_initcall(debugfs_kprobe_init);
#endif /* CONFIG_DEBUG_FS */
module_init(init_kprobes);

EXPORT_SYMBOL_GPL(register_kprobe);
EXPORT_SYMBOL_GPL(unregister_kprobe);
EXPORT_SYMBOL_GPL(register_kprobes);
EXPORT_SYMBOL_GPL(unregister_kprobes);
EXPORT_SYMBOL_GPL(register_jprobe);
EXPORT_SYMBOL_GPL(unregister_jprobe);
EXPORT_SYMBOL_GPL(register_jprobes);
EXPORT_SYMBOL_GPL(unregister_jprobes);
EXPORT_SYMBOL_GPL(jprobe_return);
EXPORT_SYMBOL_GPL(register_kretprobe);
EXPORT_SYMBOL_GPL(unregister_kretprobe);
EXPORT_SYMBOL_GPL(register_kretprobes);
EXPORT_SYMBOL_GPL(unregister_kretprobes);