/*
 *  Kernel Probes (KProbes)
 *  kernel/kprobes.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes suggestions from
 *		Rusty Russell).
 * 2004-Aug	Updated by Prasanna S Panchamukhi <prasanna@in.ibm.com> with
 *		hlists and exceptions notifier as suggested by Andi Kleen.
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2004-Sep	Prasanna S Panchamukhi <prasanna@in.ibm.com> Changed Kprobes
 *		exceptions notifier to be first on the priority list.
 * 2005-May	Hien Nguyen <hien@us.ibm.com>, Jim Keniston
 *		<jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> added function-return probes.
 */
#include <linux/kprobes.h>
#include <linux/hash.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/module.h>
#include <linux/moduleloader.h>
#include <linux/kallsyms.h>
#include <linux/freezer.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kdebug.h>

#include <asm-generic/sections.h>
#include <asm/cacheflush.h>
#include <asm/errno.h>
#include <asm/uaccess.h>

#define KPROBE_HASH_BITS 6
#define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)
/*
 * Some oddball architectures like 64-bit powerpc have function descriptors,
 * so this must be overridable.
 */
#ifndef kprobe_lookup_name
#define kprobe_lookup_name(name, addr) \
	addr = ((kprobe_opcode_t *)(kallsyms_lookup_name(name)))
#endif

static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];

/* NOTE: change this value only with kprobe_mutex held */
static bool kprobe_enabled;

DEFINE_MUTEX(kprobe_mutex);		/* Protects kprobe_table */
DEFINE_SPINLOCK(kretprobe_lock);	/* Protects kretprobe_inst_table */
static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;

/*
 * Normally, functions that we'd want to prohibit kprobes in are marked
 * __kprobes. But there are cases where such functions already belong to
 * a different section (__sched for preempt_schedule).
 *
 * For such cases, we now have a blacklist.
 */
struct kprobe_blackpoint kprobe_blacklist[] = {
	{"preempt_schedule",},
	{NULL}    /* Terminator */
};

#ifdef __ARCH_WANT_KPROBES_INSN_SLOT
/*
 * kprobe->ainsn.insn points to the copy of the instruction to be
 * single-stepped. x86_64, POWER4 and above have no-exec support, and
 * stepping on the instruction on a vmalloced/kmalloced/data page
 * is a recipe for disaster.
 */
#define INSNS_PER_PAGE	(PAGE_SIZE/(MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))

struct kprobe_insn_page {
	struct hlist_node hlist;
	kprobe_opcode_t *insns;		/* Page of instruction slots */
	char slot_used[INSNS_PER_PAGE];
	int nused;
	int ngarbage;
};

enum kprobe_slot_state {
	SLOT_CLEAN = 0,
	SLOT_DIRTY = 1,
	SLOT_USED = 2,
};
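
/*
 * Slot lifecycle, summarizing the code below: get_insn_slot() moves a
 * slot from SLOT_CLEAN to SLOT_USED; free_insn_slot() either reclaims
 * the slot immediately (back to SLOT_CLEAN) or marks it SLOT_DIRTY when
 * the copied instruction may still be in flight on some CPU, deferring
 * the actual reclaim to collect_garbage_slots().
 */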
static struct hlist_head kprobe_insn_pages;
static int kprobe_garbage_slots;
static int collect_garbage_slots(void);

static int __kprobes check_safety(void)
{
	int ret = 0;
#if defined(CONFIG_PREEMPT) && defined(CONFIG_PM)
	ret = freeze_processes();
	if (ret == 0) {
		struct task_struct *p, *q;
		do_each_thread(p, q) {
			if (p != current && p->state == TASK_RUNNING &&
			    p->pid != 0) {
				printk("Check failed: %s is running\n", p->comm);
				ret = -1;
				goto loop_end;
			}
		} while_each_thread(p, q);
	}
loop_end:
	thaw_processes();
#else
	synchronize_sched();
#endif
	return ret;
}
/**
 * get_insn_slot() - Find a slot on an executable page for an instruction.
 * We allocate an executable page if there's no room on existing ones.
 */
kprobe_opcode_t __kprobes *get_insn_slot(void)
{
	struct kprobe_insn_page *kip;
	struct hlist_node *pos;

 retry:
	hlist_for_each_entry(kip, pos, &kprobe_insn_pages, hlist) {
		if (kip->nused < INSNS_PER_PAGE) {
			int i;
			for (i = 0; i < INSNS_PER_PAGE; i++) {
				if (kip->slot_used[i] == SLOT_CLEAN) {
					kip->slot_used[i] = SLOT_USED;
					kip->nused++;
					return kip->insns + (i * MAX_INSN_SIZE);
				}
			}
			/* Surprise!  No unused slots.  Fix kip->nused. */
			kip->nused = INSNS_PER_PAGE;
		}
	}

	/* If there are any garbage slots, collect them and try again. */
	if (kprobe_garbage_slots && collect_garbage_slots() == 0) {
		goto retry;
	}
	/* All out of space.  Need to allocate a new page.  Use slot 0. */
	kip = kmalloc(sizeof(struct kprobe_insn_page), GFP_KERNEL);
	if (!kip)
		return NULL;

	/*
	 * Use module_alloc so this page is within +/- 2GB of where the
	 * kernel image and loaded module images reside. This is required
	 * so x86_64 can correctly handle the %rip-relative fixups.
	 */
	kip->insns = module_alloc(PAGE_SIZE);
	if (!kip->insns) {
		kfree(kip);
		return NULL;
	}
	INIT_HLIST_NODE(&kip->hlist);
	hlist_add_head(&kip->hlist, &kprobe_insn_pages);
	memset(kip->slot_used, SLOT_CLEAN, INSNS_PER_PAGE);
	kip->slot_used[0] = SLOT_USED;
	kip->nused = 1;
	kip->ngarbage = 0;
	return kip->insns;
}
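
/*
 * For reference, arch code is the consumer of these slots. A minimal
 * sketch of how an arch_prepare_kprobe() implementation typically pairs
 * with get_insn_slot() (illustrative only; real per-arch versions also
 * validate the instruction and fix up things like %rip-relative operands):
 *
 *	int __kprobes arch_prepare_kprobe(struct kprobe *p)
 *	{
 *		p->ainsn.insn = get_insn_slot();
 *		if (!p->ainsn.insn)
 *			return -ENOMEM;
 *		memcpy(p->ainsn.insn, p->addr,
 *		       MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
 *		p->opcode = *p->addr;
 *		return 0;
 *	}
 */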
/* Return 1 if all garbage slots on the page are collected, otherwise 0. */
static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
{
	kip->slot_used[idx] = SLOT_CLEAN;
	kip->nused--;
	if (kip->nused == 0) {
		/*
		 * Page is no longer in use.  Free it unless
		 * it's the last one.  We keep the last one
		 * so as not to have to set it up again the
		 * next time somebody inserts a probe.
		 */
		hlist_del(&kip->hlist);
		if (hlist_empty(&kprobe_insn_pages)) {
			INIT_HLIST_NODE(&kip->hlist);
			hlist_add_head(&kip->hlist,
				       &kprobe_insn_pages);
		} else {
			module_free(NULL, kip->insns);
			kfree(kip);
		}
		return 1;
	}
	return 0;
}

static int __kprobes collect_garbage_slots(void)
{
	struct kprobe_insn_page *kip;
	struct hlist_node *pos, *next;

	/* Ensure no-one is preempted on the garbage slots */
	if (check_safety() != 0)
		return -EAGAIN;

	hlist_for_each_entry_safe(kip, pos, next, &kprobe_insn_pages, hlist) {
		int i;
		if (kip->ngarbage == 0)
			continue;
		kip->ngarbage = 0;	/* we will collect all garbage slots */
		for (i = 0; i < INSNS_PER_PAGE; i++) {
			if (kip->slot_used[i] == SLOT_DIRTY &&
			    collect_one_slot(kip, i))
				break;
		}
	}
	kprobe_garbage_slots = 0;
	return 0;
}

void __kprobes free_insn_slot(kprobe_opcode_t *slot, int dirty)
{
	struct kprobe_insn_page *kip;
	struct hlist_node *pos;

	hlist_for_each_entry(kip, pos, &kprobe_insn_pages, hlist) {
		if (kip->insns <= slot &&
		    slot < kip->insns + (INSNS_PER_PAGE * MAX_INSN_SIZE)) {
			int i = (slot - kip->insns) / MAX_INSN_SIZE;
			if (dirty) {
				kip->slot_used[i] = SLOT_DIRTY;
				kip->ngarbage++;
			} else {
				collect_one_slot(kip, i);
			}
			break;
		}
	}

	if (dirty && ++kprobe_garbage_slots > INSNS_PER_PAGE)
		collect_garbage_slots();
}
#endif

/* We have preemption disabled, so it is safe to use __ versions */
static inline void set_kprobe_instance(struct kprobe *kp)
{
	__get_cpu_var(kprobe_instance) = kp;
}

static inline void reset_kprobe_instance(void)
{
	__get_cpu_var(kprobe_instance) = NULL;
}

/*
 * This routine is called either:
 *	- under the kprobe_mutex - during kprobe_[un]register()
 *				OR
 *	- with preemption disabled - from arch/xxx/kernel/kprobes.c
 */
struct kprobe __kprobes *get_kprobe(void *addr)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;

	head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
	hlist_for_each_entry_rcu(p, node, head, hlist) {
		if (p->addr == addr)
			return p;
	}
	return NULL;
}

/*
 * Aggregate handlers for multiple kprobes support - these handlers
 * take care of invoking the individual kprobe handlers on p->list
 */
static int __kprobes aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->pre_handler) {
			set_kprobe_instance(kp);
			if (kp->pre_handler(kp, regs))
				return 1;
		}
		reset_kprobe_instance();
	}
	return 0;
}

static void __kprobes aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
					unsigned long flags)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->post_handler) {
			set_kprobe_instance(kp);
			kp->post_handler(kp, regs, flags);
			reset_kprobe_instance();
		}
	}
}

static int __kprobes aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
					int trapnr)
{
	struct kprobe *cur = __get_cpu_var(kprobe_instance);

	/*
	 * if we faulted "during" the execution of a user specified
	 * probe handler, invoke just that probe's fault handler
	 */
	if (cur && cur->fault_handler) {
		if (cur->fault_handler(cur, regs, trapnr))
			return 1;
	}
	return 0;
}

static int __kprobes aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *cur = __get_cpu_var(kprobe_instance);
	int ret = 0;

	if (cur && cur->break_handler) {
		if (cur->break_handler(cur, regs))
			ret = 1;
	}
	reset_kprobe_instance();
	return ret;
}

/* Walks the list and increments nmissed count for multiprobe case */
void __kprobes kprobes_inc_nmissed_count(struct kprobe *p)
{
	struct kprobe *kp;
	if (p->pre_handler != aggr_pre_handler) {
		p->nmissed++;
	} else {
		list_for_each_entry_rcu(kp, &p->list, list)
			kp->nmissed++;
	}
	return;
}

/* Called with kretprobe_lock held */
void __kprobes recycle_rp_inst(struct kretprobe_instance *ri,
			       struct hlist_head *head)
{
	/* remove rp inst off the kretprobe_inst_table */
	hlist_del(&ri->hlist);
	if (ri->rp) {
		/* remove rp inst off the used list */
		hlist_del(&ri->uflist);
		/* put rp inst back onto the free list */
		INIT_HLIST_NODE(&ri->uflist);
		hlist_add_head(&ri->uflist, &ri->rp->free_instances);
	} else
		/* Unregistering */
		hlist_add_head(&ri->hlist, head);
}
struct hlist_head __kprobes *kretprobe_inst_table_head(struct task_struct *tsk)
{
	return &kretprobe_inst_table[hash_ptr(tsk, KPROBE_HASH_BITS)];
}

/*
 * This function is called from finish_task_switch when task tk becomes dead,
 * so that we can recycle any function-return probe instances associated
 * with this task. These left over instances represent probed functions
 * that have been called but will never return.
 */
void __kprobes kprobe_flush_task(struct task_struct *tk)
{
	struct kretprobe_instance *ri;
	struct hlist_head *head, empty_rp;
	struct hlist_node *node, *tmp;
	unsigned long flags = 0;

	INIT_HLIST_HEAD(&empty_rp);
	spin_lock_irqsave(&kretprobe_lock, flags);
	head = kretprobe_inst_table_head(tk);
	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
		if (ri->task == tk)
			recycle_rp_inst(ri, &empty_rp);
	}
	spin_unlock_irqrestore(&kretprobe_lock, flags);

	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
}

static inline void free_rp_inst(struct kretprobe *rp)
{
	struct kretprobe_instance *ri;
	struct hlist_node *pos, *next;

	hlist_for_each_entry_safe(ri, pos, next, &rp->free_instances, uflist) {
		hlist_del(&ri->uflist);
		kfree(ri);
	}
}

static void __kprobes cleanup_rp_inst(struct kretprobe *rp)
{
	unsigned long flags;
	struct kretprobe_instance *ri;
	struct hlist_node *pos, *next;

	/* No race here */
	spin_lock_irqsave(&kretprobe_lock, flags);
	hlist_for_each_entry_safe(ri, pos, next, &rp->used_instances, uflist) {
		ri->rp = NULL;
		hlist_del(&ri->uflist);
	}
	spin_unlock_irqrestore(&kretprobe_lock, flags);
	free_rp_inst(rp);
}

/*
 * Keep all fields in the kprobe consistent
 */
static inline void copy_kprobe(struct kprobe *old_p, struct kprobe *p)
{
	memcpy(&p->opcode, &old_p->opcode, sizeof(kprobe_opcode_t));
	memcpy(&p->ainsn, &old_p->ainsn, sizeof(struct arch_specific_insn));
}

/*
 * Add the new probe to old_p->list. Fail if this is the
 * second jprobe at the address - two jprobes can't coexist
 */
static int __kprobes add_new_kprobe(struct kprobe *old_p, struct kprobe *p)
{
	if (p->break_handler) {
		if (old_p->break_handler)
			return -EEXIST;
		list_add_tail_rcu(&p->list, &old_p->list);
		old_p->break_handler = aggr_break_handler;
	} else
		list_add_rcu(&p->list, &old_p->list);
	if (p->post_handler && !old_p->post_handler)
		old_p->post_handler = aggr_post_handler;
	return 0;
}

/*
 * Fill in the required fields of the "manager kprobe". Replace the
 * earlier kprobe in the hlist with the manager kprobe
 */
static inline void add_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
{
	copy_kprobe(p, ap);
	flush_insn_slot(ap);
	ap->addr = p->addr;
	ap->pre_handler = aggr_pre_handler;
	ap->fault_handler = aggr_fault_handler;
	if (p->post_handler)
		ap->post_handler = aggr_post_handler;
	if (p->break_handler)
		ap->break_handler = aggr_break_handler;

	INIT_LIST_HEAD(&ap->list);
	list_add_rcu(&p->list, &ap->list);

	hlist_replace_rcu(&p->hlist, &ap->hlist);
}

/*
 * This is the second or subsequent kprobe at the address - handle
 * the intricacies
 */
static int __kprobes register_aggr_kprobe(struct kprobe *old_p,
					  struct kprobe *p)
{
	int ret = 0;
	struct kprobe *ap;

	if (old_p->pre_handler == aggr_pre_handler) {
		copy_kprobe(old_p, p);
		ret = add_new_kprobe(old_p, p);
	} else {
		ap = kzalloc(sizeof(struct kprobe), GFP_KERNEL);
		if (!ap)
			return -ENOMEM;
		add_aggr_kprobe(ap, old_p);
		copy_kprobe(ap, p);
		ret = add_new_kprobe(ap, p);
	}
	return ret;
}

static int __kprobes in_kprobes_functions(unsigned long addr)
{
	struct kprobe_blackpoint *kb;

	if (addr >= (unsigned long)__kprobes_text_start &&
	    addr < (unsigned long)__kprobes_text_end)
		return -EINVAL;
	/*
	 * If there exists a kprobe_blacklist, verify and
	 * fail any probe registration in the prohibited area
	 */
	for (kb = kprobe_blacklist; kb->name != NULL; kb++) {
		if (kb->start_addr) {
			if (addr >= kb->start_addr &&
			    addr < (kb->start_addr + kb->range))
				return -EINVAL;
		}
	}
	return 0;
}

/*
 * If we have a symbol_name argument, look it up and add the offset field
 * to it. This way, we can specify a relative address to a symbol.
 */
static kprobe_opcode_t __kprobes *kprobe_addr(struct kprobe *p)
{
	kprobe_opcode_t *addr = p->addr;
	if (p->symbol_name) {
		if (addr)
			return NULL;
		kprobe_lookup_name(p->symbol_name, addr);
	}

	if (!addr)
		return NULL;
	return (kprobe_opcode_t *)(((char *)addr) + p->offset);
}
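
/*
 * For example (illustrative; the symbol and offset are arbitrary), a
 * caller may set either an explicit address or a symbol plus offset,
 * but not both:
 *
 *	struct kprobe kp = {
 *		.symbol_name	= "do_fork",
 *		.offset		= 0x10,		resolves to do_fork + 0x10
 *	};
 *
 * Setting both .addr and .symbol_name makes kprobe_addr() return NULL,
 * and registration then fails with -EINVAL.
 */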
static int __kprobes __register_kprobe(struct kprobe *p,
				       unsigned long called_from)
{
	int ret = 0;
	struct kprobe *old_p;
	struct module *probed_mod;
	kprobe_opcode_t *addr;

	addr = kprobe_addr(p);
	if (!addr)
		return -EINVAL;
	p->addr = addr;

	if (!kernel_text_address((unsigned long) p->addr) ||
	    in_kprobes_functions((unsigned long) p->addr))
		return -EINVAL;

	p->mod_refcounted = 0;

	/*
	 * Check whether we are probing a module.
	 */
	probed_mod = module_text_address((unsigned long) p->addr);
	if (probed_mod) {
		struct module *calling_mod = module_text_address(called_from);
		/*
		 * We must allow modules to probe themselves and in this case
		 * avoid incrementing the module refcount, so as to allow
		 * unloading of self probing modules.
		 */
		if (calling_mod && calling_mod != probed_mod) {
			if (unlikely(!try_module_get(probed_mod)))
				return -EINVAL;
			p->mod_refcounted = 1;
		} else
			probed_mod = NULL;
	}

	p->nmissed = 0;
	INIT_LIST_HEAD(&p->list);
	mutex_lock(&kprobe_mutex);
	old_p = get_kprobe(p->addr);
	if (old_p) {
		ret = register_aggr_kprobe(old_p, p);
		goto out;
	}

	ret = arch_prepare_kprobe(p);
	if (ret)
		goto out;

	INIT_HLIST_NODE(&p->hlist);
	hlist_add_head_rcu(&p->hlist,
			   &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);

	if (kprobe_enabled)
		arch_arm_kprobe(p);

out:
	mutex_unlock(&kprobe_mutex);

	if (ret && probed_mod)
		module_put(probed_mod);
	return ret;
}

/*
 * Unregister a kprobe without a scheduler synchronization.
 */
static int __kprobes __unregister_kprobe_top(struct kprobe *p)
{
	struct kprobe *old_p, *list_p;

	old_p = get_kprobe(p->addr);
	if (unlikely(!old_p))
		return -EINVAL;

	if (p != old_p) {
		list_for_each_entry_rcu(list_p, &old_p->list, list)
			if (list_p == p)
				/* kprobe p is a valid probe */
				goto valid_p;
		return -EINVAL;
	}
valid_p:
	if (old_p == p ||
	    (old_p->pre_handler == aggr_pre_handler &&
	     list_is_singular(&old_p->list))) {
		/*
		 * Only probe on the hash list. Disarm only if kprobes are
		 * enabled - otherwise, the breakpoint would already have
		 * been removed. We save on flushing icache.
		 */
		if (kprobe_enabled)
			arch_disarm_kprobe(p);
		hlist_del_rcu(&old_p->hlist);
	} else {
		if (p->break_handler)
			old_p->break_handler = NULL;
		if (p->post_handler) {
			list_for_each_entry_rcu(list_p, &old_p->list, list) {
				if ((list_p != p) && (list_p->post_handler))
					goto noclean;
			}
			old_p->post_handler = NULL;
		}
noclean:
		list_del_rcu(&p->list);
	}
	return 0;
}

static void __kprobes __unregister_kprobe_bottom(struct kprobe *p)
{
	struct module *mod;
	struct kprobe *old_p;

	if (p->mod_refcounted) {
		mod = module_text_address((unsigned long)p->addr);
		if (mod)
			module_put(mod);
	}

	if (list_empty(&p->list) || list_is_singular(&p->list)) {
		if (!list_empty(&p->list)) {
			/* "p" is the last child of an aggr_kprobe */
			old_p = list_entry(p->list.next, struct kprobe, list);
			list_del(&p->list);
			kfree(old_p);
		}
		arch_remove_kprobe(p);
	}
}

static int __register_kprobes(struct kprobe **kps, int num,
			      unsigned long called_from)
{
	int i, ret = 0;

	if (num <= 0)
		return -EINVAL;
	for (i = 0; i < num; i++) {
		ret = __register_kprobe(kps[i], called_from);
		if (ret < 0 && i > 0) {
			unregister_kprobes(kps, i);
			break;
		}
	}
	return ret;
}

/*
 * Registration and unregistration functions for kprobe.
 */
int __kprobes register_kprobe(struct kprobe *p)
{
	return __register_kprobes(&p, 1,
				  (unsigned long)__builtin_return_address(0));
}
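
/*
 * A minimal usage sketch from a hypothetical module (the handler and
 * probed symbol are illustrative, not part of this file):
 *
 *	static int my_pre(struct kprobe *p, struct pt_regs *regs)
 *	{
 *		printk(KERN_INFO "probe hit at %p\n", p->addr);
 *		return 0;	0 lets normal single-step processing run
 *	}
 *
 *	static struct kprobe my_kp = {
 *		.symbol_name	= "do_fork",
 *		.pre_handler	= my_pre,
 *	};
 *
 * register_kprobe(&my_kp) arms the probe; unregister_kprobe(&my_kp)
 * removes it and, via synchronize_sched() below, waits until no CPU
 * can still be running its handlers.
 */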
void __kprobes unregister_kprobe(struct kprobe *p)
{
	unregister_kprobes(&p, 1);
}

int __kprobes register_kprobes(struct kprobe **kps, int num)
{
	return __register_kprobes(kps, num,
				  (unsigned long)__builtin_return_address(0));
}

void __kprobes unregister_kprobes(struct kprobe **kps, int num)
{
	int i;

	if (num <= 0)
		return;
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < num; i++)
		if (__unregister_kprobe_top(kps[i]) < 0)
			kps[i]->addr = NULL;
	mutex_unlock(&kprobe_mutex);

	synchronize_sched();
	for (i = 0; i < num; i++)
		if (kps[i]->addr)
			__unregister_kprobe_bottom(kps[i]);
}

static struct notifier_block kprobe_exceptions_nb = {
	.notifier_call = kprobe_exceptions_notify,
	.priority = 0x7fffffff /* we need to be notified first */
};

unsigned long __weak arch_deref_entry_point(void *entry)
{
	return (unsigned long)entry;
}

int __kprobes register_jprobe(struct jprobe *jp)
{
	unsigned long addr = arch_deref_entry_point(jp->entry);

	if (!kernel_text_address(addr))
		return -EINVAL;

	/* TODO: Verify probepoint is a function entry point */
	jp->kp.pre_handler = setjmp_pre_handler;
	jp->kp.break_handler = longjmp_break_handler;

	return __register_kprobe(&jp->kp,
				 (unsigned long)__builtin_return_address(0));
}
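
/*
 * A jprobe usage sketch (illustrative): the entry handler must mirror
 * the probed function's signature and must end with jprobe_return(),
 * which hands control back through longjmp_break_handler above.
 *
 *	static long my_jdo_fork(unsigned long clone_flags,
 *			unsigned long stack_start, struct pt_regs *regs,
 *			unsigned long stack_size, int __user *parent_tidptr,
 *			int __user *child_tidptr)
 *	{
 *		printk(KERN_INFO "clone_flags = 0x%lx\n", clone_flags);
 *		jprobe_return();
 *		return 0;	never reached
 *	}
 *
 *	static struct jprobe my_jp = {
 *		.entry	= my_jdo_fork,
 *		.kp	= { .symbol_name = "do_fork" },
 *	};
 *	...
 *	register_jprobe(&my_jp);
 */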
void __kprobes unregister_jprobe(struct jprobe *jp)
{
	unregister_kprobe(&jp->kp);
}

#ifdef CONFIG_KRETPROBES
/*
 * This kprobe pre_handler is registered with every kretprobe. When the
 * probe hits, it will set up the return probe.
 */
static int __kprobes pre_handler_kretprobe(struct kprobe *p,
					   struct pt_regs *regs)
{
	struct kretprobe *rp = container_of(p, struct kretprobe, kp);
	unsigned long flags = 0;

	/* TODO: consider swapping the RA only after the last pre_handler has fired */
	spin_lock_irqsave(&kretprobe_lock, flags);
	if (!hlist_empty(&rp->free_instances)) {
		struct kretprobe_instance *ri;

		ri = hlist_entry(rp->free_instances.first,
				 struct kretprobe_instance, uflist);
		ri->rp = rp;
		ri->task = current;

		if (rp->entry_handler && rp->entry_handler(ri, regs)) {
			spin_unlock_irqrestore(&kretprobe_lock, flags);
			return 0;
		}

		arch_prepare_kretprobe(ri, regs);

		/* XXX(hch): why is there no hlist_move_head? */
		hlist_del(&ri->uflist);
		hlist_add_head(&ri->uflist, &ri->rp->used_instances);
		hlist_add_head(&ri->hlist, kretprobe_inst_table_head(ri->task));
	} else
		rp->nmissed++;
	spin_unlock_irqrestore(&kretprobe_lock, flags);
	return 0;
}

static int __kprobes __register_kretprobe(struct kretprobe *rp,
					  unsigned long called_from)
{
	int ret = 0;
	struct kretprobe_instance *inst;
	int i;
	void *addr;

	if (kretprobe_blacklist_size) {
		addr = kprobe_addr(&rp->kp);
		if (!addr)
			return -EINVAL;

		for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
			if (kretprobe_blacklist[i].addr == addr)
				return -EINVAL;
		}
	}

	rp->kp.pre_handler = pre_handler_kretprobe;
	rp->kp.post_handler = NULL;
	rp->kp.fault_handler = NULL;
	rp->kp.break_handler = NULL;

	/* Pre-allocate memory for max kretprobe instances */
	if (rp->maxactive <= 0) {
#ifdef CONFIG_PREEMPT
		rp->maxactive = max(10, 2 * NR_CPUS);
#else
		rp->maxactive = NR_CPUS;
#endif
	}
	INIT_HLIST_HEAD(&rp->used_instances);
	INIT_HLIST_HEAD(&rp->free_instances);
	for (i = 0; i < rp->maxactive; i++) {
		inst = kmalloc(sizeof(struct kretprobe_instance) +
			       rp->data_size, GFP_KERNEL);
		if (inst == NULL) {
			free_rp_inst(rp);
			return -ENOMEM;
		}
		INIT_HLIST_NODE(&inst->uflist);
		hlist_add_head(&inst->uflist, &rp->free_instances);
	}

	rp->nmissed = 0;
	/* Establish function entry probe point */
	ret = __register_kprobe(&rp->kp, called_from);
	if (ret != 0)
		free_rp_inst(rp);
	return ret;
}

static int __register_kretprobes(struct kretprobe **rps, int num,
				 unsigned long called_from)
{
	int ret = 0, i;

	if (num <= 0)
		return -EINVAL;
	for (i = 0; i < num; i++) {
		ret = __register_kretprobe(rps[i], called_from);
		if (ret < 0 && i > 0) {
			unregister_kretprobes(rps, i);
			break;
		}
	}
	return ret;
}

int __kprobes register_kretprobe(struct kretprobe *rp)
{
	return __register_kretprobes(&rp, 1,
				     (unsigned long)__builtin_return_address(0));
}
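
/*
 * A kretprobe usage sketch (illustrative; the handler name and probed
 * symbol are hypothetical):
 *
 *	static int my_ret_handler(struct kretprobe_instance *ri,
 *				  struct pt_regs *regs)
 *	{
 *		printk(KERN_INFO "%s returned\n", ri->rp->kp.symbol_name);
 *		return 0;
 *	}
 *
 *	static struct kretprobe my_rp = {
 *		.handler	= my_ret_handler,
 *		.kp		= { .symbol_name = "do_fork" },
 *		.maxactive	= 20,	cap on concurrent activations;
 *					<= 0 picks the default above
 *	};
 *	...
 *	register_kretprobe(&my_rp);
 */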
void __kprobes unregister_kretprobe(struct kretprobe *rp)
{
	unregister_kretprobes(&rp, 1);
}

int __kprobes register_kretprobes(struct kretprobe **rps, int num)
{
	return __register_kretprobes(rps, num,
				     (unsigned long)__builtin_return_address(0));
}

void __kprobes unregister_kretprobes(struct kretprobe **rps, int num)
{
	int i;

	if (num <= 0)
		return;
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < num; i++)
		if (__unregister_kprobe_top(&rps[i]->kp) < 0)
			rps[i]->kp.addr = NULL;
	mutex_unlock(&kprobe_mutex);

	synchronize_sched();
	for (i = 0; i < num; i++) {
		if (rps[i]->kp.addr) {
			__unregister_kprobe_bottom(&rps[i]->kp);
			cleanup_rp_inst(rps[i]);
		}
	}
}

#else /* CONFIG_KRETPROBES */
int __kprobes register_kretprobe(struct kretprobe *rp)
{
	return -ENOSYS;
}

int __kprobes register_kretprobes(struct kretprobe **rps, int num)
{
	return -ENOSYS;
}

void __kprobes unregister_kretprobe(struct kretprobe *rp)
{
}

void __kprobes unregister_kretprobes(struct kretprobe **rps, int num)
{
}

static int __kprobes pre_handler_kretprobe(struct kprobe *p,
					   struct pt_regs *regs)
{
	return 0;
}
#endif /* CONFIG_KRETPROBES */

static int __init init_kprobes(void)
{
	int i, err = 0;
	unsigned long offset = 0, size = 0;
	char *modname, namebuf[128];
	const char *symbol_name;
	void *addr;
	struct kprobe_blackpoint *kb;

	/* FIXME allocate the probe table, currently defined statically */
	/* initialize all list heads */
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		INIT_HLIST_HEAD(&kprobe_table[i]);
		INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
	}

	/*
	 * Lookup and populate the kprobe_blacklist.
	 *
	 * Unlike the kretprobe blacklist, we'll need to determine
	 * the range of addresses that belong to the said functions,
	 * since a kprobe need not necessarily be at the beginning
	 * of a function.
	 */
	for (kb = kprobe_blacklist; kb->name != NULL; kb++) {
		kprobe_lookup_name(kb->name, addr);
		if (!addr)
			continue;

		kb->start_addr = (unsigned long)addr;
		symbol_name = kallsyms_lookup(kb->start_addr,
					      &size, &offset, &modname, namebuf);
		if (!symbol_name)
			kb->range = 0;
		else
			kb->range = size;
	}

	if (kretprobe_blacklist_size) {
		/* lookup the function address from its name */
		for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
			kprobe_lookup_name(kretprobe_blacklist[i].name,
					   kretprobe_blacklist[i].addr);
			if (!kretprobe_blacklist[i].addr)
				printk("kretprobe: lookup failed: %s\n",
				       kretprobe_blacklist[i].name);
		}
	}

	/* By default, kprobes are enabled */
	kprobe_enabled = true;

	err = arch_init_kprobes();
	if (!err)
		err = register_die_notifier(&kprobe_exceptions_nb);

	if (!err)
		init_test_probes();
	return err;
}

#ifdef CONFIG_DEBUG_FS
static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p,
				   const char *sym, int offset, char *modname)
{
	char *kprobe_type;

	if (p->pre_handler == pre_handler_kretprobe)
		kprobe_type = "r";
	else if (p->pre_handler == setjmp_pre_handler)
		kprobe_type = "j";
	else
		kprobe_type = "k";
	if (sym)
		seq_printf(pi, "%p %s %s+0x%x %s\n", p->addr, kprobe_type,
			   sym, offset, (modname ? modname : " "));
	else
		seq_printf(pi, "%p %s %p\n", p->addr, kprobe_type, p->addr);
}
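
/*
 * Each /sys/kernel/debug/kprobes/list line therefore reads, for example
 * (addresses illustrative):
 *
 *	c015d71a k do_fork+0x0
 *	c011a316 j do_fork+0x0
 *	c03dedc5 r tcp_v4_rcv+0x0
 *
 * with "k" for a plain kprobe, "j" for a jprobe, and "r" for the kprobe
 * backing a kretprobe.
 */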
static void __kprobes *kprobe_seq_start(struct seq_file *f, loff_t *pos)
{
	return (*pos < KPROBE_TABLE_SIZE) ? pos : NULL;
}

static void __kprobes *kprobe_seq_next(struct seq_file *f, void *v, loff_t *pos)
{
	(*pos)++;
	if (*pos >= KPROBE_TABLE_SIZE)
		return NULL;
	return pos;
}

static void __kprobes kprobe_seq_stop(struct seq_file *f, void *v)
{
	/* Nothing to do */
}

static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p, *kp;
	const char *sym = NULL;
	unsigned int i = *(loff_t *) v;
	unsigned long offset = 0;
	char *modname, namebuf[128];

	head = &kprobe_table[i];
	preempt_disable();
	hlist_for_each_entry_rcu(p, node, head, hlist) {
		sym = kallsyms_lookup((unsigned long)p->addr, NULL,
				      &offset, &modname, namebuf);
		if (p->pre_handler == aggr_pre_handler) {
			list_for_each_entry_rcu(kp, &p->list, list)
				report_probe(pi, kp, sym, offset, modname);
		} else
			report_probe(pi, p, sym, offset, modname);
	}
	preempt_enable();
	return 0;
}

static struct seq_operations kprobes_seq_ops = {
	.start = kprobe_seq_start,
	.next  = kprobe_seq_next,
	.stop  = kprobe_seq_stop,
	.show  = show_kprobe_addr
};

static int __kprobes kprobes_open(struct inode *inode, struct file *filp)
{
	return seq_open(filp, &kprobes_seq_ops);
}

static struct file_operations debugfs_kprobes_operations = {
	.open		= kprobes_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static void __kprobes enable_all_kprobes(void)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;
	unsigned int i;

	mutex_lock(&kprobe_mutex);

	/* If kprobes are already enabled, just return */
	if (kprobe_enabled)
		goto already_enabled;

	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, node, head, hlist)
			arch_arm_kprobe(p);
	}

	kprobe_enabled = true;
	printk(KERN_INFO "Kprobes globally enabled\n");

already_enabled:
	mutex_unlock(&kprobe_mutex);
	return;
}

static void __kprobes disable_all_kprobes(void)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;
	unsigned int i;

	mutex_lock(&kprobe_mutex);

	/* If kprobes are already disabled, just return */
	if (!kprobe_enabled)
		goto already_disabled;

	kprobe_enabled = false;
	printk(KERN_INFO "Kprobes globally disabled\n");
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, node, head, hlist) {
			if (!arch_trampoline_kprobe(p))
				arch_disarm_kprobe(p);
		}
	}

	mutex_unlock(&kprobe_mutex);
	/* Allow all currently running kprobes to complete */
	synchronize_sched();
	return;

already_disabled:
	mutex_unlock(&kprobe_mutex);
	return;
}

/*
 * XXX: The debugfs bool file interface doesn't allow for callbacks
 * when the bool state is switched. We can reuse that facility when
 * available
 */
static ssize_t read_enabled_file_bool(struct file *file,
				      char __user *user_buf, size_t count, loff_t *ppos)
{
	char buf[3];

	if (kprobe_enabled)
		buf[0] = '1';
	else
		buf[0] = '0';
	buf[1] = '\n';
	buf[2] = 0x00;
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t write_enabled_file_bool(struct file *file,
				       const char __user *user_buf, size_t count, loff_t *ppos)
{
	char buf[32];
	int buf_size;

	buf_size = min(count, (sizeof(buf)-1));
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	switch (buf[0]) {
	case 'y':
	case 'Y':
	case '1':
		enable_all_kprobes();
		break;
	case 'n':
	case 'N':
	case '0':
		disable_all_kprobes();
		break;
	}

	return count;
}
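
/*
 * From userspace the switch is driven with plain writes, e.g.
 * (assuming debugfs is mounted at /sys/kernel/debug):
 *
 *	echo 0 > /sys/kernel/debug/kprobes/enabled	disarm all probes
 *	echo 1 > /sys/kernel/debug/kprobes/enabled	re-arm them
 */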
static struct file_operations fops_kp = {
	.read =		read_enabled_file_bool,
	.write =	write_enabled_file_bool,
};

static int __kprobes debugfs_kprobe_init(void)
{
	struct dentry *dir, *file;
	unsigned int value = 1;

	dir = debugfs_create_dir("kprobes", NULL);
	if (!dir)
		return -ENOMEM;

	file = debugfs_create_file("list", 0444, dir, NULL,
				   &debugfs_kprobes_operations);
	if (!file) {
		debugfs_remove(dir);
		return -ENOMEM;
	}

	file = debugfs_create_file("enabled", 0600, dir,
				   &value, &fops_kp);
	if (!file) {
		debugfs_remove(dir);
		return -ENOMEM;
	}

	return 0;
}

late_initcall(debugfs_kprobe_init);
#endif /* CONFIG_DEBUG_FS */

module_init(init_kprobes);

EXPORT_SYMBOL_GPL(register_kprobe);
EXPORT_SYMBOL_GPL(unregister_kprobe);
EXPORT_SYMBOL_GPL(register_kprobes);
EXPORT_SYMBOL_GPL(unregister_kprobes);
EXPORT_SYMBOL_GPL(register_jprobe);
EXPORT_SYMBOL_GPL(unregister_jprobe);
#ifdef CONFIG_KPROBES
EXPORT_SYMBOL_GPL(jprobe_return);
#endif
#ifdef CONFIG_KPROBES
EXPORT_SYMBOL_GPL(register_kretprobe);
EXPORT_SYMBOL_GPL(unregister_kretprobe);
EXPORT_SYMBOL_GPL(register_kretprobes);
EXPORT_SYMBOL_GPL(unregister_kretprobes);
#endif