/*
 *  Kernel Probes (KProbes)
 *  kernel/kprobes.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct  Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *           Probes initial implementation (includes suggestions from
 *           Rusty Russell).
 * 2004-Aug  Updated by Prasanna S Panchamukhi <prasanna@in.ibm.com> with
 *           hlists and exceptions notifier as suggested by Andi Kleen.
 * 2004-July Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *           interface to access function arguments.
 * 2004-Sep  Prasanna S Panchamukhi <prasanna@in.ibm.com> Changed Kprobes
 *           exceptions notifier to be first on the priority list.
 * 2005-May  Hien Nguyen <hien@us.ibm.com>, Jim Keniston
 *           <jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *           <prasanna@in.ibm.com> added function-return probes.
 */
#include <linux/kprobes.h>
#include <linux/hash.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/module.h>
#include <linux/moduleloader.h>
#include <linux/kallsyms.h>
#include <linux/freezer.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kdebug.h>
#include <linux/memory.h>

#include <asm-generic/sections.h>
#include <asm/cacheflush.h>
#include <asm/errno.h>
#include <asm/uaccess.h>

#define KPROBE_HASH_BITS 6
#define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)

/*
 * Some oddball architectures like 64bit powerpc have function descriptors
 * so this must be overridable.
 */
#ifndef kprobe_lookup_name
#define kprobe_lookup_name(name, addr) \
        addr = ((kprobe_opcode_t *)(kallsyms_lookup_name(name)))
#endif
static int kprobes_initialized;
static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];

/* NOTE: change this value only with kprobe_mutex held */
static bool kprobes_all_disarmed;

static DEFINE_MUTEX(kprobe_mutex);      /* Protects kprobe_table */
static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
static struct {
        spinlock_t lock ____cacheline_aligned_in_smp;
} kretprobe_table_locks[KPROBE_TABLE_SIZE];

static spinlock_t *kretprobe_table_lock_ptr(unsigned long hash)
{
        return &(kretprobe_table_locks[hash].lock);
}

/*
 * Normally, functions that we'd want to prohibit kprobes in are marked
 * __kprobes. But there are cases where such functions already belong to
 * a different section (__sched for preempt_schedule).
 *
 * For such cases, we now have a blacklist.
 */
static struct kprobe_blackpoint kprobe_blacklist[] = {
        {"preempt_schedule",},
        {"native_get_debugreg",},
        {"irq_entries_start",},
        {"common_interrupt",},
        {"mcount",},    /* mcount can be called from everywhere */
        {NULL}          /* Terminator */
};
#ifdef __ARCH_WANT_KPROBES_INSN_SLOT
/*
 * kprobe->ainsn.insn points to the copy of the instruction to be
 * single-stepped. x86_64, POWER4 and above have no-exec support and
 * stepping on the instruction on a vmalloced/kmalloced/data page
 * is a recipe for disaster.
 */
#define INSNS_PER_PAGE (PAGE_SIZE/(MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))

struct kprobe_insn_page {
        struct list_head list;
        kprobe_opcode_t *insns;         /* Page of instruction slots */
        char slot_used[INSNS_PER_PAGE];
        int nused;
        int ngarbage;
};

enum kprobe_slot_state {
        SLOT_CLEAN = 0,
        SLOT_DIRTY = 1,
        SLOT_USED = 2,
};

static DEFINE_MUTEX(kprobe_insn_mutex); /* Protects kprobe_insn_pages */
static LIST_HEAD(kprobe_insn_pages);
static int kprobe_garbage_slots;
static int collect_garbage_slots(void);

static int __kprobes check_safety(void)
{
        int ret = 0;
#if defined(CONFIG_PREEMPT) && defined(CONFIG_FREEZER)
        ret = freeze_processes();
        if (ret == 0) {
                struct task_struct *p, *q;
                do_each_thread(p, q) {
                        if (p != current && p->state == TASK_RUNNING &&
                            p->pid != 0) {
                                printk("Check failed: %s is running\n", p->comm);
                                ret = -1;
                                goto loop_end;
                        }
                } while_each_thread(p, q);
        }
loop_end:
        thaw_processes();
#else
        synchronize_sched();
#endif
        return ret;
}
/**
 * __get_insn_slot() - Find a slot on an executable page for an instruction.
 * We allocate an executable page if there's no room on existing ones.
 */
static kprobe_opcode_t __kprobes *__get_insn_slot(void)
{
        struct kprobe_insn_page *kip;

retry:
        list_for_each_entry(kip, &kprobe_insn_pages, list) {
                if (kip->nused < INSNS_PER_PAGE) {
                        int i;
                        for (i = 0; i < INSNS_PER_PAGE; i++) {
                                if (kip->slot_used[i] == SLOT_CLEAN) {
                                        kip->slot_used[i] = SLOT_USED;
                                        kip->nused++;
                                        return kip->insns + (i * MAX_INSN_SIZE);
                                }
                        }
                        /* Surprise!  No unused slots.  Fix kip->nused. */
                        kip->nused = INSNS_PER_PAGE;
                }
        }

        /* If there are any garbage slots, collect them and try again. */
        if (kprobe_garbage_slots && collect_garbage_slots() == 0) {
                goto retry;
        }

        /* All out of space.  Need to allocate a new page.  Use slot 0. */
        kip = kmalloc(sizeof(struct kprobe_insn_page), GFP_KERNEL);
        if (!kip)
                return NULL;

        /*
         * Use module_alloc so this page is within +/- 2GB of where the
         * kernel image and loaded module images reside. This is required
         * so x86_64 can correctly handle the %rip-relative fixups.
         */
        kip->insns = module_alloc(PAGE_SIZE);
        if (!kip->insns) {
                kfree(kip);
                return NULL;
        }
        INIT_LIST_HEAD(&kip->list);
        list_add(&kip->list, &kprobe_insn_pages);
        memset(kip->slot_used, SLOT_CLEAN, INSNS_PER_PAGE);
        kip->slot_used[0] = SLOT_USED;
        kip->nused = 1;
        kip->ngarbage = 0;
        return kip->insns;
}

kprobe_opcode_t __kprobes *get_insn_slot(void)
{
        kprobe_opcode_t *ret;

        mutex_lock(&kprobe_insn_mutex);
        ret = __get_insn_slot();
        mutex_unlock(&kprobe_insn_mutex);
        return ret;
}
/* Return 1 if all garbage slots on the page are collected, otherwise 0. */
static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
{
        kip->slot_used[idx] = SLOT_CLEAN;
        kip->nused--;
        if (kip->nused == 0) {
                /*
                 * Page is no longer in use.  Free it unless
                 * it's the last one.  We keep the last one
                 * so as not to have to set it up again the
                 * next time somebody inserts a probe.
                 */
                if (!list_is_singular(&kprobe_insn_pages)) {
                        list_del(&kip->list);
                        module_free(NULL, kip->insns);
                        kfree(kip);
                }
                return 1;
        }
        return 0;
}

static int __kprobes collect_garbage_slots(void)
{
        struct kprobe_insn_page *kip, *next;

        /* Ensure no-one is preempted on the garbage slots */
        if (check_safety())
                return -EAGAIN;

        list_for_each_entry_safe(kip, next, &kprobe_insn_pages, list) {
                int i;
                if (kip->ngarbage == 0)
                        continue;
                kip->ngarbage = 0;      /* we will collect all garbage slots */
                for (i = 0; i < INSNS_PER_PAGE; i++) {
                        if (kip->slot_used[i] == SLOT_DIRTY &&
                            collect_one_slot(kip, i))
                                break;
                }
        }
        kprobe_garbage_slots = 0;
        return 0;
}

void __kprobes free_insn_slot(kprobe_opcode_t *slot, int dirty)
{
        struct kprobe_insn_page *kip;

        mutex_lock(&kprobe_insn_mutex);
        list_for_each_entry(kip, &kprobe_insn_pages, list) {
                if (kip->insns <= slot &&
                    slot < kip->insns + (INSNS_PER_PAGE * MAX_INSN_SIZE)) {
                        int i = (slot - kip->insns) / MAX_INSN_SIZE;
                        if (dirty) {
                                kip->slot_used[i] = SLOT_DIRTY;
                                kip->ngarbage++;
                        } else
                                collect_one_slot(kip, i);
                        break;
                }
        }

        if (dirty && ++kprobe_garbage_slots > INSNS_PER_PAGE)
                collect_garbage_slots();

        mutex_unlock(&kprobe_insn_mutex);
}
#endif
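
/*
 * Usage sketch: how arch code typically consumes the slot API above. This
 * mirrors the shape of the x86 arch_prepare_kprobe()/arch_remove_kprobe()
 * pair; the arch_copy_kprobe() helper and the ainsn.insn field are
 * arch-specific assumptions, so the block is illustrative only and
 * compiled out.
 */
#if 0
static int example_arch_prepare_kprobe(struct kprobe *p)
{
        p->ainsn.insn = get_insn_slot();        /* executable scratch slot */
        if (!p->ainsn.insn)
                return -ENOMEM;
        arch_copy_kprobe(p);    /* copy the insn into the slot for single-stepping */
        return 0;
}

static void example_arch_remove_kprobe(struct kprobe *p)
{
        if (p->ainsn.insn) {
                /* dirty=1: the slot may still be in flight; let the GC reap it */
                free_insn_slot(p->ainsn.insn, 1);
                p->ainsn.insn = NULL;
        }
}
#endif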
/* We have preemption disabled, so it is safe to use __ versions */
static inline void set_kprobe_instance(struct kprobe *kp)
{
        __get_cpu_var(kprobe_instance) = kp;
}

static inline void reset_kprobe_instance(void)
{
        __get_cpu_var(kprobe_instance) = NULL;
}

/*
 * This routine is called either:
 *      - under the kprobe_mutex - during kprobe_[un]register()
 *                              OR
 *      - with preemption disabled - from arch/xxx/kernel/kprobes.c
 */
struct kprobe __kprobes *get_kprobe(void *addr)
{
        struct hlist_head *head;
        struct hlist_node *node;
        struct kprobe *p;

        head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
        hlist_for_each_entry_rcu(p, node, head, hlist) {
                if (p->addr == addr)
                        return p;
        }
        return NULL;
}
/* Arm a kprobe with text_mutex */
static void __kprobes arm_kprobe(struct kprobe *kp)
{
        mutex_lock(&text_mutex);
        arch_arm_kprobe(kp);
        mutex_unlock(&text_mutex);
}

/* Disarm a kprobe with text_mutex */
static void __kprobes disarm_kprobe(struct kprobe *kp)
{
        mutex_lock(&text_mutex);
        arch_disarm_kprobe(kp);
        mutex_unlock(&text_mutex);
}

/*
 * Aggregate handlers for multiple kprobes support - these handlers
 * take care of invoking the individual kprobe handlers on p->list
 */
static int __kprobes aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
        struct kprobe *kp;

        list_for_each_entry_rcu(kp, &p->list, list) {
                if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
                        set_kprobe_instance(kp);
                        if (kp->pre_handler(kp, regs))
                                return 1;
                }
                reset_kprobe_instance();
        }
        return 0;
}

static void __kprobes aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
                                        unsigned long flags)
{
        struct kprobe *kp;

        list_for_each_entry_rcu(kp, &p->list, list) {
                if (kp->post_handler && likely(!kprobe_disabled(kp))) {
                        set_kprobe_instance(kp);
                        kp->post_handler(kp, regs, flags);
                        reset_kprobe_instance();
                }
        }
}

static int __kprobes aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
                                        int trapnr)
{
        struct kprobe *cur = __get_cpu_var(kprobe_instance);

        /*
         * if we faulted "during" the execution of a user specified
         * probe handler, invoke just that probe's fault handler
         */
        if (cur && cur->fault_handler) {
                if (cur->fault_handler(cur, regs, trapnr))
                        return 1;
        }
        return 0;
}

static int __kprobes aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
{
        struct kprobe *cur = __get_cpu_var(kprobe_instance);
        int ret = 0;

        if (cur && cur->break_handler) {
                if (cur->break_handler(cur, regs))
                        ret = 1;
        }
        reset_kprobe_instance();
        return ret;
}
/* Walks the list and increments nmissed count for multiprobe case */
void __kprobes kprobes_inc_nmissed_count(struct kprobe *p)
{
        struct kprobe *kp;

        if (p->pre_handler != aggr_pre_handler) {
                p->nmissed++;
        } else {
                list_for_each_entry_rcu(kp, &p->list, list)
                        kp->nmissed++;
        }
        return;
}

void __kprobes recycle_rp_inst(struct kretprobe_instance *ri,
                               struct hlist_head *head)
{
        struct kretprobe *rp = ri->rp;

        /* remove rp inst off the kretprobe_inst_table */
        hlist_del(&ri->hlist);
        INIT_HLIST_NODE(&ri->hlist);
        if (likely(rp)) {
                spin_lock(&rp->lock);
                hlist_add_head(&ri->hlist, &rp->free_instances);
                spin_unlock(&rp->lock);
        } else
                /* Unregistering */
                hlist_add_head(&ri->hlist, head);
}

void __kprobes kretprobe_hash_lock(struct task_struct *tsk,
                                   struct hlist_head **head, unsigned long *flags)
{
        unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
        spinlock_t *hlist_lock;

        *head = &kretprobe_inst_table[hash];
        hlist_lock = kretprobe_table_lock_ptr(hash);
        spin_lock_irqsave(hlist_lock, *flags);
}

static void __kprobes kretprobe_table_lock(unsigned long hash,
                                           unsigned long *flags)
{
        spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
        spin_lock_irqsave(hlist_lock, *flags);
}

void __kprobes kretprobe_hash_unlock(struct task_struct *tsk,
                                     unsigned long *flags)
{
        unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
        spinlock_t *hlist_lock;

        hlist_lock = kretprobe_table_lock_ptr(hash);
        spin_unlock_irqrestore(hlist_lock, *flags);
}

void __kprobes kretprobe_table_unlock(unsigned long hash, unsigned long *flags)
{
        spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
        spin_unlock_irqrestore(hlist_lock, *flags);
}
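
/*
 * Usage sketch: the lock pair above is how an arch kretprobe trampoline
 * walks the per-task instance list. A condensed, illustrative version of
 * the pattern used by trampoline handlers in arch code (compiled out;
 * the consumption step is elided):
 */
#if 0
static void example_walk_instances(void)
{
        struct kretprobe_instance *ri;
        struct hlist_head *head;
        struct hlist_node *node, *tmp;
        unsigned long flags;

        kretprobe_hash_lock(current, &head, &flags);
        hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
                if (ri->task != current)
                        continue;       /* another task's instance */
                /* ... consume ri (return address, handler, data) ... */
        }
        kretprobe_hash_unlock(current, &flags);
}
#endif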
/*
 * This function is called from finish_task_switch when task tk becomes dead,
 * so that we can recycle any function-return probe instances associated
 * with this task. These leftover instances represent probed functions
 * that have been called but will never return.
 */
void __kprobes kprobe_flush_task(struct task_struct *tk)
{
        struct kretprobe_instance *ri;
        struct hlist_head *head, empty_rp;
        struct hlist_node *node, *tmp;
        unsigned long hash, flags = 0;

        if (unlikely(!kprobes_initialized))
                /* Early boot.  kretprobe_table_locks not yet initialized. */
                return;

        /* empty_rp must be initialized before recycle_rp_inst() adds to it */
        INIT_HLIST_HEAD(&empty_rp);
        hash = hash_ptr(tk, KPROBE_HASH_BITS);
        head = &kretprobe_inst_table[hash];
        kretprobe_table_lock(hash, &flags);
        hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
                if (ri->task == tk)
                        recycle_rp_inst(ri, &empty_rp);
        }
        kretprobe_table_unlock(hash, &flags);
        hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
                hlist_del(&ri->hlist);
                kfree(ri);
        }
}
static inline void free_rp_inst(struct kretprobe *rp)
{
        struct kretprobe_instance *ri;
        struct hlist_node *pos, *next;

        hlist_for_each_entry_safe(ri, pos, next, &rp->free_instances, hlist) {
                hlist_del(&ri->hlist);
                kfree(ri);
        }
}

static void __kprobes cleanup_rp_inst(struct kretprobe *rp)
{
        unsigned long flags, hash;
        struct kretprobe_instance *ri;
        struct hlist_node *pos, *next;
        struct hlist_head *head;

        /* No race here */
        for (hash = 0; hash < KPROBE_TABLE_SIZE; hash++) {
                kretprobe_table_lock(hash, &flags);
                head = &kretprobe_inst_table[hash];
                hlist_for_each_entry_safe(ri, pos, next, head, hlist) {
                        if (ri->rp == rp)
                                ri->rp = NULL;
                }
                kretprobe_table_unlock(hash, &flags);
        }
        free_rp_inst(rp);
}
/*
 * Keep all fields in the kprobe consistent.
 */
static inline void copy_kprobe(struct kprobe *old_p, struct kprobe *p)
{
        memcpy(&p->opcode, &old_p->opcode, sizeof(kprobe_opcode_t));
        memcpy(&p->ainsn, &old_p->ainsn, sizeof(struct arch_specific_insn));
}

/*
 * Add the new probe to ap->list. Fail if this is the
 * second jprobe at the address - two jprobes can't coexist.
 */
static int __kprobes add_new_kprobe(struct kprobe *ap, struct kprobe *p)
{
        BUG_ON(kprobe_gone(ap) || kprobe_gone(p));
        if (p->break_handler) {
                if (ap->break_handler)
                        return -EEXIST;
                list_add_tail_rcu(&p->list, &ap->list);
                ap->break_handler = aggr_break_handler;
        } else
                list_add_rcu(&p->list, &ap->list);
        if (p->post_handler && !ap->post_handler)
                ap->post_handler = aggr_post_handler;

        if (kprobe_disabled(ap) && !kprobe_disabled(p)) {
                ap->flags &= ~KPROBE_FLAG_DISABLED;
                if (!kprobes_all_disarmed)
                        /* Arm the breakpoint again. */
                        arm_kprobe(ap);
        }
        return 0;
}

/*
 * Fill in the required fields of the "manager kprobe". Replace the
 * earlier kprobe in the hlist with the manager kprobe.
 */
static inline void add_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
{
        copy_kprobe(p, ap);
        flush_insn_slot(ap);
        ap->addr = p->addr;
        ap->flags = p->flags;
        ap->pre_handler = aggr_pre_handler;
        ap->fault_handler = aggr_fault_handler;
        /* We don't care about a kprobe which has gone. */
        if (p->post_handler && !kprobe_gone(p))
                ap->post_handler = aggr_post_handler;
        if (p->break_handler && !kprobe_gone(p))
                ap->break_handler = aggr_break_handler;

        INIT_LIST_HEAD(&ap->list);
        list_add_rcu(&p->list, &ap->list);

        hlist_replace_rcu(&p->hlist, &ap->hlist);
}
/*
 * This is the second or subsequent kprobe at the address - handle
 * the intricacies.
 */
static int __kprobes register_aggr_kprobe(struct kprobe *old_p,
                                          struct kprobe *p)
{
        int ret = 0;
        struct kprobe *ap = old_p;

        if (old_p->pre_handler != aggr_pre_handler) {
                /* If old_p is not an aggr_kprobe, create a new one. */
                ap = kzalloc(sizeof(struct kprobe), GFP_KERNEL);
                if (!ap)
                        return -ENOMEM;
                add_aggr_kprobe(ap, old_p);
        }

        if (kprobe_gone(ap)) {
                /*
                 * We are attempting to insert a new probe at the same
                 * location as a probe in a module vaddr area which has
                 * already been freed, so the instruction slot has already
                 * been released. We need a new slot for the new probe.
                 */
                ret = arch_prepare_kprobe(ap);
                if (ret)
                        /*
                         * Even if we fail to allocate a new slot, we don't
                         * need to free the aggr_kprobe. It will be used next
                         * time, or freed by unregister_kprobe.
                         */
                        return ret;

                /*
                 * Clear the gone flag to prevent allocating a new slot again,
                 * and set the disabled flag because it is not armed yet.
                 */
                ap->flags = (ap->flags & ~KPROBE_FLAG_GONE)
                            | KPROBE_FLAG_DISABLED;
        }

        copy_kprobe(ap, p);
        return add_new_kprobe(ap, p);
}
/* Try to disable an aggr_kprobe, and return 1 if it succeeded. */
static int __kprobes try_to_disable_aggr_kprobe(struct kprobe *p)
{
        struct kprobe *kp;

        list_for_each_entry_rcu(kp, &p->list, list) {
                if (!kprobe_disabled(kp))
                        /*
                         * There is an active probe on the list.
                         * We can't disable this aggr_kprobe.
                         */
                        return 0;
        }
        p->flags |= KPROBE_FLAG_DISABLED;
        return 1;
}

static int __kprobes in_kprobes_functions(unsigned long addr)
{
        struct kprobe_blackpoint *kb;

        if (addr >= (unsigned long)__kprobes_text_start &&
            addr < (unsigned long)__kprobes_text_end)
                return -EINVAL;
        /*
         * If there exists a kprobe_blacklist, verify and
         * fail any probe registration in the prohibited area.
         */
        for (kb = kprobe_blacklist; kb->name != NULL; kb++) {
                if (kb->start_addr) {
                        if (addr >= kb->start_addr &&
                            addr < (kb->start_addr + kb->range))
                                return -EINVAL;
                }
        }
        return 0;
}
/*
 * If we have a symbol_name argument, look it up and add the offset field
 * to it. This way, we can specify a relative address to a symbol.
 */
static kprobe_opcode_t __kprobes *kprobe_addr(struct kprobe *p)
{
        kprobe_opcode_t *addr = p->addr;

        if (p->symbol_name) {
                if (addr)
                        return NULL;
                kprobe_lookup_name(p->symbol_name, addr);
        }

        if (!addr)
                return NULL;
        return (kprobe_opcode_t *)(((char *)addr) + p->offset);
}
/* Check that the passed kprobe is valid and return the kprobe in kprobe_table. */
static struct kprobe * __kprobes __get_valid_kprobe(struct kprobe *p)
{
        struct kprobe *old_p, *list_p;

        old_p = get_kprobe(p->addr);
        if (unlikely(!old_p))
                return NULL;

        if (p != old_p) {
                list_for_each_entry_rcu(list_p, &old_p->list, list)
                        if (list_p == p)
                                /* kprobe p is a valid probe */
                                goto valid;
                return NULL;
        }
valid:
        return old_p;
}

/* Return an error if the kprobe is being re-registered */
static inline int check_kprobe_rereg(struct kprobe *p)
{
        int ret = 0;
        struct kprobe *old_p;

        mutex_lock(&kprobe_mutex);
        old_p = __get_valid_kprobe(p);
        if (old_p)
                ret = -EINVAL;
        mutex_unlock(&kprobe_mutex);
        return ret;
}
int __kprobes register_kprobe(struct kprobe *p)
{
        int ret = 0;
        struct kprobe *old_p;
        struct module *probed_mod;
        kprobe_opcode_t *addr;

        addr = kprobe_addr(p);
        if (!addr)
                return -EINVAL;
        p->addr = addr;

        ret = check_kprobe_rereg(p);
        if (ret)
                return ret;

        preempt_disable();
        if (!kernel_text_address((unsigned long) p->addr) ||
            in_kprobes_functions((unsigned long) p->addr)) {
                preempt_enable();
                return -EINVAL;
        }

        /* User can pass only KPROBE_FLAG_DISABLED to register_kprobe */
        p->flags &= KPROBE_FLAG_DISABLED;

        /*
         * Check if we are probing a module.
         */
        probed_mod = __module_text_address((unsigned long) p->addr);
        if (probed_mod) {
                /*
                 * We must hold a refcount of the probed module while updating
                 * its code to prohibit unexpected unloading.
                 */
                if (unlikely(!try_module_get(probed_mod))) {
                        preempt_enable();
                        return -EINVAL;
                }
                /*
                 * If the module freed .init.text, we can't insert
                 * kprobes in there.
                 */
                if (within_module_init((unsigned long)p->addr, probed_mod) &&
                    probed_mod->state != MODULE_STATE_COMING) {
                        module_put(probed_mod);
                        preempt_enable();
                        return -EINVAL;
                }
        }
        preempt_enable();

        p->nmissed = 0;
        INIT_LIST_HEAD(&p->list);
        mutex_lock(&kprobe_mutex);
        old_p = get_kprobe(p->addr);
        if (old_p) {
                ret = register_aggr_kprobe(old_p, p);
                goto out;
        }

        mutex_lock(&text_mutex);
        ret = arch_prepare_kprobe(p);
        if (ret)
                goto out_unlock_text;

        INIT_HLIST_NODE(&p->hlist);
        hlist_add_head_rcu(&p->hlist,
                           &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);

        if (!kprobes_all_disarmed && !kprobe_disabled(p))
                arch_arm_kprobe(p);

out_unlock_text:
        mutex_unlock(&text_mutex);
out:
        mutex_unlock(&kprobe_mutex);

        if (probed_mod)
                module_put(probed_mod);

        return ret;
}
EXPORT_SYMBOL_GPL(register_kprobe);
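
/*
 * Usage sketch for register_kprobe(), in the style of
 * samples/kprobes/kprobe_example.c. Illustrative only and compiled out;
 * the probed symbol "do_fork" is an assumption that varies by kernel
 * version and architecture.
 */
#if 0
static int example_pre(struct kprobe *p, struct pt_regs *regs)
{
        printk(KERN_INFO "pre_handler: probe hit at %p\n", p->addr);
        return 0;       /* 0: continue with single-stepping as usual */
}

static void example_post(struct kprobe *p, struct pt_regs *regs,
                         unsigned long flags)
{
        printk(KERN_INFO "post_handler: single-step done at %p\n", p->addr);
}

static struct kprobe example_kp = {
        .symbol_name = "do_fork",       /* resolved through kprobe_addr() */
        .pre_handler = example_pre,
        .post_handler = example_post,
};

static int __init example_init(void)
{
        return register_kprobe(&example_kp);    /* 0 on success */
}

static void __exit example_exit(void)
{
        unregister_kprobe(&example_kp);
}
#endif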
/*
 * Unregister a kprobe without a scheduler synchronization.
 */
static int __kprobes __unregister_kprobe_top(struct kprobe *p)
{
        struct kprobe *old_p, *list_p;

        old_p = __get_valid_kprobe(p);
        if (old_p == NULL)
                return -EINVAL;
        if (old_p == p ||
            (old_p->pre_handler == aggr_pre_handler &&
             list_is_singular(&old_p->list))) {
                /*
                 * Only probe on the hash list. Disarm only if kprobes are
                 * enabled and not gone - otherwise, the breakpoint would
                 * already have been removed. We save on flushing icache.
                 */
                if (!kprobes_all_disarmed && !kprobe_disabled(old_p))
                        disarm_kprobe(p);
                hlist_del_rcu(&old_p->hlist);
        } else {
                if (p->break_handler && !kprobe_gone(p))
                        old_p->break_handler = NULL;
                if (p->post_handler && !kprobe_gone(p)) {
                        list_for_each_entry_rcu(list_p, &old_p->list, list) {
                                if ((list_p != p) && (list_p->post_handler))
                                        goto noclean;
                        }
                        old_p->post_handler = NULL;
                }
noclean:
                list_del_rcu(&p->list);
                if (!kprobe_disabled(old_p)) {
                        try_to_disable_aggr_kprobe(old_p);
                        if (!kprobes_all_disarmed && kprobe_disabled(old_p))
                                disarm_kprobe(old_p);
                }
        }
        return 0;
}

static void __kprobes __unregister_kprobe_bottom(struct kprobe *p)
{
        struct kprobe *old_p;

        if (list_empty(&p->list))
                arch_remove_kprobe(p);
        else if (list_is_singular(&p->list)) {
                /* "p" is the last child of an aggr_kprobe */
                old_p = list_entry(p->list.next, struct kprobe, list);
                list_del(&p->list);
                arch_remove_kprobe(old_p);
                kfree(old_p);
        }
}
int __kprobes register_kprobes(struct kprobe **kps, int num)
{
        int i, ret = 0;

        if (num <= 0)
                return -EINVAL;
        for (i = 0; i < num; i++) {
                ret = register_kprobe(kps[i]);
                if (ret < 0) {
                        if (i > 0)
                                unregister_kprobes(kps, i);
                        break;
                }
        }
        return ret;
}
EXPORT_SYMBOL_GPL(register_kprobes);

void __kprobes unregister_kprobe(struct kprobe *p)
{
        unregister_kprobes(&p, 1);
}
EXPORT_SYMBOL_GPL(unregister_kprobe);

void __kprobes unregister_kprobes(struct kprobe **kps, int num)
{
        int i;

        if (num <= 0)
                return;
        mutex_lock(&kprobe_mutex);
        for (i = 0; i < num; i++)
                if (__unregister_kprobe_top(kps[i]) < 0)
                        kps[i]->addr = NULL;
        mutex_unlock(&kprobe_mutex);

        synchronize_sched();
        for (i = 0; i < num; i++)
                if (kps[i]->addr)
                        __unregister_kprobe_bottom(kps[i]);
}
EXPORT_SYMBOL_GPL(unregister_kprobes);
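
/*
 * Usage sketch: batch registration. register_kprobes() unregisters the
 * probes it already registered if a later one fails, so the caller checks
 * a single return value. (Illustrative and compiled out; the symbol names
 * are assumptions.)
 */
#if 0
static struct kprobe batch_kp1 = { .symbol_name = "do_fork" };
static struct kprobe batch_kp2 = { .symbol_name = "do_exit" };
static struct kprobe *batch_kps[] = { &batch_kp1, &batch_kp2 };

static int __init batch_init(void)
{
        /* On failure, probes registered so far are already rolled back. */
        return register_kprobes(batch_kps, ARRAY_SIZE(batch_kps));
}

static void __exit batch_exit(void)
{
        unregister_kprobes(batch_kps, ARRAY_SIZE(batch_kps));
}
#endif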
static struct notifier_block kprobe_exceptions_nb = {
        .notifier_call = kprobe_exceptions_notify,
        .priority = 0x7fffffff /* we need to be notified first */
};

unsigned long __weak arch_deref_entry_point(void *entry)
{
        return (unsigned long)entry;
}

int __kprobes register_jprobes(struct jprobe **jps, int num)
{
        struct jprobe *jp;
        int ret = 0, i;

        if (num <= 0)
                return -EINVAL;
        for (i = 0; i < num; i++) {
                unsigned long addr;
                jp = jps[i];
                addr = arch_deref_entry_point(jp->entry);

                if (!kernel_text_address(addr))
                        ret = -EINVAL;
                else {
                        /* Todo: Verify probepoint is a function entry point */
                        jp->kp.pre_handler = setjmp_pre_handler;
                        jp->kp.break_handler = longjmp_break_handler;
                        ret = register_kprobe(&jp->kp);
                }
                if (ret < 0) {
                        if (i > 0)
                                unregister_jprobes(jps, i);
                        break;
                }
        }
        return ret;
}
EXPORT_SYMBOL_GPL(register_jprobes);

int __kprobes register_jprobe(struct jprobe *jp)
{
        return register_jprobes(&jp, 1);
}
EXPORT_SYMBOL_GPL(register_jprobe);

void __kprobes unregister_jprobe(struct jprobe *jp)
{
        unregister_jprobes(&jp, 1);
}
EXPORT_SYMBOL_GPL(unregister_jprobe);

void __kprobes unregister_jprobes(struct jprobe **jps, int num)
{
        int i;

        if (num <= 0)
                return;
        mutex_lock(&kprobe_mutex);
        for (i = 0; i < num; i++)
                if (__unregister_kprobe_top(&jps[i]->kp) < 0)
                        jps[i]->kp.addr = NULL;
        mutex_unlock(&kprobe_mutex);

        synchronize_sched();
        for (i = 0; i < num; i++) {
                if (jps[i]->kp.addr)
                        __unregister_kprobe_bottom(&jps[i]->kp);
        }
}
EXPORT_SYMBOL_GPL(unregister_jprobes);
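
/*
 * Usage sketch for register_jprobe(), after samples/kprobes/jprobe_example.c.
 * The handler must mirror the probed function's signature exactly and must
 * end with jprobe_return(). (Illustrative and compiled out; probing do_fork
 * with this 2.6-era signature is an assumption.)
 */
#if 0
static long jdo_fork(unsigned long clone_flags, unsigned long stack_start,
                     struct pt_regs *regs, unsigned long stack_size,
                     int __user *parent_tidptr, int __user *child_tidptr)
{
        printk(KERN_INFO "jprobe: clone_flags = 0x%lx\n", clone_flags);
        jprobe_return();        /* mandatory; never return normally */
        return 0;               /* not reached */
}

static struct jprobe example_jp = {
        .entry = jdo_fork,
        .kp.symbol_name = "do_fork",
};

static int __init jexample_init(void)
{
        return register_jprobe(&example_jp);
}

static void __exit jexample_exit(void)
{
        unregister_jprobe(&example_jp);
}
#endif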
#ifdef CONFIG_KRETPROBES
/*
 * This kprobe pre_handler is registered with every kretprobe. When a probe
 * hits, it will set up the return probe.
 */
static int __kprobes pre_handler_kretprobe(struct kprobe *p,
                                           struct pt_regs *regs)
{
        struct kretprobe *rp = container_of(p, struct kretprobe, kp);
        unsigned long hash, flags = 0;
        struct kretprobe_instance *ri;

        /* TODO: consider swapping the RA only after the last pre_handler has fired */
        hash = hash_ptr(current, KPROBE_HASH_BITS);
        spin_lock_irqsave(&rp->lock, flags);
        if (!hlist_empty(&rp->free_instances)) {
                ri = hlist_entry(rp->free_instances.first,
                                 struct kretprobe_instance, hlist);
                hlist_del(&ri->hlist);
                spin_unlock_irqrestore(&rp->lock, flags);

                ri->rp = rp;
                ri->task = current;

                if (rp->entry_handler && rp->entry_handler(ri, regs)) {
                        /* entry_handler aborted: put the instance back */
                        spin_lock_irqsave(&rp->lock, flags);
                        hlist_add_head(&ri->hlist, &rp->free_instances);
                        spin_unlock_irqrestore(&rp->lock, flags);
                        return 0;
                }

                arch_prepare_kretprobe(ri, regs);

                /* XXX(hch): why is there no hlist_move_head? */
                INIT_HLIST_NODE(&ri->hlist);
                kretprobe_table_lock(hash, &flags);
                hlist_add_head(&ri->hlist, &kretprobe_inst_table[hash]);
                kretprobe_table_unlock(hash, &flags);
        } else {
                rp->nmissed++;
                spin_unlock_irqrestore(&rp->lock, flags);
        }
        return 0;
}
int __kprobes register_kretprobe(struct kretprobe *rp)
{
        int ret = 0;
        struct kretprobe_instance *inst;
        int i;
        void *addr;

        if (kretprobe_blacklist_size) {
                addr = kprobe_addr(&rp->kp);
                if (!addr)
                        return -EINVAL;

                for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
                        if (kretprobe_blacklist[i].addr == addr)
                                return -EINVAL;
                }
        }

        rp->kp.pre_handler = pre_handler_kretprobe;
        rp->kp.post_handler = NULL;
        rp->kp.fault_handler = NULL;
        rp->kp.break_handler = NULL;

        /* Pre-allocate memory for max kretprobe instances */
        if (rp->maxactive <= 0) {
#ifdef CONFIG_PREEMPT
                rp->maxactive = max_t(unsigned int, 10, 2*num_possible_cpus());
#else
                rp->maxactive = num_possible_cpus();
#endif
        }
        spin_lock_init(&rp->lock);
        INIT_HLIST_HEAD(&rp->free_instances);
        for (i = 0; i < rp->maxactive; i++) {
                inst = kmalloc(sizeof(struct kretprobe_instance) +
                               rp->data_size, GFP_KERNEL);
                if (inst == NULL) {
                        free_rp_inst(rp);
                        return -ENOMEM;
                }
                INIT_HLIST_NODE(&inst->hlist);
                hlist_add_head(&inst->hlist, &rp->free_instances);
        }

        rp->nmissed = 0;
        /* Establish function entry probe point */
        ret = register_kprobe(&rp->kp);
        if (ret != 0)
                free_rp_inst(rp);
        return ret;
}
EXPORT_SYMBOL_GPL(register_kretprobe);
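
/*
 * Usage sketch for register_kretprobe(), after
 * samples/kprobes/kretprobe_example.c. The entry_handler runs at function
 * entry (and can stash per-instance data in ri->data); the handler runs at
 * return. (Illustrative and compiled out; "do_fork" and the availability
 * of regs_return_value() are assumptions.)
 */
#if 0
static int example_entry_handler(struct kretprobe_instance *ri,
                                 struct pt_regs *regs)
{
        return 0;       /* non-zero would skip this hit entirely */
}

static int example_ret_handler(struct kretprobe_instance *ri,
                               struct pt_regs *regs)
{
        printk(KERN_INFO "kretprobe: %p returned %lu\n",
               ri->rp->kp.addr, regs_return_value(regs));
        return 0;
}

static struct kretprobe example_rp = {
        .entry_handler = example_entry_handler,
        .handler = example_ret_handler,
        .kp.symbol_name = "do_fork",
        .maxactive = 20,        /* tolerate up to 20 concurrent activations */
};

static int __init rexample_init(void)
{
        return register_kretprobe(&example_rp);
}

static void __exit rexample_exit(void)
{
        unregister_kretprobe(&example_rp);
        /* nmissed counts activations dropped for lack of a free instance */
        printk(KERN_INFO "kretprobe: missed %d activations\n",
               example_rp.nmissed);
}
#endif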
int __kprobes register_kretprobes(struct kretprobe **rps, int num)
{
        int ret = 0, i;

        if (num <= 0)
                return -EINVAL;
        for (i = 0; i < num; i++) {
                ret = register_kretprobe(rps[i]);
                if (ret < 0) {
                        if (i > 0)
                                unregister_kretprobes(rps, i);
                        break;
                }
        }
        return ret;
}
EXPORT_SYMBOL_GPL(register_kretprobes);

void __kprobes unregister_kretprobe(struct kretprobe *rp)
{
        unregister_kretprobes(&rp, 1);
}
EXPORT_SYMBOL_GPL(unregister_kretprobe);

void __kprobes unregister_kretprobes(struct kretprobe **rps, int num)
{
        int i;

        if (num <= 0)
                return;
        mutex_lock(&kprobe_mutex);
        for (i = 0; i < num; i++)
                if (__unregister_kprobe_top(&rps[i]->kp) < 0)
                        rps[i]->kp.addr = NULL;
        mutex_unlock(&kprobe_mutex);

        synchronize_sched();
        for (i = 0; i < num; i++) {
                if (rps[i]->kp.addr) {
                        __unregister_kprobe_bottom(&rps[i]->kp);
                        cleanup_rp_inst(rps[i]);
                }
        }
}
EXPORT_SYMBOL_GPL(unregister_kretprobes);

#else /* CONFIG_KRETPROBES */
int __kprobes register_kretprobe(struct kretprobe *rp)
{
        return -ENOSYS;
}
EXPORT_SYMBOL_GPL(register_kretprobe);

int __kprobes register_kretprobes(struct kretprobe **rps, int num)
{
        return -ENOSYS;
}
EXPORT_SYMBOL_GPL(register_kretprobes);

void __kprobes unregister_kretprobe(struct kretprobe *rp)
{
}
EXPORT_SYMBOL_GPL(unregister_kretprobe);

void __kprobes unregister_kretprobes(struct kretprobe **rps, int num)
{
}
EXPORT_SYMBOL_GPL(unregister_kretprobes);

static int __kprobes pre_handler_kretprobe(struct kprobe *p,
                                           struct pt_regs *regs)
{
        return 0;
}
#endif /* CONFIG_KRETPROBES */
/* Set the kprobe gone and remove its instruction buffer. */
static void __kprobes kill_kprobe(struct kprobe *p)
{
        struct kprobe *kp;

        p->flags |= KPROBE_FLAG_GONE;
        if (p->pre_handler == aggr_pre_handler) {
                /*
                 * If this is an aggr_kprobe, we have to list all the
                 * chained probes and mark them GONE.
                 */
                list_for_each_entry_rcu(kp, &p->list, list)
                        kp->flags |= KPROBE_FLAG_GONE;
                p->post_handler = NULL;
                p->break_handler = NULL;
        }
        /*
         * Here, we can remove insn_slot safely, because no thread calls
         * the original probed function (which will be freed soon) any more.
         */
        arch_remove_kprobe(p);
}

void __kprobes dump_kprobe(struct kprobe *kp)
{
        printk(KERN_WARNING "Dumping kprobe:\n");
        printk(KERN_WARNING "Name: %s\nAddress: %p\nOffset: %x\n",
               kp->symbol_name, kp->addr, kp->offset);
}
/* Module notifier callback, checking kprobes on the module */
static int __kprobes kprobes_module_callback(struct notifier_block *nb,
                                             unsigned long val, void *data)
{
        struct module *mod = data;
        struct hlist_head *head;
        struct hlist_node *node;
        struct kprobe *p;
        unsigned int i;
        int checkcore = (val == MODULE_STATE_GOING);

        if (val != MODULE_STATE_GOING && val != MODULE_STATE_LIVE)
                return NOTIFY_DONE;

        /*
         * When MODULE_STATE_GOING is notified, both the module's .text and
         * .init.text sections will be freed. When MODULE_STATE_LIVE is
         * notified, only the .init.text section will be freed. We need to
         * disable any kprobes which have been inserted in those sections.
         */
        mutex_lock(&kprobe_mutex);
        for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
                head = &kprobe_table[i];
                hlist_for_each_entry_rcu(p, node, head, hlist)
                        if (within_module_init((unsigned long)p->addr, mod) ||
                            (checkcore &&
                             within_module_core((unsigned long)p->addr, mod))) {
                                /*
                                 * The vaddr at which this probe is installed
                                 * will soon be vfreed, but not synced to disk.
                                 * Hence, disarming the breakpoint isn't needed.
                                 */
                                kill_kprobe(p);
                        }
        }
        mutex_unlock(&kprobe_mutex);
        return NOTIFY_DONE;
}

static struct notifier_block kprobe_module_nb = {
        .notifier_call = kprobes_module_callback,
        .priority = 0
};
static int __init init_kprobes(void)
{
        int i, err = 0;
        unsigned long offset = 0, size = 0;
        char *modname, namebuf[128];
        const char *symbol_name;
        void *addr;
        struct kprobe_blackpoint *kb;

        /* FIXME allocate the probe table, currently defined statically */
        /* initialize all list heads */
        for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
                INIT_HLIST_HEAD(&kprobe_table[i]);
                INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
                spin_lock_init(&(kretprobe_table_locks[i].lock));
        }

        /*
         * Lookup and populate the kprobe_blacklist.
         *
         * Unlike the kretprobe blacklist, we'll need to determine
         * the range of addresses that belong to the said functions,
         * since a kprobe need not necessarily be at the beginning
         * of a function.
         */
        for (kb = kprobe_blacklist; kb->name != NULL; kb++) {
                kprobe_lookup_name(kb->name, addr);
                if (!addr)
                        continue;

                kb->start_addr = (unsigned long)addr;
                symbol_name = kallsyms_lookup(kb->start_addr,
                                              &size, &offset, &modname, namebuf);
                if (!symbol_name)
                        kb->range = 0;
                else
                        kb->range = size;
        }

        if (kretprobe_blacklist_size) {
                /* lookup the function address from its name */
                for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
                        kprobe_lookup_name(kretprobe_blacklist[i].name,
                                           kretprobe_blacklist[i].addr);
                        if (!kretprobe_blacklist[i].addr)
                                printk("kretprobe: lookup failed: %s\n",
                                       kretprobe_blacklist[i].name);
                }
        }

        /* By default, kprobes are armed */
        kprobes_all_disarmed = false;

        err = arch_init_kprobes();
        if (!err)
                err = register_die_notifier(&kprobe_exceptions_nb);
        if (!err)
                err = register_module_notifier(&kprobe_module_nb);

        kprobes_initialized = (err == 0);

        if (!err)
                init_test_probes();
        return err;
}
#ifdef CONFIG_DEBUG_FS
static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p,
                                   const char *sym, int offset, char *modname)
{
        char *kprobe_type;

        if (p->pre_handler == pre_handler_kretprobe)
                kprobe_type = "r";
        else if (p->pre_handler == setjmp_pre_handler)
                kprobe_type = "j";
        else
                kprobe_type = "k";

        if (sym)
                seq_printf(pi, "%p %s %s+0x%x %s %s%s\n",
                           p->addr, kprobe_type, sym, offset,
                           (modname ? modname : " "),
                           (kprobe_gone(p) ? "[GONE]" : ""),
                           ((kprobe_disabled(p) && !kprobe_gone(p)) ?
                            "[DISABLED]" : ""));
        else
                seq_printf(pi, "%p %s %p %s%s\n",
                           p->addr, kprobe_type, p->addr,
                           (kprobe_gone(p) ? "[GONE]" : ""),
                           ((kprobe_disabled(p) && !kprobe_gone(p)) ?
                            "[DISABLED]" : ""));
}

static void __kprobes *kprobe_seq_start(struct seq_file *f, loff_t *pos)
{
        return (*pos < KPROBE_TABLE_SIZE) ? pos : NULL;
}

static void __kprobes *kprobe_seq_next(struct seq_file *f, void *v, loff_t *pos)
{
        (*pos)++;
        if (*pos >= KPROBE_TABLE_SIZE)
                return NULL;
        return pos;
}

static void __kprobes kprobe_seq_stop(struct seq_file *f, void *v)
{
        /* Nothing to do */
}

static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
{
        struct hlist_head *head;
        struct hlist_node *node;
        struct kprobe *p, *kp;
        const char *sym = NULL;
        unsigned int i = *(loff_t *) v;
        unsigned long offset = 0;
        char *modname, namebuf[128];

        head = &kprobe_table[i];
        preempt_disable();
        hlist_for_each_entry_rcu(p, node, head, hlist) {
                sym = kallsyms_lookup((unsigned long)p->addr, NULL,
                                      &offset, &modname, namebuf);
                if (p->pre_handler == aggr_pre_handler) {
                        list_for_each_entry_rcu(kp, &p->list, list)
                                report_probe(pi, kp, sym, offset, modname);
                } else
                        report_probe(pi, p, sym, offset, modname);
        }
        preempt_enable();
        return 0;
}

static const struct seq_operations kprobes_seq_ops = {
        .start = kprobe_seq_start,
        .next  = kprobe_seq_next,
        .stop  = kprobe_seq_stop,
        .show  = show_kprobe_addr
};

static int __kprobes kprobes_open(struct inode *inode, struct file *filp)
{
        return seq_open(filp, &kprobes_seq_ops);
}

static const struct file_operations debugfs_kprobes_operations = {
        .open    = kprobes_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = seq_release,
};
/* Disable one kprobe */
int __kprobes disable_kprobe(struct kprobe *kp)
{
        int ret = 0;
        struct kprobe *p;

        mutex_lock(&kprobe_mutex);

        /* Check whether the specified probe is valid. */
        p = __get_valid_kprobe(kp);
        if (unlikely(p == NULL)) {
                ret = -EINVAL;
                goto out;
        }

        /* If the probe is already disabled (or gone), just return */
        if (kprobe_disabled(kp))
                goto out;

        kp->flags |= KPROBE_FLAG_DISABLED;
        if (p != kp)
                /* When kp != p, p is always enabled. */
                try_to_disable_aggr_kprobe(p);

        if (!kprobes_all_disarmed && kprobe_disabled(p))
                disarm_kprobe(p);
out:
        mutex_unlock(&kprobe_mutex);
        return ret;
}
EXPORT_SYMBOL_GPL(disable_kprobe);

/* Enable one kprobe */
int __kprobes enable_kprobe(struct kprobe *kp)
{
        int ret = 0;
        struct kprobe *p;

        mutex_lock(&kprobe_mutex);

        /* Check whether the specified probe is valid. */
        p = __get_valid_kprobe(kp);
        if (unlikely(p == NULL)) {
                ret = -EINVAL;
                goto out;
        }

        if (kprobe_gone(kp)) {
                /* This kprobe has gone; we can't enable it. */
                ret = -EINVAL;
                goto out;
        }

        if (!kprobes_all_disarmed && kprobe_disabled(p))
                arm_kprobe(p);

        p->flags &= ~KPROBE_FLAG_DISABLED;
        if (p != kp)
                kp->flags &= ~KPROBE_FLAG_DISABLED;
out:
        mutex_unlock(&kprobe_mutex);
        return ret;
}
EXPORT_SYMBOL_GPL(enable_kprobe);
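
/*
 * Usage sketch: a registered probe can be parked and resumed with the pair
 * above, avoiding a full unregister/register cycle. (Illustrative and
 * compiled out; the kprobe passed in stands for any probe already
 * registered via register_kprobe().)
 */
#if 0
static void pause_probe(struct kprobe *kp)
{
        disable_kprobe(kp);     /* breakpoint removed, probe object kept */
}

static void resume_probe(struct kprobe *kp)
{
        enable_kprobe(kp);      /* breakpoint re-armed */
}
#endif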
static void __kprobes arm_all_kprobes(void)
{
        struct hlist_head *head;
        struct hlist_node *node;
        struct kprobe *p;
        unsigned int i;

        mutex_lock(&kprobe_mutex);

        /* If kprobes are armed, just return */
        if (!kprobes_all_disarmed)
                goto already_enabled;

        mutex_lock(&text_mutex);
        for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
                head = &kprobe_table[i];
                hlist_for_each_entry_rcu(p, node, head, hlist)
                        if (!kprobe_disabled(p))
                                arch_arm_kprobe(p);
        }
        mutex_unlock(&text_mutex);

        kprobes_all_disarmed = false;
        printk(KERN_INFO "Kprobes globally enabled\n");

already_enabled:
        mutex_unlock(&kprobe_mutex);
        return;
}

static void __kprobes disarm_all_kprobes(void)
{
        struct hlist_head *head;
        struct hlist_node *node;
        struct kprobe *p;
        unsigned int i;

        mutex_lock(&kprobe_mutex);

        /* If kprobes are already disarmed, just return */
        if (kprobes_all_disarmed)
                goto already_disabled;

        kprobes_all_disarmed = true;
        printk(KERN_INFO "Kprobes globally disabled\n");
        mutex_lock(&text_mutex);
        for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
                head = &kprobe_table[i];
                hlist_for_each_entry_rcu(p, node, head, hlist) {
                        if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p))
                                arch_disarm_kprobe(p);
                }
        }
        mutex_unlock(&text_mutex);
        mutex_unlock(&kprobe_mutex);

        /* Allow all currently running kprobes to complete */
        synchronize_sched();
        return;

already_disabled:
        mutex_unlock(&kprobe_mutex);
        return;
}
/*
 * XXX: The debugfs bool file interface doesn't allow for callbacks
 * when the bool state is switched. We can reuse that facility when
 * available.
 */
static ssize_t read_enabled_file_bool(struct file *file,
                                      char __user *user_buf, size_t count, loff_t *ppos)
{
        char buf[3];

        if (!kprobes_all_disarmed)
                buf[0] = '1';
        else
                buf[0] = '0';
        buf[1] = '\n';
        buf[2] = 0x00;
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t write_enabled_file_bool(struct file *file,
                                       const char __user *user_buf, size_t count, loff_t *ppos)
{
        char buf[32];
        int buf_size;

        buf_size = min(count, (sizeof(buf)-1));
        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        switch (buf[0]) {
        case 'y':
        case 'Y':
        case '1':
                arm_all_kprobes();
                break;
        case 'n':
        case 'N':
        case '0':
                disarm_all_kprobes();
                break;
        }

        return count;
}

static const struct file_operations fops_kp = {
        .read  = read_enabled_file_bool,
        .write = write_enabled_file_bool,
};
static int __kprobes debugfs_kprobe_init(void)
{
        struct dentry *dir, *file;
        unsigned int value = 1;

        dir = debugfs_create_dir("kprobes", NULL);
        if (!dir)
                return -ENOMEM;

        file = debugfs_create_file("list", 0444, dir, NULL,
                                   &debugfs_kprobes_operations);
        if (!file) {
                debugfs_remove(dir);
                return -ENOMEM;
        }

        file = debugfs_create_file("enabled", 0600, dir,
                                   &value, &fops_kp);
        if (!file) {
                debugfs_remove(dir);
                return -ENOMEM;
        }

        return 0;
}

late_initcall(debugfs_kprobe_init);
#endif /* CONFIG_DEBUG_FS */

module_init(init_kprobes);

/* defined in arch/.../kernel/kprobes.c */
EXPORT_SYMBOL_GPL(jprobe_return);