kprobes.c

/*
 *  Kernel Probes (KProbes)
 *  kernel/kprobes.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes suggestions from
 *		Rusty Russell).
 * 2004-Aug	Updated by Prasanna S Panchamukhi <prasanna@in.ibm.com> with
 *		hlists and exceptions notifier as suggested by Andi Kleen.
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2004-Sep	Prasanna S Panchamukhi <prasanna@in.ibm.com> Changed Kprobes
 *		exceptions notifier to be first on the priority list.
 * 2005-May	Hien Nguyen <hien@us.ibm.com>, Jim Keniston
 *		<jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> added function-return probes.
 */
#include <linux/kprobes.h>
#include <linux/hash.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/module.h>
#include <linux/moduleloader.h>
#include <linux/kallsyms.h>
#include <linux/freezer.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kdebug.h>
#include <linux/memory.h>

#include <asm-generic/sections.h>
#include <asm/cacheflush.h>
#include <asm/errno.h>
#include <asm/uaccess.h>

#define KPROBE_HASH_BITS 6
#define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)

/*
 * Some oddball architectures like 64bit powerpc have function descriptors,
 * so this must be overridable.
 */
#ifndef kprobe_lookup_name
#define kprobe_lookup_name(name, addr) \
	addr = ((kprobe_opcode_t *)(kallsyms_lookup_name(name)))
#endif

static int kprobes_initialized;
static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];

/* NOTE: change this value only with kprobe_mutex held */
static bool kprobes_all_disarmed;

static DEFINE_MUTEX(kprobe_mutex);	/* Protects kprobe_table */
static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
static struct {
	spinlock_t lock ____cacheline_aligned_in_smp;
} kretprobe_table_locks[KPROBE_TABLE_SIZE];

static spinlock_t *kretprobe_table_lock_ptr(unsigned long hash)
{
	return &(kretprobe_table_locks[hash].lock);
}

/*
 * Normally, functions that we'd want to prohibit kprobes in are marked
 * __kprobes. But there are cases where such functions already belong to
 * a different section (__sched for preempt_schedule).
 *
 * For such cases, we now have a blacklist.
 */
static struct kprobe_blackpoint kprobe_blacklist[] = {
	{"preempt_schedule",},
	{"native_get_debugreg",},
	{"irq_entries_start",},
	{"common_interrupt",},
	{NULL}    /* Terminator */
};

#ifdef __ARCH_WANT_KPROBES_INSN_SLOT
/*
 * kprobe->ainsn.insn points to the copy of the instruction to be
 * single-stepped. x86_64, POWER4 and above have no-exec support, and
 * stepping on the instruction on a vmalloced/kmalloced/data page
 * is a recipe for disaster.
 */
#define INSNS_PER_PAGE	(PAGE_SIZE/(MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))

struct kprobe_insn_page {
	struct list_head list;
	kprobe_opcode_t *insns;		/* Page of instruction slots */
	char slot_used[INSNS_PER_PAGE];
	int nused;
	int ngarbage;
};

enum kprobe_slot_state {
	SLOT_CLEAN = 0,
	SLOT_DIRTY = 1,
	SLOT_USED = 2,
};

static DEFINE_MUTEX(kprobe_insn_mutex);	/* Protects kprobe_insn_pages */
static LIST_HEAD(kprobe_insn_pages);
static int kprobe_garbage_slots;
static int collect_garbage_slots(void);

/**
 * __get_insn_slot() - Find a slot on an executable page for an instruction.
 * We allocate an executable page if there's no room on existing ones.
 */
static kprobe_opcode_t __kprobes *__get_insn_slot(void)
{
	struct kprobe_insn_page *kip;

 retry:
	list_for_each_entry(kip, &kprobe_insn_pages, list) {
		if (kip->nused < INSNS_PER_PAGE) {
			int i;
			for (i = 0; i < INSNS_PER_PAGE; i++) {
				if (kip->slot_used[i] == SLOT_CLEAN) {
					kip->slot_used[i] = SLOT_USED;
					kip->nused++;
					return kip->insns + (i * MAX_INSN_SIZE);
				}
			}
			/* Surprise!  No unused slots.  Fix kip->nused. */
			kip->nused = INSNS_PER_PAGE;
		}
	}

	/* If there are any garbage slots, collect them and try again. */
	if (kprobe_garbage_slots && collect_garbage_slots() == 0)
		goto retry;

	/* All out of space.  Need to allocate a new page.  Use slot 0. */
	kip = kmalloc(sizeof(struct kprobe_insn_page), GFP_KERNEL);
	if (!kip)
		return NULL;

	/*
	 * Use module_alloc so this page is within +/- 2GB of where the
	 * kernel image and loaded module images reside. This is required
	 * so x86_64 can correctly handle the %rip-relative fixups.
	 */
	kip->insns = module_alloc(PAGE_SIZE);
	if (!kip->insns) {
		kfree(kip);
		return NULL;
	}
	INIT_LIST_HEAD(&kip->list);
	list_add(&kip->list, &kprobe_insn_pages);
	memset(kip->slot_used, SLOT_CLEAN, INSNS_PER_PAGE);
	kip->slot_used[0] = SLOT_USED;
	kip->nused = 1;
	kip->ngarbage = 0;
	return kip->insns;
}

kprobe_opcode_t __kprobes *get_insn_slot(void)
{
	kprobe_opcode_t *ret;

	mutex_lock(&kprobe_insn_mutex);
	ret = __get_insn_slot();
	mutex_unlock(&kprobe_insn_mutex);
	return ret;
}

/* Return 1 if the page containing the slot is now fully unused, otherwise 0. */
static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
{
	kip->slot_used[idx] = SLOT_CLEAN;
	kip->nused--;
	if (kip->nused == 0) {
		/*
		 * Page is no longer in use.  Free it unless
		 * it's the last one.  We keep the last one
		 * so as not to have to set it up again the
		 * next time somebody inserts a probe.
		 */
		if (!list_is_singular(&kprobe_insn_pages)) {
			list_del(&kip->list);
			module_free(NULL, kip->insns);
			kfree(kip);
		}
		return 1;
	}
	return 0;
}

static int __kprobes collect_garbage_slots(void)
{
	struct kprobe_insn_page *kip, *next;

	/* Ensure no task is still single-stepping on a garbage slot */
	synchronize_sched();

	list_for_each_entry_safe(kip, next, &kprobe_insn_pages, list) {
		int i;
		if (kip->ngarbage == 0)
			continue;
		kip->ngarbage = 0;	/* we will collect all garbage slots */
		for (i = 0; i < INSNS_PER_PAGE; i++) {
			if (kip->slot_used[i] == SLOT_DIRTY &&
			    collect_one_slot(kip, i))
				break;
		}
	}
	kprobe_garbage_slots = 0;
	return 0;
}

void __kprobes free_insn_slot(kprobe_opcode_t *slot, int dirty)
{
	struct kprobe_insn_page *kip;

	mutex_lock(&kprobe_insn_mutex);
	list_for_each_entry(kip, &kprobe_insn_pages, list) {
		if (kip->insns <= slot &&
		    slot < kip->insns + (INSNS_PER_PAGE * MAX_INSN_SIZE)) {
			int i = (slot - kip->insns) / MAX_INSN_SIZE;
			if (dirty) {
				kip->slot_used[i] = SLOT_DIRTY;
				kip->ngarbage++;
			} else
				collect_one_slot(kip, i);
			break;
		}
	}

	if (dirty && ++kprobe_garbage_slots > INSNS_PER_PAGE)
		collect_garbage_slots();

	mutex_unlock(&kprobe_insn_mutex);
}
#endif
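
/*
 * Usage sketch (illustrative, not part of this file): arch code typically
 * brackets the lifetime of a probe's single-step copy with get_insn_slot()
 * and free_insn_slot(). The function bodies below loosely follow the x86
 * arch_prepare_kprobe()/arch_remove_kprobe() pattern; everything except the
 * two slot helpers is an assumption here, not a definition from this file.
 *
 *	int __kprobes arch_prepare_kprobe(struct kprobe *p)
 *	{
 *		p->ainsn.insn = get_insn_slot();
 *		if (!p->ainsn.insn)
 *			return -ENOMEM;
 *		memcpy(p->ainsn.insn, p->addr,
 *		       MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
 *		p->opcode = *p->addr;
 *		return 0;
 *	}
 *
 *	void __kprobes arch_remove_kprobe(struct kprobe *p)
 *	{
 *		if (p->ainsn.insn)
 *			free_insn_slot(p->ainsn.insn, 0);
 *	}
 */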
/* We have preemption disabled, so it is safe to use __ versions */
static inline void set_kprobe_instance(struct kprobe *kp)
{
	__get_cpu_var(kprobe_instance) = kp;
}

static inline void reset_kprobe_instance(void)
{
	__get_cpu_var(kprobe_instance) = NULL;
}

/*
 * This routine is called either:
 *	- under the kprobe_mutex - during kprobe_[un]register()
 *				OR
 *	- with preemption disabled - from arch/xxx/kernel/kprobes.c
 */
struct kprobe __kprobes *get_kprobe(void *addr)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;

	head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
	hlist_for_each_entry_rcu(p, node, head, hlist) {
		if (p->addr == addr)
			return p;
	}
	return NULL;
}

/* Arm a kprobe under text_mutex */
static void __kprobes arm_kprobe(struct kprobe *kp)
{
	mutex_lock(&text_mutex);
	arch_arm_kprobe(kp);
	mutex_unlock(&text_mutex);
}

/* Disarm a kprobe under text_mutex */
static void __kprobes disarm_kprobe(struct kprobe *kp)
{
	mutex_lock(&text_mutex);
	arch_disarm_kprobe(kp);
	mutex_unlock(&text_mutex);
}

/*
 * Aggregate handlers for multiple kprobes support - these handlers
 * take care of invoking the individual kprobe handlers on p->list
 */
static int __kprobes aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
			set_kprobe_instance(kp);
			if (kp->pre_handler(kp, regs))
				return 1;
		}
		reset_kprobe_instance();
	}
	return 0;
}

static void __kprobes aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
					unsigned long flags)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->post_handler && likely(!kprobe_disabled(kp))) {
			set_kprobe_instance(kp);
			kp->post_handler(kp, regs, flags);
			reset_kprobe_instance();
		}
	}
}

static int __kprobes aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
					int trapnr)
{
	struct kprobe *cur = __get_cpu_var(kprobe_instance);

	/*
	 * If we faulted "during" the execution of a user-specified
	 * probe handler, invoke just that probe's fault handler.
	 */
	if (cur && cur->fault_handler) {
		if (cur->fault_handler(cur, regs, trapnr))
			return 1;
	}
	return 0;
}

static int __kprobes aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *cur = __get_cpu_var(kprobe_instance);
	int ret = 0;

	if (cur && cur->break_handler) {
		if (cur->break_handler(cur, regs))
			ret = 1;
	}
	reset_kprobe_instance();
	return ret;
}

/* Walks the list and increments the nmissed count for the multiprobe case */
void __kprobes kprobes_inc_nmissed_count(struct kprobe *p)
{
	struct kprobe *kp;

	if (p->pre_handler != aggr_pre_handler) {
		p->nmissed++;
	} else {
		list_for_each_entry_rcu(kp, &p->list, list)
			kp->nmissed++;
	}
	return;
}

void __kprobes recycle_rp_inst(struct kretprobe_instance *ri,
				struct hlist_head *head)
{
	struct kretprobe *rp = ri->rp;

	/* remove rp inst off the kretprobe_inst_table */
	hlist_del(&ri->hlist);
	INIT_HLIST_NODE(&ri->hlist);
	if (likely(rp)) {
		spin_lock(&rp->lock);
		hlist_add_head(&ri->hlist, &rp->free_instances);
		spin_unlock(&rp->lock);
	} else
		/* Unregistering */
		hlist_add_head(&ri->hlist, head);
}

void __kprobes kretprobe_hash_lock(struct task_struct *tsk,
			 struct hlist_head **head, unsigned long *flags)
{
	unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
	spinlock_t *hlist_lock;

	*head = &kretprobe_inst_table[hash];
	hlist_lock = kretprobe_table_lock_ptr(hash);
	spin_lock_irqsave(hlist_lock, *flags);
}

static void __kprobes kretprobe_table_lock(unsigned long hash,
					   unsigned long *flags)
{
	spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
	spin_lock_irqsave(hlist_lock, *flags);
}

void __kprobes kretprobe_hash_unlock(struct task_struct *tsk,
				     unsigned long *flags)
{
	unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
	spinlock_t *hlist_lock;

	hlist_lock = kretprobe_table_lock_ptr(hash);
	spin_unlock_irqrestore(hlist_lock, *flags);
}

void __kprobes kretprobe_table_unlock(unsigned long hash, unsigned long *flags)
{
	spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
	spin_unlock_irqrestore(hlist_lock, *flags);
}

/*
 * This function is called from finish_task_switch when task tk becomes dead,
 * so that we can recycle any function-return probe instances associated
 * with this task. These left over instances represent probed functions
 * that have been called but will never return.
 */
void __kprobes kprobe_flush_task(struct task_struct *tk)
{
	struct kretprobe_instance *ri;
	struct hlist_head *head, empty_rp;
	struct hlist_node *node, *tmp;
	unsigned long hash, flags = 0;

	if (unlikely(!kprobes_initialized))
		/* Early boot.  kretprobe_table_locks not yet initialized. */
		return;

	/* empty_rp must be initialized before recycle_rp_inst() adds to it */
	INIT_HLIST_HEAD(&empty_rp);
	hash = hash_ptr(tk, KPROBE_HASH_BITS);
	head = &kretprobe_inst_table[hash];
	kretprobe_table_lock(hash, &flags);
	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
		if (ri->task == tk)
			recycle_rp_inst(ri, &empty_rp);
	}
	kretprobe_table_unlock(hash, &flags);
	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
}

static inline void free_rp_inst(struct kretprobe *rp)
{
	struct kretprobe_instance *ri;
	struct hlist_node *pos, *next;

	hlist_for_each_entry_safe(ri, pos, next, &rp->free_instances, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
}

static void __kprobes cleanup_rp_inst(struct kretprobe *rp)
{
	unsigned long flags, hash;
	struct kretprobe_instance *ri;
	struct hlist_node *pos, *next;
	struct hlist_head *head;

	/* No race here */
	for (hash = 0; hash < KPROBE_TABLE_SIZE; hash++) {
		kretprobe_table_lock(hash, &flags);
		head = &kretprobe_inst_table[hash];
		hlist_for_each_entry_safe(ri, pos, next, head, hlist) {
			if (ri->rp == rp)
				ri->rp = NULL;
		}
		kretprobe_table_unlock(hash, &flags);
	}
	free_rp_inst(rp);
}

/*
 * Keep all fields in the kprobe consistent
 */
static inline void copy_kprobe(struct kprobe *old_p, struct kprobe *p)
{
	memcpy(&p->opcode, &old_p->opcode, sizeof(kprobe_opcode_t));
	memcpy(&p->ainsn, &old_p->ainsn, sizeof(struct arch_specific_insn));
}

/*
 * Add the new probe to ap->list. Fail if this is the
 * second jprobe at the address - two jprobes can't coexist.
 */
static int __kprobes add_new_kprobe(struct kprobe *ap, struct kprobe *p)
{
	BUG_ON(kprobe_gone(ap) || kprobe_gone(p));

	if (p->break_handler) {
		if (ap->break_handler)
			return -EEXIST;
		list_add_tail_rcu(&p->list, &ap->list);
		ap->break_handler = aggr_break_handler;
	} else
		list_add_rcu(&p->list, &ap->list);
	if (p->post_handler && !ap->post_handler)
		ap->post_handler = aggr_post_handler;

	if (kprobe_disabled(ap) && !kprobe_disabled(p)) {
		ap->flags &= ~KPROBE_FLAG_DISABLED;
		if (!kprobes_all_disarmed)
			/* Arm the breakpoint again. */
			arm_kprobe(ap);
	}
	return 0;
}

/*
 * Fill in the required fields of the "manager kprobe". Replace the
 * earlier kprobe in the hlist with the manager kprobe.
 */
static inline void add_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
{
	copy_kprobe(p, ap);
	flush_insn_slot(ap);
	ap->addr = p->addr;
	ap->flags = p->flags;
	ap->pre_handler = aggr_pre_handler;
	ap->fault_handler = aggr_fault_handler;
	/* We don't care about a kprobe which has gone. */
	if (p->post_handler && !kprobe_gone(p))
		ap->post_handler = aggr_post_handler;
	if (p->break_handler && !kprobe_gone(p))
		ap->break_handler = aggr_break_handler;

	INIT_LIST_HEAD(&ap->list);
	list_add_rcu(&p->list, &ap->list);

	hlist_replace_rcu(&p->hlist, &ap->hlist);
}

/*
 * This is the second or subsequent kprobe at the address - handle
 * the intricacies.
 */
static int __kprobes register_aggr_kprobe(struct kprobe *old_p,
					  struct kprobe *p)
{
	int ret = 0;
	struct kprobe *ap = old_p;

	if (old_p->pre_handler != aggr_pre_handler) {
		/* If old_p is not an aggr_kprobe, create a new aggr_kprobe. */
		ap = kzalloc(sizeof(struct kprobe), GFP_KERNEL);
		if (!ap)
			return -ENOMEM;
		add_aggr_kprobe(ap, old_p);
	}

	if (kprobe_gone(ap)) {
		/*
		 * Attempting to insert a new probe at the same location
		 * that had a probe in the module vaddr area which has
		 * already been freed. So, the instruction slot has already
		 * been released. We need a new slot for the new probe.
		 */
		ret = arch_prepare_kprobe(ap);
		if (ret)
			/*
			 * Even if we fail to allocate a new slot, we don't
			 * need to free the aggr_kprobe. It will be used
			 * next time, or freed by unregister_kprobe.
			 */
			return ret;

		/*
		 * Clear the gone flag to prevent allocating a new slot again,
		 * and set the disabled flag because it is not armed yet.
		 */
		ap->flags = (ap->flags & ~KPROBE_FLAG_GONE)
			    | KPROBE_FLAG_DISABLED;
	}

	copy_kprobe(ap, p);
	return add_new_kprobe(ap, p);
}

/* Try to disable an aggr_kprobe, and return 1 on success. */
static int __kprobes try_to_disable_aggr_kprobe(struct kprobe *p)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (!kprobe_disabled(kp))
			/*
			 * There is an active probe on the list.
			 * We can't disable this aggr_kprobe.
			 */
			return 0;
	}
	p->flags |= KPROBE_FLAG_DISABLED;
	return 1;
}

static int __kprobes in_kprobes_functions(unsigned long addr)
{
	struct kprobe_blackpoint *kb;

	if (addr >= (unsigned long)__kprobes_text_start &&
	    addr < (unsigned long)__kprobes_text_end)
		return -EINVAL;
	/*
	 * If there exists a kprobe_blacklist, verify and
	 * fail any probe registration in the prohibited area.
	 */
	for (kb = kprobe_blacklist; kb->name != NULL; kb++) {
		if (kb->start_addr) {
			if (addr >= kb->start_addr &&
			    addr < (kb->start_addr + kb->range))
				return -EINVAL;
		}
	}
	return 0;
}

/*
 * If we have a symbol_name argument, look it up and add the offset field
 * to it. This way, we can specify a relative address to a symbol.
 */
static kprobe_opcode_t __kprobes *kprobe_addr(struct kprobe *p)
{
	kprobe_opcode_t *addr = p->addr;

	if (p->symbol_name) {
		if (addr)
			return NULL;
		kprobe_lookup_name(p->symbol_name, addr);
	}

	if (!addr)
		return NULL;
	return (kprobe_opcode_t *)(((char *)addr) + p->offset);
}
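
/*
 * For illustration (a sketch, not part of this file): a caller specifies a
 * probe point either by raw address or by symbol plus offset. The symbol
 * name below is an arbitrary assumption.
 *
 *	static struct kprobe kp = {
 *		.symbol_name	= "do_sys_open",
 *		.offset		= 0x10,		// resolved address + 0x10
 *	};
 *
 * Setting both .addr and .symbol_name is rejected above: kprobe_addr()
 * returns NULL, so register_kprobe() fails with -EINVAL.
 */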
/* Check that the passed kprobe is valid and return the kprobe in kprobe_table. */
static struct kprobe * __kprobes __get_valid_kprobe(struct kprobe *p)
{
	struct kprobe *old_p, *list_p;

	old_p = get_kprobe(p->addr);
	if (unlikely(!old_p))
		return NULL;

	if (p != old_p) {
		list_for_each_entry_rcu(list_p, &old_p->list, list)
			if (list_p == p)
			/* kprobe p is a valid probe */
				goto valid;
		return NULL;
	}
valid:
	return old_p;
}

/* Return an error if the kprobe is being re-registered */
static inline int check_kprobe_rereg(struct kprobe *p)
{
	int ret = 0;
	struct kprobe *old_p;

	mutex_lock(&kprobe_mutex);
	old_p = __get_valid_kprobe(p);
	if (old_p)
		ret = -EINVAL;
	mutex_unlock(&kprobe_mutex);
	return ret;
}

int __kprobes register_kprobe(struct kprobe *p)
{
	int ret = 0;
	struct kprobe *old_p;
	struct module *probed_mod;
	kprobe_opcode_t *addr;

	addr = kprobe_addr(p);
	if (!addr)
		return -EINVAL;
	p->addr = addr;

	ret = check_kprobe_rereg(p);
	if (ret)
		return ret;

	preempt_disable();
	if (!kernel_text_address((unsigned long) p->addr) ||
	    in_kprobes_functions((unsigned long) p->addr)) {
		preempt_enable();
		return -EINVAL;
	}

	/* User can pass only KPROBE_FLAG_DISABLED to register_kprobe */
	p->flags &= KPROBE_FLAG_DISABLED;

	/*
	 * Check if we are probing a module.
	 */
	probed_mod = __module_text_address((unsigned long) p->addr);
	if (probed_mod) {
		/*
		 * We must hold a refcount of the probed module while updating
		 * its code to prohibit unexpected unloading.
		 */
		if (unlikely(!try_module_get(probed_mod))) {
			preempt_enable();
			return -EINVAL;
		}
		/*
		 * If the module has freed .init.text, we can't insert
		 * kprobes in there.
		 */
		if (within_module_init((unsigned long)p->addr, probed_mod) &&
		    probed_mod->state != MODULE_STATE_COMING) {
			module_put(probed_mod);
			preempt_enable();
			return -EINVAL;
		}
	}
	preempt_enable();

	p->nmissed = 0;
	INIT_LIST_HEAD(&p->list);
	mutex_lock(&kprobe_mutex);
	old_p = get_kprobe(p->addr);
	if (old_p) {
		ret = register_aggr_kprobe(old_p, p);
		goto out;
	}

	mutex_lock(&text_mutex);
	ret = arch_prepare_kprobe(p);
	if (ret)
		goto out_unlock_text;

	INIT_HLIST_NODE(&p->hlist);
	hlist_add_head_rcu(&p->hlist,
		       &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);

	if (!kprobes_all_disarmed && !kprobe_disabled(p))
		arch_arm_kprobe(p);

out_unlock_text:
	mutex_unlock(&text_mutex);
out:
	mutex_unlock(&kprobe_mutex);

	if (probed_mod)
		module_put(probed_mod);

	return ret;
}
EXPORT_SYMBOL_GPL(register_kprobe);
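
/*
 * Usage sketch (not part of this file): a minimal module that installs a
 * kprobe. The probed symbol and handler body are illustrative assumptions.
 *
 *	static int handler_pre(struct kprobe *p, struct pt_regs *regs)
 *	{
 *		printk(KERN_INFO "pre_handler: p->addr = %p\n", p->addr);
 *		return 0;	// 0 = let the probed instruction execute
 *	}
 *
 *	static struct kprobe kp = {
 *		.symbol_name	= "do_fork",
 *		.pre_handler	= handler_pre,
 *	};
 *
 *	static int __init probe_init(void)
 *	{
 *		return register_kprobe(&kp);
 *	}
 *
 *	static void __exit probe_exit(void)
 *	{
 *		unregister_kprobe(&kp);
 *	}
 *	module_init(probe_init);
 *	module_exit(probe_exit);
 *	MODULE_LICENSE("GPL");
 */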
/*
 * Unregister a kprobe without a scheduler synchronization.
 */
static int __kprobes __unregister_kprobe_top(struct kprobe *p)
{
	struct kprobe *old_p, *list_p;

	old_p = __get_valid_kprobe(p);
	if (old_p == NULL)
		return -EINVAL;

	if (old_p == p ||
	    (old_p->pre_handler == aggr_pre_handler &&
	     list_is_singular(&old_p->list))) {
		/*
		 * Only probe on the hash list. Disarm only if kprobes are
		 * enabled and not gone - otherwise, the breakpoint would
		 * already have been removed. We save on flushing icache.
		 */
		if (!kprobes_all_disarmed && !kprobe_disabled(old_p))
			disarm_kprobe(p);
		hlist_del_rcu(&old_p->hlist);
	} else {
		if (p->break_handler && !kprobe_gone(p))
			old_p->break_handler = NULL;
		if (p->post_handler && !kprobe_gone(p)) {
			list_for_each_entry_rcu(list_p, &old_p->list, list) {
				if ((list_p != p) && (list_p->post_handler))
					goto noclean;
			}
			old_p->post_handler = NULL;
		}
noclean:
		list_del_rcu(&p->list);
		if (!kprobe_disabled(old_p)) {
			try_to_disable_aggr_kprobe(old_p);
			if (!kprobes_all_disarmed && kprobe_disabled(old_p))
				disarm_kprobe(old_p);
		}
	}
	return 0;
}

static void __kprobes __unregister_kprobe_bottom(struct kprobe *p)
{
	struct kprobe *old_p;

	if (list_empty(&p->list))
		arch_remove_kprobe(p);
	else if (list_is_singular(&p->list)) {
		/* "p" is the last child of an aggr_kprobe */
		old_p = list_entry(p->list.next, struct kprobe, list);
		list_del(&p->list);
		arch_remove_kprobe(old_p);
		kfree(old_p);
	}
}

int __kprobes register_kprobes(struct kprobe **kps, int num)
{
	int i, ret = 0;

	if (num <= 0)
		return -EINVAL;
	for (i = 0; i < num; i++) {
		ret = register_kprobe(kps[i]);
		if (ret < 0) {
			if (i > 0)
				unregister_kprobes(kps, i);
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL_GPL(register_kprobes);

void __kprobes unregister_kprobe(struct kprobe *p)
{
	unregister_kprobes(&p, 1);
}
EXPORT_SYMBOL_GPL(unregister_kprobe);

void __kprobes unregister_kprobes(struct kprobe **kps, int num)
{
	int i;

	if (num <= 0)
		return;
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < num; i++)
		if (__unregister_kprobe_top(kps[i]) < 0)
			kps[i]->addr = NULL;
	mutex_unlock(&kprobe_mutex);

	synchronize_sched();
	for (i = 0; i < num; i++)
		if (kps[i]->addr)
			__unregister_kprobe_bottom(kps[i]);
}
EXPORT_SYMBOL_GPL(unregister_kprobes);

static struct notifier_block kprobe_exceptions_nb = {
	.notifier_call = kprobe_exceptions_notify,
	.priority = 0x7fffffff /* we need to be notified first */
};

unsigned long __weak arch_deref_entry_point(void *entry)
{
	return (unsigned long)entry;
}

int __kprobes register_jprobes(struct jprobe **jps, int num)
{
	struct jprobe *jp;
	int ret = 0, i;

	if (num <= 0)
		return -EINVAL;
	for (i = 0; i < num; i++) {
		unsigned long addr;
		jp = jps[i];
		addr = arch_deref_entry_point(jp->entry);

		if (!kernel_text_address(addr))
			ret = -EINVAL;
		else {
			/* Todo: Verify probepoint is a function entry point */
			jp->kp.pre_handler = setjmp_pre_handler;
			jp->kp.break_handler = longjmp_break_handler;
			ret = register_kprobe(&jp->kp);
		}
		if (ret < 0) {
			if (i > 0)
				unregister_jprobes(jps, i);
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL_GPL(register_jprobes);

int __kprobes register_jprobe(struct jprobe *jp)
{
	return register_jprobes(&jp, 1);
}
EXPORT_SYMBOL_GPL(register_jprobe);

void __kprobes unregister_jprobe(struct jprobe *jp)
{
	unregister_jprobes(&jp, 1);
}
EXPORT_SYMBOL_GPL(unregister_jprobe);

void __kprobes unregister_jprobes(struct jprobe **jps, int num)
{
	int i;

	if (num <= 0)
		return;
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < num; i++)
		if (__unregister_kprobe_top(&jps[i]->kp) < 0)
			jps[i]->kp.addr = NULL;
	mutex_unlock(&kprobe_mutex);

	synchronize_sched();
	for (i = 0; i < num; i++) {
		if (jps[i]->kp.addr)
			__unregister_kprobe_bottom(&jps[i]->kp);
	}
}
EXPORT_SYMBOL_GPL(unregister_jprobes);
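
/*
 * Usage sketch (not part of this file): a jprobe handler mirrors the
 * signature of the probed function and must end with jprobe_return().
 * The probed function and its prototype here are illustrative assumptions.
 *
 *	static long jdo_fork(unsigned long clone_flags,
 *			     unsigned long stack_start, struct pt_regs *regs,
 *			     unsigned long stack_size,
 *			     int __user *parent_tidptr,
 *			     int __user *child_tidptr)
 *	{
 *		printk(KERN_INFO "jprobe: clone_flags = 0x%lx\n", clone_flags);
 *		jprobe_return();	// control never passes this point
 *		return 0;
 *	}
 *
 *	static struct jprobe jp = {
 *		.entry	= jdo_fork,
 *		.kp	= { .symbol_name = "do_fork" },
 *	};
 *
 *	register_jprobe(&jp);	... 	unregister_jprobe(&jp);
 */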
#ifdef CONFIG_KRETPROBES
/*
 * This kprobe pre_handler is registered with every kretprobe. When the
 * probe hits, it will set up the return probe.
 */
static int __kprobes pre_handler_kretprobe(struct kprobe *p,
					   struct pt_regs *regs)
{
	struct kretprobe *rp = container_of(p, struct kretprobe, kp);
	unsigned long hash, flags = 0;
	struct kretprobe_instance *ri;

	/* TODO: consider swapping the RA only after the last pre_handler fired */
	hash = hash_ptr(current, KPROBE_HASH_BITS);
	spin_lock_irqsave(&rp->lock, flags);
	if (!hlist_empty(&rp->free_instances)) {
		ri = hlist_entry(rp->free_instances.first,
				struct kretprobe_instance, hlist);
		hlist_del(&ri->hlist);
		spin_unlock_irqrestore(&rp->lock, flags);

		ri->rp = rp;
		ri->task = current;

		if (rp->entry_handler && rp->entry_handler(ri, regs))
			return 0;

		arch_prepare_kretprobe(ri, regs);

		/* XXX(hch): why is there no hlist_move_head? */
		INIT_HLIST_NODE(&ri->hlist);
		kretprobe_table_lock(hash, &flags);
		hlist_add_head(&ri->hlist, &kretprobe_inst_table[hash]);
		kretprobe_table_unlock(hash, &flags);
	} else {
		rp->nmissed++;
		spin_unlock_irqrestore(&rp->lock, flags);
	}
	return 0;
}

int __kprobes register_kretprobe(struct kretprobe *rp)
{
	int ret = 0;
	struct kretprobe_instance *inst;
	int i;
	void *addr;

	if (kretprobe_blacklist_size) {
		addr = kprobe_addr(&rp->kp);
		if (!addr)
			return -EINVAL;

		for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
			if (kretprobe_blacklist[i].addr == addr)
				return -EINVAL;
		}
	}

	rp->kp.pre_handler = pre_handler_kretprobe;
	rp->kp.post_handler = NULL;
	rp->kp.fault_handler = NULL;
	rp->kp.break_handler = NULL;

	/* Pre-allocate memory for max kretprobe instances */
	if (rp->maxactive <= 0) {
#ifdef CONFIG_PREEMPT
		rp->maxactive = max_t(unsigned int, 10, 2*num_possible_cpus());
#else
		rp->maxactive = num_possible_cpus();
#endif
	}
	spin_lock_init(&rp->lock);
	INIT_HLIST_HEAD(&rp->free_instances);
	for (i = 0; i < rp->maxactive; i++) {
		inst = kmalloc(sizeof(struct kretprobe_instance) +
			       rp->data_size, GFP_KERNEL);
		if (inst == NULL) {
			free_rp_inst(rp);
			return -ENOMEM;
		}
		INIT_HLIST_NODE(&inst->hlist);
		hlist_add_head(&inst->hlist, &rp->free_instances);
	}

	rp->nmissed = 0;
	/* Establish function entry probe point */
	ret = register_kprobe(&rp->kp);
	if (ret != 0)
		free_rp_inst(rp);
	return ret;
}
EXPORT_SYMBOL_GPL(register_kretprobe);
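
/*
 * Usage sketch (not part of this file): a return handler that reads the
 * probed function's return value. The probed symbol is an illustrative
 * assumption; regs_return_value() is the arch helper for reading the
 * return-value register.
 *
 *	static int ret_handler(struct kretprobe_instance *ri,
 *			       struct pt_regs *regs)
 *	{
 *		printk(KERN_INFO "returned %ld\n", regs_return_value(regs));
 *		return 0;
 *	}
 *
 *	static struct kretprobe krp = {
 *		.handler		= ret_handler,
 *		.kp.symbol_name		= "do_fork",
 *		.maxactive		= 20,	// concurrent instances
 *	};
 *
 *	register_kretprobe(&krp);	... 	unregister_kretprobe(&krp);
 */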
int __kprobes register_kretprobes(struct kretprobe **rps, int num)
{
	int ret = 0, i;

	if (num <= 0)
		return -EINVAL;
	for (i = 0; i < num; i++) {
		ret = register_kretprobe(rps[i]);
		if (ret < 0) {
			if (i > 0)
				unregister_kretprobes(rps, i);
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL_GPL(register_kretprobes);

void __kprobes unregister_kretprobe(struct kretprobe *rp)
{
	unregister_kretprobes(&rp, 1);
}
EXPORT_SYMBOL_GPL(unregister_kretprobe);

void __kprobes unregister_kretprobes(struct kretprobe **rps, int num)
{
	int i;

	if (num <= 0)
		return;
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < num; i++)
		if (__unregister_kprobe_top(&rps[i]->kp) < 0)
			rps[i]->kp.addr = NULL;
	mutex_unlock(&kprobe_mutex);

	synchronize_sched();
	for (i = 0; i < num; i++) {
		if (rps[i]->kp.addr) {
			__unregister_kprobe_bottom(&rps[i]->kp);
			cleanup_rp_inst(rps[i]);
		}
	}
}
EXPORT_SYMBOL_GPL(unregister_kretprobes);

#else /* CONFIG_KRETPROBES */
int __kprobes register_kretprobe(struct kretprobe *rp)
{
	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(register_kretprobe);

int __kprobes register_kretprobes(struct kretprobe **rps, int num)
{
	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(register_kretprobes);

void __kprobes unregister_kretprobe(struct kretprobe *rp)
{
}
EXPORT_SYMBOL_GPL(unregister_kretprobe);

void __kprobes unregister_kretprobes(struct kretprobe **rps, int num)
{
}
EXPORT_SYMBOL_GPL(unregister_kretprobes);

static int __kprobes pre_handler_kretprobe(struct kprobe *p,
					   struct pt_regs *regs)
{
	return 0;
}
#endif /* CONFIG_KRETPROBES */

/* Set the kprobe gone and remove its instruction buffer. */
static void __kprobes kill_kprobe(struct kprobe *p)
{
	struct kprobe *kp;

	p->flags |= KPROBE_FLAG_GONE;
	if (p->pre_handler == aggr_pre_handler) {
		/*
		 * If this is an aggr_kprobe, we have to list all the
		 * chained probes and mark them GONE.
		 */
		list_for_each_entry_rcu(kp, &p->list, list)
			kp->flags |= KPROBE_FLAG_GONE;
		p->post_handler = NULL;
		p->break_handler = NULL;
	}
	/*
	 * Here, we can remove insn_slot safely, because no thread calls
	 * the original probed function (which will be freed soon) any more.
	 */
	arch_remove_kprobe(p);
}

void __kprobes dump_kprobe(struct kprobe *kp)
{
	printk(KERN_WARNING "Dumping kprobe:\n");
	printk(KERN_WARNING "Name: %s\nAddress: %p\nOffset: %x\n",
	       kp->symbol_name, kp->addr, kp->offset);
}

/* Module notifier callback, checking the kprobes on the module */
static int __kprobes kprobes_module_callback(struct notifier_block *nb,
					     unsigned long val, void *data)
{
	struct module *mod = data;
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;
	unsigned int i;
	int checkcore = (val == MODULE_STATE_GOING);

	if (val != MODULE_STATE_GOING && val != MODULE_STATE_LIVE)
		return NOTIFY_DONE;

	/*
	 * When MODULE_STATE_GOING is notified, both the module's .text and
	 * .init.text sections will be freed. When MODULE_STATE_LIVE is
	 * notified, only the .init.text section will be freed. We need to
	 * disable kprobes which have been inserted in those sections.
	 */
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, node, head, hlist)
			if (within_module_init((unsigned long)p->addr, mod) ||
			    (checkcore &&
			     within_module_core((unsigned long)p->addr, mod))) {
				/*
				 * The vaddr this probe is installed at will
				 * soon be vfreed, but not synced to disk.
				 * Hence, disarming the breakpoint isn't
				 * needed.
				 */
				kill_kprobe(p);
			}
	}
	mutex_unlock(&kprobe_mutex);
	return NOTIFY_DONE;
}

static struct notifier_block kprobe_module_nb = {
	.notifier_call = kprobes_module_callback,
	.priority = 0
};

static int __init init_kprobes(void)
{
	int i, err = 0;
	unsigned long offset = 0, size = 0;
	char *modname, namebuf[128];
	const char *symbol_name;
	void *addr;
	struct kprobe_blackpoint *kb;

	/* FIXME allocate the probe table, currently defined statically */
	/* initialize all list heads */
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		INIT_HLIST_HEAD(&kprobe_table[i]);
		INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
		spin_lock_init(&(kretprobe_table_locks[i].lock));
	}

	/*
	 * Lookup and populate the kprobe_blacklist.
	 *
	 * Unlike the kretprobe blacklist, we'll need to determine
	 * the range of addresses that belong to the said functions,
	 * since a kprobe need not necessarily be at the beginning
	 * of a function.
	 */
	for (kb = kprobe_blacklist; kb->name != NULL; kb++) {
		kprobe_lookup_name(kb->name, addr);
		if (!addr)
			continue;

		kb->start_addr = (unsigned long)addr;
		symbol_name = kallsyms_lookup(kb->start_addr,
				&size, &offset, &modname, namebuf);
		if (!symbol_name)
			kb->range = 0;
		else
			kb->range = size;
	}

	if (kretprobe_blacklist_size) {
		/* lookup the function address from its name */
		for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
			kprobe_lookup_name(kretprobe_blacklist[i].name,
					   kretprobe_blacklist[i].addr);
			if (!kretprobe_blacklist[i].addr)
				printk("kretprobe: lookup failed: %s\n",
				       kretprobe_blacklist[i].name);
		}
	}

	/* By default, kprobes are armed */
	kprobes_all_disarmed = false;

	err = arch_init_kprobes();
	if (!err)
		err = register_die_notifier(&kprobe_exceptions_nb);
	if (!err)
		err = register_module_notifier(&kprobe_module_nb);

	kprobes_initialized = (err == 0);

	if (!err)
		init_test_probes();
	return err;
}

#ifdef CONFIG_DEBUG_FS
static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p,
		const char *sym, int offset, char *modname)
{
	char *kprobe_type;

	if (p->pre_handler == pre_handler_kretprobe)
		kprobe_type = "r";
	else if (p->pre_handler == setjmp_pre_handler)
		kprobe_type = "j";
	else
		kprobe_type = "k";

	if (sym)
		seq_printf(pi, "%p  %s  %s+0x%x  %s %s%s\n",
			p->addr, kprobe_type, sym, offset,
			(modname ? modname : " "),
			(kprobe_gone(p) ? "[GONE]" : ""),
			((kprobe_disabled(p) && !kprobe_gone(p)) ?
			 "[DISABLED]" : ""));
	else
		seq_printf(pi, "%p  %s  %p %s%s\n",
			p->addr, kprobe_type, p->addr,
			(kprobe_gone(p) ? "[GONE]" : ""),
			((kprobe_disabled(p) && !kprobe_gone(p)) ?
			 "[DISABLED]" : ""));
}

static void __kprobes *kprobe_seq_start(struct seq_file *f, loff_t *pos)
{
	return (*pos < KPROBE_TABLE_SIZE) ? pos : NULL;
}

static void __kprobes *kprobe_seq_next(struct seq_file *f, void *v, loff_t *pos)
{
	(*pos)++;
	if (*pos >= KPROBE_TABLE_SIZE)
		return NULL;
	return pos;
}

static void __kprobes kprobe_seq_stop(struct seq_file *f, void *v)
{
	/* Nothing to do */
}

static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p, *kp;
	const char *sym = NULL;
	unsigned int i = *(loff_t *) v;
	unsigned long offset = 0;
	char *modname, namebuf[128];

	head = &kprobe_table[i];
	preempt_disable();
	hlist_for_each_entry_rcu(p, node, head, hlist) {
		sym = kallsyms_lookup((unsigned long)p->addr, NULL,
					&offset, &modname, namebuf);
		if (p->pre_handler == aggr_pre_handler) {
			list_for_each_entry_rcu(kp, &p->list, list)
				report_probe(pi, kp, sym, offset, modname);
		} else
			report_probe(pi, p, sym, offset, modname);
	}
	preempt_enable();
	return 0;
}

static const struct seq_operations kprobes_seq_ops = {
	.start = kprobe_seq_start,
	.next  = kprobe_seq_next,
	.stop  = kprobe_seq_stop,
	.show  = show_kprobe_addr
};

static int __kprobes kprobes_open(struct inode *inode, struct file *filp)
{
	return seq_open(filp, &kprobes_seq_ops);
}

static const struct file_operations debugfs_kprobes_operations = {
	.open		= kprobes_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

/* Disable one kprobe */
int __kprobes disable_kprobe(struct kprobe *kp)
{
	int ret = 0;
	struct kprobe *p;

	mutex_lock(&kprobe_mutex);

	/* Check whether the specified probe is valid. */
	p = __get_valid_kprobe(kp);
	if (unlikely(p == NULL)) {
		ret = -EINVAL;
		goto out;
	}

	/* If the probe is already disabled (or gone), just return */
	if (kprobe_disabled(kp))
		goto out;

	kp->flags |= KPROBE_FLAG_DISABLED;
	if (p != kp)
		/* When kp != p, p is always enabled. */
		try_to_disable_aggr_kprobe(p);

	if (!kprobes_all_disarmed && kprobe_disabled(p))
		disarm_kprobe(p);
out:
	mutex_unlock(&kprobe_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(disable_kprobe);

/* Enable one kprobe */
int __kprobes enable_kprobe(struct kprobe *kp)
{
	int ret = 0;
	struct kprobe *p;

	mutex_lock(&kprobe_mutex);

	/* Check whether the specified probe is valid. */
	p = __get_valid_kprobe(kp);
	if (unlikely(p == NULL)) {
		ret = -EINVAL;
		goto out;
	}

	if (kprobe_gone(kp)) {
		/* This kprobe has gone, we couldn't enable it. */
		ret = -EINVAL;
		goto out;
	}

	if (!kprobes_all_disarmed && kprobe_disabled(p))
		arm_kprobe(p);

	p->flags &= ~KPROBE_FLAG_DISABLED;
	if (p != kp)
		kp->flags &= ~KPROBE_FLAG_DISABLED;
out:
	mutex_unlock(&kprobe_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(enable_kprobe);
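
/*
 * Usage sketch (not part of this file): temporarily muting a registered
 * probe without tearing it down. "kp" is assumed to be a kprobe that was
 * successfully registered earlier.
 *
 *	disable_kprobe(&kp);	// breakpoint removed, registration kept
 *	...
 *	enable_kprobe(&kp);	// re-armed; returns -EINVAL if the probe
 *				// is invalid or has gone
 */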
static void __kprobes arm_all_kprobes(void)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;
	unsigned int i;

	mutex_lock(&kprobe_mutex);

	/* If kprobes are armed, just return */
	if (!kprobes_all_disarmed)
		goto already_enabled;

	mutex_lock(&text_mutex);
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, node, head, hlist)
			if (!kprobe_disabled(p))
				arch_arm_kprobe(p);
	}
	mutex_unlock(&text_mutex);

	kprobes_all_disarmed = false;
	printk(KERN_INFO "Kprobes globally enabled\n");

already_enabled:
	mutex_unlock(&kprobe_mutex);
	return;
}

static void __kprobes disarm_all_kprobes(void)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;
	unsigned int i;

	mutex_lock(&kprobe_mutex);

	/* If kprobes are already disarmed, just return */
	if (kprobes_all_disarmed)
		goto already_disabled;

	kprobes_all_disarmed = true;
	printk(KERN_INFO "Kprobes globally disabled\n");
	mutex_lock(&text_mutex);
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, node, head, hlist) {
			if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p))
				arch_disarm_kprobe(p);
		}
	}

	mutex_unlock(&text_mutex);
	mutex_unlock(&kprobe_mutex);
	/* Allow all currently running kprobes to complete */
	synchronize_sched();
	return;

already_disabled:
	mutex_unlock(&kprobe_mutex);
	return;
}

/*
 * XXX: The debugfs bool file interface doesn't allow for callbacks
 * when the bool state is switched. We can reuse that facility when
 * available
 */
static ssize_t read_enabled_file_bool(struct file *file,
	       char __user *user_buf, size_t count, loff_t *ppos)
{
	char buf[3];

	if (!kprobes_all_disarmed)
		buf[0] = '1';
	else
		buf[0] = '0';
	buf[1] = '\n';
	buf[2] = 0x00;
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t write_enabled_file_bool(struct file *file,
	       const char __user *user_buf, size_t count, loff_t *ppos)
{
	char buf[32];
	int buf_size;

	buf_size = min(count, (sizeof(buf)-1));
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	switch (buf[0]) {
	case 'y':
	case 'Y':
	case '1':
		arm_all_kprobes();
		break;
	case 'n':
	case 'N':
	case '0':
		disarm_all_kprobes();
		break;
	}

	return count;
}

static const struct file_operations fops_kp = {
	.read =		read_enabled_file_bool,
	.write =	write_enabled_file_bool,
};

static int __kprobes debugfs_kprobe_init(void)
{
	struct dentry *dir, *file;
	unsigned int value = 1;

	dir = debugfs_create_dir("kprobes", NULL);
	if (!dir)
		return -ENOMEM;

	file = debugfs_create_file("list", 0444, dir, NULL,
				&debugfs_kprobes_operations);
	if (!file) {
		debugfs_remove(dir);
		return -ENOMEM;
	}

	file = debugfs_create_file("enabled", 0600, dir,
					&value, &fops_kp);
	if (!file) {
		debugfs_remove(dir);
		return -ENOMEM;
	}

	return 0;
}

late_initcall(debugfs_kprobe_init);
#endif /* CONFIG_DEBUG_FS */

module_init(init_kprobes);

/* defined in arch/.../kernel/kprobes.c */
EXPORT_SYMBOL_GPL(jprobe_return);