/*
 *  Kernel Probes (KProbes)
 *  kernel/kprobes.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes suggestions from
 *		Rusty Russell).
 * 2004-Aug	Updated by Prasanna S Panchamukhi <prasanna@in.ibm.com> with
 *		hlists and exceptions notifier as suggested by Andi Kleen.
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2004-Sep	Prasanna S Panchamukhi <prasanna@in.ibm.com> Changed Kprobes
 *		exceptions notifier to be first on the priority list.
 * 2005-May	Hien Nguyen <hien@us.ibm.com>, Jim Keniston
 *		<jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> added function-return probes.
 */
#include <linux/kprobes.h>
#include <linux/hash.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/module.h>
#include <linux/moduleloader.h>
#include <linux/kallsyms.h>
#include <linux/freezer.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/sysctl.h>
#include <linux/kdebug.h>
#include <linux/memory.h>
#include <linux/ftrace.h>
#include <linux/cpu.h>
#include <linux/jump_label.h>

#include <asm-generic/sections.h>
#include <asm/cacheflush.h>
#include <asm/errno.h>
#include <asm/uaccess.h>

#define KPROBE_HASH_BITS 6
#define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)
/*
 * Some oddball architectures like 64bit powerpc have function descriptors,
 * so this must be overridable.
 */
#ifndef kprobe_lookup_name
#define kprobe_lookup_name(name, addr) \
	addr = ((kprobe_opcode_t *)(kallsyms_lookup_name(name)))
#endif

static int kprobes_initialized;
static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];

/* NOTE: change this value only with kprobe_mutex held */
static bool kprobes_all_disarmed;

static DEFINE_MUTEX(kprobe_mutex);	/* Protects kprobe_table */
static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
static struct {
	spinlock_t lock ____cacheline_aligned_in_smp;
} kretprobe_table_locks[KPROBE_TABLE_SIZE];

static spinlock_t *kretprobe_table_lock_ptr(unsigned long hash)
{
	return &(kretprobe_table_locks[hash].lock);
}

/*
 * Normally, functions that we'd want to prohibit kprobes in are marked
 * __kprobes. But there are cases where such functions already belong to
 * a different section (__sched for preempt_schedule).
 *
 * For such cases, we now have a blacklist.
 */
static struct kprobe_blackpoint kprobe_blacklist[] = {
	{"preempt_schedule",},
	{"native_get_debugreg",},
	{"irq_entries_start",},
	{"common_interrupt",},
	{"mcount",},	/* mcount can be called from everywhere */
	{NULL}		/* Terminator */
};

#ifdef __ARCH_WANT_KPROBES_INSN_SLOT
/*
 * kprobe->ainsn.insn points to the copy of the instruction to be
 * single-stepped. x86_64, POWER4 and above have no-exec support and
 * stepping on the instruction on a vmalloced/kmalloced/data page
 * is a recipe for disaster.
 */
struct kprobe_insn_page {
	struct list_head list;
	kprobe_opcode_t *insns;		/* Page of instruction slots */
	int nused;
	int ngarbage;
	char slot_used[];
};

#define KPROBE_INSN_PAGE_SIZE(slots)			\
	(offsetof(struct kprobe_insn_page, slot_used) +	\
	 (sizeof(char) * (slots)))

struct kprobe_insn_cache {
	struct list_head pages;	/* list of kprobe_insn_page */
	size_t insn_size;	/* size of instruction slot */
	int nr_garbage;
};

static int slots_per_page(struct kprobe_insn_cache *c)
{
	return PAGE_SIZE/(c->insn_size * sizeof(kprobe_opcode_t));
}

enum kprobe_slot_state {
	SLOT_CLEAN = 0,
	SLOT_DIRTY = 1,
	SLOT_USED = 2,
};

static DEFINE_MUTEX(kprobe_insn_mutex);	/* Protects kprobe_insn_slots */
static struct kprobe_insn_cache kprobe_insn_slots = {
	.pages = LIST_HEAD_INIT(kprobe_insn_slots.pages),
	.insn_size = MAX_INSN_SIZE,
	.nr_garbage = 0,
};
static int __kprobes collect_garbage_slots(struct kprobe_insn_cache *c);

/**
 * __get_insn_slot() - Find a slot on an executable page for an instruction.
 * We allocate an executable page if there's no room on existing ones.
 */
static kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c)
{
	struct kprobe_insn_page *kip;

 retry:
	list_for_each_entry(kip, &c->pages, list) {
		if (kip->nused < slots_per_page(c)) {
			int i;
			for (i = 0; i < slots_per_page(c); i++) {
				if (kip->slot_used[i] == SLOT_CLEAN) {
					kip->slot_used[i] = SLOT_USED;
					kip->nused++;
					return kip->insns + (i * c->insn_size);
				}
			}
			/* kip->nused is broken. Fix it. */
			kip->nused = slots_per_page(c);
			WARN_ON(1);
		}
	}

	/* If there are any garbage slots, collect them and try again. */
	if (c->nr_garbage && collect_garbage_slots(c) == 0)
		goto retry;

	/* All out of space.  Need to allocate a new page. */
	kip = kmalloc(KPROBE_INSN_PAGE_SIZE(slots_per_page(c)), GFP_KERNEL);
	if (!kip)
		return NULL;

	/*
	 * Use module_alloc so this page is within +/- 2GB of where the
	 * kernel image and loaded module images reside. This is required
	 * so x86_64 can correctly handle the %rip-relative fixups.
	 */
	kip->insns = module_alloc(PAGE_SIZE);
	if (!kip->insns) {
		kfree(kip);
		return NULL;
	}
	INIT_LIST_HEAD(&kip->list);
	memset(kip->slot_used, SLOT_CLEAN, slots_per_page(c));
	kip->slot_used[0] = SLOT_USED;
	kip->nused = 1;
	kip->ngarbage = 0;
	list_add(&kip->list, &c->pages);
	return kip->insns;
}

kprobe_opcode_t __kprobes *get_insn_slot(void)
{
	kprobe_opcode_t *ret = NULL;

	mutex_lock(&kprobe_insn_mutex);
	ret = __get_insn_slot(&kprobe_insn_slots);
	mutex_unlock(&kprobe_insn_mutex);

	return ret;
}

/* Return 1 if the page holding this slot was freed or emptied, otherwise 0. */
static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
{
	kip->slot_used[idx] = SLOT_CLEAN;
	kip->nused--;
	if (kip->nused == 0) {
		/*
		 * Page is no longer in use.  Free it unless
		 * it's the last one.  We keep the last one
		 * so as not to have to set it up again the
		 * next time somebody inserts a probe.
		 */
		if (!list_is_singular(&kip->list)) {
			list_del(&kip->list);
			module_free(NULL, kip->insns);
			kfree(kip);
		}
		return 1;
	}
	return 0;
}

static int __kprobes collect_garbage_slots(struct kprobe_insn_cache *c)
{
	struct kprobe_insn_page *kip, *next;

	/* Ensure nobody is still executing out of a garbage slot. */
	synchronize_sched();

	list_for_each_entry_safe(kip, next, &c->pages, list) {
		int i;
		if (kip->ngarbage == 0)
			continue;
		kip->ngarbage = 0;	/* we will collect all garbage slots */
		for (i = 0; i < slots_per_page(c); i++) {
			if (kip->slot_used[i] == SLOT_DIRTY &&
			    collect_one_slot(kip, i))
				break;
		}
	}
	c->nr_garbage = 0;
	return 0;
}
static void __kprobes __free_insn_slot(struct kprobe_insn_cache *c,
				       kprobe_opcode_t *slot, int dirty)
{
	struct kprobe_insn_page *kip;

	list_for_each_entry(kip, &c->pages, list) {
		long idx = ((long)slot - (long)kip->insns) /
				(c->insn_size * sizeof(kprobe_opcode_t));
		if (idx >= 0 && idx < slots_per_page(c)) {
			WARN_ON(kip->slot_used[idx] != SLOT_USED);
			if (dirty) {
				kip->slot_used[idx] = SLOT_DIRTY;
				kip->ngarbage++;
				if (++c->nr_garbage > slots_per_page(c))
					collect_garbage_slots(c);
			} else
				collect_one_slot(kip, idx);
			return;
		}
	}
	/* Could not free this slot. */
	WARN_ON(1);
}

void __kprobes free_insn_slot(kprobe_opcode_t * slot, int dirty)
{
	mutex_lock(&kprobe_insn_mutex);
	__free_insn_slot(&kprobe_insn_slots, slot, dirty);
	mutex_unlock(&kprobe_insn_mutex);
}

#ifdef CONFIG_OPTPROBES
/* For optimized_kprobe buffer */
static DEFINE_MUTEX(kprobe_optinsn_mutex); /* Protects kprobe_optinsn_slots */
static struct kprobe_insn_cache kprobe_optinsn_slots = {
	.pages = LIST_HEAD_INIT(kprobe_optinsn_slots.pages),
	/* .insn_size is initialized later */
	.nr_garbage = 0,
};

/* Get a slot for optimized_kprobe buffer */
kprobe_opcode_t __kprobes *get_optinsn_slot(void)
{
	kprobe_opcode_t *ret = NULL;

	mutex_lock(&kprobe_optinsn_mutex);
	ret = __get_insn_slot(&kprobe_optinsn_slots);
	mutex_unlock(&kprobe_optinsn_mutex);

	return ret;
}

void __kprobes free_optinsn_slot(kprobe_opcode_t * slot, int dirty)
{
	mutex_lock(&kprobe_optinsn_mutex);
	__free_insn_slot(&kprobe_optinsn_slots, slot, dirty);
	mutex_unlock(&kprobe_optinsn_mutex);
}
#endif
#endif
/* We have preemption disabled, so it is safe to use the __ versions. */
static inline void set_kprobe_instance(struct kprobe *kp)
{
	__get_cpu_var(kprobe_instance) = kp;
}

static inline void reset_kprobe_instance(void)
{
	__get_cpu_var(kprobe_instance) = NULL;
}

/*
 * This routine is called either:
 *	- under the kprobe_mutex - during kprobe_[un]register()
 *				OR
 *	- with preemption disabled - from arch/xxx/kernel/kprobes.c
 */
struct kprobe __kprobes *get_kprobe(void *addr)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;

	head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
	hlist_for_each_entry_rcu(p, node, head, hlist) {
		if (p->addr == addr)
			return p;
	}

	return NULL;
}

static int __kprobes aggr_pre_handler(struct kprobe *p, struct pt_regs *regs);

/* Return true if the kprobe is an aggregator */
static inline int kprobe_aggrprobe(struct kprobe *p)
{
	return p->pre_handler == aggr_pre_handler;
}

/*
 * Keep all fields in the kprobe consistent.
 */
static inline void copy_kprobe(struct kprobe *old_p, struct kprobe *p)
{
	memcpy(&p->opcode, &old_p->opcode, sizeof(kprobe_opcode_t));
	memcpy(&p->ainsn, &old_p->ainsn, sizeof(struct arch_specific_insn));
}

#ifdef CONFIG_OPTPROBES
/* NOTE: change this value only with kprobe_mutex held */
static bool kprobes_allow_optimization;

/*
 * Call all pre_handlers on the list, but ignore their return values.
 * This must be called from the arch-dependent optimized caller.
 */
void __kprobes opt_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
			set_kprobe_instance(kp);
			kp->pre_handler(kp, regs);
		}
		reset_kprobe_instance();
	}
}

/* Return true (!0) if the kprobe is ready for optimization. */
static inline int kprobe_optready(struct kprobe *p)
{
	struct optimized_kprobe *op;

	if (kprobe_aggrprobe(p)) {
		op = container_of(p, struct optimized_kprobe, kp);
		return arch_prepared_optinsn(&op->optinsn);
	}

	return 0;
}

/*
 * Return an optimized kprobe whose optimizing code replaces
 * instructions including addr (exclude breakpoint).
 */
static struct kprobe *__kprobes get_optimized_kprobe(unsigned long addr)
{
	int i;
	struct kprobe *p = NULL;
	struct optimized_kprobe *op;

	/* Don't check i == 0, since that is a breakpoint case. */
	for (i = 1; !p && i < MAX_OPTIMIZED_LENGTH; i++)
		p = get_kprobe((void *)(addr - i));

	if (p && kprobe_optready(p)) {
		op = container_of(p, struct optimized_kprobe, kp);
		if (arch_within_optimized_kprobe(op, addr))
			return p;
	}

	return NULL;
}
/* Optimization staging list, protected by kprobe_mutex */
static LIST_HEAD(optimizing_list);

static void kprobe_optimizer(struct work_struct *work);
static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer);
#define OPTIMIZE_DELAY 5

/* Kprobe jump optimizer */
static __kprobes void kprobe_optimizer(struct work_struct *work)
{
	struct optimized_kprobe *op, *tmp;

	/* Lock modules while optimizing kprobes */
	mutex_lock(&module_mutex);
	mutex_lock(&kprobe_mutex);
	if (kprobes_all_disarmed || !kprobes_allow_optimization)
		goto end;

	/*
	 * Wait for a quiescence period to ensure all running interrupts
	 * are done. Because an optprobe may modify multiple instructions,
	 * there is a chance that the Nth instruction is interrupted. In that
	 * case, a running interrupt can return into the 2nd-Nth byte of the
	 * jump instruction. This wait avoids that.
	 */
	synchronize_sched();

	/*
	 * Optimization/unoptimization refers to online_cpus via
	 * stop_machine(), and cpu-hotplug modifies online_cpus. At the
	 * same time, text_mutex will be held both in cpu-hotplug and here.
	 * This combination can cause a deadlock (cpu-hotplug tries to lock
	 * text_mutex, but stop_machine() cannot be done because online_cpus
	 * has been changed).
	 * To avoid this deadlock, we need to call get_online_cpus()
	 * to prevent cpu-hotplug outside of the text_mutex locking.
	 */
	get_online_cpus();
	mutex_lock(&text_mutex);
	list_for_each_entry_safe(op, tmp, &optimizing_list, list) {
		WARN_ON(kprobe_disabled(&op->kp));
		if (arch_optimize_kprobe(op) < 0)
			op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
		list_del_init(&op->list);
	}
	mutex_unlock(&text_mutex);
	put_online_cpus();
end:
	mutex_unlock(&kprobe_mutex);
	mutex_unlock(&module_mutex);
}

/* Optimize a kprobe if p is ready to be optimized */
static __kprobes void optimize_kprobe(struct kprobe *p)
{
	struct optimized_kprobe *op;

	/* Check if the kprobe is disabled or not ready for optimization. */
	if (!kprobe_optready(p) || !kprobes_allow_optimization ||
	    (kprobe_disabled(p) || kprobes_all_disarmed))
		return;

	/* Neither break_handler nor post_handler is supported. */
	if (p->break_handler || p->post_handler)
		return;

	op = container_of(p, struct optimized_kprobe, kp);

	/* Check that there are no other kprobes at the optimized instructions */
	if (arch_check_optimized_kprobe(op) < 0)
		return;

	/* Check if it is already optimized. */
	if (op->kp.flags & KPROBE_FLAG_OPTIMIZED)
		return;

	op->kp.flags |= KPROBE_FLAG_OPTIMIZED;
	list_add(&op->list, &optimizing_list);
	if (!delayed_work_pending(&optimizing_work))
		schedule_delayed_work(&optimizing_work, OPTIMIZE_DELAY);
}

/* Unoptimize a kprobe if p is optimized */
static __kprobes void unoptimize_kprobe(struct kprobe *p)
{
	struct optimized_kprobe *op;

	if ((p->flags & KPROBE_FLAG_OPTIMIZED) && kprobe_aggrprobe(p)) {
		op = container_of(p, struct optimized_kprobe, kp);
		if (!list_empty(&op->list))
			/* Dequeue from the optimization queue */
			list_del_init(&op->list);
		else
			/* Replace jump with break */
			arch_unoptimize_kprobe(op);
		op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
	}
}
/* Remove optimized instructions */
static void __kprobes kill_optimized_kprobe(struct kprobe *p)
{
	struct optimized_kprobe *op;

	op = container_of(p, struct optimized_kprobe, kp);
	if (!list_empty(&op->list)) {
		/* Dequeue from the optimization queue */
		list_del_init(&op->list);
		op->kp.flags &= ~KPROBE_FLAG_OPTIMIZED;
	}
	/* Don't unoptimize, because the target code will be freed. */
	arch_remove_optimized_kprobe(op);
}

/* Try to prepare optimized instructions */
static __kprobes void prepare_optimized_kprobe(struct kprobe *p)
{
	struct optimized_kprobe *op;

	op = container_of(p, struct optimized_kprobe, kp);
	arch_prepare_optimized_kprobe(op);
}

/* Free optimized instructions and optimized_kprobe */
static __kprobes void free_aggr_kprobe(struct kprobe *p)
{
	struct optimized_kprobe *op;

	op = container_of(p, struct optimized_kprobe, kp);
	arch_remove_optimized_kprobe(op);
	kfree(op);
}

/* Allocate a new optimized_kprobe and try to prepare optimized instructions */
static __kprobes struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
{
	struct optimized_kprobe *op;

	op = kzalloc(sizeof(struct optimized_kprobe), GFP_KERNEL);
	if (!op)
		return NULL;

	INIT_LIST_HEAD(&op->list);
	op->kp.addr = p->addr;
	arch_prepare_optimized_kprobe(op);

	return &op->kp;
}

static void __kprobes init_aggr_kprobe(struct kprobe *ap, struct kprobe *p);

/*
 * Prepare an optimized_kprobe and optimize it.
 * NOTE: p must be a normal registered kprobe.
 */
static __kprobes void try_to_optimize_kprobe(struct kprobe *p)
{
	struct kprobe *ap;
	struct optimized_kprobe *op;

	ap = alloc_aggr_kprobe(p);
	if (!ap)
		return;

	op = container_of(ap, struct optimized_kprobe, kp);
	if (!arch_prepared_optinsn(&op->optinsn)) {
		/*
		 * If preparing the optimized instructions failed,
		 * fall back to a regular kprobe.
		 */
		free_aggr_kprobe(ap);
		return;
	}

	init_aggr_kprobe(ap, p);
	optimize_kprobe(ap);
}
#ifdef CONFIG_SYSCTL
static void __kprobes optimize_all_kprobes(void)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;
	unsigned int i;

	/* If optimization is already allowed, just return */
	if (kprobes_allow_optimization)
		return;

	kprobes_allow_optimization = true;
	mutex_lock(&text_mutex);
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, node, head, hlist)
			if (!kprobe_disabled(p))
				optimize_kprobe(p);
	}
	mutex_unlock(&text_mutex);
	printk(KERN_INFO "Kprobes globally optimized\n");
}

static void __kprobes unoptimize_all_kprobes(void)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;
	unsigned int i;

	/* If optimization is already prohibited, just return */
	if (!kprobes_allow_optimization)
		return;

	kprobes_allow_optimization = false;
	printk(KERN_INFO "Kprobes globally unoptimized\n");
	get_online_cpus();	/* For avoiding text_mutex deadlock */
	mutex_lock(&text_mutex);
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, node, head, hlist) {
			if (!kprobe_disabled(p))
				unoptimize_kprobe(p);
		}
	}

	mutex_unlock(&text_mutex);
	put_online_cpus();
	/* Allow all currently running kprobes to complete */
	synchronize_sched();
}

int sysctl_kprobes_optimization;
int proc_kprobes_optimization_handler(struct ctl_table *table, int write,
				      void __user *buffer, size_t *length,
				      loff_t *ppos)
{
	int ret;

	mutex_lock(&kprobe_mutex);
	sysctl_kprobes_optimization = kprobes_allow_optimization ? 1 : 0;
	ret = proc_dointvec_minmax(table, write, buffer, length, ppos);

	if (sysctl_kprobes_optimization)
		optimize_all_kprobes();
	else
		unoptimize_all_kprobes();
	mutex_unlock(&kprobe_mutex);

	return ret;
}
#endif /* CONFIG_SYSCTL */
static void __kprobes __arm_kprobe(struct kprobe *p)
{
	struct kprobe *old_p;

	/* Check collision with other optimized kprobes */
	old_p = get_optimized_kprobe((unsigned long)p->addr);
	if (unlikely(old_p))
		unoptimize_kprobe(old_p); /* Fallback to unoptimized kprobe */

	arch_arm_kprobe(p);
	optimize_kprobe(p);	/* Try to optimize (add kprobe to a list) */
}

static void __kprobes __disarm_kprobe(struct kprobe *p)
{
	struct kprobe *old_p;

	unoptimize_kprobe(p);	/* Try to unoptimize */
	arch_disarm_kprobe(p);

	/* If another kprobe was blocked, optimize it. */
	old_p = get_optimized_kprobe((unsigned long)p->addr);
	if (unlikely(old_p))
		optimize_kprobe(old_p);
}

#else /* !CONFIG_OPTPROBES */

#define optimize_kprobe(p)			do {} while (0)
#define unoptimize_kprobe(p)			do {} while (0)
#define kill_optimized_kprobe(p)		do {} while (0)
#define prepare_optimized_kprobe(p)		do {} while (0)
#define try_to_optimize_kprobe(p)		do {} while (0)
#define __arm_kprobe(p)				arch_arm_kprobe(p)
#define __disarm_kprobe(p)			arch_disarm_kprobe(p)

static __kprobes void free_aggr_kprobe(struct kprobe *p)
{
	kfree(p);
}

static __kprobes struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
{
	return kzalloc(sizeof(struct kprobe), GFP_KERNEL);
}
#endif /* CONFIG_OPTPROBES */
/* Arm a kprobe with text_mutex */
static void __kprobes arm_kprobe(struct kprobe *kp)
{
	/*
	 * Here, since __arm_kprobe() doesn't use stop_machine(),
	 * this doesn't cause a deadlock on text_mutex. So, we don't
	 * need get_online_cpus().
	 */
	mutex_lock(&text_mutex);
	__arm_kprobe(kp);
	mutex_unlock(&text_mutex);
}

/* Disarm a kprobe with text_mutex */
static void __kprobes disarm_kprobe(struct kprobe *kp)
{
	get_online_cpus();	/* For avoiding text_mutex deadlock */
	mutex_lock(&text_mutex);
	__disarm_kprobe(kp);
	mutex_unlock(&text_mutex);
	put_online_cpus();
}

/*
 * Aggregate handlers for multiple kprobes support - these handlers
 * take care of invoking the individual kprobe handlers on p->list
 */
static int __kprobes aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
			set_kprobe_instance(kp);
			if (kp->pre_handler(kp, regs))
				return 1;
		}
		reset_kprobe_instance();
	}
	return 0;
}

static void __kprobes aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
					unsigned long flags)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->post_handler && likely(!kprobe_disabled(kp))) {
			set_kprobe_instance(kp);
			kp->post_handler(kp, regs, flags);
			reset_kprobe_instance();
		}
	}
}

static int __kprobes aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
					int trapnr)
{
	struct kprobe *cur = __get_cpu_var(kprobe_instance);

	/*
	 * If we faulted "during" the execution of a user specified
	 * probe handler, invoke just that probe's fault handler.
	 */
	if (cur && cur->fault_handler) {
		if (cur->fault_handler(cur, regs, trapnr))
			return 1;
	}
	return 0;
}

static int __kprobes aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *cur = __get_cpu_var(kprobe_instance);
	int ret = 0;

	if (cur && cur->break_handler) {
		if (cur->break_handler(cur, regs))
			ret = 1;
	}
	reset_kprobe_instance();
	return ret;
}

/* Walks the list and increments nmissed count for multiprobe case */
void __kprobes kprobes_inc_nmissed_count(struct kprobe *p)
{
	struct kprobe *kp;

	if (!kprobe_aggrprobe(p)) {
		p->nmissed++;
	} else {
		list_for_each_entry_rcu(kp, &p->list, list)
			kp->nmissed++;
	}
	return;
}
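
/*
 * Return a used kretprobe instance to its kretprobe's free list, or, if
 * the kretprobe is being unregistered (ri->rp == NULL), collect it on
 * @head so the caller can free it.
 */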
void __kprobes recycle_rp_inst(struct kretprobe_instance *ri,
			       struct hlist_head *head)
{
	struct kretprobe *rp = ri->rp;

	/* remove rp inst off the kretprobe_inst_table */
	hlist_del(&ri->hlist);
	INIT_HLIST_NODE(&ri->hlist);
	if (likely(rp)) {
		spin_lock(&rp->lock);
		hlist_add_head(&ri->hlist, &rp->free_instances);
		spin_unlock(&rp->lock);
	} else
		/* Unregistering */
		hlist_add_head(&ri->hlist, head);
}
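
/*
 * kretprobe_inst_table is hashed by task and protected by per-bucket
 * spinlocks; the helpers below look up a bucket (by task or by hash) and
 * take or release the corresponding lock.
 */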
void __kprobes kretprobe_hash_lock(struct task_struct *tsk,
			 struct hlist_head **head, unsigned long *flags)
__acquires(hlist_lock)
{
	unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
	spinlock_t *hlist_lock;

	*head = &kretprobe_inst_table[hash];
	hlist_lock = kretprobe_table_lock_ptr(hash);
	spin_lock_irqsave(hlist_lock, *flags);
}

static void __kprobes kretprobe_table_lock(unsigned long hash,
	unsigned long *flags)
__acquires(hlist_lock)
{
	spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
	spin_lock_irqsave(hlist_lock, *flags);
}

void __kprobes kretprobe_hash_unlock(struct task_struct *tsk,
	unsigned long *flags)
__releases(hlist_lock)
{
	unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
	spinlock_t *hlist_lock;

	hlist_lock = kretprobe_table_lock_ptr(hash);
	spin_unlock_irqrestore(hlist_lock, *flags);
}

static void __kprobes kretprobe_table_unlock(unsigned long hash,
	unsigned long *flags)
__releases(hlist_lock)
{
	spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
	spin_unlock_irqrestore(hlist_lock, *flags);
}

/*
 * This function is called from finish_task_switch() when task tk becomes
 * dead, so that we can recycle any function-return probe instances
 * associated with this task. These left-over instances represent probed
 * functions that have been called but will never return.
 */
void __kprobes kprobe_flush_task(struct task_struct *tk)
{
	struct kretprobe_instance *ri;
	struct hlist_head *head, empty_rp;
	struct hlist_node *node, *tmp;
	unsigned long hash, flags = 0;

	if (unlikely(!kprobes_initialized))
		/* Early boot. kretprobe_table_locks not yet initialized. */
		return;

	/* empty_rp must be initialized before recycle_rp_inst() can use it. */
	INIT_HLIST_HEAD(&empty_rp);
	hash = hash_ptr(tk, KPROBE_HASH_BITS);
	head = &kretprobe_inst_table[hash];
	kretprobe_table_lock(hash, &flags);
	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
		if (ri->task == tk)
			recycle_rp_inst(ri, &empty_rp);
	}
	kretprobe_table_unlock(hash, &flags);
	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
}
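
/* Free all pre-allocated instances sitting on the kretprobe's free list. */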
static inline void free_rp_inst(struct kretprobe *rp)
{
	struct kretprobe_instance *ri;
	struct hlist_node *pos, *next;

	hlist_for_each_entry_safe(ri, pos, next, &rp->free_instances, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
}
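
/*
 * Detach any still-active instances from the kretprobe being unregistered
 * (so a late return won't touch the kretprobe again) and free the idle
 * instances on its free list.
 */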
static void __kprobes cleanup_rp_inst(struct kretprobe *rp)
{
	unsigned long flags, hash;
	struct kretprobe_instance *ri;
	struct hlist_node *pos, *next;
	struct hlist_head *head;

	/* No race here */
	for (hash = 0; hash < KPROBE_TABLE_SIZE; hash++) {
		kretprobe_table_lock(hash, &flags);
		head = &kretprobe_inst_table[hash];
		hlist_for_each_entry_safe(ri, pos, next, head, hlist) {
			if (ri->rp == rp)
				ri->rp = NULL;
		}
		kretprobe_table_unlock(hash, &flags);
	}
	free_rp_inst(rp);
}

/*
 * Add the new probe to ap->list. Fail if this is the
 * second jprobe at the address - two jprobes can't coexist.
 */
static int __kprobes add_new_kprobe(struct kprobe *ap, struct kprobe *p)
{
	BUG_ON(kprobe_gone(ap) || kprobe_gone(p));

	if (p->break_handler || p->post_handler)
		unoptimize_kprobe(ap);	/* Fall back to normal kprobe */

	if (p->break_handler) {
		if (ap->break_handler)
			return -EEXIST;
		list_add_tail_rcu(&p->list, &ap->list);
		ap->break_handler = aggr_break_handler;
	} else
		list_add_rcu(&p->list, &ap->list);
	if (p->post_handler && !ap->post_handler)
		ap->post_handler = aggr_post_handler;

	if (kprobe_disabled(ap) && !kprobe_disabled(p)) {
		ap->flags &= ~KPROBE_FLAG_DISABLED;
		if (!kprobes_all_disarmed)
			/* Arm the breakpoint again. */
			__arm_kprobe(ap);
	}
	return 0;
}

/*
 * Fill in the required fields of the "manager kprobe". Replace the
 * earlier kprobe in the hlist with the manager kprobe.
 */
static void __kprobes init_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
{
	/* Copy p's insn slot to ap */
	copy_kprobe(p, ap);
	flush_insn_slot(ap);
	ap->addr = p->addr;
	ap->flags = p->flags & ~KPROBE_FLAG_OPTIMIZED;
	ap->pre_handler = aggr_pre_handler;
	ap->fault_handler = aggr_fault_handler;
	/* We don't care about a kprobe which has gone. */
	if (p->post_handler && !kprobe_gone(p))
		ap->post_handler = aggr_post_handler;
	if (p->break_handler && !kprobe_gone(p))
		ap->break_handler = aggr_break_handler;

	INIT_LIST_HEAD(&ap->list);
	INIT_HLIST_NODE(&ap->hlist);

	list_add_rcu(&p->list, &ap->list);
	hlist_replace_rcu(&p->hlist, &ap->hlist);
}
/*
 * This is the second or subsequent kprobe at the address - handle
 * the intricacies.
 */
static int __kprobes register_aggr_kprobe(struct kprobe *old_p,
					  struct kprobe *p)
{
	int ret = 0;
	struct kprobe *ap = old_p;

	if (!kprobe_aggrprobe(old_p)) {
		/* If old_p is not an aggr_kprobe, create a new aggr_kprobe. */
		ap = alloc_aggr_kprobe(old_p);
		if (!ap)
			return -ENOMEM;
		init_aggr_kprobe(ap, old_p);
	}

	if (kprobe_gone(ap)) {
		/*
		 * We are attempting to insert a new probe at the same
		 * location that had a probe in the module vaddr area which
		 * has already been freed. So, the instruction slot has
		 * already been released. We need a new slot for the new probe.
		 */
		ret = arch_prepare_kprobe(ap);
		if (ret)
			/*
			 * Even if we fail to allocate a new slot, we don't
			 * need to free the aggr_kprobe. It will be used next
			 * time, or freed by unregister_kprobe().
			 */
			return ret;

		/* Prepare optimized instructions if possible. */
		prepare_optimized_kprobe(ap);

		/*
		 * Clear the gone flag to prevent allocating a new slot again,
		 * and set the disabled flag because it is not armed yet.
		 */
		ap->flags = (ap->flags & ~KPROBE_FLAG_GONE)
			    | KPROBE_FLAG_DISABLED;
	}

	/* Copy ap's insn slot to p */
	copy_kprobe(ap, p);
	return add_new_kprobe(ap, p);
}

/* Try to disable an aggr_kprobe, and return 1 if succeeded. */
static int __kprobes try_to_disable_aggr_kprobe(struct kprobe *p)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (!kprobe_disabled(kp))
			/*
			 * There is an active probe on the list.
			 * We can't disable this aggr_kprobe.
			 */
			return 0;
	}
	p->flags |= KPROBE_FLAG_DISABLED;
	return 1;
}

static int __kprobes in_kprobes_functions(unsigned long addr)
{
	struct kprobe_blackpoint *kb;

	if (addr >= (unsigned long)__kprobes_text_start &&
	    addr < (unsigned long)__kprobes_text_end)
		return -EINVAL;
	/*
	 * If there exists a kprobe_blacklist, verify and
	 * fail any probe registration in the prohibited area.
	 */
	for (kb = kprobe_blacklist; kb->name != NULL; kb++) {
		if (kb->start_addr) {
			if (addr >= kb->start_addr &&
			    addr < (kb->start_addr + kb->range))
				return -EINVAL;
		}
	}
	return 0;
}
/*
 * If we have a symbol_name argument, look it up and add the offset field
 * to it. This way, we can specify an address relative to a symbol.
 */
static kprobe_opcode_t __kprobes *kprobe_addr(struct kprobe *p)
{
	kprobe_opcode_t *addr = p->addr;
	if (p->symbol_name) {
		if (addr)
			return NULL;
		kprobe_lookup_name(p->symbol_name, addr);
	}

	if (!addr)
		return NULL;
	return (kprobe_opcode_t *)(((char *)addr) + p->offset);
}

/* Check that the passed kprobe is valid and return the kprobe in kprobe_table. */
static struct kprobe * __kprobes __get_valid_kprobe(struct kprobe *p)
{
	struct kprobe *old_p, *list_p;

	old_p = get_kprobe(p->addr);
	if (unlikely(!old_p))
		return NULL;

	if (p != old_p) {
		list_for_each_entry_rcu(list_p, &old_p->list, list)
			if (list_p == p)
				/* kprobe p is a valid probe */
				goto valid;
		return NULL;
	}
valid:
	return old_p;
}

/* Return an error if the kprobe is being re-registered */
static inline int check_kprobe_rereg(struct kprobe *p)
{
	int ret = 0;
	struct kprobe *old_p;

	mutex_lock(&kprobe_mutex);
	old_p = __get_valid_kprobe(p);
	if (old_p)
		ret = -EINVAL;
	mutex_unlock(&kprobe_mutex);
	return ret;
}
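
/*
 * Register a kprobe. The probe address is resolved from either p->addr or
 * p->symbol_name + p->offset, validated against kernel/module text and the
 * blacklist, then inserted into kprobe_table and armed (unless it was
 * registered disabled or another probe already exists at that address, in
 * which case it is chained onto an aggregator probe).
 */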
int __kprobes register_kprobe(struct kprobe *p)
{
	int ret = 0;
	struct kprobe *old_p;
	struct module *probed_mod;
	kprobe_opcode_t *addr;

	addr = kprobe_addr(p);
	if (!addr)
		return -EINVAL;
	p->addr = addr;

	ret = check_kprobe_rereg(p);
	if (ret)
		return ret;

	preempt_disable();
	if (!kernel_text_address((unsigned long) p->addr) ||
	    in_kprobes_functions((unsigned long) p->addr) ||
	    ftrace_text_reserved(p->addr, p->addr) ||
	    jump_label_text_reserved(p->addr, p->addr)) {
		preempt_enable();
		return -EINVAL;
	}

	/* User can pass only KPROBE_FLAG_DISABLED to register_kprobe */
	p->flags &= KPROBE_FLAG_DISABLED;

	/*
	 * Check if we are probing a module.
	 */
	probed_mod = __module_text_address((unsigned long) p->addr);
	if (probed_mod) {
		/*
		 * We must hold a refcount of the probed module while updating
		 * its code to prohibit unexpected unloading.
		 */
		if (unlikely(!try_module_get(probed_mod))) {
			preempt_enable();
			return -EINVAL;
		}
		/*
		 * If the module has freed .init.text, we can't insert
		 * kprobes there.
		 */
		if (within_module_init((unsigned long)p->addr, probed_mod) &&
		    probed_mod->state != MODULE_STATE_COMING) {
			module_put(probed_mod);
			preempt_enable();
			return -EINVAL;
		}
	}
	preempt_enable();

	p->nmissed = 0;
	INIT_LIST_HEAD(&p->list);
	mutex_lock(&kprobe_mutex);

	get_online_cpus();	/* For avoiding text_mutex deadlock. */
	mutex_lock(&text_mutex);

	old_p = get_kprobe(p->addr);
	if (old_p) {
		/* Since this may unoptimize old_p, locking text_mutex. */
		ret = register_aggr_kprobe(old_p, p);
		goto out;
	}

	ret = arch_prepare_kprobe(p);
	if (ret)
		goto out;

	INIT_HLIST_NODE(&p->hlist);
	hlist_add_head_rcu(&p->hlist,
		       &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);

	if (!kprobes_all_disarmed && !kprobe_disabled(p))
		__arm_kprobe(p);

	/* Try to optimize kprobe */
	try_to_optimize_kprobe(p);

out:
	mutex_unlock(&text_mutex);
	put_online_cpus();
	mutex_unlock(&kprobe_mutex);

	if (probed_mod)
		module_put(probed_mod);

	return ret;
}
EXPORT_SYMBOL_GPL(register_kprobe);
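
/*
 * Example usage, from a module (a minimal sketch, not part of this file;
 * the handler name, probed symbol and variable names are illustrative
 * assumptions, not anything defined here):
 *
 *	static int my_pre(struct kprobe *p, struct pt_regs *regs)
 *	{
 *		pr_info("kprobe hit at %p\n", p->addr);
 *		return 0;	// let the probed instruction run
 *	}
 *
 *	static struct kprobe my_kp = {
 *		.symbol_name	= "do_fork",
 *		.pre_handler	= my_pre,
 *	};
 *
 *	ret = register_kprobe(&my_kp);	// in module init
 *	...
 *	unregister_kprobe(&my_kp);	// in module exit
 */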
/*
 * Unregister a kprobe without a scheduler synchronization.
 */
static int __kprobes __unregister_kprobe_top(struct kprobe *p)
{
	struct kprobe *old_p, *list_p;

	old_p = __get_valid_kprobe(p);
	if (old_p == NULL)
		return -EINVAL;
	if (old_p == p ||
	    (kprobe_aggrprobe(old_p) &&
	     list_is_singular(&old_p->list))) {
		/*
		 * Only probe on the hash list. Disarm only if kprobes are
		 * enabled and not gone - otherwise, the breakpoint would
		 * already have been removed. We save on flushing icache.
		 */
		if (!kprobes_all_disarmed && !kprobe_disabled(old_p))
			disarm_kprobe(old_p);
		hlist_del_rcu(&old_p->hlist);
	} else {
		if (p->break_handler && !kprobe_gone(p))
			old_p->break_handler = NULL;
		if (p->post_handler && !kprobe_gone(p)) {
			list_for_each_entry_rcu(list_p, &old_p->list, list) {
				if ((list_p != p) && (list_p->post_handler))
					goto noclean;
			}
			old_p->post_handler = NULL;
		}
noclean:
		list_del_rcu(&p->list);
		if (!kprobe_disabled(old_p)) {
			try_to_disable_aggr_kprobe(old_p);
			if (!kprobes_all_disarmed) {
				if (kprobe_disabled(old_p))
					disarm_kprobe(old_p);
				else
					/* Try to optimize this probe again */
					optimize_kprobe(old_p);
			}
		}
	}
	return 0;
}
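
/*
 * Second half of unregistration: after the caller has done a scheduler
 * synchronization, release the arch resources of the probe and, if it was
 * the last child of an aggr_kprobe, free the aggregator as well.
 */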
static void __kprobes __unregister_kprobe_bottom(struct kprobe *p)
{
	struct kprobe *old_p;

	if (list_empty(&p->list))
		arch_remove_kprobe(p);
	else if (list_is_singular(&p->list)) {
		/* "p" is the last child of an aggr_kprobe */
		old_p = list_entry(p->list.next, struct kprobe, list);
		list_del(&p->list);
		arch_remove_kprobe(old_p);
		free_aggr_kprobe(old_p);
	}
}
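
/*
 * Register an array of kprobes. On failure, the probes registered so far
 * are unregistered and the error is returned.
 */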
int __kprobes register_kprobes(struct kprobe **kps, int num)
{
	int i, ret = 0;

	if (num <= 0)
		return -EINVAL;
	for (i = 0; i < num; i++) {
		ret = register_kprobe(kps[i]);
		if (ret < 0) {
			if (i > 0)
				unregister_kprobes(kps, i);
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL_GPL(register_kprobes);

void __kprobes unregister_kprobe(struct kprobe *p)
{
	unregister_kprobes(&p, 1);
}
EXPORT_SYMBOL_GPL(unregister_kprobe);
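
/*
 * Unregister an array of kprobes in two phases: first detach each probe
 * under kprobe_mutex, then wait for running handlers to finish with
 * synchronize_sched() before releasing the per-probe resources.
 */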
void __kprobes unregister_kprobes(struct kprobe **kps, int num)
{
	int i;

	if (num <= 0)
		return;
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < num; i++)
		if (__unregister_kprobe_top(kps[i]) < 0)
			kps[i]->addr = NULL;
	mutex_unlock(&kprobe_mutex);

	synchronize_sched();
	for (i = 0; i < num; i++)
		if (kps[i]->addr)
			__unregister_kprobe_bottom(kps[i]);
}
EXPORT_SYMBOL_GPL(unregister_kprobes);

static struct notifier_block kprobe_exceptions_nb = {
	.notifier_call = kprobe_exceptions_notify,
	.priority = 0x7fffffff /* we need to be notified first */
};

unsigned long __weak arch_deref_entry_point(void *entry)
{
	return (unsigned long)entry;
}
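
/*
 * Register an array of jprobes. Each entry point is dereferenced (to cope
 * with function-descriptor ABIs) and must sit exactly at a function entry,
 * since jprobes piggyback on a kprobe with setjmp/longjmp-style handlers.
 */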
int __kprobes register_jprobes(struct jprobe **jps, int num)
{
	struct jprobe *jp;
	int ret = 0, i;

	if (num <= 0)
		return -EINVAL;
	for (i = 0; i < num; i++) {
		unsigned long addr, offset;
		jp = jps[i];
		addr = arch_deref_entry_point(jp->entry);

		/* Verify probepoint is a function entry point */
		if (kallsyms_lookup_size_offset(addr, NULL, &offset) &&
		    offset == 0) {
			jp->kp.pre_handler = setjmp_pre_handler;
			jp->kp.break_handler = longjmp_break_handler;
			ret = register_kprobe(&jp->kp);
		} else
			ret = -EINVAL;

		if (ret < 0) {
			if (i > 0)
				unregister_jprobes(jps, i);
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL_GPL(register_jprobes);

int __kprobes register_jprobe(struct jprobe *jp)
{
	return register_jprobes(&jp, 1);
}
EXPORT_SYMBOL_GPL(register_jprobe);

void __kprobes unregister_jprobe(struct jprobe *jp)
{
	unregister_jprobes(&jp, 1);
}
EXPORT_SYMBOL_GPL(unregister_jprobe);

void __kprobes unregister_jprobes(struct jprobe **jps, int num)
{
	int i;

	if (num <= 0)
		return;
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < num; i++)
		if (__unregister_kprobe_top(&jps[i]->kp) < 0)
			jps[i]->kp.addr = NULL;
	mutex_unlock(&kprobe_mutex);

	synchronize_sched();
	for (i = 0; i < num; i++) {
		if (jps[i]->kp.addr)
			__unregister_kprobe_bottom(&jps[i]->kp);
	}
}
EXPORT_SYMBOL_GPL(unregister_jprobes);
#ifdef CONFIG_KRETPROBES
/*
 * This kprobe pre_handler is registered with every kretprobe. When the
 * probe hits, it will set up the return probe.
 */
static int __kprobes pre_handler_kretprobe(struct kprobe *p,
					   struct pt_regs *regs)
{
	struct kretprobe *rp = container_of(p, struct kretprobe, kp);
	unsigned long hash, flags = 0;
	struct kretprobe_instance *ri;

	/*TODO: consider to only swap the RA after the last pre_handler fired */
	hash = hash_ptr(current, KPROBE_HASH_BITS);
	spin_lock_irqsave(&rp->lock, flags);
	if (!hlist_empty(&rp->free_instances)) {
		ri = hlist_entry(rp->free_instances.first,
				struct kretprobe_instance, hlist);
		hlist_del(&ri->hlist);
		spin_unlock_irqrestore(&rp->lock, flags);

		ri->rp = rp;
		ri->task = current;

		if (rp->entry_handler && rp->entry_handler(ri, regs))
			return 0;

		arch_prepare_kretprobe(ri, regs);

		/* XXX(hch): why is there no hlist_move_head? */
		INIT_HLIST_NODE(&ri->hlist);
		kretprobe_table_lock(hash, &flags);
		hlist_add_head(&ri->hlist, &kretprobe_inst_table[hash]);
		kretprobe_table_unlock(hash, &flags);
	} else {
		rp->nmissed++;
		spin_unlock_irqrestore(&rp->lock, flags);
	}
	return 0;
}
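
/*
 * Register a kretprobe: check the kretprobe blacklist, pre-allocate
 * rp->maxactive instances (scaled by the CPU count when the caller didn't
 * set it), and plant the entry kprobe with pre_handler_kretprobe() above.
 */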
int __kprobes register_kretprobe(struct kretprobe *rp)
{
	int ret = 0;
	struct kretprobe_instance *inst;
	int i;
	void *addr;

	if (kretprobe_blacklist_size) {
		addr = kprobe_addr(&rp->kp);
		if (!addr)
			return -EINVAL;

		for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
			if (kretprobe_blacklist[i].addr == addr)
				return -EINVAL;
		}
	}

	rp->kp.pre_handler = pre_handler_kretprobe;
	rp->kp.post_handler = NULL;
	rp->kp.fault_handler = NULL;
	rp->kp.break_handler = NULL;

	/* Pre-allocate memory for max kretprobe instances */
	if (rp->maxactive <= 0) {
#ifdef CONFIG_PREEMPT
		rp->maxactive = max_t(unsigned int, 10, 2*num_possible_cpus());
#else
		rp->maxactive = num_possible_cpus();
#endif
	}
	spin_lock_init(&rp->lock);
	INIT_HLIST_HEAD(&rp->free_instances);
	for (i = 0; i < rp->maxactive; i++) {
		inst = kmalloc(sizeof(struct kretprobe_instance) +
			       rp->data_size, GFP_KERNEL);
		if (inst == NULL) {
			free_rp_inst(rp);
			return -ENOMEM;
		}
		INIT_HLIST_NODE(&inst->hlist);
		hlist_add_head(&inst->hlist, &rp->free_instances);
	}

	rp->nmissed = 0;
	/* Establish function entry probe point */
	ret = register_kprobe(&rp->kp);
	if (ret != 0)
		free_rp_inst(rp);
	return ret;
}
EXPORT_SYMBOL_GPL(register_kretprobe);
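
/*
 * Example usage (a minimal sketch, not part of this file; the handler name,
 * probed symbol and maxactive value are illustrative assumptions):
 *
 *	static int my_ret_handler(struct kretprobe_instance *ri,
 *				  struct pt_regs *regs)
 *	{
 *		pr_info("%s returned\n", ri->rp->kp.symbol_name);
 *		return 0;
 *	}
 *
 *	static struct kretprobe my_rp = {
 *		.kp.symbol_name	= "do_fork",
 *		.handler	= my_ret_handler,
 *		.maxactive	= 20,
 *	};
 *
 *	ret = register_kretprobe(&my_rp);
 *	...
 *	unregister_kretprobe(&my_rp);
 */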
int __kprobes register_kretprobes(struct kretprobe **rps, int num)
{
	int ret = 0, i;

	if (num <= 0)
		return -EINVAL;
	for (i = 0; i < num; i++) {
		ret = register_kretprobe(rps[i]);
		if (ret < 0) {
			if (i > 0)
				unregister_kretprobes(rps, i);
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL_GPL(register_kretprobes);

void __kprobes unregister_kretprobe(struct kretprobe *rp)
{
	unregister_kretprobes(&rp, 1);
}
EXPORT_SYMBOL_GPL(unregister_kretprobe);

void __kprobes unregister_kretprobes(struct kretprobe **rps, int num)
{
	int i;

	if (num <= 0)
		return;
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < num; i++)
		if (__unregister_kprobe_top(&rps[i]->kp) < 0)
			rps[i]->kp.addr = NULL;
	mutex_unlock(&kprobe_mutex);

	synchronize_sched();
	for (i = 0; i < num; i++) {
		if (rps[i]->kp.addr) {
			__unregister_kprobe_bottom(&rps[i]->kp);
			cleanup_rp_inst(rps[i]);
		}
	}
}
EXPORT_SYMBOL_GPL(unregister_kretprobes);

#else /* CONFIG_KRETPROBES */
int __kprobes register_kretprobe(struct kretprobe *rp)
{
	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(register_kretprobe);

int __kprobes register_kretprobes(struct kretprobe **rps, int num)
{
	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(register_kretprobes);

void __kprobes unregister_kretprobe(struct kretprobe *rp)
{
}
EXPORT_SYMBOL_GPL(unregister_kretprobe);

void __kprobes unregister_kretprobes(struct kretprobe **rps, int num)
{
}
EXPORT_SYMBOL_GPL(unregister_kretprobes);

static int __kprobes pre_handler_kretprobe(struct kprobe *p,
					   struct pt_regs *regs)
{
	return 0;
}
#endif /* CONFIG_KRETPROBES */
  1359. /* Set the kprobe gone and remove its instruction buffer. */
  1360. static void __kprobes kill_kprobe(struct kprobe *p)
  1361. {
  1362. struct kprobe *kp;
  1363. p->flags |= KPROBE_FLAG_GONE;
  1364. if (kprobe_aggrprobe(p)) {
  1365. /*
  1366. * If this is an aggr_kprobe, we have to list all the
  1367. * chained probes and mark them GONE.
  1368. */
  1369. list_for_each_entry_rcu(kp, &p->list, list)
  1370. kp->flags |= KPROBE_FLAG_GONE;
  1371. p->post_handler = NULL;
  1372. p->break_handler = NULL;
  1373. kill_optimized_kprobe(p);
  1374. }
  1375. /*
  1376. * Here, we can remove insn_slot safely, because no thread calls
  1377. * the original probed function (which will be freed soon) any more.
  1378. */
  1379. arch_remove_kprobe(p);
  1380. }
  1381. /* Disable one kprobe */
  1382. int __kprobes disable_kprobe(struct kprobe *kp)
  1383. {
  1384. int ret = 0;
  1385. struct kprobe *p;
  1386. mutex_lock(&kprobe_mutex);
  1387. /* Check whether specified probe is valid. */
  1388. p = __get_valid_kprobe(kp);
  1389. if (unlikely(p == NULL)) {
  1390. ret = -EINVAL;
  1391. goto out;
  1392. }
  1393. /* If the probe is already disabled (or gone), just return */
  1394. if (kprobe_disabled(kp))
  1395. goto out;
  1396. kp->flags |= KPROBE_FLAG_DISABLED;
  1397. if (p != kp)
  1398. /* When kp != p, p is always enabled. */
  1399. try_to_disable_aggr_kprobe(p);
  1400. if (!kprobes_all_disarmed && kprobe_disabled(p))
  1401. disarm_kprobe(p);
  1402. out:
  1403. mutex_unlock(&kprobe_mutex);
  1404. return ret;
  1405. }
  1406. EXPORT_SYMBOL_GPL(disable_kprobe);

/* Enable one kprobe */
int __kprobes enable_kprobe(struct kprobe *kp)
{
	int ret = 0;
	struct kprobe *p;

	mutex_lock(&kprobe_mutex);
	/* Check whether specified probe is valid. */
	p = __get_valid_kprobe(kp);
	if (unlikely(p == NULL)) {
		ret = -EINVAL;
		goto out;
	}
	if (kprobe_gone(kp)) {
		/* This kprobe is gone; we can't enable it. */
		ret = -EINVAL;
		goto out;
	}
	if (p != kp)
		kp->flags &= ~KPROBE_FLAG_DISABLED;
	if (!kprobes_all_disarmed && kprobe_disabled(p)) {
		p->flags &= ~KPROBE_FLAG_DISABLED;
		arm_kprobe(p);
	}
out:
	mutex_unlock(&kprobe_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(enable_kprobe);
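
/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * file): with a kprobe already registered, a caller can mute it and later
 * re-arm it without unregistering.  "my_kp" is an assumed, previously
 * registered probe.
 *
 *	disable_kprobe(&my_kp);	// handlers stop firing; probe stays registered
 *	...
 *	enable_kprobe(&my_kp);	// re-arm; returns -EINVAL if the probe is gone
 */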

void __kprobes dump_kprobe(struct kprobe *kp)
{
	printk(KERN_WARNING "Dumping kprobe:\n");
	printk(KERN_WARNING "Name: %s\nAddress: %p\nOffset: %x\n",
	       kp->symbol_name, kp->addr, kp->offset);
}
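
/*
 * For reference (editor's addition): given the format strings above, the
 * warning dumped for a hypothetical probe at do_fork+0x0 would read roughly:
 *
 *	Dumping kprobe:
 *	Name: do_fork
 *	Address: ffffffff81234560
 *	Offset: 0
 *
 * (The address value is invented for the example; %p prints the pointer.)
 */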

/* Module notifier callback, checking kprobes on the module */
static int __kprobes kprobes_module_callback(struct notifier_block *nb,
					     unsigned long val, void *data)
{
	struct module *mod = data;
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;
	unsigned int i;
	int checkcore = (val == MODULE_STATE_GOING);

	if (val != MODULE_STATE_GOING && val != MODULE_STATE_LIVE)
		return NOTIFY_DONE;

	/*
	 * When MODULE_STATE_GOING is notified, both the module .text and
	 * .init.text sections will be freed. When MODULE_STATE_LIVE is
	 * notified, only the .init.text section is freed. We need to
	 * disable the kprobes which have been inserted in those sections.
	 */
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, node, head, hlist)
			if (within_module_init((unsigned long)p->addr, mod) ||
			    (checkcore &&
			     within_module_core((unsigned long)p->addr, mod))) {
				/*
				 * The vaddr this probe is installed at will
				 * soon be vfreed but not synced to disk.
				 * Hence, disarming the breakpoint isn't
				 * needed.
				 */
				kill_kprobe(p);
			}
	}
	mutex_unlock(&kprobe_mutex);
	return NOTIFY_DONE;
}

static struct notifier_block kprobe_module_nb = {
	.notifier_call = kprobes_module_callback,
	.priority = 0
};

static int __init init_kprobes(void)
{
	int i, err = 0;
	unsigned long offset = 0, size = 0;
	char *modname, namebuf[128];
	const char *symbol_name;
	void *addr;
	struct kprobe_blackpoint *kb;

	/* FIXME allocate the probe table, currently defined statically */
	/* initialize all list heads */
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		INIT_HLIST_HEAD(&kprobe_table[i]);
		INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
		spin_lock_init(&(kretprobe_table_locks[i].lock));
	}

	/*
	 * Lookup and populate the kprobe_blacklist.
	 *
	 * Unlike the kretprobe blacklist, we'll need to determine
	 * the range of addresses that belong to the blacklisted
	 * functions, since a kprobe need not necessarily be at the
	 * beginning of a function.
	 */
	for (kb = kprobe_blacklist; kb->name != NULL; kb++) {
		kprobe_lookup_name(kb->name, addr);
		if (!addr)
			continue;

		kb->start_addr = (unsigned long)addr;
		symbol_name = kallsyms_lookup(kb->start_addr,
				&size, &offset, &modname, namebuf);
		if (!symbol_name)
			kb->range = 0;
		else
			kb->range = size;
	}

	if (kretprobe_blacklist_size) {
		/* lookup the function address from its name */
		for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
			kprobe_lookup_name(kretprobe_blacklist[i].name,
					   kretprobe_blacklist[i].addr);
			if (!kretprobe_blacklist[i].addr)
				printk("kretprobe: lookup failed: %s\n",
				       kretprobe_blacklist[i].name);
		}
	}

#if defined(CONFIG_OPTPROBES)
#if defined(__ARCH_WANT_KPROBES_INSN_SLOT)
	/* Init kprobe_optinsn_slots */
	kprobe_optinsn_slots.insn_size = MAX_OPTINSN_SIZE;
#endif
	/* By default, kprobes can be optimized */
	kprobes_allow_optimization = true;
#endif

	/* By default, kprobes are armed */
	kprobes_all_disarmed = false;

	err = arch_init_kprobes();
	if (!err)
		err = register_die_notifier(&kprobe_exceptions_nb);
	if (!err)
		err = register_module_notifier(&kprobe_module_nb);

	kprobes_initialized = (err == 0);

	if (!err)
		init_test_probes();
	return err;
}

#ifdef CONFIG_DEBUG_FS
static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p,
		const char *sym, int offset, char *modname, struct kprobe *pp)
{
	char *kprobe_type;

	if (p->pre_handler == pre_handler_kretprobe)
		kprobe_type = "r";
	else if (p->pre_handler == setjmp_pre_handler)
		kprobe_type = "j";
	else
		kprobe_type = "k";

	if (sym)
		seq_printf(pi, "%p %s %s+0x%x %s ",
			p->addr, kprobe_type, sym, offset,
			(modname ? modname : " "));
	else
		seq_printf(pi, "%p %s %p ",
			p->addr, kprobe_type, p->addr);

	if (!pp)
		pp = p;
	seq_printf(pi, "%s%s%s\n",
		(kprobe_gone(p) ? "[GONE]" : ""),
		((kprobe_disabled(p) && !kprobe_gone(p)) ? "[DISABLED]" : ""),
		(kprobe_optimized(pp) ? "[OPTIMIZED]" : ""));
}
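
/*
 * For reference (editor's addition): given the seq_printf() calls above, one
 * line of the debugfs "list" file looks roughly like
 *
 *	<address> <k|r|j> <symbol>+0x<offset> <module> [GONE][DISABLED][OPTIMIZED]
 *
 * For example, a disabled plain kprobe on an invented symbol might print as
 *
 *	ffffffff811234a0 k do_fork+0x0  [DISABLED]
 *
 * where the address is made up and the flag tags appear only when they apply.
 */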

static void __kprobes *kprobe_seq_start(struct seq_file *f, loff_t *pos)
{
	return (*pos < KPROBE_TABLE_SIZE) ? pos : NULL;
}

static void __kprobes *kprobe_seq_next(struct seq_file *f, void *v, loff_t *pos)
{
	(*pos)++;
	if (*pos >= KPROBE_TABLE_SIZE)
		return NULL;
	return pos;
}

static void __kprobes kprobe_seq_stop(struct seq_file *f, void *v)
{
	/* Nothing to do */
}

static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p, *kp;
	const char *sym = NULL;
	unsigned int i = *(loff_t *) v;
	unsigned long offset = 0;
	char *modname, namebuf[128];

	head = &kprobe_table[i];
	preempt_disable();
	hlist_for_each_entry_rcu(p, node, head, hlist) {
		sym = kallsyms_lookup((unsigned long)p->addr, NULL,
					&offset, &modname, namebuf);
		if (kprobe_aggrprobe(p)) {
			list_for_each_entry_rcu(kp, &p->list, list)
				report_probe(pi, kp, sym, offset, modname, p);
		} else
			report_probe(pi, p, sym, offset, modname, NULL);
	}
	preempt_enable();
	return 0;
}

static const struct seq_operations kprobes_seq_ops = {
	.start = kprobe_seq_start,
	.next  = kprobe_seq_next,
	.stop  = kprobe_seq_stop,
	.show  = show_kprobe_addr
};

static int __kprobes kprobes_open(struct inode *inode, struct file *filp)
{
	return seq_open(filp, &kprobes_seq_ops);
}

static const struct file_operations debugfs_kprobes_operations = {
	.open    = kprobes_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
};

static void __kprobes arm_all_kprobes(void)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;
	unsigned int i;

	mutex_lock(&kprobe_mutex);

	/* If kprobes are armed, just return */
	if (!kprobes_all_disarmed)
		goto already_enabled;

	/* Arming kprobes doesn't optimize them by itself */
	mutex_lock(&text_mutex);
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, node, head, hlist)
			if (!kprobe_disabled(p))
				__arm_kprobe(p);
	}
	mutex_unlock(&text_mutex);

	kprobes_all_disarmed = false;
	printk(KERN_INFO "Kprobes globally enabled\n");

already_enabled:
	mutex_unlock(&kprobe_mutex);
	return;
}

static void __kprobes disarm_all_kprobes(void)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;
	unsigned int i;

	mutex_lock(&kprobe_mutex);

	/* If kprobes are already disarmed, just return */
	if (kprobes_all_disarmed)
		goto already_disabled;

	kprobes_all_disarmed = true;
	printk(KERN_INFO "Kprobes globally disabled\n");

	/*
	 * Here we call get_online_cpus() to avoid a text_mutex deadlock,
	 * because disarming may also unoptimize kprobes.
	 */
	get_online_cpus();
	mutex_lock(&text_mutex);
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, node, head, hlist) {
			if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p))
				__disarm_kprobe(p);
		}
	}
	mutex_unlock(&text_mutex);
	put_online_cpus();
	mutex_unlock(&kprobe_mutex);

	/* Allow all currently running kprobes to complete */
	synchronize_sched();
	return;

already_disabled:
	mutex_unlock(&kprobe_mutex);
	return;
}

/*
 * XXX: The debugfs bool file interface doesn't allow for callbacks
 * when the bool state is switched. We can reuse that facility when it
 * becomes available.
 */
static ssize_t read_enabled_file_bool(struct file *file,
	       char __user *user_buf, size_t count, loff_t *ppos)
{
	char buf[3];

	if (!kprobes_all_disarmed)
		buf[0] = '1';
	else
		buf[0] = '0';
	buf[1] = '\n';
	buf[2] = 0x00;
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t write_enabled_file_bool(struct file *file,
	       const char __user *user_buf, size_t count, loff_t *ppos)
{
	char buf[32];
	int buf_size;

	buf_size = min(count, (sizeof(buf)-1));
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	switch (buf[0]) {
	case 'y':
	case 'Y':
	case '1':
		arm_all_kprobes();
		break;
	case 'n':
	case 'N':
	case '0':
		disarm_all_kprobes();
		break;
	}

	return count;
}

static const struct file_operations fops_kp = {
	.read =   read_enabled_file_bool,
	.write =  write_enabled_file_bool,
	.llseek = default_llseek,
};
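
/*
 * Usage note (editor's addition): together with debugfs_kprobe_init() below,
 * fops_kp backs the "kprobes/enabled" debugfs file.  Assuming debugfs is
 * mounted at the conventional /sys/kernel/debug, writing '0'/'n'/'N' invokes
 * disarm_all_kprobes() and writing '1'/'y'/'Y' invokes arm_all_kprobes(),
 * while reading reports the current state, e.g.:
 *
 *	echo 0 > /sys/kernel/debug/kprobes/enabled	(disarm all kprobes)
 *	cat /sys/kernel/debug/kprobes/enabled		(prints "0" or "1")
 *
 * Registered probes remain listed in /sys/kernel/debug/kprobes/list.
 */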

static int __kprobes debugfs_kprobe_init(void)
{
	struct dentry *dir, *file;
	unsigned int value = 1;

	dir = debugfs_create_dir("kprobes", NULL);
	if (!dir)
		return -ENOMEM;

	file = debugfs_create_file("list", 0444, dir, NULL,
				&debugfs_kprobes_operations);
	if (!file) {
		debugfs_remove(dir);
		return -ENOMEM;
	}

	file = debugfs_create_file("enabled", 0600, dir,
					&value, &fops_kp);
	if (!file) {
		debugfs_remove(dir);
		return -ENOMEM;
	}

	return 0;
}

late_initcall(debugfs_kprobe_init);
#endif /* CONFIG_DEBUG_FS */

module_init(init_kprobes);

/* defined in arch/.../kernel/kprobes.c */
EXPORT_SYMBOL_GPL(jprobe_return);