/*
 *  Kernel Probes (KProbes)
 *  kernel/kprobes.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2002, 2004
 *
 * 2002-Oct	Created by Vamsi Krishna S <vamsi_krishna@in.ibm.com> Kernel
 *		Probes initial implementation (includes suggestions from
 *		Rusty Russell).
 * 2004-Aug	Updated by Prasanna S Panchamukhi <prasanna@in.ibm.com> with
 *		hlists and exceptions notifier as suggested by Andi Kleen.
 * 2004-July	Suparna Bhattacharya <suparna@in.ibm.com> added jumper probes
 *		interface to access function arguments.
 * 2004-Sep	Prasanna S Panchamukhi <prasanna@in.ibm.com> Changed Kprobes
 *		exceptions notifier to be first on the priority list.
 * 2005-May	Hien Nguyen <hien@us.ibm.com>, Jim Keniston
 *		<jkenisto@us.ibm.com> and Prasanna S Panchamukhi
 *		<prasanna@in.ibm.com> added function-return probes.
 */
#include <linux/kprobes.h>
#include <linux/hash.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/module.h>
#include <linux/moduleloader.h>
#include <linux/kallsyms.h>
#include <linux/freezer.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/kdebug.h>
#include <linux/memory.h>

#include <asm-generic/sections.h>
#include <asm/cacheflush.h>
#include <asm/errno.h>
#include <asm/uaccess.h>

#define KPROBE_HASH_BITS 6
#define KPROBE_TABLE_SIZE (1 << KPROBE_HASH_BITS)

/*
 * Some oddball architectures like 64bit powerpc have function descriptors
 * so this must be overridable.
 */
#ifndef kprobe_lookup_name
#define kprobe_lookup_name(name, addr) \
	addr = ((kprobe_opcode_t *)(kallsyms_lookup_name(name)))
#endif
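
/*
 * Usage sketch for the default macro (illustrative only; "do_fork" is just
 * an example symbol). Note that the macro assigns through its second
 * argument rather than returning a value:
 *
 *	kprobe_opcode_t *addr;
 *	kprobe_lookup_name("do_fork", addr);
 *	if (!addr)
 *		return;		// symbol not found in kallsyms
 */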
static int kprobes_initialized;
static struct hlist_head kprobe_table[KPROBE_TABLE_SIZE];
static struct hlist_head kretprobe_inst_table[KPROBE_TABLE_SIZE];

/* NOTE: change this value only with kprobe_mutex held */
static bool kprobes_all_disarmed;

static DEFINE_MUTEX(kprobe_mutex);	/* Protects kprobe_table */
static DEFINE_PER_CPU(struct kprobe *, kprobe_instance) = NULL;
static struct {
	spinlock_t lock ____cacheline_aligned_in_smp;
} kretprobe_table_locks[KPROBE_TABLE_SIZE];

static spinlock_t *kretprobe_table_lock_ptr(unsigned long hash)
{
	return &(kretprobe_table_locks[hash].lock);
}

/*
 * Normally, functions that we'd want to prohibit kprobes in are marked
 * __kprobes. But there are cases where such functions already belong to
 * a different section (__sched for preempt_schedule).
 *
 * For such cases, we now have a blacklist.
 */
static struct kprobe_blackpoint kprobe_blacklist[] = {
	{"preempt_schedule",},
	{"native_get_debugreg",},
	{"irq_entries_start",},
	{"common_interrupt",},
	{NULL}    /* Terminator */
};

#ifdef __ARCH_WANT_KPROBES_INSN_SLOT
/*
 * kprobe->ainsn.insn points to the copy of the instruction to be
 * single-stepped. x86_64, POWER4 and above have no-exec support and
 * stepping on the instruction on a vmalloced/kmalloced/data page
 * is a recipe for disaster.
 */
#define INSNS_PER_PAGE	(PAGE_SIZE/(MAX_INSN_SIZE * sizeof(kprobe_opcode_t)))
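/*
 * A worked example (assuming x86-64 values; check your arch headers):
 * with PAGE_SIZE = 4096, MAX_INSN_SIZE = 16 and a one-byte
 * kprobe_opcode_t, INSNS_PER_PAGE = 4096 / (16 * 1) = 256 slots per page.
 */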
struct kprobe_insn_page {
	struct list_head list;
	kprobe_opcode_t *insns;		/* Page of instruction slots */
	char slot_used[INSNS_PER_PAGE];
	int nused;
	int ngarbage;
};

enum kprobe_slot_state {
	SLOT_CLEAN = 0,
	SLOT_DIRTY = 1,
	SLOT_USED = 2,
};

static DEFINE_MUTEX(kprobe_insn_mutex);	/* Protects kprobe_insn_pages */
static LIST_HEAD(kprobe_insn_pages);
static int kprobe_garbage_slots;
static int collect_garbage_slots(void);

static int __kprobes check_safety(void)
{
	int ret = 0;
#if defined(CONFIG_PREEMPT) && defined(CONFIG_FREEZER)
	ret = freeze_processes();
	if (ret == 0) {
		struct task_struct *p, *q;
		do_each_thread(p, q) {
			if (p != current && p->state == TASK_RUNNING &&
			    p->pid != 0) {
				printk("Check failed: %s is running\n", p->comm);
				ret = -1;
				goto loop_end;
			}
		} while_each_thread(p, q);
	}
loop_end:
	thaw_processes();
#else
	synchronize_sched();
#endif
	return ret;
}

/**
 * __get_insn_slot() - Find a slot on an executable page for an instruction.
 * We allocate an executable page if there's no room on existing ones.
 */
static kprobe_opcode_t __kprobes *__get_insn_slot(void)
{
	struct kprobe_insn_page *kip;

 retry:
	list_for_each_entry(kip, &kprobe_insn_pages, list) {
		if (kip->nused < INSNS_PER_PAGE) {
			int i;
			for (i = 0; i < INSNS_PER_PAGE; i++) {
				if (kip->slot_used[i] == SLOT_CLEAN) {
					kip->slot_used[i] = SLOT_USED;
					kip->nused++;
					return kip->insns + (i * MAX_INSN_SIZE);
				}
			}
			/* Surprise!  No unused slots.  Fix kip->nused. */
			kip->nused = INSNS_PER_PAGE;
		}
	}

	/* If there are any garbage slots, collect them and try again. */
	if (kprobe_garbage_slots && collect_garbage_slots() == 0) {
		goto retry;
	}

	/* All out of space.  Need to allocate a new page.  Use slot 0. */
	kip = kmalloc(sizeof(struct kprobe_insn_page), GFP_KERNEL);
	if (!kip)
		return NULL;

	/*
	 * Use module_alloc so this page is within +/- 2GB of where the
	 * kernel image and loaded module images reside. This is required
	 * so x86_64 can correctly handle the %rip-relative fixups.
	 */
	kip->insns = module_alloc(PAGE_SIZE);
	if (!kip->insns) {
		kfree(kip);
		return NULL;
	}
	INIT_LIST_HEAD(&kip->list);
	list_add(&kip->list, &kprobe_insn_pages);
	memset(kip->slot_used, SLOT_CLEAN, INSNS_PER_PAGE);
	kip->slot_used[0] = SLOT_USED;
	kip->nused = 1;
	kip->ngarbage = 0;
	return kip->insns;
}

kprobe_opcode_t __kprobes *get_insn_slot(void)
{
	kprobe_opcode_t *ret;

	mutex_lock(&kprobe_insn_mutex);
	ret = __get_insn_slot();
	mutex_unlock(&kprobe_insn_mutex);

	return ret;
}

/* Return 1 if all garbage is collected, otherwise 0. */
static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
{
	kip->slot_used[idx] = SLOT_CLEAN;
	kip->nused--;
	if (kip->nused == 0) {
		/*
		 * Page is no longer in use.  Free it unless
		 * it's the last one.  We keep the last one
		 * so as not to have to set it up again the
		 * next time somebody inserts a probe.
		 */
		if (!list_is_singular(&kprobe_insn_pages)) {
			list_del(&kip->list);
			module_free(NULL, kip->insns);
			kfree(kip);
		}
		return 1;
	}
	return 0;
}

static int __kprobes collect_garbage_slots(void)
{
	struct kprobe_insn_page *kip, *next;

	/* Ensure no-one is preempted on the garbage slots */
	if (check_safety())
		return -EAGAIN;

	list_for_each_entry_safe(kip, next, &kprobe_insn_pages, list) {
		int i;
		if (kip->ngarbage == 0)
			continue;
		kip->ngarbage = 0;	/* we will collect all garbage slots */
		for (i = 0; i < INSNS_PER_PAGE; i++) {
			if (kip->slot_used[i] == SLOT_DIRTY &&
			    collect_one_slot(kip, i))
				break;
		}
	}
	kprobe_garbage_slots = 0;
	return 0;
}

void __kprobes free_insn_slot(kprobe_opcode_t *slot, int dirty)
{
	struct kprobe_insn_page *kip;

	mutex_lock(&kprobe_insn_mutex);
	list_for_each_entry(kip, &kprobe_insn_pages, list) {
		if (kip->insns <= slot &&
		    slot < kip->insns + (INSNS_PER_PAGE * MAX_INSN_SIZE)) {
			int i = (slot - kip->insns) / MAX_INSN_SIZE;
			if (dirty) {
				kip->slot_used[i] = SLOT_DIRTY;
				kip->ngarbage++;
			} else
				collect_one_slot(kip, i);
			break;
		}
	}

	if (dirty && ++kprobe_garbage_slots > INSNS_PER_PAGE)
		collect_garbage_slots();

	mutex_unlock(&kprobe_insn_mutex);
}
#endif
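
/*
 * Sketch of how arch code typically uses the slot API above (illustrative,
 * loosely modelled on arch/x86/kernel/kprobes.c; not part of this file):
 *
 *	int __kprobes arch_prepare_kprobe(struct kprobe *p)
 *	{
 *		p->ainsn.insn = get_insn_slot();
 *		if (!p->ainsn.insn)
 *			return -ENOMEM;
 *		// ... copy the original instruction into the slot here ...
 *		return 0;
 *	}
 *
 * and arch_remove_kprobe() hands the slot back via free_insn_slot().
 */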
/* We have preemption disabled, so it is safe to use __ versions */
static inline void set_kprobe_instance(struct kprobe *kp)
{
	__get_cpu_var(kprobe_instance) = kp;
}

static inline void reset_kprobe_instance(void)
{
	__get_cpu_var(kprobe_instance) = NULL;
}

/*
 * This routine is called either:
 *	- under the kprobe_mutex - during kprobe_[un]register()
 *				OR
 *	- with preemption disabled - from arch/xxx/kernel/kprobes.c
 */
struct kprobe __kprobes *get_kprobe(void *addr)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;

	head = &kprobe_table[hash_ptr(addr, KPROBE_HASH_BITS)];
	hlist_for_each_entry_rcu(p, node, head, hlist) {
		if (p->addr == addr)
			return p;
	}
	return NULL;
}

/* Arm a kprobe with text_mutex */
static void __kprobes arm_kprobe(struct kprobe *kp)
{
	mutex_lock(&text_mutex);
	arch_arm_kprobe(kp);
	mutex_unlock(&text_mutex);
}

/* Disarm a kprobe with text_mutex */
static void __kprobes disarm_kprobe(struct kprobe *kp)
{
	mutex_lock(&text_mutex);
	arch_disarm_kprobe(kp);
	mutex_unlock(&text_mutex);
}

/*
 * Aggregate handlers for multiple kprobes support - these handlers
 * take care of invoking the individual kprobe handlers on p->list
 */
static int __kprobes aggr_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->pre_handler && likely(!kprobe_disabled(kp))) {
			set_kprobe_instance(kp);
			if (kp->pre_handler(kp, regs))
				return 1;
		}
		reset_kprobe_instance();
	}
	return 0;
}

static void __kprobes aggr_post_handler(struct kprobe *p, struct pt_regs *regs,
					unsigned long flags)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (kp->post_handler && likely(!kprobe_disabled(kp))) {
			set_kprobe_instance(kp);
			kp->post_handler(kp, regs, flags);
			reset_kprobe_instance();
		}
	}
}

static int __kprobes aggr_fault_handler(struct kprobe *p, struct pt_regs *regs,
					int trapnr)
{
	struct kprobe *cur = __get_cpu_var(kprobe_instance);

	/*
	 * If we faulted "during" the execution of a user-specified
	 * probe handler, invoke just that probe's fault handler.
	 */
	if (cur && cur->fault_handler) {
		if (cur->fault_handler(cur, regs, trapnr))
			return 1;
	}
	return 0;
}

static int __kprobes aggr_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe *cur = __get_cpu_var(kprobe_instance);
	int ret = 0;

	if (cur && cur->break_handler) {
		if (cur->break_handler(cur, regs))
			ret = 1;
	}
	reset_kprobe_instance();
	return ret;
}

/* Walks the list and increments the nmissed count for the multiprobe case */
void __kprobes kprobes_inc_nmissed_count(struct kprobe *p)
{
	struct kprobe *kp;
	if (p->pre_handler != aggr_pre_handler) {
		p->nmissed++;
	} else {
		list_for_each_entry_rcu(kp, &p->list, list)
			kp->nmissed++;
	}
	return;
}

void __kprobes recycle_rp_inst(struct kretprobe_instance *ri,
				struct hlist_head *head)
{
	struct kretprobe *rp = ri->rp;

	/* remove rp inst off the kretprobe_inst_table */
	hlist_del(&ri->hlist);
	INIT_HLIST_NODE(&ri->hlist);
	if (likely(rp)) {
		spin_lock(&rp->lock);
		hlist_add_head(&ri->hlist, &rp->free_instances);
		spin_unlock(&rp->lock);
	} else
		/* Unregistering */
		hlist_add_head(&ri->hlist, head);
}

void __kprobes kretprobe_hash_lock(struct task_struct *tsk,
			 struct hlist_head **head, unsigned long *flags)
{
	unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
	spinlock_t *hlist_lock;

	*head = &kretprobe_inst_table[hash];
	hlist_lock = kretprobe_table_lock_ptr(hash);
	spin_lock_irqsave(hlist_lock, *flags);
}

static void __kprobes kretprobe_table_lock(unsigned long hash,
	unsigned long *flags)
{
	spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
	spin_lock_irqsave(hlist_lock, *flags);
}

void __kprobes kretprobe_hash_unlock(struct task_struct *tsk,
	unsigned long *flags)
{
	unsigned long hash = hash_ptr(tsk, KPROBE_HASH_BITS);
	spinlock_t *hlist_lock;

	hlist_lock = kretprobe_table_lock_ptr(hash);
	spin_unlock_irqrestore(hlist_lock, *flags);
}

void __kprobes kretprobe_table_unlock(unsigned long hash, unsigned long *flags)
{
	spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
	spin_unlock_irqrestore(hlist_lock, *flags);
}
/*
 * This function is called from finish_task_switch() when task tk becomes
 * dead, so that we can recycle any function-return probe instances
 * associated with this task. These left-over instances represent probed
 * functions that have been called but will never return.
 */
void __kprobes kprobe_flush_task(struct task_struct *tk)
{
	struct kretprobe_instance *ri;
	struct hlist_head *head, empty_rp;
	struct hlist_node *node, *tmp;
	unsigned long hash, flags = 0;

	if (unlikely(!kprobes_initialized))
		/* Early boot. kretprobe_table_locks not yet initialized. */
		return;

	/* empty_rp must be initialized before recycle_rp_inst() can add to it */
	INIT_HLIST_HEAD(&empty_rp);
	hash = hash_ptr(tk, KPROBE_HASH_BITS);
	head = &kretprobe_inst_table[hash];
	kretprobe_table_lock(hash, &flags);
	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
		if (ri->task == tk)
			recycle_rp_inst(ri, &empty_rp);
	}
	kretprobe_table_unlock(hash, &flags);
	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
}

static inline void free_rp_inst(struct kretprobe *rp)
{
	struct kretprobe_instance *ri;
	struct hlist_node *pos, *next;

	hlist_for_each_entry_safe(ri, pos, next, &rp->free_instances, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
}

static void __kprobes cleanup_rp_inst(struct kretprobe *rp)
{
	unsigned long flags, hash;
	struct kretprobe_instance *ri;
	struct hlist_node *pos, *next;
	struct hlist_head *head;

	/* No race here */
	for (hash = 0; hash < KPROBE_TABLE_SIZE; hash++) {
		kretprobe_table_lock(hash, &flags);
		head = &kretprobe_inst_table[hash];
		hlist_for_each_entry_safe(ri, pos, next, head, hlist) {
			if (ri->rp == rp)
				ri->rp = NULL;
		}
		kretprobe_table_unlock(hash, &flags);
	}
	free_rp_inst(rp);
}
/*
 * Keep all fields in the kprobe consistent.
 */
static inline void copy_kprobe(struct kprobe *old_p, struct kprobe *p)
{
	memcpy(&p->opcode, &old_p->opcode, sizeof(kprobe_opcode_t));
	memcpy(&p->ainsn, &old_p->ainsn, sizeof(struct arch_specific_insn));
}

/*
 * Add the new probe to ap->list. Fail if this is the
 * second jprobe at the address - two jprobes can't coexist.
 */
static int __kprobes add_new_kprobe(struct kprobe *ap, struct kprobe *p)
{
	BUG_ON(kprobe_gone(ap) || kprobe_gone(p));

	if (p->break_handler) {
		if (ap->break_handler)
			return -EEXIST;
		list_add_tail_rcu(&p->list, &ap->list);
		ap->break_handler = aggr_break_handler;
	} else
		list_add_rcu(&p->list, &ap->list);
	if (p->post_handler && !ap->post_handler)
		ap->post_handler = aggr_post_handler;

	if (kprobe_disabled(ap) && !kprobe_disabled(p)) {
		ap->flags &= ~KPROBE_FLAG_DISABLED;
		if (!kprobes_all_disarmed)
			/* Arm the breakpoint again. */
			arm_kprobe(ap);
	}
	return 0;
}

/*
 * Fill in the required fields of the "manager kprobe". Replace the
 * earlier kprobe in the hlist with the manager kprobe.
 */
static inline void add_aggr_kprobe(struct kprobe *ap, struct kprobe *p)
{
	copy_kprobe(p, ap);
	flush_insn_slot(ap);
	ap->addr = p->addr;
	ap->flags = p->flags;
	ap->pre_handler = aggr_pre_handler;
	ap->fault_handler = aggr_fault_handler;
	/* We don't care about a kprobe which has gone. */
	if (p->post_handler && !kprobe_gone(p))
		ap->post_handler = aggr_post_handler;
	if (p->break_handler && !kprobe_gone(p))
		ap->break_handler = aggr_break_handler;

	INIT_LIST_HEAD(&ap->list);
	list_add_rcu(&p->list, &ap->list);

	hlist_replace_rcu(&p->hlist, &ap->hlist);
}

/*
 * This is the second or subsequent kprobe at the address - handle
 * the intricacies.
 */
static int __kprobes register_aggr_kprobe(struct kprobe *old_p,
					  struct kprobe *p)
{
	int ret = 0;
	struct kprobe *ap = old_p;

	if (old_p->pre_handler != aggr_pre_handler) {
		/* If old_p is not an aggr_kprobe, create a new aggr_kprobe. */
		ap = kzalloc(sizeof(struct kprobe), GFP_KERNEL);
		if (!ap)
			return -ENOMEM;
		add_aggr_kprobe(ap, old_p);
	}

	if (kprobe_gone(ap)) {
		/*
		 * We are attempting to insert a new probe at the same
		 * location as a probe that was in a module vaddr area
		 * which has already been freed, so its instruction slot
		 * has already been released. We need a new slot for the
		 * new probe.
		 */
		ret = arch_prepare_kprobe(ap);
		if (ret)
			/*
			 * Even if we fail to allocate a new slot, we don't
			 * need to free the aggr_kprobe. It will be used
			 * next time, or freed by unregister_kprobe().
			 */
			return ret;

		/*
		 * Clear the gone flag to prevent allocating a new slot
		 * again, and set the disabled flag because the probe is
		 * not armed yet.
		 */
		ap->flags = (ap->flags & ~KPROBE_FLAG_GONE)
			    | KPROBE_FLAG_DISABLED;
	}

	copy_kprobe(ap, p);
	return add_new_kprobe(ap, p);
}

/* Try to disable an aggr_kprobe, and return 1 on success. */
static int __kprobes try_to_disable_aggr_kprobe(struct kprobe *p)
{
	struct kprobe *kp;

	list_for_each_entry_rcu(kp, &p->list, list) {
		if (!kprobe_disabled(kp))
			/*
			 * There is an active probe on the list.
			 * We can't disable this aggr_kprobe.
			 */
			return 0;
	}
	p->flags |= KPROBE_FLAG_DISABLED;
	return 1;
}

static int __kprobes in_kprobes_functions(unsigned long addr)
{
	struct kprobe_blackpoint *kb;

	if (addr >= (unsigned long)__kprobes_text_start &&
	    addr < (unsigned long)__kprobes_text_end)
		return -EINVAL;
	/*
	 * If there exists a kprobe_blacklist, verify and
	 * fail any probe registration in the prohibited area.
	 */
	for (kb = kprobe_blacklist; kb->name != NULL; kb++) {
		if (kb->start_addr) {
			if (addr >= kb->start_addr &&
			    addr < (kb->start_addr + kb->range))
				return -EINVAL;
		}
	}
	return 0;
}

/*
 * If we have a symbol_name argument, look it up and add the offset field
 * to it. This way, we can specify a relative address to a symbol.
 */
static kprobe_opcode_t __kprobes *kprobe_addr(struct kprobe *p)
{
	kprobe_opcode_t *addr = p->addr;
	if (p->symbol_name) {
		if (addr)
			return NULL;
		kprobe_lookup_name(p->symbol_name, addr);
	}

	if (!addr)
		return NULL;
	return (kprobe_opcode_t *)(((char *)addr) + p->offset);
}
/* Check that the given kprobe is valid and return the one in kprobe_table. */
static struct kprobe * __kprobes __get_valid_kprobe(struct kprobe *p)
{
	struct kprobe *old_p, *list_p;

	old_p = get_kprobe(p->addr);
	if (unlikely(!old_p))
		return NULL;

	if (p != old_p) {
		list_for_each_entry_rcu(list_p, &old_p->list, list)
			if (list_p == p)
			/* kprobe p is a valid probe */
				goto valid;
		return NULL;
	}
valid:
	return old_p;
}

/* Return an error if the kprobe is being re-registered */
static inline int check_kprobe_rereg(struct kprobe *p)
{
	int ret = 0;
	struct kprobe *old_p;

	mutex_lock(&kprobe_mutex);
	old_p = __get_valid_kprobe(p);
	if (old_p)
		ret = -EINVAL;
	mutex_unlock(&kprobe_mutex);
	return ret;
}

int __kprobes register_kprobe(struct kprobe *p)
{
	int ret = 0;
	struct kprobe *old_p;
	struct module *probed_mod;
	kprobe_opcode_t *addr;

	addr = kprobe_addr(p);
	if (!addr)
		return -EINVAL;
	p->addr = addr;

	ret = check_kprobe_rereg(p);
	if (ret)
		return ret;

	preempt_disable();
	if (!kernel_text_address((unsigned long) p->addr) ||
	    in_kprobes_functions((unsigned long) p->addr)) {
		preempt_enable();
		return -EINVAL;
	}

	/* User can pass only KPROBE_FLAG_DISABLED to register_kprobe */
	p->flags &= KPROBE_FLAG_DISABLED;

	/*
	 * Check if we are probing a module.
	 */
	probed_mod = __module_text_address((unsigned long) p->addr);
	if (probed_mod) {
		/*
		 * We must hold a refcount of the probed module while updating
		 * its code to prohibit unexpected unloading.
		 */
		if (unlikely(!try_module_get(probed_mod))) {
			preempt_enable();
			return -EINVAL;
		}
		/*
		 * If the module has freed its .init.text, we can't insert
		 * kprobes there.
		 */
		if (within_module_init((unsigned long)p->addr, probed_mod) &&
		    probed_mod->state != MODULE_STATE_COMING) {
			module_put(probed_mod);
			preempt_enable();
			return -EINVAL;
		}
	}
	preempt_enable();

	p->nmissed = 0;
	INIT_LIST_HEAD(&p->list);
	mutex_lock(&kprobe_mutex);
	old_p = get_kprobe(p->addr);
	if (old_p) {
		ret = register_aggr_kprobe(old_p, p);
		goto out;
	}

	mutex_lock(&text_mutex);
	ret = arch_prepare_kprobe(p);
	if (ret)
		goto out_unlock_text;

	INIT_HLIST_NODE(&p->hlist);
	hlist_add_head_rcu(&p->hlist,
		       &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);

	if (!kprobes_all_disarmed && !kprobe_disabled(p))
		arch_arm_kprobe(p);

out_unlock_text:
	mutex_unlock(&text_mutex);
out:
	mutex_unlock(&kprobe_mutex);

	if (probed_mod)
		module_put(probed_mod);

	return ret;
}
EXPORT_SYMBOL_GPL(register_kprobe);
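
/*
 * Typical usage (a minimal sketch modelled on samples/kprobes/; the probed
 * symbol "do_fork" and the handler body are illustrative only):
 *
 *	static int handler_pre(struct kprobe *p, struct pt_regs *regs)
 *	{
 *		printk(KERN_INFO "pre_handler: p->addr = %p\n", p->addr);
 *		return 0;	// 0 = let the single-step proceed normally
 *	}
 *
 *	static struct kprobe kp = {
 *		.symbol_name	= "do_fork",
 *		.pre_handler	= handler_pre,
 *	};
 *
 *	// module init:	ret = register_kprobe(&kp);
 *	// module exit:	unregister_kprobe(&kp);
 */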
/*
 * Unregister a kprobe without a scheduler synchronization.
 */
static int __kprobes __unregister_kprobe_top(struct kprobe *p)
{
	struct kprobe *old_p, *list_p;

	old_p = __get_valid_kprobe(p);
	if (old_p == NULL)
		return -EINVAL;

	if (old_p == p ||
	    (old_p->pre_handler == aggr_pre_handler &&
	     list_is_singular(&old_p->list))) {
		/*
		 * Only probe on the hash list. Disarm only if kprobes are
		 * enabled and not gone - otherwise, the breakpoint would
		 * already have been removed. We save on flushing icache.
		 */
		if (!kprobes_all_disarmed && !kprobe_disabled(old_p))
			disarm_kprobe(p);
		hlist_del_rcu(&old_p->hlist);
	} else {
		if (p->break_handler && !kprobe_gone(p))
			old_p->break_handler = NULL;
		if (p->post_handler && !kprobe_gone(p)) {
			list_for_each_entry_rcu(list_p, &old_p->list, list) {
				if ((list_p != p) && (list_p->post_handler))
					goto noclean;
			}
			old_p->post_handler = NULL;
		}
noclean:
		list_del_rcu(&p->list);
		if (!kprobe_disabled(old_p)) {
			try_to_disable_aggr_kprobe(old_p);
			if (!kprobes_all_disarmed && kprobe_disabled(old_p))
				disarm_kprobe(old_p);
		}
	}
	return 0;
}

static void __kprobes __unregister_kprobe_bottom(struct kprobe *p)
{
	struct kprobe *old_p;

	if (list_empty(&p->list))
		arch_remove_kprobe(p);
	else if (list_is_singular(&p->list)) {
		/* "p" is the last child of an aggr_kprobe */
		old_p = list_entry(p->list.next, struct kprobe, list);
		list_del(&p->list);
		arch_remove_kprobe(old_p);
		kfree(old_p);
	}
}

int __kprobes register_kprobes(struct kprobe **kps, int num)
{
	int i, ret = 0;

	if (num <= 0)
		return -EINVAL;
	for (i = 0; i < num; i++) {
		ret = register_kprobe(kps[i]);
		if (ret < 0) {
			if (i > 0)
				unregister_kprobes(kps, i);
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL_GPL(register_kprobes);

void __kprobes unregister_kprobe(struct kprobe *p)
{
	unregister_kprobes(&p, 1);
}
EXPORT_SYMBOL_GPL(unregister_kprobe);

void __kprobes unregister_kprobes(struct kprobe **kps, int num)
{
	int i;

	if (num <= 0)
		return;
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < num; i++)
		if (__unregister_kprobe_top(kps[i]) < 0)
			kps[i]->addr = NULL;
	mutex_unlock(&kprobe_mutex);

	synchronize_sched();
	for (i = 0; i < num; i++)
		if (kps[i]->addr)
			__unregister_kprobe_bottom(kps[i]);
}
EXPORT_SYMBOL_GPL(unregister_kprobes);
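
/*
 * Batch registration sketch (illustrative symbols; note that on any
 * failure register_kprobes() rolls back the probes registered so far):
 *
 *	static struct kprobe kp1 = { .symbol_name = "do_fork" };
 *	static struct kprobe kp2 = { .symbol_name = "do_exit" };
 *	static struct kprobe *kps[] = { &kp1, &kp2 };
 *
 *	ret = register_kprobes(kps, ARRAY_SIZE(kps));
 *	// ...
 *	unregister_kprobes(kps, ARRAY_SIZE(kps));
 */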
static struct notifier_block kprobe_exceptions_nb = {
	.notifier_call = kprobe_exceptions_notify,
	.priority = 0x7fffffff /* we need to be notified first */
};

unsigned long __weak arch_deref_entry_point(void *entry)
{
	return (unsigned long)entry;
}

int __kprobes register_jprobes(struct jprobe **jps, int num)
{
	struct jprobe *jp;
	int ret = 0, i;

	if (num <= 0)
		return -EINVAL;
	for (i = 0; i < num; i++) {
		unsigned long addr;
		jp = jps[i];
		addr = arch_deref_entry_point(jp->entry);

		if (!kernel_text_address(addr))
			ret = -EINVAL;
		else {
			/* TODO: Verify probepoint is a function entry point */
			jp->kp.pre_handler = setjmp_pre_handler;
			jp->kp.break_handler = longjmp_break_handler;
			ret = register_kprobe(&jp->kp);
		}
		if (ret < 0) {
			if (i > 0)
				unregister_jprobes(jps, i);
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL_GPL(register_jprobes);

int __kprobes register_jprobe(struct jprobe *jp)
{
	return register_jprobes(&jp, 1);
}
EXPORT_SYMBOL_GPL(register_jprobe);

void __kprobes unregister_jprobe(struct jprobe *jp)
{
	unregister_jprobes(&jp, 1);
}
EXPORT_SYMBOL_GPL(unregister_jprobe);

void __kprobes unregister_jprobes(struct jprobe **jps, int num)
{
	int i;

	if (num <= 0)
		return;
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < num; i++)
		if (__unregister_kprobe_top(&jps[i]->kp) < 0)
			jps[i]->kp.addr = NULL;
	mutex_unlock(&kprobe_mutex);

	synchronize_sched();
	for (i = 0; i < num; i++) {
		if (jps[i]->kp.addr)
			__unregister_kprobe_bottom(&jps[i]->kp);
	}
}
EXPORT_SYMBOL_GPL(unregister_jprobes);
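
/*
 * Jprobe usage sketch (modelled loosely on samples/kprobes/jprobe_example.c;
 * the probed symbol and its signature reflect do_fork() of this kernel
 * vintage and are illustrative). The handler must mirror the probed
 * function's signature exactly and must end with jprobe_return():
 *
 *	static long jdo_fork(unsigned long clone_flags,
 *			     unsigned long stack_start, struct pt_regs *regs,
 *			     unsigned long stack_size,
 *			     int __user *parent_tidptr,
 *			     int __user *child_tidptr)
 *	{
 *		printk(KERN_INFO "jprobe: clone_flags = 0x%lx\n", clone_flags);
 *		jprobe_return();	// never returns to the caller
 *		return 0;
 *	}
 *
 *	static struct jprobe my_jprobe = {
 *		.entry		= jdo_fork,
 *		.kp.symbol_name	= "do_fork",
 *	};
 *
 *	// register_jprobe(&my_jprobe); ... unregister_jprobe(&my_jprobe);
 */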
#ifdef CONFIG_KRETPROBES
/*
 * This kprobe pre_handler is registered with every kretprobe. When the
 * probe hits, it will set up the return probe.
 */
static int __kprobes pre_handler_kretprobe(struct kprobe *p,
					   struct pt_regs *regs)
{
	struct kretprobe *rp = container_of(p, struct kretprobe, kp);
	unsigned long hash, flags = 0;
	struct kretprobe_instance *ri;

	/* TODO: consider swapping the RA only after the last pre_handler has fired */
	hash = hash_ptr(current, KPROBE_HASH_BITS);
	spin_lock_irqsave(&rp->lock, flags);
	if (!hlist_empty(&rp->free_instances)) {
		ri = hlist_entry(rp->free_instances.first,
				struct kretprobe_instance, hlist);
		hlist_del(&ri->hlist);
		spin_unlock_irqrestore(&rp->lock, flags);

		ri->rp = rp;
		ri->task = current;

		if (rp->entry_handler && rp->entry_handler(ri, regs))
			return 0;

		arch_prepare_kretprobe(ri, regs);

		/* XXX(hch): why is there no hlist_move_head? */
		INIT_HLIST_NODE(&ri->hlist);
		kretprobe_table_lock(hash, &flags);
		hlist_add_head(&ri->hlist, &kretprobe_inst_table[hash]);
		kretprobe_table_unlock(hash, &flags);
	} else {
		rp->nmissed++;
		spin_unlock_irqrestore(&rp->lock, flags);
	}
	return 0;
}

int __kprobes register_kretprobe(struct kretprobe *rp)
{
	int ret = 0;
	struct kretprobe_instance *inst;
	int i;
	void *addr;

	if (kretprobe_blacklist_size) {
		addr = kprobe_addr(&rp->kp);
		if (!addr)
			return -EINVAL;

		for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
			if (kretprobe_blacklist[i].addr == addr)
				return -EINVAL;
		}
	}

	rp->kp.pre_handler = pre_handler_kretprobe;
	rp->kp.post_handler = NULL;
	rp->kp.fault_handler = NULL;
	rp->kp.break_handler = NULL;

	/* Pre-allocate memory for max kretprobe instances */
	if (rp->maxactive <= 0) {
#ifdef CONFIG_PREEMPT
		rp->maxactive = max(10, 2 * NR_CPUS);
#else
		rp->maxactive = NR_CPUS;
#endif
	}
	spin_lock_init(&rp->lock);
	INIT_HLIST_HEAD(&rp->free_instances);
	for (i = 0; i < rp->maxactive; i++) {
		inst = kmalloc(sizeof(struct kretprobe_instance) +
			       rp->data_size, GFP_KERNEL);
		if (inst == NULL) {
			free_rp_inst(rp);
			return -ENOMEM;
		}
		INIT_HLIST_NODE(&inst->hlist);
		hlist_add_head(&inst->hlist, &rp->free_instances);
	}

	rp->nmissed = 0;
	/* Establish function entry probe point */
	ret = register_kprobe(&rp->kp);
	if (ret != 0)
		free_rp_inst(rp);
	return ret;
}
EXPORT_SYMBOL_GPL(register_kretprobe);
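
/*
 * Kretprobe usage sketch (modelled loosely on
 * samples/kprobes/kretprobe_example.c; the probed symbol is illustrative):
 *
 *	static int ret_handler(struct kretprobe_instance *ri,
 *			       struct pt_regs *regs)
 *	{
 *		printk(KERN_INFO "do_fork returned %lu\n",
 *		       regs_return_value(regs));
 *		return 0;
 *	}
 *
 *	static struct kretprobe my_kretprobe = {
 *		.handler	= ret_handler,
 *		.kp.symbol_name	= "do_fork",
 *		.maxactive	= 20,	// cap on concurrent instances
 *	};
 *
 *	// register_kretprobe(&my_kretprobe);
 *	// ...
 *	// unregister_kretprobe(&my_kretprobe);
 */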
int __kprobes register_kretprobes(struct kretprobe **rps, int num)
{
	int ret = 0, i;

	if (num <= 0)
		return -EINVAL;
	for (i = 0; i < num; i++) {
		ret = register_kretprobe(rps[i]);
		if (ret < 0) {
			if (i > 0)
				unregister_kretprobes(rps, i);
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL_GPL(register_kretprobes);

void __kprobes unregister_kretprobe(struct kretprobe *rp)
{
	unregister_kretprobes(&rp, 1);
}
EXPORT_SYMBOL_GPL(unregister_kretprobe);

void __kprobes unregister_kretprobes(struct kretprobe **rps, int num)
{
	int i;

	if (num <= 0)
		return;
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < num; i++)
		if (__unregister_kprobe_top(&rps[i]->kp) < 0)
			rps[i]->kp.addr = NULL;
	mutex_unlock(&kprobe_mutex);

	synchronize_sched();
	for (i = 0; i < num; i++) {
		if (rps[i]->kp.addr) {
			__unregister_kprobe_bottom(&rps[i]->kp);
			cleanup_rp_inst(rps[i]);
		}
	}
}
EXPORT_SYMBOL_GPL(unregister_kretprobes);

#else /* CONFIG_KRETPROBES */
int __kprobes register_kretprobe(struct kretprobe *rp)
{
	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(register_kretprobe);

int __kprobes register_kretprobes(struct kretprobe **rps, int num)
{
	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(register_kretprobes);

void __kprobes unregister_kretprobe(struct kretprobe *rp)
{
}
EXPORT_SYMBOL_GPL(unregister_kretprobe);

void __kprobes unregister_kretprobes(struct kretprobe **rps, int num)
{
}
EXPORT_SYMBOL_GPL(unregister_kretprobes);

static int __kprobes pre_handler_kretprobe(struct kprobe *p,
					   struct pt_regs *regs)
{
	return 0;
}

#endif /* CONFIG_KRETPROBES */
/* Set the kprobe gone and remove its instruction buffer. */
static void __kprobes kill_kprobe(struct kprobe *p)
{
	struct kprobe *kp;

	p->flags |= KPROBE_FLAG_GONE;
	if (p->pre_handler == aggr_pre_handler) {
		/*
		 * If this is an aggr_kprobe, we have to list all the
		 * chained probes and mark them GONE.
		 */
		list_for_each_entry_rcu(kp, &p->list, list)
			kp->flags |= KPROBE_FLAG_GONE;
		p->post_handler = NULL;
		p->break_handler = NULL;
	}
	/*
	 * Here, we can remove insn_slot safely, because no thread calls
	 * the original probed function (which will be freed soon) any more.
	 */
	arch_remove_kprobe(p);
}

void __kprobes dump_kprobe(struct kprobe *kp)
{
	printk(KERN_WARNING "Dumping kprobe:\n");
	printk(KERN_WARNING "Name: %s\nAddress: %p\nOffset: %x\n",
	       kp->symbol_name, kp->addr, kp->offset);
}
/* Module notifier callback, checking kprobes on the module */
static int __kprobes kprobes_module_callback(struct notifier_block *nb,
					     unsigned long val, void *data)
{
	struct module *mod = data;
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;
	unsigned int i;
	int checkcore = (val == MODULE_STATE_GOING);

	if (val != MODULE_STATE_GOING && val != MODULE_STATE_LIVE)
		return NOTIFY_DONE;

	/*
	 * On MODULE_STATE_GOING, both the module's .text and .init.text
	 * sections will be freed. On MODULE_STATE_LIVE, only the
	 * .init.text section is freed. We need to disable any kprobes
	 * that have been inserted in those sections.
	 */
	mutex_lock(&kprobe_mutex);
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, node, head, hlist)
			if (within_module_init((unsigned long)p->addr, mod) ||
			    (checkcore &&
			     within_module_core((unsigned long)p->addr, mod))) {
				/*
				 * The vaddr at which this probe is installed
				 * will soon be vfreed, but is never synced
				 * to disk. Hence, disarming the breakpoint
				 * isn't needed.
				 */
				kill_kprobe(p);
			}
	}
	mutex_unlock(&kprobe_mutex);
	return NOTIFY_DONE;
}

static struct notifier_block kprobe_module_nb = {
	.notifier_call = kprobes_module_callback,
	.priority = 0
};
static int __init init_kprobes(void)
{
	int i, err = 0;
	unsigned long offset = 0, size = 0;
	char *modname, namebuf[128];
	const char *symbol_name;
	void *addr;
	struct kprobe_blackpoint *kb;

	/* FIXME allocate the probe table, currently defined statically */
	/* initialize all list heads */
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		INIT_HLIST_HEAD(&kprobe_table[i]);
		INIT_HLIST_HEAD(&kretprobe_inst_table[i]);
		spin_lock_init(&(kretprobe_table_locks[i].lock));
	}

	/*
	 * Lookup and populate the kprobe_blacklist.
	 *
	 * Unlike the kretprobe blacklist, we'll need to determine
	 * the range of addresses that belong to the said functions,
	 * since a kprobe need not necessarily be at the beginning
	 * of a function.
	 */
	for (kb = kprobe_blacklist; kb->name != NULL; kb++) {
		kprobe_lookup_name(kb->name, addr);
		if (!addr)
			continue;

		kb->start_addr = (unsigned long)addr;
		symbol_name = kallsyms_lookup(kb->start_addr,
				&size, &offset, &modname, namebuf);
		if (!symbol_name)
			kb->range = 0;
		else
			kb->range = size;
	}

	if (kretprobe_blacklist_size) {
		/* lookup the function address from its name */
		for (i = 0; kretprobe_blacklist[i].name != NULL; i++) {
			kprobe_lookup_name(kretprobe_blacklist[i].name,
					   kretprobe_blacklist[i].addr);
			if (!kretprobe_blacklist[i].addr)
				printk("kretprobe: lookup failed: %s\n",
				       kretprobe_blacklist[i].name);
		}
	}

	/* By default, kprobes are armed */
	kprobes_all_disarmed = false;

	err = arch_init_kprobes();
	if (!err)
		err = register_die_notifier(&kprobe_exceptions_nb);
	if (!err)
		err = register_module_notifier(&kprobe_module_nb);

	kprobes_initialized = (err == 0);

	if (!err)
		init_test_probes();
	return err;
}
#ifdef CONFIG_DEBUG_FS
static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p,
		const char *sym, int offset, char *modname)
{
	char *kprobe_type;

	if (p->pre_handler == pre_handler_kretprobe)
		kprobe_type = "r";
	else if (p->pre_handler == setjmp_pre_handler)
		kprobe_type = "j";
	else
		kprobe_type = "k";
	if (sym)
		seq_printf(pi, "%p  %s  %s+0x%x  %s %s%s\n",
			p->addr, kprobe_type, sym, offset,
			(modname ? modname : " "),
			(kprobe_gone(p) ? "[GONE]" : ""),
			((kprobe_disabled(p) && !kprobe_gone(p)) ?
			 "[DISABLED]" : ""));
	else
		seq_printf(pi, "%p  %s  %p %s%s\n",
			p->addr, kprobe_type, p->addr,
			(kprobe_gone(p) ? "[GONE]" : ""),
			((kprobe_disabled(p) && !kprobe_gone(p)) ?
			 "[DISABLED]" : ""));
}

static void __kprobes *kprobe_seq_start(struct seq_file *f, loff_t *pos)
{
	return (*pos < KPROBE_TABLE_SIZE) ? pos : NULL;
}

static void __kprobes *kprobe_seq_next(struct seq_file *f, void *v, loff_t *pos)
{
	(*pos)++;
	if (*pos >= KPROBE_TABLE_SIZE)
		return NULL;
	return pos;
}

static void __kprobes kprobe_seq_stop(struct seq_file *f, void *v)
{
	/* Nothing to do */
}

static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p, *kp;
	const char *sym = NULL;
	unsigned int i = *(loff_t *) v;
	unsigned long offset = 0;
	char *modname, namebuf[128];

	head = &kprobe_table[i];
	preempt_disable();
	hlist_for_each_entry_rcu(p, node, head, hlist) {
		sym = kallsyms_lookup((unsigned long)p->addr, NULL,
					&offset, &modname, namebuf);
		if (p->pre_handler == aggr_pre_handler) {
			list_for_each_entry_rcu(kp, &p->list, list)
				report_probe(pi, kp, sym, offset, modname);
		} else
			report_probe(pi, p, sym, offset, modname);
	}
	preempt_enable();
	return 0;
}

static const struct seq_operations kprobes_seq_ops = {
	.start = kprobe_seq_start,
	.next  = kprobe_seq_next,
	.stop  = kprobe_seq_stop,
	.show  = show_kprobe_addr
};

static int __kprobes kprobes_open(struct inode *inode, struct file *filp)
{
	return seq_open(filp, &kprobes_seq_ops);
}

static const struct file_operations debugfs_kprobes_operations = {
	.open		= kprobes_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
/* Disable one kprobe */
int __kprobes disable_kprobe(struct kprobe *kp)
{
	int ret = 0;
	struct kprobe *p;

	mutex_lock(&kprobe_mutex);

	/* Check whether the specified probe is valid. */
	p = __get_valid_kprobe(kp);
	if (unlikely(p == NULL)) {
		ret = -EINVAL;
		goto out;
	}

	/* If the probe is already disabled (or gone), just return */
	if (kprobe_disabled(kp))
		goto out;

	kp->flags |= KPROBE_FLAG_DISABLED;
	if (p != kp)
		/* When kp != p, p is always enabled. */
		try_to_disable_aggr_kprobe(p);

	if (!kprobes_all_disarmed && kprobe_disabled(p))
		disarm_kprobe(p);
out:
	mutex_unlock(&kprobe_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(disable_kprobe);

/* Enable one kprobe */
int __kprobes enable_kprobe(struct kprobe *kp)
{
	int ret = 0;
	struct kprobe *p;

	mutex_lock(&kprobe_mutex);

	/* Check whether the specified probe is valid. */
	p = __get_valid_kprobe(kp);
	if (unlikely(p == NULL)) {
		ret = -EINVAL;
		goto out;
	}

	if (kprobe_gone(kp)) {
		/* This kprobe has gone; we can't enable it. */
		ret = -EINVAL;
		goto out;
	}

	if (!kprobes_all_disarmed && kprobe_disabled(p))
		arm_kprobe(p);

	p->flags &= ~KPROBE_FLAG_DISABLED;
	if (p != kp)
		kp->flags &= ~KPROBE_FLAG_DISABLED;
out:
	mutex_unlock(&kprobe_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(enable_kprobe);
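
/*
 * Sketch: temporarily muting a registered probe without tearing it down
 * (illustrative; "kp" is a struct kprobe already registered as above):
 *
 *	disable_kprobe(&kp);	// breakpoint removed, registration kept
 *	// ...
 *	enable_kprobe(&kp);	// re-armed; returns -EINVAL if the probe
 *				// is gone (e.g. its module was unloaded)
 */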
static void __kprobes arm_all_kprobes(void)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;
	unsigned int i;

	mutex_lock(&kprobe_mutex);

	/* If kprobes are armed, just return */
	if (!kprobes_all_disarmed)
		goto already_enabled;

	mutex_lock(&text_mutex);
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, node, head, hlist)
			if (!kprobe_disabled(p))
				arch_arm_kprobe(p);
	}
	mutex_unlock(&text_mutex);

	kprobes_all_disarmed = false;
	printk(KERN_INFO "Kprobes globally enabled\n");

already_enabled:
	mutex_unlock(&kprobe_mutex);
	return;
}

static void __kprobes disarm_all_kprobes(void)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct kprobe *p;
	unsigned int i;

	mutex_lock(&kprobe_mutex);

	/* If kprobes are already disarmed, just return */
	if (kprobes_all_disarmed)
		goto already_disabled;

	kprobes_all_disarmed = true;
	printk(KERN_INFO "Kprobes globally disabled\n");
	mutex_lock(&text_mutex);
	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
		head = &kprobe_table[i];
		hlist_for_each_entry_rcu(p, node, head, hlist) {
			if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p))
				arch_disarm_kprobe(p);
		}
	}
	mutex_unlock(&text_mutex);
	mutex_unlock(&kprobe_mutex);

	/* Allow all currently running kprobes to complete */
	synchronize_sched();
	return;

already_disabled:
	mutex_unlock(&kprobe_mutex);
	return;
}
/*
 * XXX: The debugfs bool file interface doesn't allow for callbacks
 * when the bool state is switched. We can reuse that facility when
 * available.
 */
static ssize_t read_enabled_file_bool(struct file *file,
	       char __user *user_buf, size_t count, loff_t *ppos)
{
	char buf[3];

	if (!kprobes_all_disarmed)
		buf[0] = '1';
	else
		buf[0] = '0';
	buf[1] = '\n';
	buf[2] = 0x00;
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t write_enabled_file_bool(struct file *file,
	       const char __user *user_buf, size_t count, loff_t *ppos)
{
	char buf[32];
	int buf_size;

	buf_size = min(count, (sizeof(buf)-1));
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	switch (buf[0]) {
	case 'y':
	case 'Y':
	case '1':
		arm_all_kprobes();
		break;
	case 'n':
	case 'N':
	case '0':
		disarm_all_kprobes();
		break;
	}

	return count;
}

static const struct file_operations fops_kp = {
	.read =		read_enabled_file_bool,
	.write =	write_enabled_file_bool,
};
static int __kprobes debugfs_kprobe_init(void)
{
	struct dentry *dir, *file;
	unsigned int value = 1;

	dir = debugfs_create_dir("kprobes", NULL);
	if (!dir)
		return -ENOMEM;

	file = debugfs_create_file("list", 0444, dir, NULL,
				&debugfs_kprobes_operations);
	if (!file) {
		debugfs_remove(dir);
		return -ENOMEM;
	}

	file = debugfs_create_file("enabled", 0600, dir,
					&value, &fops_kp);
	if (!file) {
		debugfs_remove(dir);
		return -ENOMEM;
	}

	return 0;
}

late_initcall(debugfs_kprobe_init);
#endif /* CONFIG_DEBUG_FS */
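
/*
 * From userspace (assuming debugfs is mounted at /sys/kernel/debug):
 *
 *	cat /sys/kernel/debug/kprobes/list	# dump all registered probes
 *	echo 0 > /sys/kernel/debug/kprobes/enabled	# disarm all probes
 *	echo 1 > /sys/kernel/debug/kprobes/enabled	# re-arm them
 */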
module_init(init_kprobes);

/* defined in arch/.../kernel/kprobes.c */
EXPORT_SYMBOL_GPL(jprobe_return);