uprobes.c

/*
 * User-space Probes (UProbes)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2008-2012
 * Authors:
 *	Srikar Dronamraju
 *	Jim Keniston
 * Copyright (C) 2011-2012 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 */

#include <linux/kernel.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>	/* read_mapping_page */
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/export.h>
#include <linux/rmap.h>		/* anon_vma_prepare */
#include <linux/mmu_notifier.h>	/* set_pte_at_notify */
#include <linux/swap.h>		/* try_to_free_swap */
#include <linux/ptrace.h>	/* user_enable_single_step */
#include <linux/kdebug.h>	/* notifier mechanism */
#include "../../mm/internal.h"	/* munlock_vma_page */
#include <linux/percpu-rwsem.h>

#include <linux/uprobes.h>

#define UINSNS_PER_PAGE		(PAGE_SIZE/UPROBE_XOL_SLOT_BYTES)
#define MAX_UPROBE_XOL_SLOTS	UINSNS_PER_PAGE

static struct rb_root uprobes_tree = RB_ROOT;

/*
 * allows us to skip the uprobe_mmap if there are no uprobe events active
 * at this time.  Probably a fine grained per inode count is better?
 */
#define no_uprobe_events()	RB_EMPTY_ROOT(&uprobes_tree)

static DEFINE_SPINLOCK(uprobes_treelock);	/* serialize rbtree access */

#define UPROBES_HASH_SZ	13
/* serialize uprobe->pending_list */
static struct mutex uprobes_mmap_mutex[UPROBES_HASH_SZ];
#define uprobes_mmap_hash(v)	(&uprobes_mmap_mutex[((unsigned long)(v)) % UPROBES_HASH_SZ])

static struct percpu_rw_semaphore dup_mmap_sem;

/* Have a copy of original instruction */
#define UPROBE_COPY_INSN	0
/* Can skip singlestep */
#define UPROBE_SKIP_SSTEP	1
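
/*
 * One struct uprobe per active inode:offset probe point.  It lives in
 * uprobes_tree (keyed by inode:offset) and is reference counted;
 * ->register_rwsem serializes register/unregister, ->consumer_rwsem
 * protects the ->consumers list.
 */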
struct uprobe {
	struct rb_node		rb_node;	/* node in the rb tree */
	atomic_t		ref;
	struct rw_semaphore	register_rwsem;
	struct rw_semaphore	consumer_rwsem;
	struct list_head	pending_list;
	struct uprobe_consumer	*consumers;
	struct inode		*inode;		/* Also hold a ref to inode */
	loff_t			offset;
	unsigned long		flags;
	struct arch_uprobe	arch;
};

/*
 * valid_vma: Verify if the specified vma is an executable vma
 * Relax restrictions while unregistering: vm_flags might have
 * changed after breakpoint was inserted.
 *	- is_register: indicates if we are in register context.
 *	- Return true if the specified virtual address is in an
 *	  executable vma.
 */
static bool valid_vma(struct vm_area_struct *vma, bool is_register)
{
	vm_flags_t flags = VM_HUGETLB | VM_MAYEXEC | VM_SHARED;

	if (is_register)
		flags |= VM_WRITE;

	return vma->vm_file && (vma->vm_flags & flags) == VM_MAYEXEC;
}

static unsigned long offset_to_vaddr(struct vm_area_struct *vma, loff_t offset)
{
	return vma->vm_start + offset - ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
}

static loff_t vaddr_to_offset(struct vm_area_struct *vma, unsigned long vaddr)
{
	return ((loff_t)vma->vm_pgoff << PAGE_SHIFT) + (vaddr - vma->vm_start);
}

/**
 * __replace_page - replace page in vma by new page.
 * based on replace_page in mm/ksm.c
 *
 * @vma:   vma that holds the pte pointing to page
 * @addr:  address the old @page is mapped at
 * @page:  the cowed page we are replacing by kpage
 * @kpage: the modified page we replace page by
 *
 * Returns 0 on success, -EFAULT on failure.
 */
static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
				struct page *page, struct page *kpage)
{
	struct mm_struct *mm = vma->vm_mm;
	spinlock_t *ptl;
	pte_t *ptep;
	int err;
	/* For mmu_notifiers */
	const unsigned long mmun_start = addr;
	const unsigned long mmun_end   = addr + PAGE_SIZE;

	/* For try_to_free_swap() and munlock_vma_page() below */
	lock_page(page);

	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
	err = -EAGAIN;
	ptep = page_check_address(page, mm, addr, &ptl, 0);
	if (!ptep)
		goto unlock;

	get_page(kpage);
	page_add_new_anon_rmap(kpage, vma, addr);

	if (!PageAnon(page)) {
		dec_mm_counter(mm, MM_FILEPAGES);
		inc_mm_counter(mm, MM_ANONPAGES);
	}

	flush_cache_page(vma, addr, pte_pfn(*ptep));
	ptep_clear_flush(vma, addr, ptep);
	set_pte_at_notify(mm, addr, ptep, mk_pte(kpage, vma->vm_page_prot));

	page_remove_rmap(page);
	if (!page_mapped(page))
		try_to_free_swap(page);
	pte_unmap_unlock(ptep, ptl);

	if (vma->vm_flags & VM_LOCKED)
		munlock_vma_page(page);
	put_page(page);

	err = 0;
unlock:
	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
	unlock_page(page);
	return err;
}

/**
 * is_swbp_insn - check if instruction is breakpoint instruction.
 * @insn: instruction to be checked.
 * Default implementation of is_swbp_insn
 * Returns true if @insn is a breakpoint instruction.
 */
bool __weak is_swbp_insn(uprobe_opcode_t *insn)
{
	return *insn == UPROBE_SWBP_INSN;
}
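
/* Read the opcode currently mapped at @vaddr out of the given page. */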
static void copy_opcode(struct page *page, unsigned long vaddr, uprobe_opcode_t *opcode)
{
	void *kaddr = kmap_atomic(page);
	memcpy(opcode, kaddr + (vaddr & ~PAGE_MASK), UPROBE_SWBP_INSN_SIZE);
	kunmap_atomic(kaddr);
}

static int verify_opcode(struct page *page, unsigned long vaddr, uprobe_opcode_t *new_opcode)
{
	uprobe_opcode_t old_opcode;
	bool is_swbp;

	copy_opcode(page, vaddr, &old_opcode);
	is_swbp = is_swbp_insn(&old_opcode);

	if (is_swbp_insn(new_opcode)) {
		if (is_swbp)		/* register: already installed? */
			return 0;
	} else {
		if (!is_swbp)		/* unregister: was it changed by us? */
			return 0;
	}

	return 1;
}

/*
 * NOTE:
 * Expect the breakpoint instruction to be the smallest size instruction for
 * the architecture. If an arch has variable length instructions and the
 * breakpoint instruction is not of the smallest length supported by that
 * architecture, then we need to modify is_swbp_at_addr() and write_opcode()
 * accordingly. This would never be a problem for archs that have fixed
 * length instructions.
 */

/*
 * write_opcode - write the opcode at a given virtual address.
 * @mm: the probed process address space.
 * @vaddr: the virtual address to store the opcode.
 * @opcode: opcode to be written at @vaddr.
 *
 * Called with mm->mmap_sem held (for read and with a reference to
 * mm).
 *
 * For mm @mm, write the opcode at @vaddr.
 * Return 0 (success) or a negative errno.
 */
static int write_opcode(struct mm_struct *mm, unsigned long vaddr,
			uprobe_opcode_t opcode)
{
	struct page *old_page, *new_page;
	void *vaddr_old, *vaddr_new;
	struct vm_area_struct *vma;
	int ret;

retry:
	/* Read the page with vaddr into memory */
	ret = get_user_pages(NULL, mm, vaddr, 1, 0, 1, &old_page, &vma);
	if (ret <= 0)
		return ret;

	ret = verify_opcode(old_page, vaddr, &opcode);
	if (ret <= 0)
		goto put_old;

	ret = -ENOMEM;
	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr);
	if (!new_page)
		goto put_old;

	__SetPageUptodate(new_page);

	/* copy the page now that we've got it stable */
	vaddr_old = kmap_atomic(old_page);
	vaddr_new = kmap_atomic(new_page);

	memcpy(vaddr_new, vaddr_old, PAGE_SIZE);
	memcpy(vaddr_new + (vaddr & ~PAGE_MASK), &opcode, UPROBE_SWBP_INSN_SIZE);

	kunmap_atomic(vaddr_new);
	kunmap_atomic(vaddr_old);

	ret = anon_vma_prepare(vma);
	if (ret)
		goto put_new;

	ret = __replace_page(vma, vaddr, old_page, new_page);

put_new:
	page_cache_release(new_page);
put_old:
	put_page(old_page);

	if (unlikely(ret == -EAGAIN))
		goto retry;
	return ret;
}

/**
 * set_swbp - store breakpoint at a given address.
 * @auprobe: arch specific probepoint information.
 * @mm: the probed process address space.
 * @vaddr: the virtual address to insert the opcode.
 *
 * For mm @mm, store the breakpoint instruction at @vaddr.
 * Return 0 (success) or a negative errno.
 */
int __weak set_swbp(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
{
	return write_opcode(mm, vaddr, UPROBE_SWBP_INSN);
}

/**
 * set_orig_insn - Restore the original instruction.
 * @mm: the probed process address space.
 * @auprobe: arch specific probepoint information.
 * @vaddr: the virtual address to insert the opcode.
 *
 * For mm @mm, restore the original opcode (opcode) at @vaddr.
 * Return 0 (success) or a negative errno.
 */
int __weak
set_orig_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr)
{
	return write_opcode(mm, vaddr, *(uprobe_opcode_t *)auprobe->insn);
}
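
/* Tree order for uprobes_tree: compare by inode first, then by offset. */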
static int match_uprobe(struct uprobe *l, struct uprobe *r)
{
	if (l->inode < r->inode)
		return -1;

	if (l->inode > r->inode)
		return 1;

	if (l->offset < r->offset)
		return -1;

	if (l->offset > r->offset)
		return 1;

	return 0;
}

static struct uprobe *__find_uprobe(struct inode *inode, loff_t offset)
{
	struct uprobe u = { .inode = inode, .offset = offset };
	struct rb_node *n = uprobes_tree.rb_node;
	struct uprobe *uprobe;
	int match;

	while (n) {
		uprobe = rb_entry(n, struct uprobe, rb_node);
		match = match_uprobe(&u, uprobe);
		if (!match) {
			atomic_inc(&uprobe->ref);
			return uprobe;
		}

		if (match < 0)
			n = n->rb_left;
		else
			n = n->rb_right;
	}
	return NULL;
}

/*
 * Find a uprobe corresponding to a given inode:offset
 * Acquires uprobes_treelock
 */
static struct uprobe *find_uprobe(struct inode *inode, loff_t offset)
{
	struct uprobe *uprobe;

	spin_lock(&uprobes_treelock);
	uprobe = __find_uprobe(inode, offset);
	spin_unlock(&uprobes_treelock);

	return uprobe;
}

static struct uprobe *__insert_uprobe(struct uprobe *uprobe)
{
	struct rb_node **p = &uprobes_tree.rb_node;
	struct rb_node *parent = NULL;
	struct uprobe *u;
	int match;

	while (*p) {
		parent = *p;
		u = rb_entry(parent, struct uprobe, rb_node);
		match = match_uprobe(uprobe, u);
		if (!match) {
			atomic_inc(&u->ref);
			return u;
		}

		if (match < 0)
			p = &parent->rb_left;
		else
			p = &parent->rb_right;
	}

	u = NULL;
	rb_link_node(&uprobe->rb_node, parent, p);
	rb_insert_color(&uprobe->rb_node, &uprobes_tree);
	/* get access + creation ref */
	atomic_set(&uprobe->ref, 2);

	return u;
}

/*
 * Acquire uprobes_treelock.
 * Matching uprobe already exists in rbtree;
 *	increment (access refcount) and return the matching uprobe.
 *
 * No matching uprobe; insert the uprobe in rb_tree;
 *	get a double refcount (access + creation) and return NULL.
 */
static struct uprobe *insert_uprobe(struct uprobe *uprobe)
{
	struct uprobe *u;

	spin_lock(&uprobes_treelock);
	u = __insert_uprobe(uprobe);
	spin_unlock(&uprobes_treelock);

	return u;
}

static void put_uprobe(struct uprobe *uprobe)
{
	if (atomic_dec_and_test(&uprobe->ref))
		kfree(uprobe);
}
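
/*
 * Allocate a new uprobe for inode:offset, or return the existing one
 * (with an extra access reference) if another thread won the insert race.
 */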
static struct uprobe *alloc_uprobe(struct inode *inode, loff_t offset)
{
	struct uprobe *uprobe, *cur_uprobe;

	uprobe = kzalloc(sizeof(struct uprobe), GFP_KERNEL);
	if (!uprobe)
		return NULL;

	uprobe->inode = igrab(inode);
	uprobe->offset = offset;
	init_rwsem(&uprobe->register_rwsem);
	init_rwsem(&uprobe->consumer_rwsem);
	/* For now assume that the instruction need not be single-stepped */
	__set_bit(UPROBE_SKIP_SSTEP, &uprobe->flags);

	/* add to uprobes_tree, sorted on inode:offset */
	cur_uprobe = insert_uprobe(uprobe);

	/* a uprobe exists for this inode:offset combination */
	if (cur_uprobe) {
		kfree(uprobe);
		uprobe = cur_uprobe;
		iput(inode);
	}

	return uprobe;
}

static void consumer_add(struct uprobe *uprobe, struct uprobe_consumer *uc)
{
	down_write(&uprobe->consumer_rwsem);
	uc->next = uprobe->consumers;
	uprobe->consumers = uc;
	up_write(&uprobe->consumer_rwsem);
}

/*
 * For uprobe @uprobe, delete the consumer @uc.
 * Return true if the @uc is deleted successfully
 * or return false.
 */
static bool consumer_del(struct uprobe *uprobe, struct uprobe_consumer *uc)
{
	struct uprobe_consumer **con;
	bool ret = false;

	down_write(&uprobe->consumer_rwsem);
	for (con = &uprobe->consumers; *con; con = &(*con)->next) {
		if (*con == uc) {
			*con = uc->next;
			ret = true;
			break;
		}
	}
	up_write(&uprobe->consumer_rwsem);

	return ret;
}
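
/*
 * Copy @nbytes of the original instruction bytes at file @offset from the
 * file's page cache into @insn.
 */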
static int
__copy_insn(struct address_space *mapping, struct file *filp, char *insn,
			unsigned long nbytes, loff_t offset)
{
	struct page *page;
	void *vaddr;
	unsigned long off;
	pgoff_t idx;

	if (!filp)
		return -EINVAL;

	if (!mapping->a_ops->readpage)
		return -EIO;

	idx = offset >> PAGE_CACHE_SHIFT;
	off = offset & ~PAGE_MASK;

	/*
	 * Ensure that the page that has the original instruction is
	 * populated and in page-cache.
	 */
	page = read_mapping_page(mapping, idx, filp);
	if (IS_ERR(page))
		return PTR_ERR(page);

	vaddr = kmap_atomic(page);
	memcpy(insn, vaddr + off, nbytes);
	kunmap_atomic(vaddr);
	page_cache_release(page);

	return 0;
}

static int copy_insn(struct uprobe *uprobe, struct file *filp)
{
	struct address_space *mapping;
	unsigned long nbytes;
	int bytes;

	nbytes = PAGE_SIZE - (uprobe->offset & ~PAGE_MASK);
	mapping = uprobe->inode->i_mapping;

	/* Instruction at end of binary; copy only available bytes */
	if (uprobe->offset + MAX_UINSN_BYTES > uprobe->inode->i_size)
		bytes = uprobe->inode->i_size - uprobe->offset;
	else
		bytes = MAX_UINSN_BYTES;

	/* Instruction at the page-boundary; copy bytes in second page */
	if (nbytes < bytes) {
		int err = __copy_insn(mapping, filp, uprobe->arch.insn + nbytes,
				bytes - nbytes, uprobe->offset + nbytes);
		if (err)
			return err;
		bytes = nbytes;
	}
	return __copy_insn(mapping, filp, uprobe->arch.insn, bytes, uprobe->offset);
}
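
/*
 * First-hit preparation: copy and analyze the original instruction once
 * per uprobe; the UPROBE_COPY_INSN flag records that this has been done.
 */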
static int prepare_uprobe(struct uprobe *uprobe, struct file *file,
				struct mm_struct *mm, unsigned long vaddr)
{
	int ret = 0;

	if (test_bit(UPROBE_COPY_INSN, &uprobe->flags))
		return ret;

	/* TODO: move this into _register, until then we abuse this sem. */
	down_write(&uprobe->consumer_rwsem);
	if (test_bit(UPROBE_COPY_INSN, &uprobe->flags))
		goto out;

	ret = copy_insn(uprobe, file);
	if (ret)
		goto out;

	ret = -ENOTSUPP;
	if (is_swbp_insn((uprobe_opcode_t *)uprobe->arch.insn))
		goto out;

	ret = arch_uprobe_analyze_insn(&uprobe->arch, mm, vaddr);
	if (ret)
		goto out;

	/* write_opcode() assumes we don't cross page boundary */
	BUG_ON((uprobe->offset & ~PAGE_MASK) +
			UPROBE_SWBP_INSN_SIZE > PAGE_SIZE);

	smp_wmb(); /* pairs with rmb() in find_active_uprobe() */
	set_bit(UPROBE_COPY_INSN, &uprobe->flags);

out:
	up_write(&uprobe->consumer_rwsem);

	return ret;
}

static inline bool consumer_filter(struct uprobe_consumer *uc,
				   enum uprobe_filter_ctx ctx, struct mm_struct *mm)
{
	return !uc->filter || uc->filter(uc, ctx, mm);
}

static bool filter_chain(struct uprobe *uprobe,
			 enum uprobe_filter_ctx ctx, struct mm_struct *mm)
{
	struct uprobe_consumer *uc;
	bool ret = false;

	down_read(&uprobe->consumer_rwsem);
	for (uc = uprobe->consumers; uc; uc = uc->next) {
		ret = consumer_filter(uc, ctx, mm);
		if (ret)
			break;
	}
	up_read(&uprobe->consumer_rwsem);

	return ret;
}

static int
install_breakpoint(struct uprobe *uprobe, struct mm_struct *mm,
			struct vm_area_struct *vma, unsigned long vaddr)
{
	bool first_uprobe;
	int ret;

	ret = prepare_uprobe(uprobe, vma->vm_file, mm, vaddr);
	if (ret)
		return ret;

	/*
	 * set MMF_HAS_UPROBES in advance for uprobe_pre_sstep_notifier(),
	 * the task can hit this breakpoint right after __replace_page().
	 */
	first_uprobe = !test_bit(MMF_HAS_UPROBES, &mm->flags);
	if (first_uprobe)
		set_bit(MMF_HAS_UPROBES, &mm->flags);

	ret = set_swbp(&uprobe->arch, mm, vaddr);
	if (!ret)
		clear_bit(MMF_RECALC_UPROBES, &mm->flags);
	else if (first_uprobe)
		clear_bit(MMF_HAS_UPROBES, &mm->flags);

	return ret;
}

static int
remove_breakpoint(struct uprobe *uprobe, struct mm_struct *mm, unsigned long vaddr)
{
	set_bit(MMF_RECALC_UPROBES, &mm->flags);
	return set_orig_insn(&uprobe->arch, mm, vaddr);
}

static inline bool uprobe_is_active(struct uprobe *uprobe)
{
	return !RB_EMPTY_NODE(&uprobe->rb_node);
}

/*
 * There could be threads that have already hit the breakpoint. They
 * will recheck the current insn and restart if find_uprobe() fails.
 * See find_active_uprobe().
 */
static void delete_uprobe(struct uprobe *uprobe)
{
	if (WARN_ON(!uprobe_is_active(uprobe)))
		return;

	spin_lock(&uprobes_treelock);
	rb_erase(&uprobe->rb_node, &uprobes_tree);
	spin_unlock(&uprobes_treelock);
	RB_CLEAR_NODE(&uprobe->rb_node); /* for uprobe_is_active() */
	iput(uprobe->inode);
	put_uprobe(uprobe);
}
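
/*
 * One entry per mm that currently maps the probed file range; the list is
 * built under i_mmap_mutex and consumed by register_for_each_vma().
 */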
struct map_info {
	struct map_info *next;
	struct mm_struct *mm;
	unsigned long vaddr;
};

static inline struct map_info *free_map_info(struct map_info *info)
{
	struct map_info *next = info->next;
	kfree(info);
	return next;
}

static struct map_info *
build_map_info(struct address_space *mapping, loff_t offset, bool is_register)
{
	unsigned long pgoff = offset >> PAGE_SHIFT;
	struct vm_area_struct *vma;
	struct map_info *curr = NULL;
	struct map_info *prev = NULL;
	struct map_info *info;
	int more = 0;

again:
	mutex_lock(&mapping->i_mmap_mutex);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
		if (!valid_vma(vma, is_register))
			continue;

		if (!prev && !more) {
			/*
			 * Needs GFP_NOWAIT to avoid i_mmap_mutex recursion through
			 * reclaim. This is optimistic, no harm done if it fails.
			 */
			prev = kmalloc(sizeof(struct map_info),
					GFP_NOWAIT | __GFP_NOMEMALLOC | __GFP_NOWARN);
			if (prev)
				prev->next = NULL;
		}
		if (!prev) {
			more++;
			continue;
		}

		if (!atomic_inc_not_zero(&vma->vm_mm->mm_users))
			continue;

		info = prev;
		prev = prev->next;
		info->next = curr;
		curr = info;

		info->mm = vma->vm_mm;
		info->vaddr = offset_to_vaddr(vma, offset);
	}
	mutex_unlock(&mapping->i_mmap_mutex);

	if (!more)
		goto out;

	prev = curr;
	while (curr) {
		mmput(curr->mm);
		curr = curr->next;
	}

	do {
		info = kmalloc(sizeof(struct map_info), GFP_KERNEL);
		if (!info) {
			curr = ERR_PTR(-ENOMEM);
			goto out;
		}
		info->next = prev;
		prev = info;
	} while (--more);

	goto again;
out:
	while (prev)
		prev = free_map_info(prev);
	return curr;
}
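
/*
 * Install or remove the breakpoint in every mm that maps inode:offset.
 * Holding dup_mmap_sem for write keeps fork()'s dup_mmap() from copying
 * a half-updated address space while we work.
 */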
static int register_for_each_vma(struct uprobe *uprobe, bool is_register)
{
	struct map_info *info;
	int err = 0;

	percpu_down_write(&dup_mmap_sem);
	info = build_map_info(uprobe->inode->i_mapping,
					uprobe->offset, is_register);
	if (IS_ERR(info)) {
		err = PTR_ERR(info);
		goto out;
	}

	while (info) {
		struct mm_struct *mm = info->mm;
		struct vm_area_struct *vma;

		if (err && is_register)
			goto free;

		down_write(&mm->mmap_sem);
		vma = find_vma(mm, info->vaddr);
		if (!vma || !valid_vma(vma, is_register) ||
		    vma->vm_file->f_mapping->host != uprobe->inode)
			goto unlock;

		if (vma->vm_start > info->vaddr ||
		    vaddr_to_offset(vma, info->vaddr) != uprobe->offset)
			goto unlock;

		if (is_register) {
			/* consult only the "caller", new consumer. */
			if (consumer_filter(uprobe->consumers,
					UPROBE_FILTER_REGISTER, mm))
				err = install_breakpoint(uprobe, mm, vma, info->vaddr);
		} else if (test_bit(MMF_HAS_UPROBES, &mm->flags)) {
			if (!filter_chain(uprobe,
					UPROBE_FILTER_UNREGISTER, mm))
				err |= remove_breakpoint(uprobe, mm, info->vaddr);
		}

unlock:
		up_write(&mm->mmap_sem);
free:
		mmput(mm);
		info = free_map_info(info);
	}
out:
	percpu_up_write(&dup_mmap_sem);
	return err;
}

static int __uprobe_register(struct uprobe *uprobe, struct uprobe_consumer *uc)
{
	consumer_add(uprobe, uc);
	return register_for_each_vma(uprobe, true);
}

static void __uprobe_unregister(struct uprobe *uprobe, struct uprobe_consumer *uc)
{
	int err;

	if (!consumer_del(uprobe, uc))	/* WARN? */
		return;

	err = register_for_each_vma(uprobe, false);
	/* TODO: can't unregister? schedule a worker thread */
	if (!uprobe->consumers && !err)
		delete_uprobe(uprobe);
}
/*
 * uprobe_register - register a probe
 * @inode: the file in which the probe has to be placed.
 * @offset: offset from the start of the file.
 * @uc: information on how to handle the probe.
 *
 * Apart from the access refcount, uprobe_register() takes a creation
 * refcount (through alloc_uprobe) if and only if this @uprobe is getting
 * inserted into the rbtree (i.e. first consumer for a @inode:@offset
 * tuple). Creation refcount stops uprobe_unregister from freeing the
 * @uprobe even before the register operation is complete. Creation
 * refcount is released when the last @uc for the @uprobe
 * unregisters.
 *
 * Return errno if it cannot successfully install probes
 * else return 0 (success)
 */
int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *uc)
{
	struct uprobe *uprobe;
	int ret;

	/* Racy, just to catch the obvious mistakes */
	if (offset > i_size_read(inode))
		return -EINVAL;

retry:
	uprobe = alloc_uprobe(inode, offset);
	if (!uprobe)
		return -ENOMEM;
	/*
	 * We can race with uprobe_unregister()->delete_uprobe().
	 * Check uprobe_is_active() and retry if it is false.
	 */
	down_write(&uprobe->register_rwsem);
	ret = -EAGAIN;
	if (likely(uprobe_is_active(uprobe))) {
		ret = __uprobe_register(uprobe, uc);
		if (ret)
			__uprobe_unregister(uprobe, uc);
	}
	up_write(&uprobe->register_rwsem);
	put_uprobe(uprobe);

	if (unlikely(ret == -EAGAIN))
		goto retry;
	return ret;
}
EXPORT_SYMBOL_GPL(uprobe_register);
/*
 * uprobe_unregister - unregister an already registered probe.
 * @inode: the file in which the probe has to be removed.
 * @offset: offset from the start of the file.
 * @uc: identify which probe if multiple probes are colocated.
 */
void uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consumer *uc)
{
	struct uprobe *uprobe;

	uprobe = find_uprobe(inode, offset);
	if (!uprobe)
		return;

	down_write(&uprobe->register_rwsem);
	__uprobe_unregister(uprobe, uc);
	up_write(&uprobe->register_rwsem);
	put_uprobe(uprobe);
}
EXPORT_SYMBOL_GPL(uprobe_unregister);
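
/*
 * Illustrative usage (a minimal sketch, not part of this file): how a
 * hypothetical in-kernel client might pair uprobe_register() with
 * uprobe_unregister().  The names sample_handler/sample_consumer and the
 * inode/offset values are assumptions for illustration only.
 *
 *	static int sample_handler(struct uprobe_consumer *self,
 *				  struct pt_regs *regs)
 *	{
 *		pr_info("probe hit, ip=%lx\n", instruction_pointer(regs));
 *		return 0;	// 0 keeps the probe; UPROBE_HANDLER_REMOVE drops it
 *	}
 *
 *	static struct uprobe_consumer sample_consumer = {
 *		.handler = sample_handler,
 *	};
 *
 *	// inode: pinned inode of the probed file, offset: file offset of
 *	// the probed instruction (both obtained elsewhere).
 *	err = uprobe_register(inode, offset, &sample_consumer);
 *	...
 *	uprobe_unregister(inode, offset, &sample_consumer);
 */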
static int unapply_uprobe(struct uprobe *uprobe, struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	int err = 0;

	down_read(&mm->mmap_sem);
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		unsigned long vaddr;
		loff_t offset;

		if (!valid_vma(vma, false) ||
		    vma->vm_file->f_mapping->host != uprobe->inode)
			continue;

		offset = (loff_t)vma->vm_pgoff << PAGE_SHIFT;
		if (uprobe->offset <  offset ||
		    uprobe->offset >= offset + vma->vm_end - vma->vm_start)
			continue;

		vaddr = offset_to_vaddr(vma, uprobe->offset);
		err |= remove_breakpoint(uprobe, mm, vaddr);
	}
	up_read(&mm->mmap_sem);

	return err;
}

static struct rb_node *
find_node_in_range(struct inode *inode, loff_t min, loff_t max)
{
	struct rb_node *n = uprobes_tree.rb_node;

	while (n) {
		struct uprobe *u = rb_entry(n, struct uprobe, rb_node);

		if (inode < u->inode) {
			n = n->rb_left;
		} else if (inode > u->inode) {
			n = n->rb_right;
		} else {
			if (max < u->offset)
				n = n->rb_left;
			else if (min > u->offset)
				n = n->rb_right;
			else
				break;
		}
	}

	return n;
}
/*
 * For a given range in vma, build a list of probes that need to be inserted.
 */
static void build_probe_list(struct inode *inode,
				struct vm_area_struct *vma,
				unsigned long start, unsigned long end,
				struct list_head *head)
{
	loff_t min, max;
	struct rb_node *n, *t;
	struct uprobe *u;

	INIT_LIST_HEAD(head);
	min = vaddr_to_offset(vma, start);
	max = min + (end - start) - 1;

	spin_lock(&uprobes_treelock);
	n = find_node_in_range(inode, min, max);
	if (n) {
		for (t = n; t; t = rb_prev(t)) {
			u = rb_entry(t, struct uprobe, rb_node);
			if (u->inode != inode || u->offset < min)
				break;
			list_add(&u->pending_list, head);
			atomic_inc(&u->ref);
		}
		for (t = n; (t = rb_next(t)); ) {
			u = rb_entry(t, struct uprobe, rb_node);
			if (u->inode != inode || u->offset > max)
				break;
			list_add(&u->pending_list, head);
			atomic_inc(&u->ref);
		}
	}
	spin_unlock(&uprobes_treelock);
}

/*
 * Called from mmap_region/vma_adjust with mm->mmap_sem acquired.
 *
 * Currently we ignore all errors and always return 0, the callers
 * can't handle the failure anyway.
 */
int uprobe_mmap(struct vm_area_struct *vma)
{
	struct list_head tmp_list;
	struct uprobe *uprobe, *u;
	struct inode *inode;

	if (no_uprobe_events() || !valid_vma(vma, true))
		return 0;

	inode = vma->vm_file->f_mapping->host;
	if (!inode)
		return 0;

	mutex_lock(uprobes_mmap_hash(inode));
	build_probe_list(inode, vma, vma->vm_start, vma->vm_end, &tmp_list);
	/*
	 * We can race with uprobe_unregister(); this uprobe can already be
	 * removed. But in that case filter_chain() must return false: all
	 * consumers have gone away.
	 */
	list_for_each_entry_safe(uprobe, u, &tmp_list, pending_list) {
		if (!fatal_signal_pending(current) &&
		    filter_chain(uprobe, UPROBE_FILTER_MMAP, vma->vm_mm)) {
			unsigned long vaddr = offset_to_vaddr(vma, uprobe->offset);
			install_breakpoint(uprobe, vma->vm_mm, vma, vaddr);
		}
		put_uprobe(uprobe);
	}
	mutex_unlock(uprobes_mmap_hash(inode));

	return 0;
}

static bool
vma_has_uprobes(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	loff_t min, max;
	struct inode *inode;
	struct rb_node *n;

	inode = vma->vm_file->f_mapping->host;

	min = vaddr_to_offset(vma, start);
	max = min + (end - start) - 1;

	spin_lock(&uprobes_treelock);
	n = find_node_in_range(inode, min, max);
	spin_unlock(&uprobes_treelock);

	return !!n;
}

/*
 * Called in context of a munmap of a vma.
 */
void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	if (no_uprobe_events() || !valid_vma(vma, false))
		return;

	if (!atomic_read(&vma->vm_mm->mm_users)) /* called by mmput() ? */
		return;

	if (!test_bit(MMF_HAS_UPROBES, &vma->vm_mm->flags) ||
	     test_bit(MMF_RECALC_UPROBES, &vma->vm_mm->flags))
		return;

	if (vma_has_uprobes(vma, start, end))
		set_bit(MMF_RECALC_UPROBES, &vma->vm_mm->flags);
}
/* Slot allocation for XOL */
static int xol_add_vma(struct xol_area *area)
{
	struct mm_struct *mm = current->mm;
	int ret = -EALREADY;

	down_write(&mm->mmap_sem);
	if (mm->uprobes_state.xol_area)
		goto fail;

	ret = -ENOMEM;
	/* Try to map as high as possible, this is only a hint. */
	area->vaddr = get_unmapped_area(NULL, TASK_SIZE - PAGE_SIZE, PAGE_SIZE, 0, 0);
	if (area->vaddr & ~PAGE_MASK) {
		ret = area->vaddr;
		goto fail;
	}

	ret = install_special_mapping(mm, area->vaddr, PAGE_SIZE,
				VM_EXEC|VM_MAYEXEC|VM_DONTCOPY|VM_IO, &area->page);
	if (ret)
		goto fail;

	smp_wmb();	/* pairs with get_xol_area() */
	mm->uprobes_state.xol_area = area;
	ret = 0;
fail:
	up_write(&mm->mmap_sem);

	return ret;
}

/*
 * get_xol_area - Allocate process's xol_area if necessary.
 * This area will be used for storing instructions for execution out of line.
 *
 * Returns the allocated area or NULL.
 */
static struct xol_area *get_xol_area(void)
{
	struct mm_struct *mm = current->mm;
	struct xol_area *area;

	area = mm->uprobes_state.xol_area;
	if (area)
		goto ret;

	area = kzalloc(sizeof(*area), GFP_KERNEL);
	if (unlikely(!area))
		goto out;

	area->bitmap = kzalloc(BITS_TO_LONGS(UINSNS_PER_PAGE) * sizeof(long), GFP_KERNEL);
	if (!area->bitmap)
		goto free_area;

	area->page = alloc_page(GFP_HIGHUSER);
	if (!area->page)
		goto free_bitmap;

	init_waitqueue_head(&area->wq);
	if (!xol_add_vma(area))
		return area;

	__free_page(area->page);
free_bitmap:
	kfree(area->bitmap);
free_area:
	kfree(area);
out:
	area = mm->uprobes_state.xol_area;
ret:
	smp_read_barrier_depends();	/* pairs with wmb in xol_add_vma() */
	return area;
}

/*
 * uprobe_clear_state - Free the area allocated for slots.
 */
void uprobe_clear_state(struct mm_struct *mm)
{
	struct xol_area *area = mm->uprobes_state.xol_area;

	if (!area)
		return;

	put_page(area->page);
	kfree(area->bitmap);
	kfree(area);
}
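
/*
 * fork() brackets dup_mmap() with these two calls; holding dup_mmap_sem
 * for read blocks register_for_each_vma() from installing or removing
 * breakpoints while the child's address space is being populated.
 */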
void uprobe_start_dup_mmap(void)
{
	percpu_down_read(&dup_mmap_sem);
}

void uprobe_end_dup_mmap(void)
{
	percpu_up_read(&dup_mmap_sem);
}

void uprobe_dup_mmap(struct mm_struct *oldmm, struct mm_struct *newmm)
{
	newmm->uprobes_state.xol_area = NULL;

	if (test_bit(MMF_HAS_UPROBES, &oldmm->flags)) {
		set_bit(MMF_HAS_UPROBES, &newmm->flags);
		/* unconditionally, dup_mmap() skips VM_DONTCOPY vmas */
		set_bit(MMF_RECALC_UPROBES, &newmm->flags);
	}
}

/*
 * Search for a free XOL slot; if all slots are busy, wait until one is
 * released.
 */
static unsigned long xol_take_insn_slot(struct xol_area *area)
{
	unsigned long slot_addr;
	int slot_nr;

	do {
		slot_nr = find_first_zero_bit(area->bitmap, UINSNS_PER_PAGE);
		if (slot_nr < UINSNS_PER_PAGE) {
			if (!test_and_set_bit(slot_nr, area->bitmap))
				break;

			slot_nr = UINSNS_PER_PAGE;
			continue;
		}
		wait_event(area->wq, (atomic_read(&area->slot_count) < UINSNS_PER_PAGE));
	} while (slot_nr >= UINSNS_PER_PAGE);

	slot_addr = area->vaddr + (slot_nr * UPROBE_XOL_SLOT_BYTES);
	atomic_inc(&area->slot_count);

	return slot_addr;
}

/*
 * xol_get_insn_slot - allocate a slot for xol.
 * Returns the allocated slot address or 0.
 */
static unsigned long xol_get_insn_slot(struct uprobe *uprobe)
{
	struct xol_area *area;
	unsigned long offset;
	unsigned long xol_vaddr;
	void *vaddr;

	area = get_xol_area();
	if (!area)
		return 0;

	xol_vaddr = xol_take_insn_slot(area);
	if (unlikely(!xol_vaddr))
		return 0;

	/* Initialize the slot */
	offset = xol_vaddr & ~PAGE_MASK;
	vaddr = kmap_atomic(area->page);
	memcpy(vaddr + offset, uprobe->arch.insn, MAX_UINSN_BYTES);
	kunmap_atomic(vaddr);
	/*
	 * We probably need flush_icache_user_range() but it needs vma.
	 * This should work on supported architectures too.
	 */
	flush_dcache_page(area->page);

	return xol_vaddr;
}

/*
 * xol_free_insn_slot - If slot was earlier allocated by
 * @xol_get_insn_slot(), make the slot available for
 * subsequent requests.
 */
static void xol_free_insn_slot(struct task_struct *tsk)
{
	struct xol_area *area;
	unsigned long vma_end;
	unsigned long slot_addr;

	if (!tsk->mm || !tsk->mm->uprobes_state.xol_area || !tsk->utask)
		return;

	slot_addr = tsk->utask->xol_vaddr;
	if (unlikely(!slot_addr))
		return;

	area = tsk->mm->uprobes_state.xol_area;
	vma_end = area->vaddr + PAGE_SIZE;
	if (area->vaddr <= slot_addr && slot_addr < vma_end) {
		unsigned long offset;
		int slot_nr;

		offset = slot_addr - area->vaddr;
		slot_nr = offset / UPROBE_XOL_SLOT_BYTES;
		if (slot_nr >= UINSNS_PER_PAGE)
			return;

		clear_bit(slot_nr, area->bitmap);
		atomic_dec(&area->slot_count);
		if (waitqueue_active(&area->wq))
			wake_up(&area->wq);

		tsk->utask->xol_vaddr = 0;
	}
}
/**
 * uprobe_get_swbp_addr - compute address of swbp given post-swbp regs
 * @regs: Reflects the saved state of the task after it has hit a breakpoint
 * instruction.
 * Return the address of the breakpoint instruction.
 */
unsigned long __weak uprobe_get_swbp_addr(struct pt_regs *regs)
{
	return instruction_pointer(regs) - UPROBE_SWBP_INSN_SIZE;
}

/*
 * Called with no locks held.
 * Called in context of an exiting or an exec-ing thread.
 */
void uprobe_free_utask(struct task_struct *t)
{
	struct uprobe_task *utask = t->utask;

	if (!utask)
		return;

	if (utask->active_uprobe)
		put_uprobe(utask->active_uprobe);

	xol_free_insn_slot(t);
	kfree(utask);
	t->utask = NULL;
}

/*
 * Called in context of a new clone/fork from copy_process.
 */
void uprobe_copy_process(struct task_struct *t)
{
	t->utask = NULL;
}

/*
 * Allocate a uprobe_task object for the task if necessary.
 * Called when the thread hits a breakpoint.
 *
 * Returns:
 * - pointer to new uprobe_task on success
 * - NULL otherwise
 */
static struct uprobe_task *get_utask(void)
{
	if (!current->utask)
		current->utask = kzalloc(sizeof(struct uprobe_task), GFP_KERNEL);
	return current->utask;
}
/* Prepare to single-step probed instruction out of line. */
static int
pre_ssout(struct uprobe *uprobe, struct pt_regs *regs, unsigned long bp_vaddr)
{
	struct uprobe_task *utask;
	unsigned long xol_vaddr;
	int err;

	utask = get_utask();
	if (!utask)
		return -ENOMEM;

	xol_vaddr = xol_get_insn_slot(uprobe);
	if (!xol_vaddr)
		return -ENOMEM;

	utask->xol_vaddr = xol_vaddr;
	utask->vaddr = bp_vaddr;

	err = arch_uprobe_pre_xol(&uprobe->arch, regs);
	if (unlikely(err)) {
		xol_free_insn_slot(current);
		return err;
	}

	utask->active_uprobe = uprobe;
	utask->state = UTASK_SSTEP;
	return 0;
}

/*
 * If we are singlestepping, then ensure this thread is not connected to
 * non-fatal signals until completion of singlestep.  When xol insn itself
 * triggers the signal, restart the original insn even if the task is
 * already SIGKILL'ed (since coredump should report the correct ip).  This
 * is even more important if the task has a handler for SIGSEGV/etc.: the
 * _same_ instruction should be repeated again after return from the signal
 * handler, and SSTEP can never finish in this case.
 */
bool uprobe_deny_signal(void)
{
	struct task_struct *t = current;
	struct uprobe_task *utask = t->utask;

	if (likely(!utask || !utask->active_uprobe))
		return false;

	WARN_ON_ONCE(utask->state != UTASK_SSTEP);

	if (signal_pending(t)) {
		spin_lock_irq(&t->sighand->siglock);
		clear_tsk_thread_flag(t, TIF_SIGPENDING);
		spin_unlock_irq(&t->sighand->siglock);

		if (__fatal_signal_pending(t) || arch_uprobe_xol_was_trapped(t)) {
			utask->state = UTASK_SSTEP_TRAPPED;
			set_tsk_thread_flag(t, TIF_UPROBE);
			set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
		}
	}

	return true;
}

/*
 * Avoid singlestepping the original instruction if the original instruction
 * is a NOP or can be emulated.
 */
static bool can_skip_sstep(struct uprobe *uprobe, struct pt_regs *regs)
{
	if (test_bit(UPROBE_SKIP_SSTEP, &uprobe->flags)) {
		if (arch_uprobe_skip_sstep(&uprobe->arch, regs))
			return true;

		clear_bit(UPROBE_SKIP_SSTEP, &uprobe->flags);
	}
	return false;
}
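
/*
 * Re-check every executable mapping in @mm; clear MMF_HAS_UPROBES if no
 * vma has a registered uprobe left.
 */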
static void mmf_recalc_uprobes(struct mm_struct *mm)
{
	struct vm_area_struct *vma;

	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (!valid_vma(vma, false))
			continue;
		/*
		 * This is not strictly accurate, we can race with
		 * uprobe_unregister() and see the already removed
		 * uprobe if delete_uprobe() was not yet called.
		 * Or this uprobe can be filtered out.
		 */
		if (vma_has_uprobes(vma, vma->vm_start, vma->vm_end))
			return;
	}

	clear_bit(MMF_HAS_UPROBES, &mm->flags);
}
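
/*
 * Check whether the instruction currently at @vaddr in @mm is the
 * breakpoint instruction; returns the is_swbp_insn() result (0 or 1)
 * or a negative errno if the address cannot be read.
 */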
static int is_swbp_at_addr(struct mm_struct *mm, unsigned long vaddr)
{
	struct page *page;
	uprobe_opcode_t opcode;
	int result;

	pagefault_disable();
	result = __copy_from_user_inatomic(&opcode, (void __user*)vaddr,
							sizeof(opcode));
	pagefault_enable();

	if (likely(result == 0))
		goto out;

	result = get_user_pages(NULL, mm, vaddr, 1, 0, 1, &page, NULL);
	if (result < 0)
		return result;

	copy_opcode(page, vaddr, &opcode);
	put_page(page);
out:
	return is_swbp_insn(&opcode);
}
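
/*
 * Given a breakpoint address, look up the uprobe registered for the
 * underlying inode:offset.  When no uprobe is found, *is_swbp tells the
 * caller whether a stray breakpoint instruction is present (or -EFAULT
 * if the address is not mapped at all).
 */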
static struct uprobe *find_active_uprobe(unsigned long bp_vaddr, int *is_swbp)
{
	struct mm_struct *mm = current->mm;
	struct uprobe *uprobe = NULL;
	struct vm_area_struct *vma;

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, bp_vaddr);
	if (vma && vma->vm_start <= bp_vaddr) {
		if (valid_vma(vma, false)) {
			struct inode *inode = vma->vm_file->f_mapping->host;
			loff_t offset = vaddr_to_offset(vma, bp_vaddr);

			uprobe = find_uprobe(inode, offset);
		}

		if (!uprobe)
			*is_swbp = is_swbp_at_addr(mm, bp_vaddr);
	} else {
		*is_swbp = -EFAULT;
	}

	if (!uprobe && test_and_clear_bit(MMF_RECALC_UPROBES, &mm->flags))
		mmf_recalc_uprobes(mm);
	up_read(&mm->mmap_sem);

	return uprobe;
}
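
/*
 * Invoke every consumer's handler.  Only if all of them return
 * UPROBE_HANDLER_REMOVE is the breakpoint removed from this mm.
 */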
static void handler_chain(struct uprobe *uprobe, struct pt_regs *regs)
{
	struct uprobe_consumer *uc;
	int remove = UPROBE_HANDLER_REMOVE;

	down_read(&uprobe->register_rwsem);
	for (uc = uprobe->consumers; uc; uc = uc->next) {
		int rc = uc->handler(uc, regs);

		WARN(rc & ~UPROBE_HANDLER_MASK,
			"bad rc=0x%x from %pf()\n", rc, uc->handler);
		remove &= rc;
	}

	if (remove && uprobe->consumers) {
		WARN_ON(!uprobe_is_active(uprobe));
		unapply_uprobe(uprobe, current->mm);
	}
	up_read(&uprobe->register_rwsem);
}
/*
 * Run handler and ask thread to singlestep.
 * Ensure all non-fatal signals cannot interrupt thread while it singlesteps.
 */
static void handle_swbp(struct pt_regs *regs)
{
	struct uprobe *uprobe;
	unsigned long bp_vaddr;
	int uninitialized_var(is_swbp);

	bp_vaddr = uprobe_get_swbp_addr(regs);
	uprobe = find_active_uprobe(bp_vaddr, &is_swbp);

	if (!uprobe) {
		if (is_swbp > 0) {
			/* No matching uprobe; signal SIGTRAP. */
			send_sig(SIGTRAP, current, 0);
		} else {
			/*
			 * Either we raced with uprobe_unregister() or we can't
			 * access this memory. The latter is only possible if
			 * another thread plays with our ->mm. In both cases
			 * we can simply restart. If this vma was unmapped we
			 * can pretend this insn was not executed yet and get
			 * the (correct) SIGSEGV after restart.
			 */
			instruction_pointer_set(regs, bp_vaddr);
		}
		return;
	}

	/* change it in advance for ->handler() and restart */
	instruction_pointer_set(regs, bp_vaddr);

	/*
	 * TODO: move copy_insn/etc into _register and remove this hack.
	 * After we hit the bp, _unregister + _register can install the
	 * new and not-yet-analyzed uprobe at the same address, restart.
	 */
	smp_rmb(); /* pairs with wmb() in install_breakpoint() */
	if (unlikely(!test_bit(UPROBE_COPY_INSN, &uprobe->flags)))
		goto out;

	handler_chain(uprobe, regs);
	if (can_skip_sstep(uprobe, regs))
		goto out;

	if (!pre_ssout(uprobe, regs, bp_vaddr))
		return;

	/* can_skip_sstep() succeeded, or restart if can't singlestep */
out:
	put_uprobe(uprobe);
}
/*
 * Perform required fix-ups and disable singlestep.
 * Allow pending signals to take effect.
 */
static void handle_singlestep(struct uprobe_task *utask, struct pt_regs *regs)
{
	struct uprobe *uprobe;

	uprobe = utask->active_uprobe;
	if (utask->state == UTASK_SSTEP_ACK)
		arch_uprobe_post_xol(&uprobe->arch, regs);
	else if (utask->state == UTASK_SSTEP_TRAPPED)
		arch_uprobe_abort_xol(&uprobe->arch, regs);
	else
		WARN_ON_ONCE(1);

	put_uprobe(uprobe);
	utask->active_uprobe = NULL;
	utask->state = UTASK_RUNNING;
	xol_free_insn_slot(current);

	spin_lock_irq(&current->sighand->siglock);
	recalc_sigpending(); /* see uprobe_deny_signal() */
	spin_unlock_irq(&current->sighand->siglock);
}

/*
 * On breakpoint hit, breakpoint notifier sets the TIF_UPROBE flag and
 * allows the thread to return from interrupt. After that handle_swbp()
 * sets utask->active_uprobe.
 *
 * On singlestep exception, singlestep notifier sets the TIF_UPROBE flag
 * and allows the thread to return from interrupt.
 *
 * While returning to userspace, thread notices the TIF_UPROBE flag and calls
 * uprobe_notify_resume().
 */
void uprobe_notify_resume(struct pt_regs *regs)
{
	struct uprobe_task *utask;

	clear_thread_flag(TIF_UPROBE);

	utask = current->utask;
	if (utask && utask->active_uprobe)
		handle_singlestep(utask, regs);
	else
		handle_swbp(regs);
}

/*
 * uprobe_pre_sstep_notifier gets called from interrupt context as part of
 * notifier mechanism. Set TIF_UPROBE flag and indicate breakpoint hit.
 */
int uprobe_pre_sstep_notifier(struct pt_regs *regs)
{
	if (!current->mm || !test_bit(MMF_HAS_UPROBES, &current->mm->flags))
		return 0;

	set_thread_flag(TIF_UPROBE);
	return 1;
}

/*
 * uprobe_post_sstep_notifier gets called in interrupt context as part of notifier
 * mechanism. Set TIF_UPROBE flag and indicate completion of singlestep.
 */
int uprobe_post_sstep_notifier(struct pt_regs *regs)
{
	struct uprobe_task *utask = current->utask;

	if (!current->mm || !utask || !utask->active_uprobe)
		/* task is currently not uprobed */
		return 0;

	utask->state = UTASK_SSTEP_ACK;
	set_thread_flag(TIF_UPROBE);
	return 1;
}

static struct notifier_block uprobe_exception_nb = {
	.notifier_call		= arch_uprobe_exception_notify,
	.priority		= INT_MAX-1,	/* notified after kprobes, kgdb */
};

static int __init init_uprobes(void)
{
	int i;

	for (i = 0; i < UPROBES_HASH_SZ; i++)
		mutex_init(&uprobes_mmap_mutex[i]);

	if (percpu_init_rwsem(&dup_mmap_sem))
		return -ENOMEM;

	return register_die_notifier(&uprobe_exception_nb);
}
module_init(init_uprobes);

static void __exit exit_uprobes(void)
{
}
module_exit(exit_uprobes);