/*
 *  linux/mm/nommu.c
 *
 *  Replacement code for mm functions to support CPUs that don't
 *  have any form of memory management unit (thus no virtual memory).
 *
 *  See Documentation/nommu-mmap.txt
 *
 *  Copyright (c) 2004-2005 David Howells <dhowells@redhat.com>
 *  Copyright (c) 2000-2003 David McCullough <davidm@snapgear.com>
 *  Copyright (c) 2000-2001 D Jeff Dionne <jeff@uClinux.org>
 *  Copyright (c) 2002      Greg Ungerer <gerg@snapgear.com>
 */

#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/file.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/ptrace.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/mount.h>
#include <linux/personality.h>
#include <linux/security.h>
#include <linux/syscalls.h>

#include <asm/uaccess.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
void *high_memory;
struct page *mem_map;
unsigned long max_mapnr;
unsigned long num_physpages;
unsigned long askedalloc, realalloc;
atomic_t vm_committed_space = ATOMIC_INIT(0);
int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
int sysctl_overcommit_ratio = 50; /* default is 50% */
int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
int heap_stack_gap = 0;

EXPORT_SYMBOL(mem_map);
EXPORT_SYMBOL(num_physpages);

/* list of shareable VMAs */
struct rb_root nommu_vma_tree = RB_ROOT;
DECLARE_RWSEM(nommu_vma_sem);

struct vm_operations_struct generic_file_vm_ops = {
};
/*
 * Handle all mappings that got truncated by a "truncate()"
 * system call.
 *
 * NOTE! We have to be ready to update the memory sharing
 * between the file and the memory map for a potential last
 * incomplete page.  Ugly, but necessary.
 */
int vmtruncate(struct inode *inode, loff_t offset)
{
	struct address_space *mapping = inode->i_mapping;
	unsigned long limit;

	if (inode->i_size < offset)
		goto do_expand;
	i_size_write(inode, offset);

	truncate_inode_pages(mapping, offset);
	goto out_truncate;

do_expand:
	limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
	if (limit != RLIM_INFINITY && offset > limit)
		goto out_sig;
	if (offset > inode->i_sb->s_maxbytes)
		goto out;
	i_size_write(inode, offset);

out_truncate:
	if (inode->i_op && inode->i_op->truncate)
		inode->i_op->truncate(inode);
	return 0;
out_sig:
	send_sig(SIGXFSZ, current, 0);
out:
	return -EFBIG;
}
EXPORT_SYMBOL(vmtruncate);
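
/*
 * Editorial sketch, not part of the original file: a filesystem's
 * setattr path would typically reach vmtruncate() like this when an
 * ATTR_SIZE change comes in.  "myfs_setattr" and its surroundings are
 * hypothetical names used purely for illustration.
 *
 *	static int myfs_setattr(struct dentry *dentry, struct iattr *attr)
 *	{
 *		struct inode *inode = dentry->d_inode;
 *		int ret = inode_change_ok(inode, attr);
 *
 *		if (ret)
 *			return ret;
 *		if (attr->ia_valid & ATTR_SIZE) {
 *			// shrinks drop pagecache; grows check RLIMIT_FSIZE
 *			ret = vmtruncate(inode, attr->ia_size);
 *		}
 *		return ret;
 *	}
 */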

/*
 * Return the total memory allocated for this pointer, not
 * just what the caller asked for.
 *
 * Doesn't have to be accurate, i.e. may have races.
 */
unsigned int kobjsize(const void *objp)
{
	struct page *page;

	if (!objp || !((page = virt_to_page(objp))))
		return 0;

	if (PageSlab(page))
		return ksize(objp);

	BUG_ON(page->index < 0);
	BUG_ON(page->index >= MAX_ORDER);

	return (PAGE_SIZE << page->index);
}
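
/*
 * Illustrative numbers (editorial): because kobjsize() reports the size
 * of the underlying allocation rather than the requested length, a
 * kmalloc(100, ...) on a kernel with power-of-two slab caches would
 * typically report 128 here:
 *
 *	void *p = kmalloc(100, GFP_KERNEL);
 *	printk("asked 100, got %u\n", kobjsize(p));	// e.g. "got 128"
 *	kfree(p);
 *
 * The exact figure depends on the slab allocator's size classes.
 */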

/*
 * get a list of pages in an address range belonging to the specified process
 * and indicate the VMA that covers each page
 * - this is potentially dodgy as we may end up incrementing the page count of
 *   a slab page or a secondary page from a compound page
 * - don't permit access to VMAs that don't support it, such as I/O mappings
 */
int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		   unsigned long start, int len, int write, int force,
		   struct page **pages, struct vm_area_struct **vmas)
{
	struct vm_area_struct *vma;
	unsigned long vm_flags;
	int i;

	/* calculate required read or write permissions.
	 * - if 'force' is set, we only require the "MAY" flags.
	 */
	vm_flags  = write ? (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
	vm_flags &= force ? (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);

	for (i = 0; i < len; i++) {
		vma = find_vma(mm, start);
		if (!vma)
			goto finish_or_fault;

		/* protect what we can, including chardevs */
		if (vma->vm_flags & (VM_IO | VM_PFNMAP) ||
		    !(vm_flags & vma->vm_flags))
			goto finish_or_fault;

		if (pages) {
			pages[i] = virt_to_page(start);
			if (pages[i])
				page_cache_get(pages[i]);
		}
		if (vmas)
			vmas[i] = vma;
		start += PAGE_SIZE;
	}

	return i;

finish_or_fault:
	return i ? : -EFAULT;
}
EXPORT_SYMBOL(get_user_pages);
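
/*
 * Usage sketch (editorial): a driver wanting to look up the pages
 * behind a user buffer might call get_user_pages() roughly like this.
 * Error handling is trimmed and the surrounding context is
 * hypothetical; "uaddr" is a user-supplied address.
 *
 *	struct page *pages[16];
 *	int got;
 *
 *	down_read(&current->mm->mmap_sem);
 *	got = get_user_pages(current, current->mm, uaddr & PAGE_MASK,
 *			     16, 1, 0, pages, NULL);	// write, !force
 *	up_read(&current->mm->mmap_sem);
 *	...
 *	while (got-- > 0)
 *		page_cache_release(pages[got]);
 */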

DEFINE_RWLOCK(vmlist_lock);
struct vm_struct *vmlist;

void vfree(void *addr)
{
	kfree(addr);
}
EXPORT_SYMBOL(vfree);

void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
{
	/*
	 * kmalloc doesn't like __GFP_HIGHMEM for some reason
	 */
	return kmalloc(size, (gfp_mask | __GFP_COMP) & ~__GFP_HIGHMEM);
}
EXPORT_SYMBOL(__vmalloc);

struct page *vmalloc_to_page(void *addr)
{
	return virt_to_page(addr);
}
EXPORT_SYMBOL(vmalloc_to_page);

unsigned long vmalloc_to_pfn(void *addr)
{
	return page_to_pfn(virt_to_page(addr));
}
EXPORT_SYMBOL(vmalloc_to_pfn);

long vread(char *buf, char *addr, unsigned long count)
{
	memcpy(buf, addr, count);
	return count;
}

long vwrite(char *buf, char *addr, unsigned long count)
{
	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	memcpy(addr, buf, count);
	return count;
}

/*
 *	vmalloc  -  allocate virtually contiguous memory
 *
 *	@size:		allocation size
 *
 *	Allocate enough pages to cover @size from the page level
 *	allocator and map them into contiguous kernel virtual space.
 *
 *	For tight control over page level allocator and protection flags
 *	use __vmalloc() instead.
 */
void *vmalloc(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
}
EXPORT_SYMBOL(vmalloc);

void *vmalloc_node(unsigned long size, int node)
{
	return vmalloc(size);
}
EXPORT_SYMBOL(vmalloc_node);

/**
 * vmalloc_32  -  allocate virtually contiguous memory (32bit addressable)
 * @size:	allocation size
 *
 * Allocate enough 32bit PA addressable pages to cover @size from the
 * page level allocator and map them into contiguous kernel virtual space.
 */
void *vmalloc_32(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL);
}
EXPORT_SYMBOL(vmalloc_32);

/**
 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
 * @size:	allocation size
 *
 * The resulting memory area is 32bit addressable and zeroed so it can be
 * mapped to userspace without leaking data.
 */
void *vmalloc_32_user(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL);
}
EXPORT_SYMBOL(vmalloc_32_user);
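
/*
 * Editorial note with a minimal sketch: on !MMU every variant above
 * funnels into kmalloc(), so the returned memory is physically
 * contiguous and the vmalloc()/vfree() pair degenerates to
 * kmalloc()/kfree():
 *
 *	void *p = vmalloc(8192);	// really kmalloc(8192, ...)
 *	if (p)
 *		vfree(p);		// really kfree(p)
 *
 * Large allocations therefore fail once physically contiguous memory
 * becomes fragmented, unlike on MMU kernels.
 */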

void *vmap(struct page **pages, unsigned int count, unsigned long flags, pgprot_t prot)
{
	BUG();
	return NULL;
}
EXPORT_SYMBOL(vmap);

void vunmap(void *addr)
{
	BUG();
}
EXPORT_SYMBOL(vunmap);

/*
 * Implement a stub for vmalloc_sync_all() if the architecture chose not to
 * have one.
 */
void __attribute__((weak)) vmalloc_sync_all(void)
{
}

int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
		   struct page *page)
{
	return -EINVAL;
}
EXPORT_SYMBOL(vm_insert_page);

/*
 *  sys_brk() for the most part doesn't need the global kernel
 *  lock, except when an application is doing something nasty
 *  like trying to un-brk an area that has already been mapped
 *  to a regular file.  in this case, the unmapping will need
 *  to invoke file system routines that need the global lock.
 */
asmlinkage unsigned long sys_brk(unsigned long brk)
{
	struct mm_struct *mm = current->mm;

	if (brk < mm->start_brk || brk > mm->context.end_brk)
		return mm->brk;

	if (mm->brk == brk)
		return mm->brk;

	/*
	 * Always allow shrinking brk
	 */
	if (brk <= mm->brk) {
		mm->brk = brk;
		return brk;
	}

	/*
	 * Ok, looks good - let it rip.
	 */
	return mm->brk = brk;
}
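
/*
 * Behavioural sketch (editorial): under !MMU the heap lives in a fixed
 * hole reserved at exec time (mm->start_brk .. mm->context.end_brk), so
 * sys_brk() never allocates - it only moves the boundary inside that
 * hole.  A rough userspace view:
 *
 *	void *cur = sbrk(0);
 *	if (brk((char *) cur + 4096) == 0)
 *		...	// new break still fits inside the hole
 *	else
 *		...	// past end_brk: the old break is kept and
 *			// libc reports ENOMEM
 */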

#ifdef DEBUG
static void show_process_blocks(void)
{
	struct vm_list_struct *vml;

	printk("Process blocks %d:", current->pid);

	for (vml = current->mm->context.vmlist; vml; vml = vml->next) {
		printk(" %p: %p", vml, vml->vma);
		if (vml->vma)
			printk(" (%d @%lx #%d)",
			       kobjsize((void *) vml->vma->vm_start),
			       vml->vma->vm_start,
			       atomic_read(&vml->vma->vm_usage));
		printk(vml->next ? " ->" : ".\n");
	}
}
#endif /* DEBUG */

/*
 * add a VMA into a process's mm_struct in the appropriate place in the list
 * - should be called with mm->mmap_sem held writelocked
 */
static void add_vma_to_mm(struct mm_struct *mm, struct vm_list_struct *vml)
{
	struct vm_list_struct **ppv;

	for (ppv = &mm->context.vmlist; *ppv; ppv = &(*ppv)->next)
		if ((*ppv)->vma->vm_start > vml->vma->vm_start)
			break;

	vml->next = *ppv;
	*ppv = vml;
}

/*
 * look up the first VMA in which addr resides, NULL if none
 * - should be called with mm->mmap_sem at least held readlocked
 */
struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
{
	struct vm_list_struct *loop, *vml;

	/* search the vm_start ordered list */
	vml = NULL;
	for (loop = mm->context.vmlist; loop; loop = loop->next) {
		if (loop->vma->vm_start > addr)
			break;
		vml = loop;
	}

	if (vml && vml->vma->vm_end > addr)
		return vml->vma;

	return NULL;
}
EXPORT_SYMBOL(find_vma);
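
/*
 * Semantics sketch (editorial): unlike the MMU version of find_vma(),
 * which returns the first VMA ending above addr even when addr falls in
 * a hole, this list walk only returns a VMA that actually contains
 * addr, so a caller can rely on:
 *
 *	vma = find_vma(mm, addr);
 *	if (vma)
 *		BUG_ON(addr < vma->vm_start || addr >= vma->vm_end);
 */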

/*
 * find a VMA
 * - we don't extend stack VMAs under NOMMU conditions
 */
struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
{
	return find_vma(mm, addr);
}

int expand_stack(struct vm_area_struct *vma, unsigned long address)
{
	return -ENOMEM;
}

/*
 * look up the first VMA that exactly matches addr
 * - should be called with mm->mmap_sem at least held readlocked
 */
static inline struct vm_area_struct *find_vma_exact(struct mm_struct *mm,
						    unsigned long addr)
{
	struct vm_list_struct *vml;

	/* search the vm_start ordered list */
	for (vml = mm->context.vmlist; vml; vml = vml->next) {
		if (vml->vma->vm_start == addr)
			return vml->vma;
		if (vml->vma->vm_start > addr)
			break;
	}

	return NULL;
}

/*
 * find a VMA in the global tree
 */
static inline struct vm_area_struct *find_nommu_vma(unsigned long start)
{
	struct vm_area_struct *vma;
	struct rb_node *n = nommu_vma_tree.rb_node;

	while (n) {
		vma = rb_entry(n, struct vm_area_struct, vm_rb);

		if (start < vma->vm_start)
			n = n->rb_left;
		else if (start > vma->vm_start)
			n = n->rb_right;
		else
			return vma;
	}

	return NULL;
}

/*
 * add a VMA in the global tree
 */
static void add_nommu_vma(struct vm_area_struct *vma)
{
	struct vm_area_struct *pvma;
	struct address_space *mapping;
	struct rb_node **p = &nommu_vma_tree.rb_node;
	struct rb_node *parent = NULL;

	/* add the VMA to the mapping */
	if (vma->vm_file) {
		mapping = vma->vm_file->f_mapping;

		flush_dcache_mmap_lock(mapping);
		vma_prio_tree_insert(vma, &mapping->i_mmap);
		flush_dcache_mmap_unlock(mapping);
	}

	/* add the VMA to the master list */
	while (*p) {
		parent = *p;
		pvma = rb_entry(parent, struct vm_area_struct, vm_rb);

		if (vma->vm_start < pvma->vm_start) {
			p = &(*p)->rb_left;
		} else if (vma->vm_start > pvma->vm_start) {
			p = &(*p)->rb_right;
		} else {
			/* mappings are at the same address - this can only
			 * happen for shared-mem chardevs and shared file
			 * mappings backed by ramfs/tmpfs */
			BUG_ON(!(pvma->vm_flags & VM_SHARED));

			if (vma < pvma)
				p = &(*p)->rb_left;
			else if (vma > pvma)
				p = &(*p)->rb_right;
			else
				BUG();
		}
	}

	rb_link_node(&vma->vm_rb, parent, p);
	rb_insert_color(&vma->vm_rb, &nommu_vma_tree);
}

/*
 * delete a VMA from the global list
 */
static void delete_nommu_vma(struct vm_area_struct *vma)
{
	struct address_space *mapping;

	/* remove the VMA from the mapping */
	if (vma->vm_file) {
		mapping = vma->vm_file->f_mapping;

		flush_dcache_mmap_lock(mapping);
		vma_prio_tree_remove(vma, &mapping->i_mmap);
		flush_dcache_mmap_unlock(mapping);
	}

	/* remove from the master list */
	rb_erase(&vma->vm_rb, &nommu_vma_tree);
}

/*
 * determine whether a mapping should be permitted and, if so, what sort of
 * mapping we're capable of supporting
 */
static int validate_mmap_request(struct file *file,
				 unsigned long addr,
				 unsigned long len,
				 unsigned long prot,
				 unsigned long flags,
				 unsigned long pgoff,
				 unsigned long *_capabilities)
{
	unsigned long capabilities;
	unsigned long reqprot = prot;
	int ret;

	/* do the simple checks first */
	if (flags & MAP_FIXED || addr) {
		printk(KERN_DEBUG
		       "%d: Can't do fixed-address/overlay mmap of RAM\n",
		       current->pid);
		return -EINVAL;
	}

	if ((flags & MAP_TYPE) != MAP_PRIVATE &&
	    (flags & MAP_TYPE) != MAP_SHARED)
		return -EINVAL;

	if (!len)
		return -EINVAL;

	/* Careful about overflows.. */
	len = PAGE_ALIGN(len);
	if (!len || len > TASK_SIZE)
		return -ENOMEM;

	/* offset overflow? */
	if ((pgoff + (len >> PAGE_SHIFT)) < pgoff)
		return -EOVERFLOW;

	if (file) {
		/* validate file mapping requests */
		struct address_space *mapping;

		/* files must support mmap */
		if (!file->f_op || !file->f_op->mmap)
			return -ENODEV;

		/* work out if what we've got could possibly be shared
		 * - we support chardevs that provide their own "memory"
		 * - we support files/blockdevs that are memory backed
		 */
		mapping = file->f_mapping;
		if (!mapping)
			mapping = file->f_path.dentry->d_inode->i_mapping;

		capabilities = 0;
		if (mapping && mapping->backing_dev_info)
			capabilities = mapping->backing_dev_info->capabilities;

		if (!capabilities) {
			/* no explicit capabilities set, so assume some
			 * defaults */
			switch (file->f_path.dentry->d_inode->i_mode & S_IFMT) {
			case S_IFREG:
			case S_IFBLK:
				capabilities = BDI_CAP_MAP_COPY;
				break;

			case S_IFCHR:
				capabilities =
					BDI_CAP_MAP_DIRECT |
					BDI_CAP_READ_MAP |
					BDI_CAP_WRITE_MAP;
				break;

			default:
				return -EINVAL;
			}
		}

		/* eliminate any capabilities that we can't support on this
		 * device */
		if (!file->f_op->get_unmapped_area)
			capabilities &= ~BDI_CAP_MAP_DIRECT;
		if (!file->f_op->read)
			capabilities &= ~BDI_CAP_MAP_COPY;

		if (flags & MAP_SHARED) {
			/* do checks for writing, appending and locking */
			if ((prot & PROT_WRITE) &&
			    !(file->f_mode & FMODE_WRITE))
				return -EACCES;

			if (IS_APPEND(file->f_path.dentry->d_inode) &&
			    (file->f_mode & FMODE_WRITE))
				return -EACCES;

			if (locks_verify_locked(file->f_path.dentry->d_inode))
				return -EAGAIN;

			if (!(capabilities & BDI_CAP_MAP_DIRECT))
				return -ENODEV;

			if (((prot & PROT_READ)  && !(capabilities & BDI_CAP_READ_MAP))  ||
			    ((prot & PROT_WRITE) && !(capabilities & BDI_CAP_WRITE_MAP)) ||
			    ((prot & PROT_EXEC)  && !(capabilities & BDI_CAP_EXEC_MAP))) {
				printk("MAP_SHARED not completely supported on !MMU\n");
				return -EINVAL;
			}

			/* we mustn't privatise shared mappings */
			capabilities &= ~BDI_CAP_MAP_COPY;
		} else {
			/* we're going to read the file into private memory we
			 * allocate */
			if (!(capabilities & BDI_CAP_MAP_COPY))
				return -ENODEV;

			/* we don't permit a private writable mapping to be
			 * shared with the backing device */
			if (prot & PROT_WRITE)
				capabilities &= ~BDI_CAP_MAP_DIRECT;
		}

		/* handle executable mappings and implied executable
		 * mappings */
		if (file->f_path.mnt->mnt_flags & MNT_NOEXEC) {
			if (prot & PROT_EXEC)
				return -EPERM;
		} else if ((prot & PROT_READ) && !(prot & PROT_EXEC)) {
			/* handle implication of PROT_EXEC by PROT_READ */
			if (current->personality & READ_IMPLIES_EXEC) {
				if (capabilities & BDI_CAP_EXEC_MAP)
					prot |= PROT_EXEC;
			}
		} else if ((prot & PROT_READ) &&
			   (prot & PROT_EXEC) &&
			   !(capabilities & BDI_CAP_EXEC_MAP)) {
			/* backing file is not executable, try to copy */
			capabilities &= ~BDI_CAP_MAP_DIRECT;
		}
	} else {
		/* anonymous mappings are always memory backed and can be
		 * privately mapped
		 */
		capabilities = BDI_CAP_MAP_COPY;

		/* handle PROT_EXEC implication by PROT_READ */
		if ((prot & PROT_READ) &&
		    (current->personality & READ_IMPLIES_EXEC))
			prot |= PROT_EXEC;
	}

	/* allow the security API to have its say */
	ret = security_file_mmap(file, reqprot, prot, flags, addr, 0);
	if (ret < 0)
		return ret;

	/* looks okay */
	*_capabilities = capabilities;
	return 0;
}
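
/*
 * Editorial sketch: a quasi-memory character device that wants direct
 * shared mappings on !MMU would advertise its capabilities through a
 * backing_dev_info, roughly as below.  "mydev_bdi" is a hypothetical
 * name used only for illustration.
 *
 *	static struct backing_dev_info mydev_bdi = {
 *		.capabilities = BDI_CAP_MAP_DIRECT |
 *				BDI_CAP_READ_MAP |
 *				BDI_CAP_WRITE_MAP,
 *	};
 *
 * Installing that in file->f_mapping->backing_dev_info at open time,
 * plus providing f_op->get_unmapped_area, is what lets the MAP_SHARED
 * branch above succeed instead of returning -ENODEV.
 */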

/*
 * we've determined that we can make the mapping, now translate what we
 * now know into VMA flags
 */
static unsigned long determine_vm_flags(struct file *file,
					unsigned long prot,
					unsigned long flags,
					unsigned long capabilities)
{
	unsigned long vm_flags;

	vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags);
	vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
	/* vm_flags |= mm->def_flags; */

	if (!(capabilities & BDI_CAP_MAP_DIRECT)) {
		/* attempt to share read-only copies of mapped file chunks */
		if (file && !(prot & PROT_WRITE))
			vm_flags |= VM_MAYSHARE;
	} else {
		/* overlay a shareable mapping on the backing device or inode
		 * if possible - used for chardevs, ramfs/tmpfs/shmfs and
		 * romfs/cramfs */
		if (flags & MAP_SHARED)
			vm_flags |= VM_MAYSHARE | VM_SHARED;
		else if ((((vm_flags & capabilities) ^ vm_flags) & BDI_CAP_VMFLAGS) == 0)
			vm_flags |= VM_MAYSHARE;
	}

	/* refuse to let anyone share private mappings with this process if
	 * it's being traced - otherwise breakpoints set in it may interfere
	 * with another untraced process
	 */
	if ((flags & MAP_PRIVATE) && (current->ptrace & PT_PTRACED))
		vm_flags &= ~VM_MAYSHARE;

	return vm_flags;
}

/*
 * set up a shared mapping on a file
 */
static int do_mmap_shared_file(struct vm_area_struct *vma, unsigned long len)
{
	int ret;

	ret = vma->vm_file->f_op->mmap(vma->vm_file, vma);
	if (ret != -ENOSYS)
		return ret;

	/* getting an ENOSYS error indicates that direct mmap isn't
	 * possible (as opposed to tried but failed) so we'll fall
	 * through to making a private copy of the data and mapping
	 * that if we can */
	return -ENODEV;
}

/*
 * set up a private mapping or an anonymous shared mapping
 */
static int do_mmap_private(struct vm_area_struct *vma, unsigned long len)
{
	void *base;
	int ret;

	/* invoke the file's mapping function so that it can keep track of
	 * shared mappings on devices or memory
	 * - VM_MAYSHARE will be set if it may attempt to share
	 */
	if (vma->vm_file) {
		ret = vma->vm_file->f_op->mmap(vma->vm_file, vma);
		if (ret != -ENOSYS) {
			/* shouldn't return success if we're not sharing */
			BUG_ON(ret == 0 && !(vma->vm_flags & VM_MAYSHARE));
			return ret; /* success or a real error */
		}

		/* getting an ENOSYS error indicates that direct mmap isn't
		 * possible (as opposed to tried but failed) so we'll try to
		 * make a private copy of the data and map that instead */
	}

	/* allocate some memory to hold the mapping
	 * - note that this may not return a page-aligned address if the object
	 *   we're allocating is smaller than a page
	 */
	base = kmalloc(len, GFP_KERNEL|__GFP_COMP);
	if (!base)
		goto enomem;

	vma->vm_start = (unsigned long) base;
	vma->vm_end = vma->vm_start + len;
	vma->vm_flags |= VM_MAPPED_COPY;

#ifdef WARN_ON_SLACK
	if (len + WARN_ON_SLACK <= kobjsize(base))
		printk("Allocation of %lu bytes from process %d has %lu bytes of slack\n",
		       len, current->pid, kobjsize(base) - len);
#endif

	if (vma->vm_file) {
		/* read the contents of a file into the copy */
		mm_segment_t old_fs;
		loff_t fpos;

		fpos = vma->vm_pgoff;
		fpos <<= PAGE_SHIFT;

		old_fs = get_fs();
		set_fs(KERNEL_DS);
		ret = vma->vm_file->f_op->read(vma->vm_file, base, len, &fpos);
		set_fs(old_fs);

		if (ret < 0)
			goto error_free;

		/* clear the last little bit */
		if (ret < len)
			memset(base + ret, 0, len - ret);
	} else {
		/* if it's an anonymous mapping, then just clear it */
		memset(base, 0, len);
	}

	return 0;

error_free:
	kfree(base);
	vma->vm_start = 0;
	return ret;

enomem:
	printk("Allocation of length %lu from process %d failed\n",
	       len, current->pid);
	show_free_areas();
	return -ENOMEM;
}
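
/*
 * Editorial sketch of the get_fs()/set_fs() idiom used above: the
 * file's ->read() expects a userspace pointer, so the address-limit
 * check is temporarily widened to let it write into a kernel buffer.
 * "some_file", "kbuf", "klen" and "kpos" are placeholders, not names
 * from this file:
 *
 *	mm_segment_t old_fs = get_fs();
 *
 *	set_fs(KERNEL_DS);	// treat kernel pointers as valid targets
 *	ret = some_file->f_op->read(some_file, kbuf, klen, &kpos);
 *	set_fs(old_fs);		// always restore the old limit
 */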

/*
 * handle mapping creation for uClinux
 */
unsigned long do_mmap_pgoff(struct file *file,
			    unsigned long addr,
			    unsigned long len,
			    unsigned long prot,
			    unsigned long flags,
			    unsigned long pgoff)
{
	struct vm_list_struct *vml = NULL;
	struct vm_area_struct *vma = NULL;
	struct rb_node *rb;
	unsigned long capabilities, vm_flags;
	void *result;
	int ret;

	/* decide whether we should attempt the mapping, and if so what sort of
	 * mapping */
	ret = validate_mmap_request(file, addr, len, prot, flags, pgoff,
				    &capabilities);
	if (ret < 0)
		return ret;

	/* we've determined that we can make the mapping, now translate what we
	 * now know into VMA flags */
	vm_flags = determine_vm_flags(file, prot, flags, capabilities);

	/* we're going to need to record the mapping if it works */
	vml = kzalloc(sizeof(struct vm_list_struct), GFP_KERNEL);
	if (!vml)
		goto error_getting_vml;

	down_write(&nommu_vma_sem);

	/* if we want to share, we need to check for VMAs created by other
	 * mmap() calls that overlap with our proposed mapping
	 * - we can only share with an exact match on most regular files
	 * - shared mappings on character devices and memory backed files are
	 *   permitted to overlap inexactly as far as we are concerned for in
	 *   these cases, sharing is handled in the driver or filesystem rather
	 *   than here
	 */
	if (vm_flags & VM_MAYSHARE) {
		unsigned long pglen = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
		unsigned long vmpglen;

		/* suppress VMA sharing for shared regions */
		if (vm_flags & VM_SHARED &&
		    capabilities & BDI_CAP_MAP_DIRECT)
			goto dont_share_VMAs;

		for (rb = rb_first(&nommu_vma_tree); rb; rb = rb_next(rb)) {
			vma = rb_entry(rb, struct vm_area_struct, vm_rb);

			if (!(vma->vm_flags & VM_MAYSHARE))
				continue;

			/* search for overlapping mappings on the same file */
			if (vma->vm_file->f_path.dentry->d_inode != file->f_path.dentry->d_inode)
				continue;

			if (vma->vm_pgoff >= pgoff + pglen)
				continue;

			vmpglen = vma->vm_end - vma->vm_start + PAGE_SIZE - 1;
			vmpglen >>= PAGE_SHIFT;
			if (pgoff >= vma->vm_pgoff + vmpglen)
				continue;

			/* handle inexactly overlapping matches between mappings */
			if (vma->vm_pgoff != pgoff || vmpglen != pglen) {
				if (!(capabilities & BDI_CAP_MAP_DIRECT))
					goto sharing_violation;
				continue;
			}

			/* we've found a VMA we can share */
			atomic_inc(&vma->vm_usage);

			vml->vma = vma;
			result = (void *) vma->vm_start;
			goto shared;
		}

	dont_share_VMAs:
		vma = NULL;

		/* obtain the address at which to make a shared mapping
		 * - this is the hook for quasi-memory character devices to
		 *   tell us the location of a shared mapping
		 */
		if (file && file->f_op->get_unmapped_area) {
			addr = file->f_op->get_unmapped_area(file, addr, len,
							     pgoff, flags);
			if (IS_ERR((void *) addr)) {
				ret = addr;
				if (ret != (unsigned long) -ENOSYS)
					goto error;

				/* the driver refused to tell us where to site
				 * the mapping so we'll have to attempt to copy
				 * it */
				ret = (unsigned long) -ENODEV;
				if (!(capabilities & BDI_CAP_MAP_COPY))
					goto error;

				capabilities &= ~BDI_CAP_MAP_DIRECT;
			}
		}
	}

	/* we're going to need a VMA struct as well */
	vma = kzalloc(sizeof(struct vm_area_struct), GFP_KERNEL);
	if (!vma)
		goto error_getting_vma;

	INIT_LIST_HEAD(&vma->anon_vma_node);
	atomic_set(&vma->vm_usage, 1);
	if (file)
		get_file(file);
	vma->vm_file = file;
	vma->vm_flags = vm_flags;
	vma->vm_start = addr;
	vma->vm_end = addr + len;
	vma->vm_pgoff = pgoff;

	vml->vma = vma;

	/* set up the mapping */
	if (file && vma->vm_flags & VM_SHARED)
		ret = do_mmap_shared_file(vma, len);
	else
		ret = do_mmap_private(vma, len);
	if (ret < 0)
		goto error;

	/* okay... we have a mapping; now we have to register it */
	result = (void *) vma->vm_start;

	if (vma->vm_flags & VM_MAPPED_COPY) {
		realalloc += kobjsize(result);
		askedalloc += len;
	}

	realalloc += kobjsize(vma);
	askedalloc += sizeof(*vma);

	current->mm->total_vm += len >> PAGE_SHIFT;

	add_nommu_vma(vma);

shared:
	realalloc += kobjsize(vml);
	askedalloc += sizeof(*vml);

	add_vma_to_mm(current->mm, vml);

	up_write(&nommu_vma_sem);

	if (prot & PROT_EXEC)
		flush_icache_range((unsigned long) result,
				   (unsigned long) result + len);

#ifdef DEBUG
	printk("do_mmap:\n");
	show_process_blocks();
#endif

	return (unsigned long) result;

error:
	up_write(&nommu_vma_sem);
	kfree(vml);
	if (vma) {
		if (vma->vm_file)
			fput(vma->vm_file);
		kfree(vma);
	}
	return ret;

sharing_violation:
	up_write(&nommu_vma_sem);
	printk("Attempt to share mismatched mappings\n");
	kfree(vml);
	return -EINVAL;

error_getting_vma:
	up_write(&nommu_vma_sem);
	kfree(vml);
	printk("Allocation of vma for %lu byte allocation from process %d failed\n",
	       len, current->pid);
	show_free_areas();
	return -ENOMEM;

error_getting_vml:
	printk("Allocation of vml for %lu byte allocation from process %d failed\n",
	       len, current->pid);
	show_free_areas();
	return -ENOMEM;
}
EXPORT_SYMBOL(do_mmap_pgoff);
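
/*
 * Userspace-view sketch (editorial): the rules enforced above mean a
 * !MMU mmap() behaves roughly like this:
 *
 *	// MAP_PRIVATE of a regular file: the data is *copied* into a
 *	// kmalloc'd buffer, so later changes to the file are not seen
 *	// and writes to the mapping never reach the file.
 *	p = mmap(NULL, len, PROT_READ, MAP_PRIVATE, fd, 0);
 *
 *	// MAP_FIXED, or any non-NULL address hint, is rejected outright:
 *	p = mmap(hint, len, PROT_READ, MAP_PRIVATE | MAP_FIXED, fd, 0);
 *	// => MAP_FAILED, errno == EINVAL
 */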

/*
 * handle mapping disposal for uClinux
 */
static void put_vma(struct vm_area_struct *vma)
{
	if (vma) {
		down_write(&nommu_vma_sem);

		if (atomic_dec_and_test(&vma->vm_usage)) {
			delete_nommu_vma(vma);

			if (vma->vm_ops && vma->vm_ops->close)
				vma->vm_ops->close(vma);

			/* IO memory and memory shared directly out of the
			 * pagecache from ramfs/tmpfs mustn't be released
			 * here */
			if (vma->vm_flags & VM_MAPPED_COPY) {
				realalloc -= kobjsize((void *) vma->vm_start);
				askedalloc -= vma->vm_end - vma->vm_start;
				kfree((void *) vma->vm_start);
			}

			realalloc -= kobjsize(vma);
			askedalloc -= sizeof(*vma);

			if (vma->vm_file)
				fput(vma->vm_file);
			kfree(vma);
		}

		up_write(&nommu_vma_sem);
	}
}

/*
 * release a mapping
 * - under NOMMU conditions the parameters must match exactly to the mapping to
 *   be removed
 */
int do_munmap(struct mm_struct *mm, unsigned long addr, size_t len)
{
	struct vm_list_struct *vml, **parent;
	unsigned long end = addr + len;

#ifdef DEBUG
	printk("do_munmap:\n");
#endif

	for (parent = &mm->context.vmlist; *parent; parent = &(*parent)->next) {
		if ((*parent)->vma->vm_start > addr)
			break;
		if ((*parent)->vma->vm_start == addr &&
		    ((len == 0) || ((*parent)->vma->vm_end == end)))
			goto found;
	}

	printk("munmap of non-mmaped memory by process %d (%s): %p\n",
	       current->pid, current->comm, (void *) addr);
	return -EINVAL;

found:
	vml = *parent;

	put_vma(vml->vma);

	*parent = vml->next;
	realalloc -= kobjsize(vml);
	askedalloc -= sizeof(*vml);
	kfree(vml);

	update_hiwater_vm(mm);
	mm->total_vm -= len >> PAGE_SHIFT;

#ifdef DEBUG
	show_process_blocks();
#endif

	return 0;
}
EXPORT_SYMBOL(do_munmap);

asmlinkage long sys_munmap(unsigned long addr, size_t len)
{
	int ret;
	struct mm_struct *mm = current->mm;

	down_write(&mm->mmap_sem);
	ret = do_munmap(mm, addr, len);
	up_write(&mm->mmap_sem);
	return ret;
}

/*
 * Release all mappings
 */
void exit_mmap(struct mm_struct *mm)
{
	struct vm_list_struct *tmp;

	if (mm) {
#ifdef DEBUG
		printk("Exit_mmap:\n");
#endif

		mm->total_vm = 0;

		while ((tmp = mm->context.vmlist)) {
			mm->context.vmlist = tmp->next;
			put_vma(tmp->vma);

			realalloc -= kobjsize(tmp);
			askedalloc -= sizeof(*tmp);
			kfree(tmp);
		}

#ifdef DEBUG
		show_process_blocks();
#endif
	}
}

unsigned long do_brk(unsigned long addr, unsigned long len)
{
	return -ENOMEM;
}

/*
 * expand (or shrink) an existing mapping, potentially moving it at the same
 * time (controlled by the MREMAP_MAYMOVE flag and available VM space)
 *
 * under NOMMU conditions, we only permit changing a mapping's size, and only
 * as long as it stays within the hole allocated by the kmalloc() call in
 * do_mmap_pgoff() and the block is not shareable
 *
 * MREMAP_FIXED is not supported under NOMMU conditions
 */
unsigned long do_mremap(unsigned long addr,
			unsigned long old_len, unsigned long new_len,
			unsigned long flags, unsigned long new_addr)
{
	struct vm_area_struct *vma;

	/* insanity checks first */
	if (new_len == 0)
		return (unsigned long) -EINVAL;

	if (flags & MREMAP_FIXED && new_addr != addr)
		return (unsigned long) -EINVAL;

	vma = find_vma_exact(current->mm, addr);
	if (!vma)
		return (unsigned long) -EINVAL;

	if (vma->vm_end != vma->vm_start + old_len)
		return (unsigned long) -EFAULT;

	if (vma->vm_flags & VM_MAYSHARE)
		return (unsigned long) -EPERM;

	if (new_len > kobjsize((void *) addr))
		return (unsigned long) -ENOMEM;

	/* all checks complete - do it */
	vma->vm_end = vma->vm_start + new_len;

	askedalloc -= old_len;
	askedalloc += new_len;

	return vma->vm_start;
}
EXPORT_SYMBOL(do_mremap);
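
/*
 * Worked example (editorial), assuming power-of-two slab rounding: a
 * 5000 byte private mapping sits in an 8192 byte kmalloc block, so
 * mremap() can grow it in place up to kobjsize() of that block:
 *
 *	p = mremap(p, 5000, 8000, 0);	// ok: 8000 <= kobjsize(p) == 8192
 *	p = mremap(p, 8000, 9000, 0);	// => MAP_FAILED, errno == ENOMEM
 *
 * Moving the block and resizing shareable mappings are refused
 * outright.
 */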

asmlinkage unsigned long sys_mremap(unsigned long addr,
	unsigned long old_len, unsigned long new_len,
	unsigned long flags, unsigned long new_addr)
{
	unsigned long ret;

	down_write(&current->mm->mmap_sem);
	ret = do_mremap(addr, old_len, new_len, flags, new_addr);
	up_write(&current->mm->mmap_sem);
	return ret;
}

struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
			 unsigned int foll_flags)
{
	return NULL;
}

int remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
		    unsigned long to, unsigned long size, pgprot_t prot)
{
	vma->vm_start = vma->vm_pgoff << PAGE_SHIFT;
	return 0;
}
EXPORT_SYMBOL(remap_pfn_range);

void swap_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
{
}

unsigned long arch_get_unmapped_area(struct file *file, unsigned long addr,
	unsigned long len, unsigned long pgoff, unsigned long flags)
{
	return -ENOMEM;
}

void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
{
}

void unmap_mapping_range(struct address_space *mapping,
			 loff_t const holebegin, loff_t const holelen,
			 int even_cows)
{
}
EXPORT_SYMBOL(unmap_mapping_range);

/*
 * ask for an unmapped area at which to create a mapping on a file
 */
unsigned long get_unmapped_area(struct file *file, unsigned long addr,
				unsigned long len, unsigned long pgoff,
				unsigned long flags)
{
	unsigned long (*get_area)(struct file *, unsigned long, unsigned long,
				  unsigned long, unsigned long);

	get_area = current->mm->get_unmapped_area;
	if (file && file->f_op && file->f_op->get_unmapped_area)
		get_area = file->f_op->get_unmapped_area;

	if (!get_area)
		return -ENOSYS;

	return get_area(file, addr, len, pgoff, flags);
}
EXPORT_SYMBOL(get_unmapped_area);

/*
 * Check that a process has enough memory to allocate a new virtual
 * mapping. 0 means there is enough memory for the allocation to
 * succeed and -ENOMEM implies there is not.
 *
 * We currently support three overcommit policies, which are set via the
 * vm.overcommit_memory sysctl.  See Documentation/vm/overcommit-accounting
 *
 * Strict overcommit modes added 2002 Feb 26 by Alan Cox.
 * Additional code 2002 Jul 20 by Robert Love.
 *
 * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise.
 *
 * Note this is a helper function intended to be used by LSMs which
 * wish to use this logic.
 */
int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
{
	unsigned long free, allowed;

	vm_acct_memory(pages);

	/*
	 * Sometimes we want to use more memory than we have
	 */
	if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS)
		return 0;

	if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
		unsigned long n;

		free = global_page_state(NR_FILE_PAGES);
		free += nr_swap_pages;

		/*
		 * Any slabs which are created with the
		 * SLAB_RECLAIM_ACCOUNT flag claim to have contents
		 * which are reclaimable, under pressure.  The dentry
		 * cache and most inode caches should fall into this
		 */
		free += global_page_state(NR_SLAB_RECLAIMABLE);

		/*
		 * Leave the last 3% for root
		 */
		if (!cap_sys_admin)
			free -= free / 32;

		if (free > pages)
			return 0;

		/*
		 * nr_free_pages() is very expensive on large systems,
		 * only call if we're about to fail.
		 */
		n = nr_free_pages();

		/*
		 * Leave reserved pages. The pages are not for anonymous pages.
		 */
		if (n <= totalreserve_pages)
			goto error;
		else
			n -= totalreserve_pages;

		/*
		 * Leave the last 3% for root
		 */
		if (!cap_sys_admin)
			n -= n / 32;
		free += n;

		if (free > pages)
			return 0;

		goto error;
	}

	allowed = totalram_pages * sysctl_overcommit_ratio / 100;
	/*
	 * Leave the last 3% for root
	 */
	if (!cap_sys_admin)
		allowed -= allowed / 32;
	allowed += total_swap_pages;

	/* Don't let a single process grow too big:
	   leave 3% of the size of this process for other processes */
	allowed -= current->mm->total_vm / 32;

	/*
	 * cast `allowed' as a signed long because vm_committed_space
	 * sometimes has a negative value
	 */
	if (atomic_read(&vm_committed_space) < (long)allowed)
		return 0;

error:
	vm_unacct_memory(pages);
	return -ENOMEM;
}
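
/*
 * Worked numbers (editorial) for the strict OVERCOMMIT_NEVER branch
 * above, assuming 4 KiB pages, 64 MiB of RAM (16384 pages), no swap,
 * overcommit_ratio = 50 and a non-root caller whose total_vm is 1024
 * pages:
 *
 *	allowed  = 16384 * 50 / 100;	// 8192 pages
 *	allowed -= allowed / 32;	// 8192 - 256 = 7936
 *	allowed -= 1024 / 32;		// 7936 -  32 = 7904
 *
 * The request succeeds while vm_committed_space stays below 7904
 * pages.  Note the "3%" in the comments is really 1/32 = 3.125%.
 */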

int in_gate_area_no_task(unsigned long addr)
{
	return 0;
}

int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	BUG();
	return 0;
}
EXPORT_SYMBOL(filemap_fault);

/*
 * Access another process' address space.
 * - source/target buffer must be kernel space
 */
int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm;

	if (addr + len < addr)
		return 0;

	mm = get_task_mm(tsk);
	if (!mm)
		return 0;

	down_read(&mm->mmap_sem);

	/* the access must start within one of the target process's mappings */
	vma = find_vma(mm, addr);
	if (vma) {
		/* don't overrun this mapping */
		if (addr + len >= vma->vm_end)
			len = vma->vm_end - addr;

		/* only read or write mappings where it is permitted */
		if (write && vma->vm_flags & VM_MAYWRITE)
			len -= copy_to_user((void *) addr, buf, len);
		else if (!write && vma->vm_flags & VM_MAYREAD)
			len -= copy_from_user(buf, (void *) addr, len);
		else
			len = 0;
	} else {
		len = 0;
	}

	up_read(&mm->mmap_sem);
	mmput(mm);
	return len;
}
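
/*
 * Usage sketch (editorial): this is the primitive behind ptrace peeks
 * and pokes.  A PTRACE_PEEKDATA-style read of one word from a traced
 * child would reduce to roughly:
 *
 *	unsigned long word;
 *	int copied;
 *
 *	copied = access_process_vm(child, addr, &word, sizeof(word), 0);
 *	if (copied != sizeof(word))
 *		return -EIO;	// address not mapped or not readable
 *
 * The return value is the number of bytes actually transferred, which
 * may be short if the range runs off the end of the target's VMA.
 */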