/*
 *  linux/mm/nommu.c
 *
 *  Replacement code for mm functions to support CPUs that don't
 *  have any form of memory management unit (thus no virtual memory).
 *
 *  See Documentation/nommu-mmap.txt
 *
 *  Copyright (c) 2004-2005 David Howells <dhowells@redhat.com>
 *  Copyright (c) 2000-2003 David McCullough <davidm@snapgear.com>
 *  Copyright (c) 2000-2001 D Jeff Dionne <jeff@uClinux.org>
 *  Copyright (c) 2002      Greg Ungerer <gerg@snapgear.com>
 */

#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/file.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/ptrace.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/mount.h>
#include <linux/personality.h>
#include <linux/security.h>
#include <linux/syscalls.h>

#include <asm/uaccess.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
void *high_memory;
struct page *mem_map;
unsigned long max_mapnr;
unsigned long num_physpages;
unsigned long askedalloc, realalloc;
atomic_t vm_committed_space = ATOMIC_INIT(0);
int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
int sysctl_overcommit_ratio = 50; /* default is 50% */
int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
int heap_stack_gap = 0;

EXPORT_SYMBOL(mem_map);
EXPORT_SYMBOL(__vm_enough_memory);

/* list of shareable VMAs */
struct rb_root nommu_vma_tree = RB_ROOT;
DECLARE_RWSEM(nommu_vma_sem);

struct vm_operations_struct generic_file_vm_ops = {
};

EXPORT_SYMBOL(vfree);
EXPORT_SYMBOL(vmalloc_to_page);
EXPORT_SYMBOL(vmalloc_32);
EXPORT_SYMBOL(vmap);
EXPORT_SYMBOL(vunmap);
/*
 * Handle all mappings that got truncated by a "truncate()"
 * system call.
 *
 * NOTE! We have to be ready to update the memory sharing
 * between the file and the memory map for a potential last
 * incomplete page.  Ugly, but necessary.
 */
int vmtruncate(struct inode *inode, loff_t offset)
{
	struct address_space *mapping = inode->i_mapping;
	unsigned long limit;

	if (inode->i_size < offset)
		goto do_expand;
	i_size_write(inode, offset);

	truncate_inode_pages(mapping, offset);
	goto out_truncate;

do_expand:
	limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
	if (limit != RLIM_INFINITY && offset > limit)
		goto out_sig;
	if (offset > inode->i_sb->s_maxbytes)
		goto out;
	i_size_write(inode, offset);

out_truncate:
	if (inode->i_op && inode->i_op->truncate)
		inode->i_op->truncate(inode);
	return 0;
out_sig:
	send_sig(SIGXFSZ, current, 0);
out:
	return -EFBIG;
}

EXPORT_SYMBOL(vmtruncate);
/*
 * Return the total memory allocated for this pointer, not
 * just what the caller asked for.
 *
 * Doesn't have to be accurate, i.e. may have races.
 */
unsigned int kobjsize(const void *objp)
{
	struct page *page;

	if (!objp || !(page = virt_to_page(objp)))
		return 0;

	if (PageSlab(page))
		return ksize(objp);

	BUG_ON(page->index < 0);
	BUG_ON(page->index >= MAX_ORDER);

	return (PAGE_SIZE << page->index);
}
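
/*
 * Illustrative sketch, not part of nommu.c: for non-slab allocations
 * kobjsize() reports the whole power-of-two block backing the object
 * (page->index holds the compound-page order).  The figures below are
 * assumptions for a 4KB-page system, just to show the rounding.
 */
#if 0
#include <stdio.h>

#define PAGE_SIZE 4096UL

int main(void)
{
	/* a 5000-byte kmalloc() is backed by an order-1 (8KB) block */
	unsigned int order = 1;

	printf("kobjsize would report %lu bytes for a 5000-byte object\n",
	       PAGE_SIZE << order);
	return 0;
}
#endif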
/*
 * get a list of pages in an address range belonging to the specified process
 * and indicate the VMA that covers each page
 * - this is potentially dodgy as we may end up incrementing the page count of
 *   a slab page or a secondary page from a compound page
 * - don't permit access to VMAs that don't support it, such as I/O mappings
 */
int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		   unsigned long start, int len, int write, int force,
		   struct page **pages, struct vm_area_struct **vmas)
{
	struct vm_area_struct *vma;
	unsigned long vm_flags;
	int i;

	/* calculate required read or write permissions.
	 * - if 'force' is set, we only require the "MAY" flags.
	 */
	vm_flags  = write ? (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
	vm_flags &= force ? (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);

	for (i = 0; i < len; i++) {
		vma = find_vma(mm, start);
		if (!vma)
			goto finish_or_fault;

		/* protect what we can, including chardevs */
		if (vma->vm_flags & (VM_IO | VM_PFNMAP) ||
		    !(vm_flags & vma->vm_flags))
			goto finish_or_fault;

		if (pages) {
			pages[i] = virt_to_page(start);
			if (pages[i])
				page_cache_get(pages[i]);
		}
		if (vmas)
			vmas[i] = vma;
		start += PAGE_SIZE;
	}

	return i;

finish_or_fault:
	return i ? : -EFAULT;
}
EXPORT_SYMBOL(get_user_pages);
DEFINE_RWLOCK(vmlist_lock);
struct vm_struct *vmlist;

void vfree(void *addr)
{
	kfree(addr);
}

void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
{
	/*
	 * kmalloc doesn't like __GFP_HIGHMEM for some reason
	 */
	return kmalloc(size, (gfp_mask | __GFP_COMP) & ~__GFP_HIGHMEM);
}

struct page *vmalloc_to_page(void *addr)
{
	return virt_to_page(addr);
}

unsigned long vmalloc_to_pfn(void *addr)
{
	return page_to_pfn(virt_to_page(addr));
}

long vread(char *buf, char *addr, unsigned long count)
{
	memcpy(buf, addr, count);
	return count;
}

long vwrite(char *buf, char *addr, unsigned long count)
{
	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	memcpy(addr, buf, count);
	return count;
}
/*
 * vmalloc - allocate virtually contiguous memory
 *
 * @size: allocation size
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vmalloc(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
}
EXPORT_SYMBOL(vmalloc);

void *vmalloc_node(unsigned long size, int node)
{
	return vmalloc(size);
}
EXPORT_SYMBOL(vmalloc_node);

/*
 * vmalloc_32 - allocate virtually contiguous memory (32bit addressable)
 *
 * @size: allocation size
 *
 * Allocate enough 32bit PA addressable pages to cover @size from the
 * page level allocator and map them into contiguous kernel virtual space.
 */
void *vmalloc_32(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL);
}

void *vmap(struct page **pages, unsigned int count, unsigned long flags, pgprot_t prot)
{
	BUG();
	return NULL;
}

void vunmap(void *addr)
{
	BUG();
}
/*
 *  sys_brk() for the most part doesn't need the global kernel
 *  lock, except when an application is doing something nasty
 *  like trying to un-brk an area that has already been mapped
 *  to a regular file.  in this case, the unmapping will need
 *  to invoke file system routines that need the global lock.
 */
asmlinkage unsigned long sys_brk(unsigned long brk)
{
	struct mm_struct *mm = current->mm;

	if (brk < mm->start_brk || brk > mm->context.end_brk)
		return mm->brk;

	if (mm->brk == brk)
		return mm->brk;

	/*
	 * Always allow shrinking brk
	 */
	if (brk <= mm->brk) {
		mm->brk = brk;
		return brk;
	}

	/*
	 * Ok, looks good - let it rip.
	 */
	return mm->brk = brk;
}
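
/*
 * Illustrative userspace sketch, not part of nommu.c: on uClinux the
 * brk area lives in a fixed hole, so sbrk() can always shrink but can
 * grow only up to mm->context.end_brk.  Error handling is minimal.
 */
#if 0
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	void *start = sbrk(0);		/* current break */

	if (sbrk(4096) == (void *) -1)	/* grow; fails past end_brk */
		perror("sbrk grow");
	else
		sbrk(-4096);		/* shrinking is always allowed */

	printf("break started at %p\n", start);
	return 0;
}
#endif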
#ifdef DEBUG
static void show_process_blocks(void)
{
	struct vm_list_struct *vml;

	printk("Process blocks %d:", current->pid);

	for (vml = &current->mm->context.vmlist; vml; vml = vml->next) {
		printk(" %p: %p", vml, vml->vma);
		if (vml->vma)
			printk(" (%d @%lx #%d)",
			       kobjsize((void *) vml->vma->vm_start),
			       vml->vma->vm_start,
			       atomic_read(&vml->vma->vm_usage));
		printk(vml->next ? " ->" : ".\n");
	}
}
#endif /* DEBUG */
static inline struct vm_area_struct *find_nommu_vma(unsigned long start)
{
	struct vm_area_struct *vma;
	struct rb_node *n = nommu_vma_tree.rb_node;

	while (n) {
		vma = rb_entry(n, struct vm_area_struct, vm_rb);

		if (start < vma->vm_start)
			n = n->rb_left;
		else if (start > vma->vm_start)
			n = n->rb_right;
		else
			return vma;
	}

	return NULL;
}

static void add_nommu_vma(struct vm_area_struct *vma)
{
	struct vm_area_struct *pvma;
	struct address_space *mapping;
	struct rb_node **p = &nommu_vma_tree.rb_node;
	struct rb_node *parent = NULL;

	/* add the VMA to the mapping */
	if (vma->vm_file) {
		mapping = vma->vm_file->f_mapping;

		flush_dcache_mmap_lock(mapping);
		vma_prio_tree_insert(vma, &mapping->i_mmap);
		flush_dcache_mmap_unlock(mapping);
	}

	/* add the VMA to the master list */
	while (*p) {
		parent = *p;
		pvma = rb_entry(parent, struct vm_area_struct, vm_rb);

		if (vma->vm_start < pvma->vm_start) {
			p = &(*p)->rb_left;
		} else if (vma->vm_start > pvma->vm_start) {
			p = &(*p)->rb_right;
		} else {
			/* mappings are at the same address - this can only
			 * happen for shared-mem chardevs and shared file
			 * mappings backed by ramfs/tmpfs */
			BUG_ON(!(pvma->vm_flags & VM_SHARED));

			if (vma < pvma)
				p = &(*p)->rb_left;
			else if (vma > pvma)
				p = &(*p)->rb_right;
			else
				BUG();
		}
	}

	rb_link_node(&vma->vm_rb, parent, p);
	rb_insert_color(&vma->vm_rb, &nommu_vma_tree);
}

static void delete_nommu_vma(struct vm_area_struct *vma)
{
	struct address_space *mapping;

	/* remove the VMA from the mapping */
	if (vma->vm_file) {
		mapping = vma->vm_file->f_mapping;

		flush_dcache_mmap_lock(mapping);
		vma_prio_tree_remove(vma, &mapping->i_mmap);
		flush_dcache_mmap_unlock(mapping);
	}

	/* remove from the master list */
	rb_erase(&vma->vm_rb, &nommu_vma_tree);
}
/*
 * determine whether a mapping should be permitted and, if so, what sort of
 * mapping we're capable of supporting
 */
static int validate_mmap_request(struct file *file,
				 unsigned long addr,
				 unsigned long len,
				 unsigned long prot,
				 unsigned long flags,
				 unsigned long pgoff,
				 unsigned long *_capabilities)
{
	unsigned long capabilities;
	unsigned long reqprot = prot;
	int ret;

	/* do the simple checks first */
	if (flags & MAP_FIXED || addr) {
		printk(KERN_DEBUG
		       "%d: Can't do fixed-address/overlay mmap of RAM\n",
		       current->pid);
		return -EINVAL;
	}

	if ((flags & MAP_TYPE) != MAP_PRIVATE &&
	    (flags & MAP_TYPE) != MAP_SHARED)
		return -EINVAL;

	if (PAGE_ALIGN(len) == 0)
		return addr;

	if (len > TASK_SIZE)
		return -EINVAL;

	/* offset overflow? */
	if ((pgoff + (len >> PAGE_SHIFT)) < pgoff)
		return -EINVAL;

	if (file) {
		/* validate file mapping requests */
		struct address_space *mapping;

		/* files must support mmap */
		if (!file->f_op || !file->f_op->mmap)
			return -ENODEV;

		/* work out if what we've got could possibly be shared
		 * - we support chardevs that provide their own "memory"
		 * - we support files/blockdevs that are memory backed
		 */
		mapping = file->f_mapping;
		if (!mapping)
			mapping = file->f_dentry->d_inode->i_mapping;

		capabilities = 0;
		if (mapping && mapping->backing_dev_info)
			capabilities = mapping->backing_dev_info->capabilities;

		if (!capabilities) {
			/* no explicit capabilities set, so assume some
			 * defaults */
			switch (file->f_dentry->d_inode->i_mode & S_IFMT) {
			case S_IFREG:
			case S_IFBLK:
				capabilities = BDI_CAP_MAP_COPY;
				break;

			case S_IFCHR:
				capabilities =
					BDI_CAP_MAP_DIRECT |
					BDI_CAP_READ_MAP |
					BDI_CAP_WRITE_MAP;
				break;

			default:
				return -EINVAL;
			}
		}

		/* eliminate any capabilities that we can't support on this
		 * device */
		if (!file->f_op->get_unmapped_area)
			capabilities &= ~BDI_CAP_MAP_DIRECT;
		if (!file->f_op->read)
			capabilities &= ~BDI_CAP_MAP_COPY;

		if (flags & MAP_SHARED) {
			/* do checks for writing, appending and locking */
			if ((prot & PROT_WRITE) &&
			    !(file->f_mode & FMODE_WRITE))
				return -EACCES;

			if (IS_APPEND(file->f_dentry->d_inode) &&
			    (file->f_mode & FMODE_WRITE))
				return -EACCES;

			if (locks_verify_locked(file->f_dentry->d_inode))
				return -EAGAIN;

			if (!(capabilities & BDI_CAP_MAP_DIRECT))
				return -ENODEV;

			if (((prot & PROT_READ)  && !(capabilities & BDI_CAP_READ_MAP))  ||
			    ((prot & PROT_WRITE) && !(capabilities & BDI_CAP_WRITE_MAP)) ||
			    ((prot & PROT_EXEC)  && !(capabilities & BDI_CAP_EXEC_MAP))
			    ) {
				printk("MAP_SHARED not completely supported on !MMU\n");
				return -EINVAL;
			}

			/* we mustn't privatise shared mappings */
			capabilities &= ~BDI_CAP_MAP_COPY;
		} else {
			/* we're going to read the file into private memory we
			 * allocate */
			if (!(capabilities & BDI_CAP_MAP_COPY))
				return -ENODEV;

			/* we don't permit a private writable mapping to be
			 * shared with the backing device */
			if (prot & PROT_WRITE)
				capabilities &= ~BDI_CAP_MAP_DIRECT;
		}

		/* handle executable mappings and implied executable
		 * mappings */
		if (file->f_vfsmnt->mnt_flags & MNT_NOEXEC) {
			if (prot & PROT_EXEC)
				return -EPERM;
		} else if ((prot & PROT_READ) && !(prot & PROT_EXEC)) {
			/* handle implication of PROT_EXEC by PROT_READ */
			if (current->personality & READ_IMPLIES_EXEC) {
				if (capabilities & BDI_CAP_EXEC_MAP)
					prot |= PROT_EXEC;
			}
		} else if ((prot & PROT_READ) &&
			   (prot & PROT_EXEC) &&
			   !(capabilities & BDI_CAP_EXEC_MAP)
			   ) {
			/* backing file is not executable, try to copy */
			capabilities &= ~BDI_CAP_MAP_DIRECT;
		}
	} else {
		/* anonymous mappings are always memory backed and can be
		 * privately mapped
		 */
		capabilities = BDI_CAP_MAP_COPY;

		/* handle PROT_EXEC implication by PROT_READ */
		if ((prot & PROT_READ) &&
		    (current->personality & READ_IMPLIES_EXEC))
			prot |= PROT_EXEC;
	}

	/* allow the security API to have its say */
	ret = security_file_mmap(file, reqprot, prot, flags);
	if (ret < 0)
		return ret;

	/* looks okay */
	*_capabilities = capabilities;
	return 0;
}
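
/*
 * Illustrative userspace sketch, not part of nommu.c:
 * validate_mmap_request() above rejects any fixed-address mapping, so
 * MAP_FIXED is expected to fail with EINVAL on a !MMU kernel.  The
 * address used is a stand-in.
 */
#if 0
#include <errno.h>
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	void *p = mmap((void *) 0x100000, 4096, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);

	if (p == MAP_FAILED && errno == EINVAL)
		printf("MAP_FIXED rejected, as nommu requires\n");
	return 0;
}
#endif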
/*
 * we've determined that we can make the mapping, now translate what we
 * now know into VMA flags
 */
static unsigned long determine_vm_flags(struct file *file,
					unsigned long prot,
					unsigned long flags,
					unsigned long capabilities)
{
	unsigned long vm_flags;

	vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags);
	vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
	/* vm_flags |= mm->def_flags; */

	if (!(capabilities & BDI_CAP_MAP_DIRECT)) {
		/* attempt to share read-only copies of mapped file chunks */
		if (file && !(prot & PROT_WRITE))
			vm_flags |= VM_MAYSHARE;
	} else {
		/* overlay a shareable mapping on the backing device or inode
		 * if possible - used for chardevs, ramfs/tmpfs/shmfs and
		 * romfs/cramfs */
		if (flags & MAP_SHARED)
			vm_flags |= VM_MAYSHARE | VM_SHARED;
		else if ((((vm_flags & capabilities) ^ vm_flags) & BDI_CAP_VMFLAGS) == 0)
			vm_flags |= VM_MAYSHARE;
	}

	/* refuse to let anyone share private mappings with this process if
	 * it's being traced - otherwise breakpoints set in it may interfere
	 * with another untraced process
	 */
	if ((flags & MAP_PRIVATE) && (current->ptrace & PT_PTRACED))
		vm_flags &= ~VM_MAYSHARE;

	return vm_flags;
}
/*
 * set up a shared mapping on a file
 */
static int do_mmap_shared_file(struct vm_area_struct *vma, unsigned long len)
{
	int ret;

	ret = vma->vm_file->f_op->mmap(vma->vm_file, vma);
	if (ret != -ENOSYS)
		return ret;

	/* getting an ENOSYS error indicates that direct mmap isn't
	 * possible (as opposed to tried but failed) so we'll fall
	 * through to making a private copy of the data and mapping
	 * that if we can */
	return -ENODEV;
}
/*
 * set up a private mapping or an anonymous shared mapping
 */
static int do_mmap_private(struct vm_area_struct *vma, unsigned long len)
{
	void *base;
	int ret;

	/* invoke the file's mapping function so that it can keep track of
	 * shared mappings on devices or memory
	 * - VM_MAYSHARE will be set if it may attempt to share
	 */
	if (vma->vm_file) {
		ret = vma->vm_file->f_op->mmap(vma->vm_file, vma);
		if (ret != -ENOSYS) {
			/* shouldn't return success if we're not sharing */
			BUG_ON(ret == 0 && !(vma->vm_flags & VM_MAYSHARE));
			return ret; /* success or a real error */
		}

		/* getting an ENOSYS error indicates that direct mmap isn't
		 * possible (as opposed to tried but failed) so we'll try to
		 * make a private copy of the data and map that instead */
	}

	/* allocate some memory to hold the mapping
	 * - note that this may not return a page-aligned address if the
	 *   object we're allocating is smaller than a page
	 */
	base = kmalloc(len, GFP_KERNEL|__GFP_COMP);
	if (!base)
		goto enomem;

	vma->vm_start = (unsigned long) base;
	vma->vm_end = vma->vm_start + len;
	vma->vm_flags |= VM_MAPPED_COPY;

#ifdef WARN_ON_SLACK
	if (len + WARN_ON_SLACK <= kobjsize(base))
		printk("Allocation of %lu bytes from process %d has %lu bytes of slack\n",
		       len, current->pid, kobjsize(base) - len);
#endif

	if (vma->vm_file) {
		/* read the contents of a file into the copy */
		mm_segment_t old_fs;
		loff_t fpos;

		fpos = vma->vm_pgoff;
		fpos <<= PAGE_SHIFT;

		old_fs = get_fs();
		set_fs(KERNEL_DS);
		ret = vma->vm_file->f_op->read(vma->vm_file, base, len, &fpos);
		set_fs(old_fs);

		if (ret < 0)
			goto error_free;

		/* clear the last little bit */
		if (ret < len)
			memset(base + ret, 0, len - ret);
	} else {
		/* if it's an anonymous mapping, then just clear it */
		memset(base, 0, len);
	}

	return 0;

error_free:
	kfree(base);
	vma->vm_start = 0;
	return ret;

enomem:
	printk("Allocation of length %lu from process %d failed\n",
	       len, current->pid);
	show_free_areas();
	return -ENOMEM;
}
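
/*
 * Illustrative userspace sketch, not part of nommu.c:
 * do_mmap_private() above kmalloc()s a buffer and read()s the file into
 * it, so a MAP_PRIVATE file mapping on !MMU is a one-shot copy - stores
 * never reach the file, and later file changes are not seen through the
 * mapping.  "/tmp/f" is a stand-in path; error handling is minimal.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/tmp/f", O_RDWR);
	char *p;

	if (fd < 0)
		return 1;
	p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
	if (p == MAP_FAILED)
		return 1;

	p[0] = 'X';		/* modifies the private copy only */
	printf("first byte of copy: %c\n", p[0]);

	munmap(p, 4096);
	close(fd);
	return 0;
}
#endif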
/*
 * handle mapping creation for uClinux
 */
unsigned long do_mmap_pgoff(struct file *file,
			    unsigned long addr,
			    unsigned long len,
			    unsigned long prot,
			    unsigned long flags,
			    unsigned long pgoff)
{
	struct vm_list_struct *vml = NULL;
	struct vm_area_struct *vma = NULL;
	struct rb_node *rb;
	unsigned long capabilities, vm_flags;
	void *result;
	int ret;

	/* decide whether we should attempt the mapping, and if so what sort of
	 * mapping */
	ret = validate_mmap_request(file, addr, len, prot, flags, pgoff,
				    &capabilities);
	if (ret < 0)
		return ret;

	/* we've determined that we can make the mapping, now translate what we
	 * now know into VMA flags */
	vm_flags = determine_vm_flags(file, prot, flags, capabilities);

	/* we're going to need to record the mapping if it works */
	vml = kmalloc(sizeof(struct vm_list_struct), GFP_KERNEL);
	if (!vml)
		goto error_getting_vml;
	memset(vml, 0, sizeof(*vml));

	down_write(&nommu_vma_sem);

	/* if we want to share, we need to check for VMAs created by other
	 * mmap() calls that overlap with our proposed mapping
	 * - we can only share with an exact match on most regular files
	 * - shared mappings on character devices and memory backed files are
	 *   permitted to overlap inexactly as far as we are concerned for in
	 *   these cases, sharing is handled in the driver or filesystem rather
	 *   than here
	 */
	if (vm_flags & VM_MAYSHARE) {
		unsigned long pglen = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
		unsigned long vmpglen;

		for (rb = rb_first(&nommu_vma_tree); rb; rb = rb_next(rb)) {
			vma = rb_entry(rb, struct vm_area_struct, vm_rb);

			if (!(vma->vm_flags & VM_MAYSHARE))
				continue;

			/* search for overlapping mappings on the same file */
			if (vma->vm_file->f_dentry->d_inode != file->f_dentry->d_inode)
				continue;

			if (vma->vm_pgoff >= pgoff + pglen)
				continue;

			vmpglen = vma->vm_end - vma->vm_start + PAGE_SIZE - 1;
			vmpglen >>= PAGE_SHIFT;
			if (pgoff >= vma->vm_pgoff + vmpglen)
				continue;

			/* handle inexactly overlapping matches between mappings */
			if (vma->vm_pgoff != pgoff || vmpglen != pglen) {
				if (!(capabilities & BDI_CAP_MAP_DIRECT))
					goto sharing_violation;
				continue;
			}

			/* we've found a VMA we can share */
			atomic_inc(&vma->vm_usage);

			vml->vma = vma;
			result = (void *) vma->vm_start;
			goto shared;
		}

		vma = NULL;

		/* obtain the address at which to make a shared mapping
		 * - this is the hook for quasi-memory character devices to
		 *   tell us the location of a shared mapping
		 */
		if (file && file->f_op->get_unmapped_area) {
			addr = file->f_op->get_unmapped_area(file, addr, len,
							     pgoff, flags);
			if (IS_ERR((void *) addr)) {
				ret = addr;
				if (ret != (unsigned long) -ENOSYS)
					goto error;

				/* the driver refused to tell us where to site
				 * the mapping so we'll have to attempt to copy
				 * it */
				ret = (unsigned long) -ENODEV;
				if (!(capabilities & BDI_CAP_MAP_COPY))
					goto error;

				capabilities &= ~BDI_CAP_MAP_DIRECT;
			}
		}
	}

	/* we're going to need a VMA struct as well */
	vma = kmalloc(sizeof(struct vm_area_struct), GFP_KERNEL);
	if (!vma)
		goto error_getting_vma;

	memset(vma, 0, sizeof(*vma));
	INIT_LIST_HEAD(&vma->anon_vma_node);
	atomic_set(&vma->vm_usage, 1);
	if (file)
		get_file(file);
	vma->vm_file = file;
	vma->vm_flags = vm_flags;
	vma->vm_start = addr;
	vma->vm_end = addr + len;
	vma->vm_pgoff = pgoff;

	vml->vma = vma;

	/* set up the mapping */
	if (file && vma->vm_flags & VM_SHARED)
		ret = do_mmap_shared_file(vma, len);
	else
		ret = do_mmap_private(vma, len);
	if (ret < 0)
		goto error;

	/* okay... we have a mapping; now we have to register it */
	result = (void *) vma->vm_start;

	if (vma->vm_flags & VM_MAPPED_COPY) {
		realalloc += kobjsize(result);
		askedalloc += len;
	}

	realalloc += kobjsize(vma);
	askedalloc += sizeof(*vma);

	current->mm->total_vm += len >> PAGE_SHIFT;

	add_nommu_vma(vma);

shared:
	realalloc += kobjsize(vml);
	askedalloc += sizeof(*vml);

	vml->next = current->mm->context.vmlist;
	current->mm->context.vmlist = vml;

	up_write(&nommu_vma_sem);

	if (prot & PROT_EXEC)
		flush_icache_range((unsigned long) result,
				   (unsigned long) result + len);

#ifdef DEBUG
	printk("do_mmap:\n");
	show_process_blocks();
#endif

	return (unsigned long) result;

error:
	up_write(&nommu_vma_sem);
	kfree(vml);
	if (vma) {
		if (vma->vm_file)
			fput(vma->vm_file);
		kfree(vma);
	}
	return ret;

sharing_violation:
	up_write(&nommu_vma_sem);
	printk("Attempt to share mismatched mappings\n");
	kfree(vml);
	return -EINVAL;

error_getting_vma:
	up_write(&nommu_vma_sem);
	kfree(vml);
	printk("Allocation of vma for %lu byte allocation from process %d failed\n",
	       len, current->pid);
	show_free_areas();
	return -ENOMEM;

error_getting_vml:
	printk("Allocation of vml for %lu byte allocation from process %d failed\n",
	       len, current->pid);
	show_free_areas();
	return -ENOMEM;
}
/*
 * handle mapping disposal for uClinux
 */
static void put_vma(struct vm_area_struct *vma)
{
	if (vma) {
		down_write(&nommu_vma_sem);

		if (atomic_dec_and_test(&vma->vm_usage)) {
			delete_nommu_vma(vma);

			if (vma->vm_ops && vma->vm_ops->close)
				vma->vm_ops->close(vma);

			/* IO memory and memory shared directly out of the
			 * pagecache from ramfs/tmpfs mustn't be released
			 * here */
			if (vma->vm_flags & VM_MAPPED_COPY) {
				realalloc -= kobjsize((void *) vma->vm_start);
				askedalloc -= vma->vm_end - vma->vm_start;
				kfree((void *) vma->vm_start);
			}

			realalloc -= kobjsize(vma);
			askedalloc -= sizeof(*vma);

			if (vma->vm_file)
				fput(vma->vm_file);
			kfree(vma);
		}

		up_write(&nommu_vma_sem);
	}
}
int do_munmap(struct mm_struct *mm, unsigned long addr, size_t len)
{
	struct vm_list_struct *vml, **parent;
	unsigned long end = addr + len;

#ifdef DEBUG
	printk("do_munmap:\n");
#endif

	for (parent = &mm->context.vmlist; *parent; parent = &(*parent)->next)
		if ((*parent)->vma->vm_start == addr &&
		    ((len == 0) || ((*parent)->vma->vm_end == end)))
			goto found;

	printk("munmap of non-mmaped memory by process %d (%s): %p\n",
	       current->pid, current->comm, (void *) addr);
	return -EINVAL;

found:
	vml = *parent;

	put_vma(vml->vma);

	*parent = vml->next;
	realalloc -= kobjsize(vml);
	askedalloc -= sizeof(*vml);
	kfree(vml);

	update_hiwater_vm(mm);
	mm->total_vm -= len >> PAGE_SHIFT;

#ifdef DEBUG
	show_process_blocks();
#endif

	return 0;
}
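
/*
 * Illustrative userspace sketch, not part of nommu.c: do_munmap()
 * above only accepts the exact start (and, if non-zero, the exact
 * length) of an existing mapping - there is no partial unmap on !MMU.
 */
#if 0
#include <sys/mman.h>

int main(void)
{
	char *p = mmap(NULL, 8192, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		return 1;

	/* munmap(p + 4096, 4096) would fail: not a mapping start */
	return munmap(p, 8192);	/* exact match succeeds */
}
#endif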
/* Release all mmaps. */
void exit_mmap(struct mm_struct *mm)
{
	struct vm_list_struct *tmp;

	if (mm) {
#ifdef DEBUG
		printk("Exit_mmap:\n");
#endif

		mm->total_vm = 0;

		while ((tmp = mm->context.vmlist)) {
			mm->context.vmlist = tmp->next;
			put_vma(tmp->vma);

			realalloc -= kobjsize(tmp);
			askedalloc -= sizeof(*tmp);
			kfree(tmp);
		}

#ifdef DEBUG
		show_process_blocks();
#endif
	}
}

asmlinkage long sys_munmap(unsigned long addr, size_t len)
{
	int ret;
	struct mm_struct *mm = current->mm;

	down_write(&mm->mmap_sem);
	ret = do_munmap(mm, addr, len);
	up_write(&mm->mmap_sem);
	return ret;
}

unsigned long do_brk(unsigned long addr, unsigned long len)
{
	return -ENOMEM;
}
/*
 * Expand (or shrink) an existing mapping, potentially moving it at the
 * same time (controlled by the MREMAP_MAYMOVE flag and available VM space)
 *
 * MREMAP_FIXED option added 5-Dec-1999 by Benjamin LaHaise
 * This option implies MREMAP_MAYMOVE.
 *
 * on uClinux, we only permit changing a mapping's size, and only as long as
 * it stays within the hole allocated by the kmalloc() call in do_mmap_pgoff()
 * and the block is not shareable
 */
unsigned long do_mremap(unsigned long addr,
			unsigned long old_len, unsigned long new_len,
			unsigned long flags, unsigned long new_addr)
{
	struct vm_list_struct *vml = NULL;

	/* insanity checks first */
	if (new_len == 0)
		return (unsigned long) -EINVAL;

	if (flags & MREMAP_FIXED && new_addr != addr)
		return (unsigned long) -EINVAL;

	for (vml = current->mm->context.vmlist; vml; vml = vml->next)
		if (vml->vma->vm_start == addr)
			goto found;

	return (unsigned long) -EINVAL;

found:
	if (vml->vma->vm_end != vml->vma->vm_start + old_len)
		return (unsigned long) -EFAULT;

	if (vml->vma->vm_flags & VM_MAYSHARE)
		return (unsigned long) -EPERM;

	if (new_len > kobjsize((void *) addr))
		return (unsigned long) -ENOMEM;

	/* all checks complete - do it */
	vml->vma->vm_end = vml->vma->vm_start + new_len;

	askedalloc -= old_len;
	askedalloc += new_len;

	return vml->vma->vm_start;
}
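
/*
 * Illustrative userspace sketch, not part of nommu.c: do_mremap()
 * above never moves a mapping; it can only resize within the
 * power-of-two kmalloc() block (as reported by kobjsize()), and it
 * refuses shareable mappings.
 */
#if 0
#define _GNU_SOURCE
#include <sys/mman.h>

int main(void)
{
	char *p = mmap(NULL, 8192, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		return 1;

	/* shrinking stays inside the kmalloc'd block: permitted */
	p = mremap(p, 8192, 4096, 0);

	/* growing past kobjsize(), moving, or resizing a shared
	 * mapping would fail instead */
	return p == MAP_FAILED;
}
#endif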
/*
 * Look up the first VMA which satisfies addr < vm_end, NULL if none
 * - should be called with mm->mmap_sem at least readlocked
 */
struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
{
	struct vm_list_struct *vml;

	for (vml = mm->context.vmlist; vml; vml = vml->next)
		if (addr >= vml->vma->vm_start && addr < vml->vma->vm_end)
			return vml->vma;

	return NULL;
}
EXPORT_SYMBOL(find_vma);

struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
			 unsigned int foll_flags)
{
	return NULL;
}

struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
{
	return NULL;
}

int remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
		    unsigned long to, unsigned long size, pgprot_t prot)
{
	vma->vm_start = vma->vm_pgoff << PAGE_SHIFT;
	return 0;
}
EXPORT_SYMBOL(remap_pfn_range);

void swap_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
{
}

unsigned long arch_get_unmapped_area(struct file *file, unsigned long addr,
				     unsigned long len, unsigned long pgoff,
				     unsigned long flags)
{
	return -ENOMEM;
}

void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
{
}

void unmap_mapping_range(struct address_space *mapping,
			 loff_t const holebegin, loff_t const holelen,
			 int even_cows)
{
}
EXPORT_SYMBOL(unmap_mapping_range);
/*
 * Check that a process has enough memory to allocate a new virtual
 * mapping. 0 means there is enough memory for the allocation to
 * succeed and -ENOMEM implies there is not.
 *
 * We currently support three overcommit policies, which are set via the
 * vm.overcommit_memory sysctl.  See Documentation/vm/overcommit-accounting
 *
 * Strict overcommit modes added 2002 Feb 26 by Alan Cox.
 * Additional code 2002 Jul 20 by Robert Love.
 *
 * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise.
 *
 * Note this is a helper function intended to be used by LSMs which
 * wish to use this logic.
 */
int __vm_enough_memory(long pages, int cap_sys_admin)
{
	unsigned long free, allowed;

	vm_acct_memory(pages);

	/*
	 * Sometimes we want to use more memory than we have
	 */
	if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS)
		return 0;

	if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
		unsigned long n;

		free = global_page_state(NR_FILE_PAGES);
		free += nr_swap_pages;

		/*
		 * Any slabs which are created with the
		 * SLAB_RECLAIM_ACCOUNT flag claim to have contents
		 * which are reclaimable, under pressure.  The dentry
		 * cache and most inode caches should fall into this
		 */
		free += global_page_state(NR_SLAB_RECLAIMABLE);

		/*
		 * Leave the last 3% for root
		 */
		if (!cap_sys_admin)
			free -= free / 32;

		if (free > pages)
			return 0;

		/*
		 * nr_free_pages() is very expensive on large systems,
		 * only call if we're about to fail.
		 */
		n = nr_free_pages();

		/*
		 * Leave reserved pages. The pages are not for anonymous pages.
		 */
		if (n <= totalreserve_pages)
			goto error;
		else
			n -= totalreserve_pages;

		/*
		 * Leave the last 3% for root
		 */
		if (!cap_sys_admin)
			n -= n / 32;
		free += n;

		if (free > pages)
			return 0;

		goto error;
	}

	allowed = totalram_pages * sysctl_overcommit_ratio / 100;
	/*
	 * Leave the last 3% for root
	 */
	if (!cap_sys_admin)
		allowed -= allowed / 32;
	allowed += total_swap_pages;

	/* Don't let a single process grow too big:
	   leave 3% of the size of this process for other processes */
	allowed -= current->mm->total_vm / 32;

	/*
	 * cast `allowed' as a signed long because vm_committed_space
	 * sometimes has a negative value
	 */
	if (atomic_read(&vm_committed_space) < (long)allowed)
		return 0;

error:
	vm_unacct_memory(pages);

	return -ENOMEM;
}
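
/*
 * Illustrative sketch, not part of nommu.c: the strict
 * (OVERCOMMIT_NEVER) branch above computes a commit ceiling.  The
 * numbers below are assumptions, just to show the arithmetic:
 * allowed = ram * ratio% - ~3% root reserve + swap - ~3% of this
 * process's size.
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned long totalram_pages = 16384;	/* 64MB of 4KB pages */
	unsigned long total_swap_pages = 0;	/* typical for uClinux */
	unsigned long total_vm = 256;		/* this process, in pages */
	int ratio = 50;				/* sysctl_overcommit_ratio */

	unsigned long allowed = totalram_pages * ratio / 100;
	allowed -= allowed / 32;		/* leave ~3% for root */
	allowed += total_swap_pages;
	allowed -= total_vm / 32;		/* don't let one task hog */

	printf("commit limit: %lu pages\n", allowed);
	return 0;
}
#endif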
int in_gate_area_no_task(unsigned long addr)
{
	return 0;
}

struct page *filemap_nopage(struct vm_area_struct *area,
			    unsigned long address, int *type)
{
	BUG();
	return NULL;
}

/*
 * Access another process' address space.
 * - source/target buffer must be kernel space
 */
int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm;

	if (addr + len < addr)
		return 0;

	mm = get_task_mm(tsk);
	if (!mm)
		return 0;

	down_read(&mm->mmap_sem);

	/* the access must start within one of the target process's mappings */
	vma = find_vma(mm, addr);
	if (vma) {
		/* don't overrun this mapping */
		if (addr + len >= vma->vm_end)
			len = vma->vm_end - addr;

		/* only read or write mappings where it is permitted */
		if (write && vma->vm_flags & VM_WRITE)
			len -= copy_to_user((void *) addr, buf, len);
		else if (!write && vma->vm_flags & VM_READ)
			len -= copy_from_user(buf, (void *) addr, len);
		else
			len = 0;
	} else {
		len = 0;
	}

	up_read(&mm->mmap_sem);
	mmput(mm);

	return len;
}
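
/*
 * Illustrative userspace sketch, not part of nommu.c: ptrace() word
 * reads are one consumer of access_process_vm().  On !MMU the peek
 * succeeds only if the address falls inside one of the target's VMAs
 * with VM_READ set.  Assumes a traced child already stopped in "pid";
 * error handling is minimal.
 */
#if 0
#include <sys/ptrace.h>
#include <sys/types.h>

long peek_word(pid_t pid, unsigned long addr)
{
	/* the kernel side ends up in access_process_vm(tsk, addr, ...) */
	return ptrace(PTRACE_PEEKDATA, pid, (void *) addr, NULL);
}
#endif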