/*
 *  linux/mm/nommu.c
 *
 *  Replacement code for mm functions to support CPUs that don't
 *  have any form of memory management unit (thus no virtual memory).
 *
 *  See Documentation/nommu-mmap.txt
 *
 *  Copyright (c) 2004-2005 David Howells <dhowells@redhat.com>
 *  Copyright (c) 2000-2003 David McCullough <davidm@snapgear.com>
 *  Copyright (c) 2000-2001 D Jeff Dionne <jeff@uClinux.org>
 *  Copyright (c) 2002      Greg Ungerer <gerg@snapgear.com>
 */

#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/file.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/ptrace.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/mount.h>
#include <linux/personality.h>
#include <linux/security.h>
#include <linux/syscalls.h>

#include <asm/uaccess.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
void *high_memory;
struct page *mem_map;
unsigned long max_mapnr;
unsigned long num_physpages;
unsigned long askedalloc, realalloc;
atomic_t vm_committed_space = ATOMIC_INIT(0);
int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
int sysctl_overcommit_ratio = 50; /* default is 50% */
int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
int heap_stack_gap = 0;

EXPORT_SYMBOL(mem_map);
EXPORT_SYMBOL(sysctl_max_map_count);
EXPORT_SYMBOL(sysctl_overcommit_memory);
EXPORT_SYMBOL(sysctl_overcommit_ratio);
EXPORT_SYMBOL(vm_committed_space);
EXPORT_SYMBOL(__vm_enough_memory);

/* list of shareable VMAs */
struct rb_root nommu_vma_tree = RB_ROOT;
DECLARE_RWSEM(nommu_vma_sem);

struct vm_operations_struct generic_file_vm_ops = {
};

EXPORT_SYMBOL(vmalloc);
EXPORT_SYMBOL(vfree);
EXPORT_SYMBOL(vmalloc_to_page);
EXPORT_SYMBOL(vmalloc_32);
/*
 * Handle all mappings that got truncated by a "truncate()"
 * system call.
 *
 * NOTE! We have to be ready to update the memory sharing
 * between the file and the memory map for a potential last
 * incomplete page.  Ugly, but necessary.
 */
int vmtruncate(struct inode *inode, loff_t offset)
{
	struct address_space *mapping = inode->i_mapping;
	unsigned long limit;

	if (inode->i_size < offset)
		goto do_expand;
	i_size_write(inode, offset);

	truncate_inode_pages(mapping, offset);
	goto out_truncate;

do_expand:
	limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
	if (limit != RLIM_INFINITY && offset > limit)
		goto out_sig;
	if (offset > inode->i_sb->s_maxbytes)
		goto out;
	i_size_write(inode, offset);

out_truncate:
	if (inode->i_op && inode->i_op->truncate)
		inode->i_op->truncate(inode);
	return 0;
out_sig:
	send_sig(SIGXFSZ, current, 0);
out:
	return -EFBIG;
}

EXPORT_SYMBOL(vmtruncate);
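
/*
 * Illustrative sketch (not part of the original file): a filesystem's
 * setattr path would typically reach the shrink branch above like so,
 * assuming a 4KB page size and an inode backing a 10000-byte file:
 *
 *	vmtruncate(inode, 4096);
 *		- i_size_write() drops i_size from 10000 to 4096
 *		- truncate_inode_pages() discards pagecache pages beyond
 *		  offset 4096
 *		- inode->i_op->truncate() lets the fs free the blocks
 *
 * Expanding instead (offset > i_size) only checks RLIMIT_FSIZE and
 * s_maxbytes before growing i_size; no pages are touched.
 */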
/*
 * Return the total memory allocated for this pointer, not
 * just what the caller asked for.
 *
 * Doesn't have to be accurate, i.e. may have races.
 */
unsigned int kobjsize(const void *objp)
{
	struct page *page;

	if (!objp || !(page = virt_to_page(objp)))
		return 0;

	if (PageSlab(page))
		return ksize(objp);

	BUG_ON(page->index < 0);
	BUG_ON(page->index >= MAX_ORDER);

	return PAGE_SIZE << page->index;
}
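
/*
 * Illustrative sketch (not part of the original file): for a slab-backed
 * pointer kobjsize() reports the slab object size, which may exceed the
 * requested length; for page-backed memory it reports the whole order-N
 * block:
 *
 *	void *p = kmalloc(100, GFP_KERNEL);
 *	kobjsize(p);	- the slab object size, e.g. 128, not 100
 *
 * This slack is exactly what do_mremap() below allows a mapping to grow
 * into, and the difference the askedalloc/realalloc counters track.
 */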
/*
 * The nommu dodgy version :-)
 */
int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		   unsigned long start, int len, int write, int force,
		   struct page **pages, struct vm_area_struct **vmas)
{
	int i;
	static struct vm_area_struct dummy_vma;

	for (i = 0; i < len; i++) {
		if (pages) {
			pages[i] = virt_to_page(start);
			if (pages[i])
				page_cache_get(pages[i]);
		}
		if (vmas)
			vmas[i] = &dummy_vma;
		start += PAGE_SIZE;
	}
	return i;
}

EXPORT_SYMBOL(get_user_pages);

DEFINE_RWLOCK(vmlist_lock);
struct vm_struct *vmlist;

void vfree(void *addr)
{
	kfree(addr);
}

void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
{
	/*
	 * kmalloc doesn't like __GFP_HIGHMEM for some reason
	 */
	return kmalloc(size, gfp_mask & ~__GFP_HIGHMEM);
}

struct page *vmalloc_to_page(void *addr)
{
	return virt_to_page(addr);
}

unsigned long vmalloc_to_pfn(void *addr)
{
	return page_to_pfn(virt_to_page(addr));
}

long vread(char *buf, char *addr, unsigned long count)
{
	memcpy(buf, addr, count);
	return count;
}

long vwrite(char *buf, char *addr, unsigned long count)
{
	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	memcpy(addr, buf, count);
	return count;
}
/*
 *	vmalloc  -  allocate virtually contiguous memory
 *
 *	@size:		allocation size
 *
 *	Allocate enough pages to cover @size from the page level
 *	allocator and map them into contiguous kernel virtual space.
 *
 *	For tight control over page level allocator and protection flags
 *	use __vmalloc() instead.
 */
void *vmalloc(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
}

/*
 *	vmalloc_32  -  allocate virtually contiguous memory (32bit addressable)
 *
 *	@size:		allocation size
 *
 *	Allocate enough 32bit PA addressable pages to cover @size from the
 *	page level allocator and map them into contiguous kernel virtual space.
 */
void *vmalloc_32(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL);
}
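
/*
 * Illustrative sketch (not part of the original file): on !MMU both
 * variants funnel into __vmalloc(), and hence into kmalloc(), so the
 * result is physically contiguous and is released with vfree() (which
 * is just kfree() here):
 *
 *	char *buf = vmalloc(8192);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	vfree(buf);
 *
 * Unlike a real vmalloc(), large requests can therefore fail once
 * physical memory is fragmented, even with plenty of free pages overall.
 */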
void *vmap(struct page **pages, unsigned int count, unsigned long flags, pgprot_t prot)
{
	BUG();
	return NULL;
}

void vunmap(void *addr)
{
	BUG();
}

/*
 * sys_brk() for the most part doesn't need the global kernel
 * lock, except when an application is doing something nasty
 * like trying to un-brk an area that has already been mapped
 * to a regular file.  In this case, the unmapping will need
 * to invoke file system routines that need the global lock.
 */
asmlinkage unsigned long sys_brk(unsigned long brk)
{
	struct mm_struct *mm = current->mm;

	if (brk < mm->start_brk || brk > mm->context.end_brk)
		return mm->brk;

	if (mm->brk == brk)
		return mm->brk;

	/*
	 * Always allow shrinking brk
	 */
	if (brk <= mm->brk) {
		mm->brk = brk;
		return brk;
	}

	/*
	 * Ok, looks good - let it rip.
	 */
	return mm->brk = brk;
}
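
/*
 * Illustrative sketch (not part of the original file): on !MMU the brk
 * region is a fixed hole set up by the binary loader (mm->start_brk up
 * to the arch-specific mm->context.end_brk), so sys_brk() only ever
 * slides the boundary within it:
 *
 *	old = sys_brk(0);		- out of range, returns current brk
 *	new = sys_brk(old + 4096);	- grows, if end_brk permits
 *	sys_brk(old);			- shrinking is always allowed
 *
 * A request beyond end_brk is not an error; the old break is returned
 * unchanged, which is how userspace malloc detects the failure.
 */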
#ifdef DEBUG
static void show_process_blocks(void)
{
	struct vm_list_struct *vml;

	printk("Process blocks %d:", current->pid);

	for (vml = &current->mm->context.vmlist; vml; vml = vml->next) {
		printk(" %p: %p", vml, vml->vma);
		if (vml->vma)
			printk(" (%d @%lx #%d)",
			       kobjsize((void *) vml->vma->vm_start),
			       vml->vma->vm_start,
			       atomic_read(&vml->vma->vm_usage));
		printk(vml->next ? " ->" : ".\n");
	}
}
#endif /* DEBUG */

static inline struct vm_area_struct *find_nommu_vma(unsigned long start)
{
	struct vm_area_struct *vma;
	struct rb_node *n = nommu_vma_tree.rb_node;

	while (n) {
		vma = rb_entry(n, struct vm_area_struct, vm_rb);

		if (start < vma->vm_start)
			n = n->rb_left;
		else if (start > vma->vm_start)
			n = n->rb_right;
		else
			return vma;
	}

	return NULL;
}

static void add_nommu_vma(struct vm_area_struct *vma)
{
	struct vm_area_struct *pvma;
	struct address_space *mapping;
	struct rb_node **p = &nommu_vma_tree.rb_node;
	struct rb_node *parent = NULL;

	/* add the VMA to the mapping */
	if (vma->vm_file) {
		mapping = vma->vm_file->f_mapping;

		flush_dcache_mmap_lock(mapping);
		vma_prio_tree_insert(vma, &mapping->i_mmap);
		flush_dcache_mmap_unlock(mapping);
	}

	/* add the VMA to the master list */
	while (*p) {
		parent = *p;
		pvma = rb_entry(parent, struct vm_area_struct, vm_rb);

		if (vma->vm_start < pvma->vm_start) {
			p = &(*p)->rb_left;
		} else if (vma->vm_start > pvma->vm_start) {
			p = &(*p)->rb_right;
		} else {
			/* mappings are at the same address - this can only
			 * happen for shared-mem chardevs and shared file
			 * mappings backed by ramfs/tmpfs */
			BUG_ON(!(pvma->vm_flags & VM_SHARED));

			if (vma < pvma)
				p = &(*p)->rb_left;
			else if (vma > pvma)
				p = &(*p)->rb_right;
			else
				BUG();
		}
	}

	rb_link_node(&vma->vm_rb, parent, p);
	rb_insert_color(&vma->vm_rb, &nommu_vma_tree);
}

static void delete_nommu_vma(struct vm_area_struct *vma)
{
	struct address_space *mapping;

	/* remove the VMA from the mapping */
	if (vma->vm_file) {
		mapping = vma->vm_file->f_mapping;

		flush_dcache_mmap_lock(mapping);
		vma_prio_tree_remove(vma, &mapping->i_mmap);
		flush_dcache_mmap_unlock(mapping);
	}

	/* remove from the master list */
	rb_erase(&vma->vm_rb, &nommu_vma_tree);
}
/*
 * determine whether a mapping should be permitted and, if so, what sort of
 * mapping we're capable of supporting
 */
static int validate_mmap_request(struct file *file,
				 unsigned long addr,
				 unsigned long len,
				 unsigned long prot,
				 unsigned long flags,
				 unsigned long pgoff,
				 unsigned long *_capabilities)
{
	unsigned long capabilities;
	unsigned long reqprot = prot;
	int ret;

	/* do the simple checks first */
	if (flags & MAP_FIXED || addr) {
		printk(KERN_DEBUG
		       "%d: Can't do fixed-address/overlay mmap of RAM\n",
		       current->pid);
		return -EINVAL;
	}

	if ((flags & MAP_TYPE) != MAP_PRIVATE &&
	    (flags & MAP_TYPE) != MAP_SHARED)
		return -EINVAL;

	if (PAGE_ALIGN(len) == 0)
		return addr;

	if (len > TASK_SIZE)
		return -EINVAL;

	/* offset overflow? */
	if ((pgoff + (len >> PAGE_SHIFT)) < pgoff)
		return -EINVAL;

	if (file) {
		/* validate file mapping requests */
		struct address_space *mapping;

		/* files must support mmap */
		if (!file->f_op || !file->f_op->mmap)
			return -ENODEV;

		/* work out if what we've got could possibly be shared
		 * - we support chardevs that provide their own "memory"
		 * - we support files/blockdevs that are memory backed
		 */
		mapping = file->f_mapping;
		if (!mapping)
			mapping = file->f_dentry->d_inode->i_mapping;

		capabilities = 0;
		if (mapping && mapping->backing_dev_info)
			capabilities = mapping->backing_dev_info->capabilities;

		if (!capabilities) {
			/* no explicit capabilities set, so assume some
			 * defaults */
			switch (file->f_dentry->d_inode->i_mode & S_IFMT) {
			case S_IFREG:
			case S_IFBLK:
				capabilities = BDI_CAP_MAP_COPY;
				break;

			case S_IFCHR:
				capabilities =
					BDI_CAP_MAP_DIRECT |
					BDI_CAP_READ_MAP |
					BDI_CAP_WRITE_MAP;
				break;

			default:
				return -EINVAL;
			}
		}

		/* eliminate any capabilities that we can't support on this
		 * device */
		if (!file->f_op->get_unmapped_area)
			capabilities &= ~BDI_CAP_MAP_DIRECT;
		if (!file->f_op->read)
			capabilities &= ~BDI_CAP_MAP_COPY;

		if (flags & MAP_SHARED) {
			/* do checks for writing, appending and locking */
			if ((prot & PROT_WRITE) &&
			    !(file->f_mode & FMODE_WRITE))
				return -EACCES;

			if (IS_APPEND(file->f_dentry->d_inode) &&
			    (file->f_mode & FMODE_WRITE))
				return -EACCES;

			if (locks_verify_locked(file->f_dentry->d_inode))
				return -EAGAIN;

			if (!(capabilities & BDI_CAP_MAP_DIRECT))
				return -ENODEV;

			if (((prot & PROT_READ)  && !(capabilities & BDI_CAP_READ_MAP))  ||
			    ((prot & PROT_WRITE) && !(capabilities & BDI_CAP_WRITE_MAP)) ||
			    ((prot & PROT_EXEC)  && !(capabilities & BDI_CAP_EXEC_MAP))
			    ) {
				printk("MAP_SHARED not completely supported on !MMU\n");
				return -EINVAL;
			}

			/* we mustn't privatise shared mappings */
			capabilities &= ~BDI_CAP_MAP_COPY;
		} else {
			/* we're going to read the file into private memory we
			 * allocate */
			if (!(capabilities & BDI_CAP_MAP_COPY))
				return -ENODEV;

			/* we don't permit a private writable mapping to be
			 * shared with the backing device */
			if (prot & PROT_WRITE)
				capabilities &= ~BDI_CAP_MAP_DIRECT;
		}

		/* handle executable mappings and implied executable
		 * mappings */
		if (file->f_vfsmnt->mnt_flags & MNT_NOEXEC) {
			if (prot & PROT_EXEC)
				return -EPERM;
		} else if ((prot & PROT_READ) && !(prot & PROT_EXEC)) {
			/* handle implication of PROT_EXEC by PROT_READ */
			if (current->personality & READ_IMPLIES_EXEC) {
				if (capabilities & BDI_CAP_EXEC_MAP)
					prot |= PROT_EXEC;
			}
		} else if ((prot & PROT_READ) &&
			   (prot & PROT_EXEC) &&
			   !(capabilities & BDI_CAP_EXEC_MAP)
			   ) {
			/* backing file is not executable, try to copy */
			capabilities &= ~BDI_CAP_MAP_DIRECT;
		}
	} else {
		/* anonymous mappings are always memory backed and can be
		 * privately mapped
		 */
		capabilities = BDI_CAP_MAP_COPY;

		/* handle PROT_EXEC implication by PROT_READ */
		if ((prot & PROT_READ) &&
		    (current->personality & READ_IMPLIES_EXEC))
			prot |= PROT_EXEC;
	}

	/* allow the security API to have its say */
	ret = security_file_mmap(file, reqprot, prot, flags);
	if (ret < 0)
		return ret;

	/* looks okay */
	*_capabilities = capabilities;
	return 0;
}
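
/*
 * Illustrative sketch (not part of the original file): seen from
 * userspace on a !MMU kernel, the checks above mean roughly:
 *
 *	mmap(NULL, len, PROT_READ, MAP_PRIVATE, fd, 0)
 *		- OK for a regular file: BDI_CAP_MAP_COPY, contents copied
 *	mmap((void *) 0x100000, len, ..., MAP_FIXED, fd, 0)
 *		- always -EINVAL: no fixed-address mappings without an MMU
 *	mmap(NULL, len, PROT_WRITE, MAP_SHARED, fd, 0)
 *		- -ENODEV for an ordinary disk file (no BDI_CAP_MAP_DIRECT),
 *		  but may succeed on ramfs/tmpfs or a chardev that can hand
 *		  out its own memory
 */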
/*
 * we've determined that we can make the mapping, now translate what we
 * now know into VMA flags
 */
static unsigned long determine_vm_flags(struct file *file,
					unsigned long prot,
					unsigned long flags,
					unsigned long capabilities)
{
	unsigned long vm_flags;

	vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags);
	vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
	/* vm_flags |= mm->def_flags; */

	if (!(capabilities & BDI_CAP_MAP_DIRECT)) {
		/* attempt to share read-only copies of mapped file chunks */
		if (file && !(prot & PROT_WRITE))
			vm_flags |= VM_MAYSHARE;
	} else {
		/* overlay a shareable mapping on the backing device or inode
		 * if possible - used for chardevs, ramfs/tmpfs/shmfs and
		 * romfs/cramfs */
		if (flags & MAP_SHARED)
			vm_flags |= VM_MAYSHARE | VM_SHARED;
		else if ((((vm_flags & capabilities) ^ vm_flags) & BDI_CAP_VMFLAGS) == 0)
			vm_flags |= VM_MAYSHARE;
	}

	/* refuse to let anyone share private mappings with this process if
	 * it's being traced - otherwise breakpoints set in it may interfere
	 * with another untraced process
	 */
	if ((flags & MAP_PRIVATE) && (current->ptrace & PT_PTRACED))
		vm_flags &= ~VM_MAYSHARE;

	return vm_flags;
}

/*
 * set up a shared mapping on a file
 */
static int do_mmap_shared_file(struct vm_area_struct *vma, unsigned long len)
{
	int ret;

	ret = vma->vm_file->f_op->mmap(vma->vm_file, vma);
	if (ret != -ENOSYS)
		return ret;

	/* getting an ENOSYS error indicates that direct mmap isn't
	 * possible (as opposed to tried but failed) so we'll fall
	 * through to making a private copy of the data and mapping
	 * that if we can */
	return -ENODEV;
}
/*
 * set up a private mapping or an anonymous shared mapping
 */
static int do_mmap_private(struct vm_area_struct *vma, unsigned long len)
{
	void *base;
	int ret;

	/* invoke the file's mapping function so that it can keep track of
	 * shared mappings on devices or memory
	 * - VM_MAYSHARE will be set if it may attempt to share
	 */
	if (vma->vm_file) {
		ret = vma->vm_file->f_op->mmap(vma->vm_file, vma);
		if (ret != -ENOSYS) {
			/* shouldn't return success if we're not sharing */
			BUG_ON(ret == 0 && !(vma->vm_flags & VM_MAYSHARE));
			return ret; /* success or a real error */
		}

		/* getting an ENOSYS error indicates that direct mmap isn't
		 * possible (as opposed to tried but failed) so we'll try to
		 * make a private copy of the data and map that instead */
	}

	/* allocate some memory to hold the mapping
	 * - note that this may not return a page-aligned address if the object
	 *   we're allocating is smaller than a page
	 */
	base = kmalloc(len, GFP_KERNEL);
	if (!base)
		goto enomem;

	vma->vm_start = (unsigned long) base;
	vma->vm_end = vma->vm_start + len;
	vma->vm_flags |= VM_MAPPED_COPY;

#ifdef WARN_ON_SLACK
	if (len + WARN_ON_SLACK <= kobjsize(base))
		printk("Allocation of %lu bytes from process %d has %lu bytes of slack\n",
		       len, current->pid, kobjsize(base) - len);
#endif

	if (vma->vm_file) {
		/* read the contents of a file into the copy */
		mm_segment_t old_fs;
		loff_t fpos;

		fpos = vma->vm_pgoff;
		fpos <<= PAGE_SHIFT;

		old_fs = get_fs();
		set_fs(KERNEL_DS);
		ret = vma->vm_file->f_op->read(vma->vm_file, base, len, &fpos);
		set_fs(old_fs);

		if (ret < 0)
			goto error_free;

		/* clear the last little bit */
		if (ret < len)
			memset(base + ret, 0, len - ret);
	} else {
		/* if it's an anonymous mapping, then just clear it */
		memset(base, 0, len);
	}

	return 0;

error_free:
	kfree(base);
	vma->vm_start = 0;
	return ret;

enomem:
	printk("Allocation of length %lu from process %d failed\n",
	       len, current->pid);
	show_free_areas();
	return -ENOMEM;
}
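
/*
 * Illustrative sketch (not part of the original file): a consequence of
 * the kmalloc-backed copy above is that a private file mapping is a
 * plain snapshot of the file, so stores never reach the backing file:
 *
 *	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE, fd, 0);
 *	p[0] = 'x';	- modifies only the kmalloc'd snapshot, not the file
 *
 * It also means the mapping consumes physically contiguous RAM up front,
 * so large private mappings can fail with ENOMEM under fragmentation.
 */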
/*
 * handle mapping creation for uClinux
 */
unsigned long do_mmap_pgoff(struct file *file,
			    unsigned long addr,
			    unsigned long len,
			    unsigned long prot,
			    unsigned long flags,
			    unsigned long pgoff)
{
	struct vm_list_struct *vml = NULL;
	struct vm_area_struct *vma = NULL;
	struct rb_node *rb;
	unsigned long capabilities, vm_flags;
	void *result;
	int ret;

	/* decide whether we should attempt the mapping, and if so what sort of
	 * mapping */
	ret = validate_mmap_request(file, addr, len, prot, flags, pgoff,
				    &capabilities);
	if (ret < 0)
		return ret;

	/* we've determined that we can make the mapping, now translate what we
	 * now know into VMA flags */
	vm_flags = determine_vm_flags(file, prot, flags, capabilities);

	/* we're going to need to record the mapping if it works */
	vml = kmalloc(sizeof(struct vm_list_struct), GFP_KERNEL);
	if (!vml)
		goto error_getting_vml;
	memset(vml, 0, sizeof(*vml));

	down_write(&nommu_vma_sem);

	/* if we want to share, we need to check for VMAs created by other
	 * mmap() calls that overlap with our proposed mapping
	 * - we can only share with an exact match on most regular files
	 * - shared mappings on character devices and memory backed files are
	 *   permitted to overlap inexactly as far as we are concerned for in
	 *   these cases, sharing is handled in the driver or filesystem rather
	 *   than here
	 */
	if (vm_flags & VM_MAYSHARE) {
		unsigned long pglen = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
		unsigned long vmpglen;

		for (rb = rb_first(&nommu_vma_tree); rb; rb = rb_next(rb)) {
			vma = rb_entry(rb, struct vm_area_struct, vm_rb);

			if (!(vma->vm_flags & VM_MAYSHARE))
				continue;

			/* search for overlapping mappings on the same file */
			if (vma->vm_file->f_dentry->d_inode != file->f_dentry->d_inode)
				continue;

			if (vma->vm_pgoff >= pgoff + pglen)
				continue;

			vmpglen = vma->vm_end - vma->vm_start + PAGE_SIZE - 1;
			vmpglen >>= PAGE_SHIFT;
			if (pgoff >= vma->vm_pgoff + vmpglen)
				continue;

			/* handle inexactly overlapping matches between mappings */
			if (vma->vm_pgoff != pgoff || vmpglen != pglen) {
				if (!(capabilities & BDI_CAP_MAP_DIRECT))
					goto sharing_violation;
				continue;
			}

			/* we've found a VMA we can share */
			atomic_inc(&vma->vm_usage);

			vml->vma = vma;
			result = (void *) vma->vm_start;
			goto shared;
		}

		vma = NULL;

		/* obtain the address at which to make a shared mapping
		 * - this is the hook for quasi-memory character devices to
		 *   tell us the location of a shared mapping
		 */
		if (file && file->f_op->get_unmapped_area) {
			addr = file->f_op->get_unmapped_area(file, addr, len,
							     pgoff, flags);
			if (IS_ERR((void *) addr)) {
				ret = addr;
				if (ret != (unsigned long) -ENOSYS)
					goto error;

				/* the driver refused to tell us where to site
				 * the mapping so we'll have to attempt to copy
				 * it */
				ret = (unsigned long) -ENODEV;
				if (!(capabilities & BDI_CAP_MAP_COPY))
					goto error;

				capabilities &= ~BDI_CAP_MAP_DIRECT;
			}
		}
	}

	/* we're going to need a VMA struct as well */
	vma = kmalloc(sizeof(struct vm_area_struct), GFP_KERNEL);
	if (!vma)
		goto error_getting_vma;

	memset(vma, 0, sizeof(*vma));
	INIT_LIST_HEAD(&vma->anon_vma_node);
	atomic_set(&vma->vm_usage, 1);
	if (file)
		get_file(file);
	vma->vm_file = file;
	vma->vm_flags = vm_flags;
	vma->vm_start = addr;
	vma->vm_end = addr + len;
	vma->vm_pgoff = pgoff;

	vml->vma = vma;

	/* set up the mapping */
	if (file && vma->vm_flags & VM_SHARED)
		ret = do_mmap_shared_file(vma, len);
	else
		ret = do_mmap_private(vma, len);
	if (ret < 0)
		goto error;

	/* okay... we have a mapping; now we have to register it */
	result = (void *) vma->vm_start;

	if (vma->vm_flags & VM_MAPPED_COPY) {
		realalloc += kobjsize(result);
		askedalloc += len;
	}

	realalloc += kobjsize(vma);
	askedalloc += sizeof(*vma);

	current->mm->total_vm += len >> PAGE_SHIFT;

	add_nommu_vma(vma);

shared:
	realalloc += kobjsize(vml);
	askedalloc += sizeof(*vml);

	vml->next = current->mm->context.vmlist;
	current->mm->context.vmlist = vml;

	up_write(&nommu_vma_sem);

	if (prot & PROT_EXEC)
		flush_icache_range((unsigned long) result,
				   (unsigned long) result + len);

#ifdef DEBUG
	printk("do_mmap:\n");
	show_process_blocks();
#endif

	return (unsigned long) result;

error:
	up_write(&nommu_vma_sem);
	kfree(vml);
	if (vma) {
		/* vm_file may be NULL for an anonymous mapping */
		if (vma->vm_file)
			fput(vma->vm_file);
		kfree(vma);
	}
	return ret;

sharing_violation:
	up_write(&nommu_vma_sem);
	printk("Attempt to share mismatched mappings\n");
	kfree(vml);
	return -EINVAL;

error_getting_vma:
	up_write(&nommu_vma_sem);
	kfree(vml);
	printk("Allocation of vma for %lu byte allocation from process %d failed\n",
	       len, current->pid);
	show_free_areas();
	return -ENOMEM;

error_getting_vml:
	printk("Allocation of vml for %lu byte allocation from process %d failed\n",
	       len, current->pid);
	show_free_areas();
	return -ENOMEM;
}
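
/*
 * Illustrative sketch (not part of the original file): the VM_MAYSHARE
 * scan above is what lets two processes see the same memory without an
 * MMU, provided the requests match exactly.  Assuming a file on ramfs:
 *
 *	process A:	a = mmap(NULL, 8192, PROT_READ | PROT_WRITE,
 *				 MAP_SHARED, fd, 0);
 *	process B:	b = mmap(NULL, 8192, PROT_READ | PROT_WRITE,
 *				 MAP_SHARED, fd, 0);
 *
 * B's request finds A's VMA in nommu_vma_tree (same inode, same pgoff,
 * same length), bumps vm_usage and returns the same address, so a == b.
 * An inexactly overlapping request is tolerated only when the backing
 * device supports direct mapping (BDI_CAP_MAP_DIRECT); otherwise it is
 * rejected as a sharing violation.
 */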
/*
 * handle mapping disposal for uClinux
 */
static void put_vma(struct vm_area_struct *vma)
{
	if (vma) {
		down_write(&nommu_vma_sem);

		if (atomic_dec_and_test(&vma->vm_usage)) {
			delete_nommu_vma(vma);

			if (vma->vm_ops && vma->vm_ops->close)
				vma->vm_ops->close(vma);

			/* IO memory and memory shared directly out of the
			 * pagecache from ramfs/tmpfs mustn't be released
			 * here */
			if (vma->vm_flags & VM_MAPPED_COPY) {
				realalloc -= kobjsize((void *) vma->vm_start);
				askedalloc -= vma->vm_end - vma->vm_start;
				kfree((void *) vma->vm_start);
			}

			realalloc -= kobjsize(vma);
			askedalloc -= sizeof(*vma);

			if (vma->vm_file)
				fput(vma->vm_file);
			kfree(vma);
		}

		up_write(&nommu_vma_sem);
	}
}

int do_munmap(struct mm_struct *mm, unsigned long addr, size_t len)
{
	struct vm_list_struct *vml, **parent;
	unsigned long end = addr + len;

#ifdef DEBUG
	printk("do_munmap:\n");
#endif

	for (parent = &mm->context.vmlist; *parent; parent = &(*parent)->next)
		if ((*parent)->vma->vm_start == addr &&
		    ((len == 0) || ((*parent)->vma->vm_end == end)))
			goto found;

	printk("munmap of non-mmapped memory by process %d (%s): %p\n",
	       current->pid, current->comm, (void *) addr);
	return -EINVAL;

found:
	vml = *parent;

	put_vma(vml->vma);

	*parent = vml->next;
	realalloc -= kobjsize(vml);
	askedalloc -= sizeof(*vml);
	kfree(vml);

	mm->total_vm -= len >> PAGE_SHIFT;

#ifdef DEBUG
	show_process_blocks();
#endif

	return 0;
}

/* Release all mmaps. */
void exit_mmap(struct mm_struct *mm)
{
	struct vm_list_struct *tmp;

	if (mm) {
#ifdef DEBUG
		printk("Exit_mmap:\n");
#endif

		mm->total_vm = 0;

		while ((tmp = mm->context.vmlist)) {
			mm->context.vmlist = tmp->next;
			put_vma(tmp->vma);

			realalloc -= kobjsize(tmp);
			askedalloc -= sizeof(*tmp);
			kfree(tmp);
		}

#ifdef DEBUG
		show_process_blocks();
#endif
	}
}

asmlinkage long sys_munmap(unsigned long addr, size_t len)
{
	int ret;
	struct mm_struct *mm = current->mm;

	down_write(&mm->mmap_sem);
	ret = do_munmap(mm, addr, len);
	up_write(&mm->mmap_sem);

	return ret;
}
unsigned long do_brk(unsigned long addr, unsigned long len)
{
	return -ENOMEM;
}

/*
 * Expand (or shrink) an existing mapping, potentially moving it at the
 * same time (controlled by the MREMAP_MAYMOVE flag and available VM space)
 *
 * MREMAP_FIXED option added 5-Dec-1999 by Benjamin LaHaise
 * This option implies MREMAP_MAYMOVE.
 *
 * On uClinux, we only permit changing a mapping's size, and only as long
 * as it stays within the hole allocated by the kmalloc() call in
 * do_mmap_pgoff() and the block is not shareable.
 */
unsigned long do_mremap(unsigned long addr,
			unsigned long old_len, unsigned long new_len,
			unsigned long flags, unsigned long new_addr)
{
	struct vm_list_struct *vml = NULL;

	/* insanity checks first */
	if (new_len == 0)
		return (unsigned long) -EINVAL;

	if (flags & MREMAP_FIXED && new_addr != addr)
		return (unsigned long) -EINVAL;

	for (vml = current->mm->context.vmlist; vml; vml = vml->next)
		if (vml->vma->vm_start == addr)
			goto found;

	return (unsigned long) -EINVAL;

found:
	if (vml->vma->vm_end != vml->vma->vm_start + old_len)
		return (unsigned long) -EFAULT;

	if (vml->vma->vm_flags & VM_MAYSHARE)
		return (unsigned long) -EPERM;

	if (new_len > kobjsize((void *) addr))
		return (unsigned long) -ENOMEM;

	/* all checks complete - do it */
	vml->vma->vm_end = vml->vma->vm_start + new_len;

	askedalloc -= old_len;
	askedalloc += new_len;

	return vml->vma->vm_start;
}
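
/*
 * Illustrative sketch (not part of the original file): because the
 * backing store is a kmalloc() block, mremap() can only grow a mapping
 * into the slack that kobjsize() reports.  Assuming a 5000-byte private
 * mapping whose kmalloc() block was rounded up to 8192 bytes:
 *
 *	p = mremap(p, 5000, 8000, 0);	- OK, 8000 <= kobjsize() == 8192
 *	p = mremap(p, 8000, 9000, 0);	- fails with ENOMEM; the block
 *					  cannot be moved or reallocated
 *
 * The mapping is never relocated, so on success the returned address
 * always equals the old one.
 */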
/*
 * Look up the first VMA which satisfies  addr < vm_end,  NULL if none
 */
struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
{
	struct vm_list_struct *vml;

	for (vml = mm->context.vmlist; vml; vml = vml->next)
		if (addr >= vml->vma->vm_start && addr < vml->vma->vm_end)
			return vml->vma;

	return NULL;
}

EXPORT_SYMBOL(find_vma);

struct page *follow_page(struct mm_struct *mm, unsigned long addr, int write)
{
	return NULL;
}

struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
{
	return NULL;
}

int remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
		    unsigned long to, unsigned long size, pgprot_t prot)
{
	vma->vm_start = vma->vm_pgoff << PAGE_SHIFT;
	return 0;
}

void swap_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
{
}

unsigned long arch_get_unmapped_area(struct file *file, unsigned long addr,
				     unsigned long len, unsigned long pgoff,
				     unsigned long flags)
{
	return -ENOMEM;
}

void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
{
}

void update_mem_hiwater(struct task_struct *tsk)
{
	unsigned long rss;

	if (likely(tsk->mm)) {
		rss = get_mm_counter(tsk->mm, rss);
		if (tsk->mm->hiwater_rss < rss)
			tsk->mm->hiwater_rss = rss;
		if (tsk->mm->hiwater_vm < tsk->mm->total_vm)
			tsk->mm->hiwater_vm = tsk->mm->total_vm;
	}
}

void unmap_mapping_range(struct address_space *mapping,
			 loff_t const holebegin, loff_t const holelen,
			 int even_cows)
{
}
/*
 * Check that a process has enough memory to allocate a new virtual
 * mapping. 0 means there is enough memory for the allocation to
 * succeed and -ENOMEM implies there is not.
 *
 * We currently support three overcommit policies, which are set via the
 * vm.overcommit_memory sysctl.  See Documentation/vm/overcommit-accounting
 *
 * Strict overcommit modes added 2002 Feb 26 by Alan Cox.
 * Additional code 2002 Jul 20 by Robert Love.
 *
 * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise.
 *
 * Note this is a helper function intended to be used by LSMs which
 * wish to use this logic.
 */
int __vm_enough_memory(long pages, int cap_sys_admin)
{
	unsigned long free, allowed;

	vm_acct_memory(pages);

	/*
	 * Sometimes we want to use more memory than we have
	 */
	if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS)
		return 0;

	if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
		unsigned long n;

		free = get_page_cache_size();
		free += nr_swap_pages;

		/*
		 * Any slabs which are created with the
		 * SLAB_RECLAIM_ACCOUNT flag claim to have contents
		 * which are reclaimable, under pressure.  The dentry
		 * cache and most inode caches should fall into this
		 * category.
		 */
		free += atomic_read(&slab_reclaim_pages);

		/*
		 * Leave the last 3% for root
		 */
		if (!cap_sys_admin)
			free -= free / 32;

		if (free > pages)
			return 0;

		/*
		 * nr_free_pages() is very expensive on large systems,
		 * only call if we're about to fail.
		 */
		n = nr_free_pages();
		if (!cap_sys_admin)
			n -= n / 32;
		free += n;

		if (free > pages)
			return 0;
		vm_unacct_memory(pages);
		return -ENOMEM;
	}

	allowed = totalram_pages * sysctl_overcommit_ratio / 100;
	/*
	 * Leave the last 3% for root
	 */
	if (!cap_sys_admin)
		allowed -= allowed / 32;
	allowed += total_swap_pages;

	/* Don't let a single process grow too big:
	   leave 3% of the size of this process for other processes */
	allowed -= current->mm->total_vm / 32;

	/*
	 * cast `allowed' as a signed long because vm_committed_space
	 * sometimes has a negative value
	 */
	if (atomic_read(&vm_committed_space) < (long)allowed)
		return 0;

	vm_unacct_memory(pages);
	return -ENOMEM;
}
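
/*
 * Illustrative sketch (not part of the original file): worked numbers
 * for the strict (OVERCOMMIT_NEVER) branch above, assuming 64MB of RAM
 * (16384 pages of 4KB), no swap, the default overcommit ratio of 50,
 * and a non-root caller whose process has total_vm of 256 pages:
 *
 *	allowed  = 16384 * 50 / 100	= 8192 pages
 *	allowed -= 8192 / 32		= 7936 pages	(3% kept for root)
 *	allowed += 0					(no swap)
 *	allowed -= 256 / 32		= 7928 pages	(3% of this process)
 *
 * Requests succeed while vm_committed_space stays below 7928 pages;
 * beyond that, new commitments get -ENOMEM.
 */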
int in_gate_area_no_task(unsigned long addr)
{
	return 0;
}