/*
 *  linux/mm/nommu.c
 *
 *  Replacement code for mm functions to support CPUs that don't
 *  have any form of memory management unit (thus no virtual memory).
 *
 *  See Documentation/nommu-mmap.txt
 *
 *  Copyright (c) 2004-2005 David Howells <dhowells@redhat.com>
 *  Copyright (c) 2000-2003 David McCullough <davidm@snapgear.com>
 *  Copyright (c) 2000-2001 D Jeff Dionne <jeff@uClinux.org>
 *  Copyright (c) 2002      Greg Ungerer <gerg@snapgear.com>
 *  Copyright (c) 2007      Paul Mundt <lethal@linux-sh.org>
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/file.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/tracehook.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/mount.h>
#include <linux/personality.h>
#include <linux/security.h>
#include <linux/syscalls.h>

#include <asm/uaccess.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include "internal.h"

void *high_memory;
struct page *mem_map;
unsigned long max_mapnr;
unsigned long num_physpages;
unsigned long askedalloc, realalloc;
atomic_long_t vm_committed_space = ATOMIC_LONG_INIT(0);
int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
int sysctl_overcommit_ratio = 50; /* default is 50% */
int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
int heap_stack_gap = 0;

EXPORT_SYMBOL(mem_map);
EXPORT_SYMBOL(num_physpages);
/* tree of shareable VMAs */
struct rb_root nommu_vma_tree = RB_ROOT;
DECLARE_RWSEM(nommu_vma_sem);

struct vm_operations_struct generic_file_vm_ops = {
};
/*
 * Handle all mappings that got truncated by a "truncate()"
 * system call.
 *
 * NOTE! We have to be ready to update the memory sharing
 * between the file and the memory map for a potential last
 * incomplete page.  Ugly, but necessary.
 */
int vmtruncate(struct inode *inode, loff_t offset)
{
	struct address_space *mapping = inode->i_mapping;
	unsigned long limit;

	if (inode->i_size < offset)
		goto do_expand;
	i_size_write(inode, offset);
	truncate_inode_pages(mapping, offset);
	goto out_truncate;

do_expand:
	limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
	if (limit != RLIM_INFINITY && offset > limit)
		goto out_sig;
	if (offset > inode->i_sb->s_maxbytes)
		goto out;
	i_size_write(inode, offset);

out_truncate:
	if (inode->i_op && inode->i_op->truncate)
		inode->i_op->truncate(inode);
	return 0;
out_sig:
	send_sig(SIGXFSZ, current, 0);
out:
	return -EFBIG;
}
EXPORT_SYMBOL(vmtruncate);
/*
 * Return the total memory allocated for this pointer, not
 * just what the caller asked for.
 *
 * Doesn't have to be accurate, i.e. may have races.
 */
unsigned int kobjsize(const void *objp)
{
	struct page *page;

	/*
	 * If the object we have should not have ksize performed on it,
	 * return size of 0
	 */
	if (!objp || !virt_addr_valid(objp))
		return 0;

	page = virt_to_head_page(objp);

	/*
	 * If the allocator sets PageSlab, we know the pointer came from
	 * kmalloc().
	 */
	if (PageSlab(page))
		return ksize(objp);

	/*
	 * The ksize() function is only guaranteed to work for pointers
	 * returned by kmalloc(). So handle arbitrary pointers here.
	 */
	return PAGE_SIZE << compound_order(page);
}
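
/*
 * A minimal usage sketch (illustrative, not a kernel API contract):
 * kobjsize() reports the storage actually backing a pointer, so it may
 * exceed the size that was asked for:
 *
 *	void *p = kmalloc(100, GFP_KERNEL);
 *	if (p)
 *		printk(KERN_DEBUG "asked for 100, got %u\n", kobjsize(p));
 *	kfree(p);
 */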
int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		     unsigned long start, int len, int flags,
		     struct page **pages, struct vm_area_struct **vmas)
{
	struct vm_area_struct *vma;
	unsigned long vm_flags;
	int i;
	int write = !!(flags & GUP_FLAGS_WRITE);
	int force = !!(flags & GUP_FLAGS_FORCE);
	int ignore = !!(flags & GUP_FLAGS_IGNORE_VMA_PERMISSIONS);

	/* calculate required read or write permissions.
	 * - if 'force' is set, we only require the "MAY" flags.
	 */
	vm_flags  = write ? (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
	vm_flags &= force ? (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);

	for (i = 0; i < len; i++) {
		vma = find_vma(mm, start);
		if (!vma)
			goto finish_or_fault;

		/* protect what we can, including chardevs */
		if (vma->vm_flags & (VM_IO | VM_PFNMAP) ||
		    (!ignore && !(vm_flags & vma->vm_flags)))
			goto finish_or_fault;

		if (pages) {
			pages[i] = virt_to_page(start);
			if (pages[i])
				page_cache_get(pages[i]);
		}
		if (vmas)
			vmas[i] = vma;
		start += PAGE_SIZE;
	}

	return i;

finish_or_fault:
	return i ? : -EFAULT;
}
/*
 * get a list of pages in an address range belonging to the specified process
 * and indicate the VMA that covers each page
 * - this is potentially dodgy as we may end up incrementing the page count of
 *   a slab page or a secondary page from a compound page
 * - don't permit access to VMAs that don't support it, such as I/O mappings
 */
int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
		   unsigned long start, int len, int write, int force,
		   struct page **pages, struct vm_area_struct **vmas)
{
	int flags = 0;

	if (write)
		flags |= GUP_FLAGS_WRITE;
	if (force)
		flags |= GUP_FLAGS_FORCE;

	return __get_user_pages(tsk, mm,
				start, len, flags,
				pages, vmas);
}
EXPORT_SYMBOL(get_user_pages);
DEFINE_RWLOCK(vmlist_lock);
struct vm_struct *vmlist;

void vfree(const void *addr)
{
	kfree(addr);
}
EXPORT_SYMBOL(vfree);

void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
{
	/*
	 * You can't specify __GFP_HIGHMEM with kmalloc() since kmalloc()
	 * returns only a logical address.
	 */
	return kmalloc(size, (gfp_mask | __GFP_COMP) & ~__GFP_HIGHMEM);
}
EXPORT_SYMBOL(__vmalloc);

void *vmalloc_user(unsigned long size)
{
	void *ret;

	ret = __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
			PAGE_KERNEL);
	if (ret) {
		struct vm_area_struct *vma;

		down_write(&current->mm->mmap_sem);
		vma = find_vma(current->mm, (unsigned long)ret);
		if (vma)
			vma->vm_flags |= VM_USERMAP;
		up_write(&current->mm->mmap_sem);
	}

	return ret;
}
EXPORT_SYMBOL(vmalloc_user);

struct page *vmalloc_to_page(const void *addr)
{
	return virt_to_page(addr);
}
EXPORT_SYMBOL(vmalloc_to_page);

unsigned long vmalloc_to_pfn(const void *addr)
{
	return page_to_pfn(virt_to_page(addr));
}
EXPORT_SYMBOL(vmalloc_to_pfn);

long vread(char *buf, char *addr, unsigned long count)
{
	memcpy(buf, addr, count);
	return count;
}

long vwrite(char *buf, char *addr, unsigned long count)
{
	/* Don't allow overflow */
	if ((unsigned long) addr + count < count)
		count = -(unsigned long) addr;

	memcpy(addr, buf, count);
	return count;
}
/**
 * vmalloc - allocate virtually contiguous memory
 * @size: allocation size
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vmalloc(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
}
EXPORT_SYMBOL(vmalloc);
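
/*
 * A minimal usage sketch (illustrative): on NOMMU, vmalloc() is backed
 * directly by kmalloc(), so the memory is physically contiguous and
 * vfree() simply ends up in kfree():
 *
 *	void *buf = vmalloc(16384);
 *	if (buf) {
 *		memset(buf, 0, 16384);
 *		vfree(buf);
 *	}
 */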
void *vmalloc_node(unsigned long size, int node)
{
	return vmalloc(size);
}
EXPORT_SYMBOL(vmalloc_node);

#ifndef PAGE_KERNEL_EXEC
# define PAGE_KERNEL_EXEC PAGE_KERNEL
#endif
/**
 * vmalloc_exec - allocate virtually contiguous, executable memory
 * @size: allocation size
 *
 * Kernel-internal function to allocate enough pages to cover @size
 * from the page level allocator and map them into contiguous and
 * executable kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vmalloc_exec(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC);
}
/**
 * vmalloc_32 - allocate virtually contiguous memory (32bit addressable)
 * @size: allocation size
 *
 * Allocate enough 32bit PA addressable pages to cover @size from the
 * page level allocator and map them into contiguous kernel virtual space.
 */
void *vmalloc_32(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL, PAGE_KERNEL);
}
EXPORT_SYMBOL(vmalloc_32);
/**
 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
 * @size: allocation size
 *
 * The resulting memory area is 32bit addressable and zeroed so it can be
 * mapped to userspace without leaking data.
 *
 * VM_USERMAP is set on the corresponding VMA so that subsequent calls to
 * remap_vmalloc_range() are permissible.
 */
void *vmalloc_32_user(unsigned long size)
{
	/*
	 * We'll have to sort out the ZONE_DMA bits for 64-bit,
	 * but for now this can simply use vmalloc_user() directly.
	 */
	return vmalloc_user(size);
}
EXPORT_SYMBOL(vmalloc_32_user);
void *vmap(struct page **pages, unsigned int count, unsigned long flags, pgprot_t prot)
{
	BUG();
	return NULL;
}
EXPORT_SYMBOL(vmap);

void vunmap(const void *addr)
{
	BUG();
}
EXPORT_SYMBOL(vunmap);

/*
 * Implement a stub for vmalloc_sync_all() if the architecture chose not to
 * have one.
 */
void __attribute__((weak)) vmalloc_sync_all(void)
{
}

int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
		   struct page *page)
{
	return -EINVAL;
}
EXPORT_SYMBOL(vm_insert_page);
/*
 *  sys_brk() for the most part doesn't need the global kernel
 *  lock, except when an application is doing something nasty
 *  like trying to un-brk an area that has already been mapped
 *  to a regular file.  in this case, the unmapping will need
 *  to invoke file system routines that need the global lock.
 */
asmlinkage unsigned long sys_brk(unsigned long brk)
{
	struct mm_struct *mm = current->mm;

	if (brk < mm->start_brk || brk > mm->context.end_brk)
		return mm->brk;

	if (mm->brk == brk)
		return mm->brk;

	/*
	 * Always allow shrinking brk
	 */
	if (brk <= mm->brk) {
		mm->brk = brk;
		return brk;
	}

	/*
	 * Ok, looks good - let it rip.
	 */
	return mm->brk = brk;
}
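
/*
 * A minimal sketch of the resulting semantics (illustrative): the brk
 * area can only move within the hole reserved between mm->start_brk and
 * mm->context.end_brk at exec time, so from userspace:
 *
 *	brk(old_brk + 4096);	- succeeds only if it fits in the hole
 *	brk(old_brk - 4096);	- shrinking is always allowed
 */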
#ifdef DEBUG
static void show_process_blocks(void)
{
	struct vm_list_struct *vml;

	printk("Process blocks %d:", current->pid);

	for (vml = &current->mm->context.vmlist; vml; vml = vml->next) {
		printk(" %p: %p", vml, vml->vma);
		if (vml->vma)
			printk(" (%d @%lx #%d)",
			       kobjsize((void *) vml->vma->vm_start),
			       vml->vma->vm_start,
			       atomic_read(&vml->vma->vm_usage));
		printk(vml->next ? " ->" : ".\n");
	}
}
#endif /* DEBUG */
/*
 * add a VMA into a process's mm_struct in the appropriate place in the list
 * - should be called with mm->mmap_sem held writelocked
 */
static void add_vma_to_mm(struct mm_struct *mm, struct vm_list_struct *vml)
{
	struct vm_list_struct **ppv;

	for (ppv = &mm->context.vmlist; *ppv; ppv = &(*ppv)->next)
		if ((*ppv)->vma->vm_start > vml->vma->vm_start)
			break;

	vml->next = *ppv;
	*ppv = vml;
}
/*
 * look up the first VMA in which addr resides, NULL if none
 * - should be called with mm->mmap_sem at least held readlocked
 */
struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
{
	struct vm_list_struct *loop, *vml;

	/* search the vm_start ordered list */
	vml = NULL;
	for (loop = mm->context.vmlist; loop; loop = loop->next) {
		if (loop->vma->vm_start > addr)
			break;
		vml = loop;
	}

	if (vml && vml->vma->vm_end > addr)
		return vml->vma;

	return NULL;
}
EXPORT_SYMBOL(find_vma);
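
/*
 * A minimal usage sketch, following the locking rule above (illustrative):
 *
 *	down_read(&mm->mmap_sem);
 *	vma = find_vma(mm, addr);
 *	if (vma)
 *		... addr lies within [vma->vm_start, vma->vm_end) ...
 *	up_read(&mm->mmap_sem);
 */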
/*
 * find a VMA
 * - we don't extend stack VMAs under NOMMU conditions
 */
struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
{
	return find_vma(mm, addr);
}

int expand_stack(struct vm_area_struct *vma, unsigned long address)
{
	return -ENOMEM;
}
/*
 * look up the first VMA that exactly matches addr
 * - should be called with mm->mmap_sem at least held readlocked
 */
static inline struct vm_area_struct *find_vma_exact(struct mm_struct *mm,
						    unsigned long addr)
{
	struct vm_list_struct *vml;

	/* search the vm_start ordered list */
	for (vml = mm->context.vmlist; vml; vml = vml->next) {
		if (vml->vma->vm_start == addr)
			return vml->vma;
		if (vml->vma->vm_start > addr)
			break;
	}

	return NULL;
}
/*
 * find a VMA in the global tree
 */
static inline struct vm_area_struct *find_nommu_vma(unsigned long start)
{
	struct vm_area_struct *vma;
	struct rb_node *n = nommu_vma_tree.rb_node;

	while (n) {
		vma = rb_entry(n, struct vm_area_struct, vm_rb);

		if (start < vma->vm_start)
			n = n->rb_left;
		else if (start > vma->vm_start)
			n = n->rb_right;
		else
			return vma;
	}

	return NULL;
}

/*
 * add a VMA in the global tree
 */
static void add_nommu_vma(struct vm_area_struct *vma)
{
	struct vm_area_struct *pvma;
	struct address_space *mapping;
	struct rb_node **p = &nommu_vma_tree.rb_node;
	struct rb_node *parent = NULL;

	/* add the VMA to the mapping */
	if (vma->vm_file) {
		mapping = vma->vm_file->f_mapping;

		flush_dcache_mmap_lock(mapping);
		vma_prio_tree_insert(vma, &mapping->i_mmap);
		flush_dcache_mmap_unlock(mapping);
	}

	/* add the VMA to the master list */
	while (*p) {
		parent = *p;
		pvma = rb_entry(parent, struct vm_area_struct, vm_rb);

		if (vma->vm_start < pvma->vm_start) {
			p = &(*p)->rb_left;
		} else if (vma->vm_start > pvma->vm_start) {
			p = &(*p)->rb_right;
		} else {
			/* mappings are at the same address - this can only
			 * happen for shared-mem chardevs and shared file
			 * mappings backed by ramfs/tmpfs */
			BUG_ON(!(pvma->vm_flags & VM_SHARED));

			if (vma < pvma)
				p = &(*p)->rb_left;
			else if (vma > pvma)
				p = &(*p)->rb_right;
			else
				BUG();
		}
	}

	rb_link_node(&vma->vm_rb, parent, p);
	rb_insert_color(&vma->vm_rb, &nommu_vma_tree);
}

/*
 * delete a VMA from the global list
 */
static void delete_nommu_vma(struct vm_area_struct *vma)
{
	struct address_space *mapping;

	/* remove the VMA from the mapping */
	if (vma->vm_file) {
		mapping = vma->vm_file->f_mapping;

		flush_dcache_mmap_lock(mapping);
		vma_prio_tree_remove(vma, &mapping->i_mmap);
		flush_dcache_mmap_unlock(mapping);
	}

	/* remove from the master list */
	rb_erase(&vma->vm_rb, &nommu_vma_tree);
}
/*
 * determine whether a mapping should be permitted and, if so, what sort of
 * mapping we're capable of supporting
 */
static int validate_mmap_request(struct file *file,
				 unsigned long addr,
				 unsigned long len,
				 unsigned long prot,
				 unsigned long flags,
				 unsigned long pgoff,
				 unsigned long *_capabilities)
{
	unsigned long capabilities;
	unsigned long reqprot = prot;
	int ret;

	/* do the simple checks first */
	if (flags & MAP_FIXED || addr) {
		printk(KERN_DEBUG
		       "%d: Can't do fixed-address/overlay mmap of RAM\n",
		       current->pid);
		return -EINVAL;
	}

	if ((flags & MAP_TYPE) != MAP_PRIVATE &&
	    (flags & MAP_TYPE) != MAP_SHARED)
		return -EINVAL;

	if (!len)
		return -EINVAL;

	/* Careful about overflows.. */
	len = PAGE_ALIGN(len);
	if (!len || len > TASK_SIZE)
		return -ENOMEM;

	/* offset overflow? */
	if ((pgoff + (len >> PAGE_SHIFT)) < pgoff)
		return -EOVERFLOW;

	if (file) {
		/* validate file mapping requests */
		struct address_space *mapping;

		/* files must support mmap */
		if (!file->f_op || !file->f_op->mmap)
			return -ENODEV;

		/* work out if what we've got could possibly be shared
		 * - we support chardevs that provide their own "memory"
		 * - we support files/blockdevs that are memory backed
		 */
		mapping = file->f_mapping;
		if (!mapping)
			mapping = file->f_path.dentry->d_inode->i_mapping;

		capabilities = 0;
		if (mapping && mapping->backing_dev_info)
			capabilities = mapping->backing_dev_info->capabilities;

		if (!capabilities) {
			/* no explicit capabilities set, so assume some
			 * defaults */
			switch (file->f_path.dentry->d_inode->i_mode & S_IFMT) {
			case S_IFREG:
			case S_IFBLK:
				capabilities = BDI_CAP_MAP_COPY;
				break;

			case S_IFCHR:
				capabilities =
					BDI_CAP_MAP_DIRECT |
					BDI_CAP_READ_MAP |
					BDI_CAP_WRITE_MAP;
				break;

			default:
				return -EINVAL;
			}
		}

		/* eliminate any capabilities that we can't support on this
		 * device */
		if (!file->f_op->get_unmapped_area)
			capabilities &= ~BDI_CAP_MAP_DIRECT;
		if (!file->f_op->read)
			capabilities &= ~BDI_CAP_MAP_COPY;

		if (flags & MAP_SHARED) {
			/* do checks for writing, appending and locking */
			if ((prot & PROT_WRITE) &&
			    !(file->f_mode & FMODE_WRITE))
				return -EACCES;

			if (IS_APPEND(file->f_path.dentry->d_inode) &&
			    (file->f_mode & FMODE_WRITE))
				return -EACCES;

			if (locks_verify_locked(file->f_path.dentry->d_inode))
				return -EAGAIN;

			if (!(capabilities & BDI_CAP_MAP_DIRECT))
				return -ENODEV;

			if (((prot & PROT_READ)  && !(capabilities & BDI_CAP_READ_MAP))  ||
			    ((prot & PROT_WRITE) && !(capabilities & BDI_CAP_WRITE_MAP)) ||
			    ((prot & PROT_EXEC)  && !(capabilities & BDI_CAP_EXEC_MAP))
			    ) {
				printk("MAP_SHARED not completely supported on !MMU\n");
				return -EINVAL;
			}

			/* we mustn't privatise shared mappings */
			capabilities &= ~BDI_CAP_MAP_COPY;
		} else {
			/* we're going to read the file into private memory we
			 * allocate */
			if (!(capabilities & BDI_CAP_MAP_COPY))
				return -ENODEV;

			/* we don't permit a private writable mapping to be
			 * shared with the backing device */
			if (prot & PROT_WRITE)
				capabilities &= ~BDI_CAP_MAP_DIRECT;
		}

		/* handle executable mappings and implied executable
		 * mappings */
		if (file->f_path.mnt->mnt_flags & MNT_NOEXEC) {
			if (prot & PROT_EXEC)
				return -EPERM;
		} else if ((prot & PROT_READ) && !(prot & PROT_EXEC)) {
			/* handle implication of PROT_EXEC by PROT_READ */
			if (current->personality & READ_IMPLIES_EXEC) {
				if (capabilities & BDI_CAP_EXEC_MAP)
					prot |= PROT_EXEC;
			}
		} else if ((prot & PROT_READ) &&
			   (prot & PROT_EXEC) &&
			   !(capabilities & BDI_CAP_EXEC_MAP)
			   ) {
			/* backing file is not executable, try to copy */
			capabilities &= ~BDI_CAP_MAP_DIRECT;
		}
	} else {
		/* anonymous mappings are always memory backed and can be
		 * privately mapped
		 */
		capabilities = BDI_CAP_MAP_COPY;

		/* handle PROT_EXEC implication by PROT_READ */
		if ((prot & PROT_READ) &&
		    (current->personality & READ_IMPLIES_EXEC))
			prot |= PROT_EXEC;
	}

	/* allow the security API to have its say */
	ret = security_file_mmap(file, reqprot, prot, flags, addr, 0);
	if (ret < 0)
		return ret;

	/* looks okay */
	*_capabilities = capabilities;
	return 0;
}
/*
 * we've determined that we can make the mapping, now translate what we
 * now know into VMA flags
 */
static unsigned long determine_vm_flags(struct file *file,
					unsigned long prot,
					unsigned long flags,
					unsigned long capabilities)
{
	unsigned long vm_flags;

	vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags);
	vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
	/* vm_flags |= mm->def_flags; */

	if (!(capabilities & BDI_CAP_MAP_DIRECT)) {
		/* attempt to share read-only copies of mapped file chunks */
		if (file && !(prot & PROT_WRITE))
			vm_flags |= VM_MAYSHARE;
	} else {
		/* overlay a shareable mapping on the backing device or inode
		 * if possible - used for chardevs, ramfs/tmpfs/shmfs and
		 * romfs/cramfs */
		if (flags & MAP_SHARED)
			vm_flags |= VM_MAYSHARE | VM_SHARED;
		else if ((((vm_flags & capabilities) ^ vm_flags) & BDI_CAP_VMFLAGS) == 0)
			vm_flags |= VM_MAYSHARE;
	}

	/* refuse to let anyone share private mappings with this process if
	 * it's being traced - otherwise breakpoints set in it may interfere
	 * with another untraced process
	 */
	if ((flags & MAP_PRIVATE) && tracehook_expect_breakpoints(current))
		vm_flags &= ~VM_MAYSHARE;

	return vm_flags;
}
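
/*
 * A worked example of the above (illustrative): a MAP_PRIVATE, PROT_READ
 * mapping of a regular file on a device without BDI_CAP_MAP_DIRECT gets
 * VM_READ | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC | VM_MAYSHARE - a
 * read-only private copy may be shared between processes - whereas a
 * PROT_WRITE request would not gain VM_MAYSHARE, so each writer gets its
 * own copy.
 */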
/*
 * set up a shared mapping on a file
 */
static int do_mmap_shared_file(struct vm_area_struct *vma, unsigned long len)
{
	int ret;

	ret = vma->vm_file->f_op->mmap(vma->vm_file, vma);
	if (ret != -ENOSYS)
		return ret;

	/* getting an ENOSYS error indicates that direct mmap isn't
	 * possible (as opposed to tried but failed) so we'll fall
	 * through to making a private copy of the data and mapping
	 * that if we can */
	return -ENODEV;
}
/*
 * set up a private mapping or an anonymous shared mapping
 */
static int do_mmap_private(struct vm_area_struct *vma, unsigned long len)
{
	void *base;
	int ret;

	/* invoke the file's mapping function so that it can keep track of
	 * shared mappings on devices or memory
	 * - VM_MAYSHARE will be set if it may attempt to share
	 */
	if (vma->vm_file) {
		ret = vma->vm_file->f_op->mmap(vma->vm_file, vma);
		if (ret != -ENOSYS) {
			/* shouldn't return success if we're not sharing */
			BUG_ON(ret == 0 && !(vma->vm_flags & VM_MAYSHARE));
			return ret; /* success or a real error */
		}

		/* getting an ENOSYS error indicates that direct mmap isn't
		 * possible (as opposed to tried but failed) so we'll try to
		 * make a private copy of the data and map that instead */
	}

	/* allocate some memory to hold the mapping
	 * - note that this may not return a page-aligned address if the object
	 *   we're allocating is smaller than a page
	 */
	base = kmalloc(len, GFP_KERNEL|__GFP_COMP);
	if (!base)
		goto enomem;

	vma->vm_start = (unsigned long) base;
	vma->vm_end = vma->vm_start + len;
	vma->vm_flags |= VM_MAPPED_COPY;
#ifdef WARN_ON_SLACK
	if (len + WARN_ON_SLACK <= kobjsize(base))
		printk("Allocation of %lu bytes from process %d has %lu bytes of slack\n",
		       len, current->pid, kobjsize(base) - len);
#endif
	if (vma->vm_file) {
		/* read the contents of a file into the copy */
		mm_segment_t old_fs;
		loff_t fpos;

		fpos = vma->vm_pgoff;
		fpos <<= PAGE_SHIFT;

		old_fs = get_fs();
		set_fs(KERNEL_DS);
		ret = vma->vm_file->f_op->read(vma->vm_file, base, len, &fpos);
		set_fs(old_fs);

		if (ret < 0)
			goto error_free;

		/* clear the last little bit */
		if (ret < len)
			memset(base + ret, 0, len - ret);

	} else {
		/* if it's an anonymous mapping, then just clear it */
		memset(base, 0, len);
	}

	return 0;

error_free:
	kfree(base);
	vma->vm_start = 0;
	return ret;

enomem:
	printk("Allocation of length %lu from process %d failed\n",
	       len, current->pid);
	show_free_areas();
	return -ENOMEM;
}
/*
 * handle mapping creation for uClinux
 */
unsigned long do_mmap_pgoff(struct file *file,
			    unsigned long addr,
			    unsigned long len,
			    unsigned long prot,
			    unsigned long flags,
			    unsigned long pgoff)
{
	struct vm_list_struct *vml = NULL;
	struct vm_area_struct *vma = NULL;
	struct rb_node *rb;
	unsigned long capabilities, vm_flags;
	void *result;
	int ret;

	if (!(flags & MAP_FIXED))
		addr = round_hint_to_min(addr);

	/* decide whether we should attempt the mapping, and if so what sort of
	 * mapping */
	ret = validate_mmap_request(file, addr, len, prot, flags, pgoff,
				    &capabilities);
	if (ret < 0)
		return ret;

	/* we've determined that we can make the mapping, now translate what we
	 * now know into VMA flags */
	vm_flags = determine_vm_flags(file, prot, flags, capabilities);

	/* we're going to need to record the mapping if it works */
	vml = kzalloc(sizeof(struct vm_list_struct), GFP_KERNEL);
	if (!vml)
		goto error_getting_vml;

	down_write(&nommu_vma_sem);
	/* if we want to share, we need to check for VMAs created by other
	 * mmap() calls that overlap with our proposed mapping
	 * - we can only share with an exact match on most regular files
	 * - shared mappings on character devices and memory-backed files are
	 *   permitted to overlap inexactly as far as we are concerned,
	 *   because in these cases sharing is handled in the driver or
	 *   filesystem rather than here
	 */
	if (vm_flags & VM_MAYSHARE) {
		unsigned long pglen = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
		unsigned long vmpglen;

		/* suppress VMA sharing for shared regions */
		if (vm_flags & VM_SHARED &&
		    capabilities & BDI_CAP_MAP_DIRECT)
			goto dont_share_VMAs;

		for (rb = rb_first(&nommu_vma_tree); rb; rb = rb_next(rb)) {
			vma = rb_entry(rb, struct vm_area_struct, vm_rb);

			if (!(vma->vm_flags & VM_MAYSHARE))
				continue;

			/* search for overlapping mappings on the same file */
			if (vma->vm_file->f_path.dentry->d_inode != file->f_path.dentry->d_inode)
				continue;

			if (vma->vm_pgoff >= pgoff + pglen)
				continue;

			vmpglen = vma->vm_end - vma->vm_start + PAGE_SIZE - 1;
			vmpglen >>= PAGE_SHIFT;
			if (pgoff >= vma->vm_pgoff + vmpglen)
				continue;

			/* handle inexactly overlapping matches between mappings */
			if (vma->vm_pgoff != pgoff || vmpglen != pglen) {
				if (!(capabilities & BDI_CAP_MAP_DIRECT))
					goto sharing_violation;
				continue;
			}

			/* we've found a VMA we can share */
			atomic_inc(&vma->vm_usage);

			vml->vma = vma;
			result = (void *) vma->vm_start;
			goto shared;
		}

	dont_share_VMAs:
		vma = NULL;

		/* obtain the address at which to make a shared mapping
		 * - this is the hook for quasi-memory character devices to
		 *   tell us the location of a shared mapping
		 */
		if (file && file->f_op->get_unmapped_area) {
			addr = file->f_op->get_unmapped_area(file, addr, len,
							     pgoff, flags);
			if (IS_ERR((void *) addr)) {
				ret = addr;
				if (ret != (unsigned long) -ENOSYS)
					goto error;

				/* the driver refused to tell us where to site
				 * the mapping so we'll have to attempt to copy
				 * it */
				ret = (unsigned long) -ENODEV;
				if (!(capabilities & BDI_CAP_MAP_COPY))
					goto error;

				capabilities &= ~BDI_CAP_MAP_DIRECT;
			}
		}
	}

	/* we're going to need a VMA struct as well */
	vma = kzalloc(sizeof(struct vm_area_struct), GFP_KERNEL);
	if (!vma)
		goto error_getting_vma;

	INIT_LIST_HEAD(&vma->anon_vma_node);
	atomic_set(&vma->vm_usage, 1);
	if (file) {
		get_file(file);
		if (vm_flags & VM_EXECUTABLE) {
			added_exe_file_vma(current->mm);
			vma->vm_mm = current->mm;
		}
	}
	vma->vm_file	= file;
	vma->vm_flags	= vm_flags;
	vma->vm_start	= addr;
	vma->vm_end	= addr + len;
	vma->vm_pgoff	= pgoff;

	vml->vma = vma;

	/* set up the mapping */
	if (file && vma->vm_flags & VM_SHARED)
		ret = do_mmap_shared_file(vma, len);
	else
		ret = do_mmap_private(vma, len);
	if (ret < 0)
		goto error;

	/* okay... we have a mapping; now we have to register it */
	result = (void *) vma->vm_start;

	if (vma->vm_flags & VM_MAPPED_COPY) {
		realalloc += kobjsize(result);
		askedalloc += len;
	}

	realalloc += kobjsize(vma);
	askedalloc += sizeof(*vma);

	current->mm->total_vm += len >> PAGE_SHIFT;

	add_nommu_vma(vma);

shared:
	realalloc += kobjsize(vml);
	askedalloc += sizeof(*vml);

	add_vma_to_mm(current->mm, vml);

	up_write(&nommu_vma_sem);

	if (prot & PROT_EXEC)
		flush_icache_range((unsigned long) result,
				   (unsigned long) result + len);

#ifdef DEBUG
	printk("do_mmap:\n");
	show_process_blocks();
#endif

	return (unsigned long) result;

error:
	up_write(&nommu_vma_sem);
	kfree(vml);
	if (vma) {
		if (vma->vm_file) {
			fput(vma->vm_file);
			if (vma->vm_flags & VM_EXECUTABLE)
				removed_exe_file_vma(vma->vm_mm);
		}
		kfree(vma);
	}
	return ret;

sharing_violation:
	up_write(&nommu_vma_sem);
	printk("Attempt to share mismatched mappings\n");
	kfree(vml);
	return -EINVAL;

error_getting_vma:
	up_write(&nommu_vma_sem);
	kfree(vml);
	printk("Allocation of vma for %lu byte allocation from process %d failed\n",
	       len, current->pid);
	show_free_areas();
	return -ENOMEM;

error_getting_vml:
	printk("Allocation of vml for %lu byte allocation from process %d failed\n",
	       len, current->pid);
	show_free_areas();
	return -ENOMEM;
}
EXPORT_SYMBOL(do_mmap_pgoff);
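
/*
 * A minimal userspace sketch of the policy above (illustrative): the
 * kernel always chooses the address, so both MAP_FIXED and non-NULL
 * hints are rejected by validate_mmap_request():
 *
 *	void *p = mmap(NULL, len, PROT_READ, MAP_PRIVATE, fd, 0);
 *	if (p == MAP_FAILED)
 *		... perhaps ENOMEM, or EINVAL for a fixed address ...
 */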
/*
 * handle mapping disposal for uClinux
 */
static void put_vma(struct mm_struct *mm, struct vm_area_struct *vma)
{
	if (vma) {
		down_write(&nommu_vma_sem);

		if (atomic_dec_and_test(&vma->vm_usage)) {
			delete_nommu_vma(vma);

			if (vma->vm_ops && vma->vm_ops->close)
				vma->vm_ops->close(vma);

			/* IO memory and memory shared directly out of the pagecache from
			 * ramfs/tmpfs mustn't be released here */
			if (vma->vm_flags & VM_MAPPED_COPY) {
				realalloc -= kobjsize((void *) vma->vm_start);
				askedalloc -= vma->vm_end - vma->vm_start;
				kfree((void *) vma->vm_start);
			}

			realalloc -= kobjsize(vma);
			askedalloc -= sizeof(*vma);

			if (vma->vm_file) {
				fput(vma->vm_file);
				if (vma->vm_flags & VM_EXECUTABLE)
					removed_exe_file_vma(mm);
			}
			kfree(vma);
		}

		up_write(&nommu_vma_sem);
	}
}
/*
 * release a mapping
 * - under NOMMU conditions the parameters must exactly match the mapping
 *   to be removed
 */
int do_munmap(struct mm_struct *mm, unsigned long addr, size_t len)
{
	struct vm_list_struct *vml, **parent;
	unsigned long end = addr + len;

#ifdef DEBUG
	printk("do_munmap:\n");
#endif

	for (parent = &mm->context.vmlist; *parent; parent = &(*parent)->next) {
		if ((*parent)->vma->vm_start > addr)
			break;
		if ((*parent)->vma->vm_start == addr &&
		    ((len == 0) || ((*parent)->vma->vm_end == end)))
			goto found;
	}

	printk("munmap of non-mmapped memory by process %d (%s): %p\n",
	       current->pid, current->comm, (void *) addr);
	return -EINVAL;

found:
	vml = *parent;

	put_vma(mm, vml->vma);

	*parent = vml->next;
	realalloc -= kobjsize(vml);
	askedalloc -= sizeof(*vml);
	kfree(vml);

	update_hiwater_vm(mm);
	mm->total_vm -= len >> PAGE_SHIFT;

#ifdef DEBUG
	show_process_blocks();
#endif

	return 0;
}
EXPORT_SYMBOL(do_munmap);
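
/*
 * A minimal sketch of the exact-match rule above (illustrative): munmap()
 * must be passed the same address and length that mmap() returned;
 * partial unmapping fails on NOMMU:
 *
 *	char *p = mmap(NULL, 8192, PROT_READ, MAP_PRIVATE, fd, 0);
 *	munmap(p + 4096, 4096);	- partial unmap, fails with EINVAL
 *	munmap(p, 8192);	- exact match, succeeds
 */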
asmlinkage long sys_munmap(unsigned long addr, size_t len)
{
	int ret;
	struct mm_struct *mm = current->mm;

	down_write(&mm->mmap_sem);
	ret = do_munmap(mm, addr, len);
	up_write(&mm->mmap_sem);
	return ret;
}

/*
 * Release all mappings
 */
void exit_mmap(struct mm_struct * mm)
{
	struct vm_list_struct *tmp;

	if (mm) {
#ifdef DEBUG
		printk("Exit_mmap:\n");
#endif
		mm->total_vm = 0;

		while ((tmp = mm->context.vmlist)) {
			mm->context.vmlist = tmp->next;
			put_vma(mm, tmp->vma);

			realalloc -= kobjsize(tmp);
			askedalloc -= sizeof(*tmp);
			kfree(tmp);
		}

#ifdef DEBUG
		show_process_blocks();
#endif
	}
}

unsigned long do_brk(unsigned long addr, unsigned long len)
{
	return -ENOMEM;
}
/*
 * expand (or shrink) an existing mapping, potentially moving it at the same
 * time (controlled by the MREMAP_MAYMOVE flag and available VM space)
 *
 * under NOMMU conditions, we only permit changing a mapping's size, and only
 * as long as it stays within the hole allocated by the kmalloc() call in
 * do_mmap_pgoff() and the block is not shareable
 *
 * MREMAP_FIXED is not supported under NOMMU conditions
 */
unsigned long do_mremap(unsigned long addr,
			unsigned long old_len, unsigned long new_len,
			unsigned long flags, unsigned long new_addr)
{
	struct vm_area_struct *vma;

	/* insanity checks first */
	if (new_len == 0)
		return (unsigned long) -EINVAL;

	if (flags & MREMAP_FIXED && new_addr != addr)
		return (unsigned long) -EINVAL;

	vma = find_vma_exact(current->mm, addr);
	if (!vma)
		return (unsigned long) -EINVAL;

	if (vma->vm_end != vma->vm_start + old_len)
		return (unsigned long) -EFAULT;

	if (vma->vm_flags & VM_MAYSHARE)
		return (unsigned long) -EPERM;

	if (new_len > kobjsize((void *) addr))
		return (unsigned long) -ENOMEM;

	/* all checks complete - do it */
	vma->vm_end = vma->vm_start + new_len;

	askedalloc -= old_len;
	askedalloc += new_len;

	return vma->vm_start;
}
EXPORT_SYMBOL(do_mremap);
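
/*
 * A minimal sketch of what remains possible from userspace (illustrative):
 * in-place resizing only, and growing succeeds only while new_len still
 * fits inside the kmalloc() block backing the mapping:
 *
 *	p = mremap(p, old_len, new_len, 0);	- no MREMAP_MAYMOVE on NOMMU
 */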
asmlinkage unsigned long sys_mremap(unsigned long addr,
				    unsigned long old_len, unsigned long new_len,
				    unsigned long flags, unsigned long new_addr)
{
	unsigned long ret;

	down_write(&current->mm->mmap_sem);
	ret = do_mremap(addr, old_len, new_len, flags, new_addr);
	up_write(&current->mm->mmap_sem);
	return ret;
}

struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
			 unsigned int foll_flags)
{
	return NULL;
}

int remap_pfn_range(struct vm_area_struct *vma, unsigned long from,
		    unsigned long to, unsigned long size, pgprot_t prot)
{
	vma->vm_start = vma->vm_pgoff << PAGE_SHIFT;
	return 0;
}
EXPORT_SYMBOL(remap_pfn_range);

int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
			unsigned long pgoff)
{
	unsigned int size = vma->vm_end - vma->vm_start;

	if (!(vma->vm_flags & VM_USERMAP))
		return -EINVAL;

	vma->vm_start = (unsigned long)(addr + (pgoff << PAGE_SHIFT));
	vma->vm_end = vma->vm_start + size;

	return 0;
}
EXPORT_SYMBOL(remap_vmalloc_range);
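
/*
 * A minimal sketch of the intended pairing (hypothetical driver code;
 * mydev_buf and mydev_mmap are illustrative names): the buffer must come
 * from vmalloc_user(), which sets VM_USERMAP on its VMA:
 *
 *	static void *mydev_buf;		- allocated with vmalloc_user()
 *
 *	static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return remap_vmalloc_range(vma, mydev_buf, 0);
 *	}
 */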
void swap_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
{
}

unsigned long arch_get_unmapped_area(struct file *file, unsigned long addr,
	unsigned long len, unsigned long pgoff, unsigned long flags)
{
	return -ENOMEM;
}

void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
{
}

void unmap_mapping_range(struct address_space *mapping,
			 loff_t const holebegin, loff_t const holelen,
			 int even_cows)
{
}
EXPORT_SYMBOL(unmap_mapping_range);

/*
 * ask for an unmapped area at which to create a mapping on a file
 */
unsigned long get_unmapped_area(struct file *file, unsigned long addr,
				unsigned long len, unsigned long pgoff,
				unsigned long flags)
{
	unsigned long (*get_area)(struct file *, unsigned long, unsigned long,
				  unsigned long, unsigned long);

	get_area = current->mm->get_unmapped_area;
	if (file && file->f_op && file->f_op->get_unmapped_area)
		get_area = file->f_op->get_unmapped_area;

	if (!get_area)
		return -ENOSYS;

	return get_area(file, addr, len, pgoff, flags);
}
EXPORT_SYMBOL(get_unmapped_area);
/*
 * Check that a process has enough memory to allocate a new virtual
 * mapping. 0 means there is enough memory for the allocation to
 * succeed and -ENOMEM implies there is not.
 *
 * We currently support three overcommit policies, which are set via the
 * vm.overcommit_memory sysctl.  See Documentation/vm/overcommit-accounting
 *
 * Strict overcommit modes added 2002 Feb 26 by Alan Cox.
 * Additional code 2002 Jul 20 by Robert Love.
 *
 * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise.
 *
 * Note this is a helper function intended to be used by LSMs which
 * wish to use this logic.
 */
int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
{
	unsigned long free, allowed;

	vm_acct_memory(pages);

	/*
	 * Sometimes we want to use more memory than we have
	 */
	if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS)
		return 0;

	if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
		unsigned long n;

		free = global_page_state(NR_FILE_PAGES);
		free += nr_swap_pages;

		/*
		 * Any slabs which are created with the
		 * SLAB_RECLAIM_ACCOUNT flag claim to have contents
		 * which are reclaimable, under pressure.  The dentry
		 * cache and most inode caches should fall into this
		 * category.
		 */
		free += global_page_state(NR_SLAB_RECLAIMABLE);

		/*
		 * Leave the last 3% for root
		 */
		if (!cap_sys_admin)
			free -= free / 32;

		if (free > pages)
			return 0;

		/*
		 * nr_free_pages() is very expensive on large systems,
		 * only call if we're about to fail.
		 */
		n = nr_free_pages();

		/*
		 * Leave reserved pages alone; they are not available
		 * for anonymous use.
		 */
		if (n <= totalreserve_pages)
			goto error;
		else
			n -= totalreserve_pages;

		/*
		 * Leave the last 3% for root
		 */
		if (!cap_sys_admin)
			n -= n / 32;
		free += n;

		if (free > pages)
			return 0;
		goto error;
	}

	allowed = totalram_pages * sysctl_overcommit_ratio / 100;
	/*
	 * Leave the last 3% for root
	 */
	if (!cap_sys_admin)
		allowed -= allowed / 32;
	allowed += total_swap_pages;

	/* Don't let a single process grow too big:
	   leave 3% of the size of this process for other processes */
	if (mm)
		allowed -= mm->total_vm / 32;

	/*
	 * cast `allowed' as a signed long because vm_committed_space
	 * sometimes has a negative value
	 */
	if (atomic_long_read(&vm_committed_space) < (long)allowed)
		return 0;
error:
	vm_unacct_memory(pages);

	return -ENOMEM;
}
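
/*
 * A worked example of the strict (OVERCOMMIT_NEVER) branch above
 * (illustrative): with 16384 pages of RAM, no swap and the default 50%
 * ratio, an unprivileged allocation gets allowed = 16384 * 50 / 100 =
 * 8192 pages, less 8192 / 32 = 256 pages kept back for root, i.e. 7936
 * pages of commit space (further reduced by mm->total_vm / 32 for the
 * caller).
 */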
int in_gate_area_no_task(unsigned long addr)
{
	return 0;
}

int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	BUG();
	return 0;
}
EXPORT_SYMBOL(filemap_fault);
/*
 * Access another process' address space.
 * - source/target buffer must be kernel space
 */
int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm;

	if (addr + len < addr)
		return 0;

	mm = get_task_mm(tsk);
	if (!mm)
		return 0;

	down_read(&mm->mmap_sem);

	/* the access must start within one of the target process's mappings */
	vma = find_vma(mm, addr);
	if (vma) {
		/* don't overrun this mapping */
		if (addr + len >= vma->vm_end)
			len = vma->vm_end - addr;

		/* only read or write mappings where it is permitted */
		if (write && vma->vm_flags & VM_MAYWRITE)
			len -= copy_to_user((void *) addr, buf, len);
		else if (!write && vma->vm_flags & VM_MAYREAD)
			len -= copy_from_user(buf, (void *) addr, len);
		else
			len = 0;
	} else {
		len = 0;
	}

	up_read(&mm->mmap_sem);
	mmput(mm);
	return len;
}
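
/*
 * A minimal usage sketch (illustrative), honouring the rule above that
 * the buffer is kernel space - roughly what a ptrace-style peek does:
 *
 *	char kbuf[64];
 *	int copied = access_process_vm(child, addr, kbuf, sizeof(kbuf), 0);
 *	if (copied < sizeof(kbuf))
 *		... the tail fell outside the child's mapping ...
 */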