/*
 *  linux/arch/arm/mm/mmap.c
 */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/shm.h>
#include <linux/sched.h>
#include <linux/io.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <asm/cachetype.h>

static inline unsigned long COLOUR_ALIGN_DOWN(unsigned long addr,
                                              unsigned long pgoff)
{
        unsigned long base = addr & ~(SHMLBA-1);
        unsigned long off = (pgoff << PAGE_SHIFT) & (SHMLBA-1);

        if (base + off <= addr)
                return base + off;
        return base - off;
}

#define COLOUR_ALIGN(addr,pgoff)                \
        ((((addr)+SHMLBA-1)&~(SHMLBA-1)) +      \
         (((pgoff)<<PAGE_SHIFT) & (SHMLBA-1)))
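
/*
 * Worked example of the colour alignment above, assuming 4K pages and
 * the usual ARM SHMLBA of 4 * PAGE_SIZE (16KB): for addr = 0x40001000
 * and pgoff = 3, COLOUR_ALIGN() rounds the address up to the next 16KB
 * boundary (0x40004000) and adds the page's offset within the colour
 * period ((3 << 12) & 0x3fff = 0x3000), giving 0x40007000.  Every
 * mapping of page 3 of that object then shares the same cache colour.
 * COLOUR_ALIGN_DOWN() applies the same rule but rounds towards lower
 * addresses, which is what the top-down allocator needs.
 */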

/* gap between mmap and stack */
#define MIN_GAP (128*1024*1024UL)
#define MAX_GAP ((TASK_SIZE)/6*5)

static int mmap_is_legacy(void)
{
        if (current->personality & ADDR_COMPAT_LAYOUT)
                return 1;

        if (rlimit(RLIMIT_STACK) == RLIM_INFINITY)
                return 1;

        return sysctl_legacy_va_layout;
}

static unsigned long mmap_base(unsigned long rnd)
{
        unsigned long gap = rlimit(RLIMIT_STACK);

        if (gap < MIN_GAP)
                gap = MIN_GAP;
        else if (gap > MAX_GAP)
                gap = MAX_GAP;

        return PAGE_ALIGN(TASK_SIZE - gap - rnd);
}
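
/*
 * Rough numbers for mmap_base(), assuming a 3G/1G split where TASK_SIZE
 * is 0xbf000000: the common 8MB RLIMIT_STACK is clamped up to MIN_GAP
 * (128MB), so the top-down base lands at about 0xb7000000 minus the
 * randomisation offset, page aligned.  A very large (but finite) stack
 * limit is instead clamped down to MAX_GAP, i.e. 5/6 of TASK_SIZE.
 */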

/*
 * We need to ensure that shared mappings are correctly aligned to
 * avoid aliasing issues with VIPT caches.  We need to ensure that
 * a specific page of an object is always mapped at a multiple of
 * SHMLBA bytes.
 *
 * We unconditionally provide this function for all cases, however
 * in the VIVT case, we optimise out the alignment rules.
 */
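
/*
 * The search below is a bottom-up first fit: start from the cached
 * free-area hint when the request is larger than any hole we have
 * already skipped (otherwise rewind to mm->mmap_base and forget the
 * cached hole size), walk the VMA list upwards looking for a gap of
 * at least 'len' bytes, colour-aligning each candidate when do_align
 * is set, and restart once from TASK_UNMAPPED_BASE before failing
 * with -ENOMEM.
 */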
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
                unsigned long len, unsigned long pgoff, unsigned long flags)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long start_addr;
        int do_align = 0;
        int aliasing = cache_is_vipt_aliasing();

        /*
         * We only need to do colour alignment if either the I or D
         * caches alias.
         */
        if (aliasing)
                do_align = filp || (flags & MAP_SHARED);

        /*
         * We enforce the MAP_FIXED case.
         */
        if (flags & MAP_FIXED) {
                if (aliasing && flags & MAP_SHARED &&
                    (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
                        return -EINVAL;
                return addr;
        }

        if (len > TASK_SIZE)
                return -ENOMEM;

        if (addr) {
                if (do_align)
                        addr = COLOUR_ALIGN(addr, pgoff);
                else
                        addr = PAGE_ALIGN(addr);

                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
                    (!vma || addr + len <= vma->vm_start))
                        return addr;
        }

        if (len > mm->cached_hole_size) {
                start_addr = addr = mm->free_area_cache;
        } else {
                start_addr = addr = mm->mmap_base;
                mm->cached_hole_size = 0;
        }

full_search:
        if (do_align)
                addr = COLOUR_ALIGN(addr, pgoff);
        else
                addr = PAGE_ALIGN(addr);

        for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
                /* At this point: (!vma || addr < vma->vm_end). */
                if (TASK_SIZE - len < addr) {
                        /*
                         * Start a new search - just in case we missed
                         * some holes.
                         */
                        if (start_addr != TASK_UNMAPPED_BASE) {
                                start_addr = addr = TASK_UNMAPPED_BASE;
                                mm->cached_hole_size = 0;
                                goto full_search;
                        }
                        return -ENOMEM;
                }
                if (!vma || addr + len <= vma->vm_start) {
                        /*
                         * Remember the place where we stopped the search:
                         */
                        mm->free_area_cache = addr + len;
                        return addr;
                }
                if (addr + mm->cached_hole_size < vma->vm_start)
                        mm->cached_hole_size = vma->vm_start - addr;
                addr = vma->vm_end;
                if (do_align)
                        addr = COLOUR_ALIGN(addr, pgoff);
        }
}
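
/*
 * Top-down variant used by the flexible layout chosen in
 * arch_pick_mmap_layout(): it searches downwards, first from the cached
 * free-area hint and then from just below mm->mmap_base, applies the
 * same colour-alignment rules via COLOUR_ALIGN_DOWN(), and falls back
 * to the bottom-up allocator above if the downward search finds no room.
 */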
unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
                        const unsigned long len, const unsigned long pgoff,
                        const unsigned long flags)
{
        struct vm_area_struct *vma;
        struct mm_struct *mm = current->mm;
        unsigned long addr = addr0;
        int do_align = 0;
        int aliasing = cache_is_vipt_aliasing();

        /*
         * We only need to do colour alignment if either the I or D
         * caches alias.
         */
        if (aliasing)
                do_align = filp || (flags & MAP_SHARED);

        /* requested length too big for entire address space */
        if (len > TASK_SIZE)
                return -ENOMEM;

        if (flags & MAP_FIXED) {
                if (aliasing && flags & MAP_SHARED &&
                    (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
                        return -EINVAL;
                return addr;
        }

        /* requesting a specific address */
        if (addr) {
                if (do_align)
                        addr = COLOUR_ALIGN(addr, pgoff);
                else
                        addr = PAGE_ALIGN(addr);
                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
                    (!vma || addr + len <= vma->vm_start))
                        return addr;
        }

        /* check if free_area_cache is useful for us */
        if (len <= mm->cached_hole_size) {
                mm->cached_hole_size = 0;
                mm->free_area_cache = mm->mmap_base;
        }

        /* either no address requested or can't fit in requested address hole */
        addr = mm->free_area_cache;
        if (do_align) {
                unsigned long base = COLOUR_ALIGN_DOWN(addr - len, pgoff);
                addr = base + len;
        }

        /* make sure it can fit in the remaining address space */
        if (addr > len) {
                vma = find_vma(mm, addr-len);
                if (!vma || addr <= vma->vm_start)
                        /* remember the address as a hint for next time */
                        return (mm->free_area_cache = addr-len);
        }

        if (mm->mmap_base < len)
                goto bottomup;

        addr = mm->mmap_base - len;
        if (do_align)
                addr = COLOUR_ALIGN_DOWN(addr, pgoff);

        do {
                /*
                 * Lookup failure means no vma is above this address,
                 * else if new region fits below vma->vm_start,
                 * return with success:
                 */
                vma = find_vma(mm, addr);
                if (!vma || addr+len <= vma->vm_start)
                        /* remember the address as a hint for next time */
                        return (mm->free_area_cache = addr);

                /* remember the largest hole we saw so far */
                if (addr + mm->cached_hole_size < vma->vm_start)
                        mm->cached_hole_size = vma->vm_start - addr;

                /* try just below the current vma->vm_start */
                addr = vma->vm_start - len;
                if (do_align)
                        addr = COLOUR_ALIGN_DOWN(addr, pgoff);
        } while (len < vma->vm_start);

bottomup:
        /*
         * A failed mmap() very likely causes application failure,
         * so fall back to the bottom-up function here. This scenario
         * can happen with large stack limits and large mmap()
         * allocations.
         */
        mm->cached_hole_size = ~0UL;
        mm->free_area_cache = TASK_UNMAPPED_BASE;
        addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);

        /*
         * Restore the topdown base:
         */
        mm->free_area_cache = mm->mmap_base;
        mm->cached_hole_size = ~0UL;

        return addr;
}
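
/*
 * Layout selection: the legacy (bottom-up) layout keeps the base at
 * TASK_UNMAPPED_BASE, while the flexible layout places it just below
 * the stack gap via mmap_base().  The randomisation draws 8 bits and
 * shifts them by PAGE_SHIFT, so with 4K pages random_factor ranges
 * over 0..255 pages, roughly 1MB of jitter applied to the mmap base.
 */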
void arch_pick_mmap_layout(struct mm_struct *mm)
{
        unsigned long random_factor = 0UL;

        /* 8 bits of randomness in 20 address space bits */
        if ((current->flags & PF_RANDOMIZE) &&
            !(current->personality & ADDR_NO_RANDOMIZE))
                random_factor = (get_random_int() % (1 << 8)) << PAGE_SHIFT;

        if (mmap_is_legacy()) {
                mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
                mm->get_unmapped_area = arch_get_unmapped_area;
                mm->unmap_area = arch_unmap_area;
        } else {
                mm->mmap_base = mmap_base(random_factor);
                mm->get_unmapped_area = arch_get_unmapped_area_topdown;
                mm->unmap_area = arch_unmap_area_topdown;
        }
}

/*
 * You really shouldn't be using read() or write() on /dev/mem.  This
 * might go away in the future.
 */
int valid_phys_addr_range(unsigned long addr, size_t size)
{
        if (addr < PHYS_OFFSET)
                return 0;
        if (addr + size > __pa(high_memory - 1) + 1)
                return 0;

        return 1;
}

/*
 * We don't use supersection mappings for mmap() on /dev/mem, which
 * means that we can't map the memory area above the 4G barrier into
 * userspace.
 */
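
/*
 * With 4K pages (PAGE_SHIFT == 12) a pfn of 0x00100000 is exactly the
 * 4GB boundary, so the check below rejects any mapping that would
 * extend beyond 4GB of physical address space.
 */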
int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
        return !(pfn + (size >> PAGE_SHIFT) > 0x00100000);
}

#ifdef CONFIG_STRICT_DEVMEM

#include <linux/ioport.h>

/*
 * devmem_is_allowed() checks to see if /dev/mem access to a certain
 * address is valid. The argument is a physical page number.
 * We mimic x86 here by disallowing access to system RAM as well as
 * device-exclusive MMIO regions. This effectively disables read()/write()
 * on /dev/mem.
 */
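
/*
 * For example (hypothetical pfns): a page of system RAM is refused,
 * while a page in a device MMIO window that no driver has claimed
 * exclusively is allowed.
 */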
int devmem_is_allowed(unsigned long pfn)
{
        if (iomem_is_exclusive(pfn << PAGE_SHIFT))
                return 0;
        if (!page_is_ram(pfn))
                return 1;
        return 0;
}

#endif