mmap.c

/*
 *  linux/arch/arm/mm/mmap.c
 */
#include <linux/config.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/shm.h>

#include <asm/system.h>

#define COLOUR_ALIGN(addr,pgoff)                \
        ((((addr)+SHMLBA-1)&~(SHMLBA-1)) +      \
         (((pgoff)<<PAGE_SHIFT) & (SHMLBA-1)))
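
/*
 * Worked example (assuming SHMLBA == 16K and PAGE_SHIFT == 12,
 * i.e. four page colours): COLOUR_ALIGN(0x40005000, 3) rounds the
 * address up to the next SHMLBA boundary, 0x40008000, then adds the
 * colour of the requested page, (3 << 12) & 0x3fff == 0x3000, giving
 * 0x4000b000.  Every page whose pgoff has that colour (pgoff % 4 == 3)
 * thus lands at the same offset within a 16K window.
 */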

/*
 * We need to ensure that shared mappings are correctly aligned to
 * avoid aliasing issues with VIPT caches: a specific page of an
 * object must always be mapped at the same offset within an
 * SHMLBA-sized window, so that all virtual aliases of a physical
 * page fall into the same cache colour.
 *
 * We provide this function unconditionally for all cases; in the
 * VIVT case, the alignment rules are optimised out.
 */
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
                unsigned long len, unsigned long pgoff, unsigned long flags)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long start_addr;
#ifdef CONFIG_CPU_V6
        unsigned int cache_type;
        int do_align = 0, aliasing = 0;

        /*
         * We only need to do colour alignment if either the I or D
         * caches alias.  This is indicated by the P bits of the cache
         * type register: bit 11 for the I cache and bit 23 for the
         * D cache.
         */
        cache_type = read_cpuid(CPUID_CACHETYPE);
        if (cache_type != read_cpuid(CPUID_ID)) {
                aliasing = (cache_type | cache_type >> 12) & (1 << 11);
                if (aliasing)
                        do_align = filp || flags & MAP_SHARED;
        }
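        /*
         * Note: cache_type >> 12 slides the Dsize field (CTR bits
         * 23:12) down over the Isize field (bits 11:0), so the single
         * (1 << 11) test above covers the P bit of both caches.
         */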
#else
#define do_align        0
#define aliasing        0
#endif

        /*
         * We should enforce the MAP_FIXED case.  However, currently
         * the generic kernel code doesn't allow us to handle this.
         */
        if (flags & MAP_FIXED) {
                if (aliasing && flags & MAP_SHARED && addr & (SHMLBA - 1))
                        return -EINVAL;
                return addr;
        }
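        /*
         * Example (assuming SHMLBA == 16K): a MAP_FIXED|MAP_SHARED
         * request at 0x40001000 is rejected on an aliasing cache,
         * since 0x40001000 & 0x3fff != 0, i.e. the caller asked for
         * an address the kernel cannot recolour.
         */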

        if (len > TASK_SIZE)
                return -ENOMEM;

        if (addr) {
                if (do_align)
                        addr = COLOUR_ALIGN(addr, pgoff);
                else
                        addr = PAGE_ALIGN(addr);

                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
                    (!vma || addr + len <= vma->vm_start))
                        return addr;
        }
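        /*
         * Note: the hint above is validated as TASK_SIZE - len >= addr
         * rather than addr + len <= TASK_SIZE, so a huge addr cannot
         * wrap around and slip past the check.
         */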
        if (len > mm->cached_hole_size) {
                start_addr = addr = mm->free_area_cache;
        } else {
                start_addr = addr = TASK_UNMAPPED_BASE;
                mm->cached_hole_size = 0;
        }
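        /*
         * The heuristic above: free_area_cache remembers where the
         * previous search ended, cached_hole_size the largest hole
         * known below it.  A request small enough to fit in that hole
         * restarts from TASK_UNMAPPED_BASE; anything larger resumes
         * from the cache.
         */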

full_search:
        if (do_align)
                addr = COLOUR_ALIGN(addr, pgoff);
        else
                addr = PAGE_ALIGN(addr);

        for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
                /* At this point:  (!vma || addr < vma->vm_end). */
                if (TASK_SIZE - len < addr) {
                        /*
                         * Start a new search - just in case we missed
                         * some holes.
                         */
                        if (start_addr != TASK_UNMAPPED_BASE) {
                                start_addr = addr = TASK_UNMAPPED_BASE;
                                mm->cached_hole_size = 0;
                                goto full_search;
                        }
                        return -ENOMEM;
                }
                if (!vma || addr + len <= vma->vm_start) {
                        /*
                         * Remember the place where we stopped the search:
                         */
                        mm->free_area_cache = addr + len;
                        return addr;
                }
                if (addr + mm->cached_hole_size < vma->vm_start)
                        mm->cached_hole_size = vma->vm_start - addr;
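                /* Step past this vma, restoring the required colour
                 * before the next probe. */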
                addr = vma->vm_end;
                if (do_align)
                        addr = COLOUR_ALIGN(addr, pgoff);
        }
}
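
For context, a minimal user-space sketch of the guarantee this function provides (not part of the kernel file above; the program and the path /tmp/colour_demo are illustrative): two MAP_SHARED mappings of the same file offset receive addresses with the same SHMLBA colour, i.e. their offsets within an SHMLBA window agree. On hardware without aliasing caches SHMLBA equals the page size, so the check passes trivially.

/* colour_demo.c - hypothetical demo, not part of the kernel source. */
#include <stdio.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/mman.h>
#include <sys/shm.h>            /* SHMLBA */

int main(void)
{
        int fd = open("/tmp/colour_demo", O_RDWR | O_CREAT, 0600);

        if (fd < 0 || ftruncate(fd, SHMLBA) < 0) {
                perror("setup");
                return 1;
        }

        /* Map the same file offset twice, letting the kernel choose
         * both addresses via arch_get_unmapped_area(). */
        void *a = mmap(NULL, SHMLBA, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        void *b = mmap(NULL, SHMLBA, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

        if (a == MAP_FAILED || b == MAP_FAILED) {
                perror("mmap");
                return 1;
        }

        /* On an aliasing VIPT cache both mappings share a colour: the
         * low bits below SHMLBA are equal. */
        printf("a=%p b=%p colour match: %s\n", a, b,
               ((unsigned long)a & (SHMLBA - 1)) ==
               ((unsigned long)b & (SHMLBA - 1)) ? "yes" : "no");
        return 0;
}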