/*
 *  linux/arch/s390/mm/mmap.c
 *
 *  flexible mmap layout support
 *
 *  Copyright 2003-2004 Red Hat Inc., Durham, North Carolina.
 *  All Rights Reserved.
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 *
 *  Started by Ingo Molnar <mingo@elte.hu>
 */

#include <linux/personality.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <asm/pgalloc.h>
#include <asm/compat.h>

/*
 * Top of mmap area (just below the process stack).
 *
 * Leave at least a ~128 MB hole.
 */
#define MIN_GAP (128*1024*1024)
#define MAX_GAP (STACK_TOP/6*5)
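
/*
 * For example: with the common 8 MB stack rlimit the gap is raised to
 * MIN_GAP, so the mmap area starts 128 MB below STACK_TOP; a stack
 * rlimit larger than 5/6 of STACK_TOP is capped at MAX_GAP.
 */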

static inline unsigned long mmap_base(void)
{
	unsigned long gap = current->signal->rlim[RLIMIT_STACK].rlim_cur;

	if (gap < MIN_GAP)
		gap = MIN_GAP;
	else if (gap > MAX_GAP)
		gap = MAX_GAP;

	return STACK_TOP - (gap & PAGE_MASK);
}
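
/*
 * Use the legacy (bottom-up) layout for 64 bit programs, when the
 * legacy_va_layout sysctl is set, when the ADDR_COMPAT_LAYOUT
 * personality bit is set, or when the stack rlimit is unlimited.
 */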
static inline int mmap_is_legacy(void)
{
#ifdef CONFIG_64BIT
	/*
	 * Force standard allocation for 64 bit programs.
	 */
	if (!is_compat_task())
		return 1;
#endif
	return sysctl_legacy_va_layout ||
	    (current->personality & ADDR_COMPAT_LAYOUT) ||
	    current->signal->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY;
}
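
/*
 * On 31 bit kernels the generic allocators are used directly. On
 * 64 bit the wrappers below add a dynamic page table upgrade when a
 * mapping does not fit into the current address space limit.
 */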
#ifndef CONFIG_64BIT

/*
 * This function, called very early during the creation of a new
 * process VM image, sets up which VM layout function to use:
 */
void arch_pick_mmap_layout(struct mm_struct *mm)
{
	/*
	 * Fall back to the standard layout if the personality
	 * bit is set, or if the expected stack growth is unlimited:
	 */
	if (mmap_is_legacy()) {
		mm->mmap_base = TASK_UNMAPPED_BASE;
		mm->get_unmapped_area = arch_get_unmapped_area;
		mm->unmap_area = arch_unmap_area;
	} else {
		mm->mmap_base = mmap_base();
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
		mm->unmap_area = arch_unmap_area_topdown;
	}
}
EXPORT_SYMBOL_GPL(arch_pick_mmap_layout);

#else
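
/*
 * Called from the mmap() path: if a 64 bit task asks for a mapping
 * that does not fit below the current TASK_SIZE, upgrade the page
 * table to 4 levels (limit 1UL << 53) before the mapping is set up.
 */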
int s390_mmap_check(unsigned long addr, unsigned long len)
{
	if (!is_compat_task() &&
	    len >= TASK_SIZE && TASK_SIZE < (1UL << 53))
		return crst_table_upgrade(current->mm, 1UL << 53);
	return 0;
}
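
/*
 * Bottom-up variant of get_unmapped_area: try the generic search
 * first; if it fails with -ENOMEM while the address space is still
 * below the 4 level limit, upgrade the page table and retry once.
 */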
static unsigned long
s390_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	unsigned long area;
	int rc;

	area = arch_get_unmapped_area(filp, addr, len, pgoff, flags);
	if (!(area & ~PAGE_MASK))
		return area;
	if (area == -ENOMEM && !is_compat_task() && TASK_SIZE < (1UL << 53)) {
		/* Upgrade the page table to 4 levels and retry. */
		rc = crst_table_upgrade(mm, 1UL << 53);
		if (rc)
			return (unsigned long) rc;
		area = arch_get_unmapped_area(filp, addr, len, pgoff, flags);
	}
	return area;
}
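
/*
 * Top-down variant: same page table upgrade and retry logic as
 * s390_get_unmapped_area, using the generic top-down search.
 */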
static unsigned long
s390_get_unmapped_area_topdown(struct file *filp, const unsigned long addr,
			  const unsigned long len, const unsigned long pgoff,
			  const unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	unsigned long area;
	int rc;

	area = arch_get_unmapped_area_topdown(filp, addr, len, pgoff, flags);
	if (!(area & ~PAGE_MASK))
		return area;
	if (area == -ENOMEM && !is_compat_task() && TASK_SIZE < (1UL << 53)) {
		/* Upgrade the page table to 4 levels and retry. */
		rc = crst_table_upgrade(mm, 1UL << 53);
		if (rc)
			return (unsigned long) rc;
		area = arch_get_unmapped_area_topdown(filp, addr, len,
						      pgoff, flags);
	}
	return area;
}

/*
 * This function, called very early during the creation of a new
 * process VM image, sets up which VM layout function to use:
 */
void arch_pick_mmap_layout(struct mm_struct *mm)
{
	/*
	 * Fall back to the standard layout if the personality
	 * bit is set, or if the expected stack growth is unlimited:
	 */
	if (mmap_is_legacy()) {
		mm->mmap_base = TASK_UNMAPPED_BASE;
		mm->get_unmapped_area = s390_get_unmapped_area;
		mm->unmap_area = arch_unmap_area;
	} else {
		mm->mmap_base = mmap_base();
		mm->get_unmapped_area = s390_get_unmapped_area_topdown;
		mm->unmap_area = arch_unmap_area_topdown;
	}
}
EXPORT_SYMBOL_GPL(arch_pick_mmap_layout);

#endif