fremap.c

/*
 * linux/mm/fremap.c
 *
 * Explicit pagetable population and nonlinear (random) mappings support.
 *
 * started by Ingo Molnar, Copyright (C) 2002, 2003
 */

#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/file.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/swapops.h>
#include <linux/rmap.h>
#include <linux/module.h>
#include <linux/syscalls.h>

#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
/*
 * Tear down whatever is currently mapped at @addr, with the page table
 * lock already held by the caller. Returns 1 if a real (normal) page
 * was unmapped, 0 otherwise.
 */
static int zap_pte(struct mm_struct *mm, struct vm_area_struct *vma,
                        unsigned long addr, pte_t *ptep)
{
        pte_t pte = *ptep;
        struct page *page = NULL;

        if (pte_present(pte)) {
                flush_cache_page(vma, addr, pte_pfn(pte));
                pte = ptep_clear_flush(vma, addr, ptep);
                page = vm_normal_page(vma, addr, pte);
                if (page) {
                        if (pte_dirty(pte))
                                set_page_dirty(page);
                        page_remove_rmap(page);
                        page_cache_release(page);
                }
        } else {
                if (!pte_file(pte))
                        free_swap_and_cache(pte_to_swp_entry(pte));
                pte_clear(mm, addr, ptep);
        }
        return !!page;
}
/*
 * Install a file page to a given virtual memory address, release any
 * previously existing mapping.
 */
int install_page(struct mm_struct *mm, struct vm_area_struct *vma,
                unsigned long addr, struct page *page, pgprot_t prot)
{
        struct inode *inode;
        pgoff_t size;
        int err = -ENOMEM;
        pte_t *pte;
        pte_t pte_val;
        spinlock_t *ptl;

        pte = get_locked_pte(mm, addr, &ptl);
        if (!pte)
                goto out;

        /*
         * This page may have been truncated. Tell the
         * caller about it.
         */
        err = -EINVAL;
        inode = vma->vm_file->f_mapping->host;
        size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
        if (!page->mapping || page->index >= size)
                goto unlock;
        err = -ENOMEM;
        if (page_mapcount(page) > INT_MAX/2)
                goto unlock;

        if (pte_none(*pte) || !zap_pte(mm, vma, addr, pte))
                inc_mm_counter(mm, file_rss);

        flush_icache_page(vma, page);
        pte_val = mk_pte(page, prot);
        set_pte_at(mm, addr, pte, pte_val);
        page_add_file_rmap(page);
        update_mmu_cache(vma, addr, pte_val);
        lazy_mmu_prot_update(pte_val);
        err = 0;
unlock:
        pte_unmap_unlock(pte, ptl);
out:
        return err;
}
EXPORT_SYMBOL(install_page);
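
/*
 * Illustrative sketch, NOT part of this file: how a driver's ->populate
 * method might combine install_page() and install_file_pte(). It is
 * loosely modeled on filemap_populate() in mm/filemap.c of this era;
 * the name example_populate and the simplified lookup via find_get_page()
 * (which skips the readahead and uptodate handling the real code does)
 * are assumptions for illustration only.
 */
static int example_populate(struct vm_area_struct *vma, unsigned long addr,
                unsigned long len, pgprot_t prot, unsigned long pgoff,
                int nonblock)
{
        struct mm_struct *mm = vma->vm_mm;
        struct address_space *mapping = vma->vm_file->f_mapping;
        struct page *page;
        int err;

        while (len) {
                page = find_get_page(mapping, pgoff);
                if (page) {
                        /* Page is resident: wire it straight into the
                         * page table; on success install_page() keeps
                         * the page reference for the new mapping. */
                        err = install_page(mm, vma, addr, page, prot);
                        if (err) {
                                page_cache_release(page);
                                return err;
                        }
                } else if (nonblock && (vma->vm_flags & VM_NONLINEAR)) {
                        /* Not resident and no I/O allowed: leave a file
                         * pte recording the offset so the fault path can
                         * bring the page in later. */
                        err = install_file_pte(mm, vma, addr, pgoff, prot);
                        if (err)
                                return err;
                } else {
                        return -ENOMEM; /* real code reads the page in here */
                }
                len -= PAGE_SIZE;
                addr += PAGE_SIZE;
                pgoff++;
        }
        return 0;
}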
/*
 * Install a file pte to a given virtual memory address, release any
 * previously existing mapping.
 */
int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma,
                unsigned long addr, unsigned long pgoff, pgprot_t prot)
{
        int err = -ENOMEM;
        pte_t *pte;
        pte_t pte_val;
        spinlock_t *ptl;

        pte = get_locked_pte(mm, addr, &ptl);
        if (!pte)
                goto out;

        if (!pte_none(*pte) && zap_pte(mm, vma, addr, pte)) {
                update_hiwater_rss(mm);
                dec_mm_counter(mm, file_rss);
        }

        set_pte_at(mm, addr, pte, pgoff_to_pte(pgoff));
        pte_val = *pte;
        /*
         * We don't need to run update_mmu_cache() here because the "file pte"
         * being installed by install_file_pte() is not a real pte - it's a
         * non-present entry (like a swap entry), noting what file offset should
         * be mapped there when there's a fault (in a non-linear vma where
         * that's not obvious).
         */
        pte_unmap_unlock(pte, ptl);
        err = 0;
out:
        return err;
}
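
/*
 * For illustration (the real consumer lives in mm/memory.c, not here):
 * when a fault later hits one of these file ptes in a nonlinear vma, the
 * fault path recovers the stored offset and asks ->populate to map the
 * real page, roughly:
 *
 *      pgoff = pte_to_pgoff(orig_pte);
 *      err = vma->vm_ops->populate(vma, address & PAGE_MASK, PAGE_SIZE,
 *                                      vma->vm_page_prot, pgoff, 0);
 *
 * pte_file() is what distinguishes these entries from swap entries in
 * zap_pte() above.
 */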
/***
 * sys_remap_file_pages - remap arbitrary pages of a shared backing store
 *                        file within an existing vma.
 * @start: start of the remapped virtual memory range
 * @size: size of the remapped virtual memory range
 * @prot: new protection bits of the range
 * @pgoff: to be mapped page of the backing store file
 * @flags: 0 or MAP_NONBLOCK - the latter will cause no IO.
 *
 * This syscall works purely via pagetables, so it's the most efficient
 * way to map the same (large) file into a given virtual window. Unlike
 * mmap()/mremap() it does not create any new vmas. The new mappings are
 * also safe across swapout.
 *
 * NOTE: the 'prot' parameter right now is ignored, and the vma's default
 * protection is used. Arbitrary protections might be implemented in the
 * future.
 */
asmlinkage long sys_remap_file_pages(unsigned long start, unsigned long size,
        unsigned long __prot, unsigned long pgoff, unsigned long flags)
{
        struct mm_struct *mm = current->mm;
        struct address_space *mapping;
        unsigned long end = start + size;
        struct vm_area_struct *vma;
        int err = -EINVAL;
        int has_write_lock = 0;

        if (__prot)
                return err;
        /*
         * Sanitize the syscall parameters:
         */
        start = start & PAGE_MASK;
        size = size & PAGE_MASK;

        /* Does the address range wrap, or is the span zero-sized? */
        if (start + size <= start)
                return err;

        /* Can we represent this offset inside this architecture's pte's? */
#if PTE_FILE_MAX_BITS < BITS_PER_LONG
        if (pgoff + (size >> PAGE_SHIFT) >= (1UL << PTE_FILE_MAX_BITS))
                return err;
#endif

        /* We need down_write() to change vma->vm_flags. */
        down_read(&mm->mmap_sem);
retry:
        vma = find_vma(mm, start);

        /*
         * Make sure the vma is shared, that it supports prefaulting,
         * and that the remapped range is valid and fully within
         * the single existing vma. vm_private_data is used as a
         * swapout cursor in a VM_NONLINEAR vma.
         */
        if (vma && (vma->vm_flags & VM_SHARED) &&
                (!vma->vm_private_data || (vma->vm_flags & VM_NONLINEAR)) &&
                vma->vm_ops && vma->vm_ops->populate &&
                        end > start && start >= vma->vm_start &&
                        end <= vma->vm_end) {

                /* Must set VM_NONLINEAR before any pages are populated. */
                if (pgoff != linear_page_index(vma, start) &&
                        !(vma->vm_flags & VM_NONLINEAR)) {
                        if (!has_write_lock) {
                                up_read(&mm->mmap_sem);
                                down_write(&mm->mmap_sem);
                                has_write_lock = 1;
                                goto retry;
                        }
                        mapping = vma->vm_file->f_mapping;
                        spin_lock(&mapping->i_mmap_lock);
                        flush_dcache_mmap_lock(mapping);
                        vma->vm_flags |= VM_NONLINEAR;
                        vma_prio_tree_remove(vma, &mapping->i_mmap);
                        vma_nonlinear_insert(vma, &mapping->i_mmap_nonlinear);
                        flush_dcache_mmap_unlock(mapping);
                        spin_unlock(&mapping->i_mmap_lock);
                }

                err = vma->vm_ops->populate(vma, start, size,
                                            vma->vm_page_prot,
                                            pgoff, flags & MAP_NONBLOCK);

                /*
                 * We can't clear VM_NONLINEAR because we'd have to do
                 * it after ->populate completes, and that would prevent
                 * downgrading the lock. (Locks can't be upgraded).
                 */
        }
        if (likely(!has_write_lock))
                up_read(&mm->mmap_sem);
        else
                up_write(&mm->mmap_sem);

        return err;
}
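
/*
 * Userspace view, for illustration only (not part of this file): the
 * syscall rearranges pages of an existing MAP_SHARED mapping in place,
 * without creating new vmas. A minimal, error-handling-free sketch that
 * makes the first page of a four-page window show page 10 of the file
 * (fd and page_size assumed set up elsewhere):
 *
 *      char *win = mmap(NULL, 4 * page_size, PROT_READ | PROT_WRITE,
 *                       MAP_SHARED, fd, 0);
 *      remap_file_pages(win, page_size, 0, 10, 0);
 *
 * Note that prot must be passed as 0, matching the __prot check at the
 * top of sys_remap_file_pages() above.
 */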