gup.c

/*
 *  Lockless get_user_pages_fast for s390
 *
 *  Copyright IBM Corp. 2010
 *  Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/vmstat.h>
#include <linux/pagemap.h>
#include <linux/rwsem.h>
#include <asm/pgtable.h>

/*
 * The performance critical leaf functions are made noinline otherwise gcc
 * inlines everything into a single function which results in too much
 * register pressure.
 */
static inline int gup_pte_range(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	unsigned long mask;
	pte_t *ptep, pte;
	struct page *page;

	mask = (write ? _PAGE_RO : 0) | _PAGE_INVALID | _PAGE_SPECIAL;

	ptep = ((pte_t *) pmd_deref(pmd)) + pte_index(addr);
	do {
		pte = *ptep;
		barrier();
		if ((pte_val(pte) & mask) != 0)
			return 0;
		VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
		page = pte_page(pte);
		if (!page_cache_get_speculative(page))
			return 0;
		/* Recheck the pte: if it changed under us, drop the reference. */
		if (unlikely(pte_val(pte) != pte_val(*ptep))) {
			put_page(page);
			return 0;
		}
		pages[*nr] = page;
		(*nr)++;
	} while (ptep++, addr += PAGE_SIZE, addr != end);

	return 1;
}

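/*
 * In this kernel generation, tail pages of a compound page do not carry
 * their own refcount in page->_count (it stays zero, as the VM_BUG_ON
 * below asserts); the head page holds the real reference count, and
 * extra pins on a tail page are tracked in page->_mapcount instead.
 */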
static inline void get_huge_page_tail(struct page *page)
{
	/*
	 * __split_huge_page_refcount() cannot run
	 * from under us.
	 */
	VM_BUG_ON(page_mapcount(page) < 0);
	VM_BUG_ON(atomic_read(&page->_count) != 0);
	atomic_inc(&page->_mapcount);
}

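/*
 * Fast path for a huge pmd (a large segment-table entry on s390): record
 * every covered page first, take all references on the head page in a
 * single page_cache_add_speculative() call, then recheck that the pmd
 * has not changed underneath us before handing out the tail references.
 */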
static inline int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	unsigned long mask, result;
	struct page *head, *page, *tail;
	int refs;

	result = write ? 0 : _SEGMENT_ENTRY_RO;
	mask = result | _SEGMENT_ENTRY_INV;
	if ((pmd_val(pmd) & mask) != result)
		return 0;
	VM_BUG_ON(!pfn_valid(pmd_val(pmd) >> PAGE_SHIFT));

	refs = 0;
	head = pmd_page(pmd);
	page = head + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
	tail = page;
	do {
		VM_BUG_ON(compound_head(page) != head);
		pages[*nr] = page;
		(*nr)++;
		page++;
		refs++;
	} while (addr += PAGE_SIZE, addr != end);

	if (!page_cache_add_speculative(head, refs)) {
		*nr -= refs;
		return 0;
	}

	if (unlikely(pmd_val(pmd) != pmd_val(*pmdp))) {
		*nr -= refs;
		while (refs--)
			put_page(head);
		return 0;
	}

	/*
	 * Any tail pages need their mapcount reference taken before we
	 * return.
	 */
	while (refs--) {
		if (PageTail(tail))
			get_huge_page_tail(tail);
		tail++;
	}

	return 1;
}

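/*
 * s390 folds unused upper page table levels for small address spaces.
 * If the pud entry is a region-third-table entry it points to a separate
 * segment (pmd) table; otherwise the pmd level is folded into the pud
 * and the same entry is reinterpreted as a pmd.
 */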
static inline int gup_pmd_range(pud_t *pudp, pud_t pud, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	unsigned long next;
	pmd_t *pmdp, pmd;

	pmdp = (pmd_t *) pudp;
#ifdef CONFIG_64BIT
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pmdp = (pmd_t *) pud_deref(pud);
	pmdp += pmd_index(addr);
#endif
	do {
		pmd = *pmdp;
		barrier();
		next = pmd_addr_end(addr, end);
		if (pmd_none(pmd))
			return 0;
		if (unlikely(pmd_huge(pmd))) {
			if (!gup_huge_pmd(pmdp, pmd, addr, next,
					  write, pages, nr))
				return 0;
		} else if (!gup_pte_range(pmdp, pmd, addr, next,
					  write, pages, nr))
			return 0;
	} while (pmdp++, addr = next, addr != end);

	return 1;
}

static inline int gup_pud_range(pgd_t *pgdp, pgd_t pgd, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	unsigned long next;
	pud_t *pudp, pud;

	pudp = (pud_t *) pgdp;
#ifdef CONFIG_64BIT
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		pudp = (pud_t *) pgd_deref(pgd);
	pudp += pud_index(addr);
#endif
	do {
		pud = *pudp;
		barrier();
		next = pud_addr_end(addr, end);
		if (pud_none(pud))
			return 0;
		if (!gup_pmd_range(pudp, pud, addr, next, write, pages, nr))
			return 0;
	} while (pudp++, addr = next, addr != end);

	return 1;
}

/**
 * get_user_pages_fast() - pin user pages in memory
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @write:	whether pages will be written to
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long.
 *
 * Attempt to pin user pages in memory without taking mm->mmap_sem.
 * If not successful, it will fall back to taking the lock and
 * calling get_user_pages().
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno.
 */
int get_user_pages_fast(unsigned long start, int nr_pages, int write,
			struct page **pages)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr, len, end;
	unsigned long next;
	pgd_t *pgdp, pgd;
	int nr = 0;

	start &= PAGE_MASK;
	addr = start;
	len = (unsigned long) nr_pages << PAGE_SHIFT;
	end = start + len;
	if (end < start)
		goto slow_irqon;

	/*
	 * local_irq_disable() doesn't prevent pagetable teardown, but does
	 * prevent the pagetables from being freed on s390.
	 *
	 * So long as we atomically load page table pointers versus teardown,
	 * we can follow the address down to the page and take a ref on it.
	 */
	local_irq_disable();
	pgdp = pgd_offset(mm, addr);
	do {
		pgd = *pgdp;
		barrier();
		next = pgd_addr_end(addr, end);
		if (pgd_none(pgd))
			goto slow;
		if (!gup_pud_range(pgdp, pgd, addr, next, write, pages, &nr))
			goto slow;
	} while (pgdp++, addr = next, addr != end);
	local_irq_enable();

	VM_BUG_ON(nr != (end - start) >> PAGE_SHIFT);
	return nr;

	{
		int ret;
slow:
		local_irq_enable();
slow_irqon:
		/* Try to get the remaining pages with get_user_pages */
		start += nr << PAGE_SHIFT;
		pages += nr;

		down_read(&mm->mmap_sem);
		ret = get_user_pages(current, mm, start,
			(end - start) >> PAGE_SHIFT, write, 0, pages, NULL);
		up_read(&mm->mmap_sem);

		/* Have to be a bit careful with return values */
		if (nr > 0) {
			if (ret < 0)
				ret = nr;
			else
				ret += nr;
		}
		return ret;
	}
}
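
/*
 * A minimal usage sketch of the interface above, with a hypothetical
 * caller (pin_user_buffer() is not part of this file): pin a user
 * buffer for writing, use the pages, then drop every reference taken.
 * Guarded out so it stays illustrative only.
 */
#if 0
static int pin_user_buffer(unsigned long uaddr, int nr_pages,
			   struct page **pages)
{
	int i, pinned;

	/* Lockless fast path; falls back to get_user_pages() internally. */
	pinned = get_user_pages_fast(uaddr, nr_pages, 1, pages);
	if (pinned < 0)
		return pinned;	/* nothing was pinned: -errno */

	/* ... access pages[0..pinned-1] here; may be fewer than requested ... */

	/* Each pinned page holds a reference that put_page() must drop. */
	for (i = 0; i < pinned; i++)
		put_page(pages[i]);
	return pinned;
}
#endif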