gup.c

/*
 * Lockless get_user_pages_fast for powerpc
 *
 * Copyright (C) 2008 Nick Piggin
 * Copyright (C) 2008 Novell Inc.
 */
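/*
 * The walk below runs with interrupts disabled: on powerpc this keeps the
 * page tables from being freed out from under us (see the comment above
 * local_irq_disable() in get_user_pages_fast() below).  Page references
 * are taken speculatively and dropped again whenever a PTE is seen to
 * have changed under us.
 */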
#undef DEBUG
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/vmstat.h>
#include <linux/pagemap.h>
#include <linux/rwsem.h>
#include <asm/pgtable.h>

/*
 * The performance critical leaf functions are made noinline otherwise gcc
 * inlines everything into a single function which results in too much
 * register pressure.
 */
static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
                unsigned long end, int write, struct page **pages, int *nr)
{
        unsigned long mask, result;
        pte_t *ptep;

        result = _PAGE_PRESENT|_PAGE_USER;
        if (write)
                result |= _PAGE_RW;
        mask = result | _PAGE_SPECIAL;

        ptep = pte_offset_kernel(&pmd, addr);
        do {
                pte_t pte = *ptep;
                struct page *page;

                if ((pte_val(pte) & mask) != result)
                        return 0;
                VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
                page = pte_page(pte);
                if (!page_cache_get_speculative(page))
                        return 0;
                /*
                 * The PTE may have changed between the speculative get
                 * and now; if so, drop the reference and give up.
                 * (pte_t may be a structure, so compare via pte_val().)
                 */
                if (unlikely(pte_val(pte) != pte_val(*ptep))) {
                        put_page(page);
                        return 0;
                }
                pages[*nr] = page;
                (*nr)++;

        } while (ptep++, addr += PAGE_SIZE, addr != end);

        return 1;
}
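/*
 * A huge page is mapped by a single PTE covering many base pages: the code
 * below records each base page, takes all the references on the compound
 * head page in one atomic step, and backs them out again if the PTE
 * changed while the references were being taken.
 */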
#ifdef CONFIG_HUGETLB_PAGE
static noinline int gup_huge_pte(pte_t *ptep, struct hstate *hstate,
                unsigned long *addr, unsigned long end,
                int write, struct page **pages, int *nr)
{
        unsigned long mask;
        unsigned long pte_end;
        struct page *head, *page;
        pte_t pte;
        int refs;

        pte_end = (*addr + huge_page_size(hstate)) & huge_page_mask(hstate);
        if (pte_end < end)
                end = pte_end;

        pte = *ptep;
        mask = _PAGE_PRESENT|_PAGE_USER;
        if (write)
                mask |= _PAGE_RW;
        if ((pte_val(pte) & mask) != mask)
                return 0;
        /* hugepages are never "special" */
        VM_BUG_ON(!pfn_valid(pte_pfn(pte)));

        refs = 0;
        head = pte_page(pte);
        page = head + ((*addr & ~huge_page_mask(hstate)) >> PAGE_SHIFT);
        do {
                VM_BUG_ON(compound_head(page) != head);
                pages[*nr] = page;
                (*nr)++;
                page++;
                refs++;
        } while (*addr += PAGE_SIZE, *addr != end);

        if (!page_cache_add_speculative(head, refs)) {
                *nr -= refs;
                return 0;
        }
        if (unlikely(pte_val(pte) != pte_val(*ptep))) {
                /*
                 * The PTE changed under us: back out only the entries and
                 * references this call added (all refs were taken on the
                 * head page), and let the caller fall back to the slow path.
                 * Could be optimized better.
                 */
                *nr -= refs;
                while (refs--)
                        put_page(head);
                return 0;
        }

        return 1;
}
#endif /* CONFIG_HUGETLB_PAGE */
static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
                int write, struct page **pages, int *nr)
{
        unsigned long next;
        pmd_t *pmdp;

        pmdp = pmd_offset(&pud, addr);
        do {
                pmd_t pmd = *pmdp;

                next = pmd_addr_end(addr, end);
                if (pmd_none(pmd))
                        return 0;
                if (!gup_pte_range(pmd, addr, next, write, pages, nr))
                        return 0;
        } while (pmdp++, addr = next, addr != end);

        return 1;
}

static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
                int write, struct page **pages, int *nr)
{
        unsigned long next;
        pud_t *pudp;

        pudp = pud_offset(&pgd, addr);
        do {
                pud_t pud = *pudp;

                next = pud_addr_end(addr, end);
                if (pud_none(pud))
                        return 0;
                if (!gup_pmd_range(pud, addr, next, write, pages, nr))
                        return 0;
        } while (pudp++, addr = next, addr != end);

        return 1;
}
int get_user_pages_fast(unsigned long start, int nr_pages, int write,
                        struct page **pages)
{
        struct mm_struct *mm = current->mm;
        unsigned long addr, len, end;
        unsigned long next;
        pgd_t *pgdp;
        int psize, nr = 0;
        unsigned int shift;

        pr_debug("%s(%lx,%x,%s)\n", __func__, start, nr_pages,
                 write ? "write" : "read");

        start &= PAGE_MASK;
        addr = start;
        len = (unsigned long) nr_pages << PAGE_SHIFT;
        end = start + len;

        if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
                                        start, len)))
                goto slow_irqon;

        pr_debug(" aligned: %lx .. %lx\n", start, end);

#ifdef CONFIG_HUGETLB_PAGE
        /* We bail out on slice boundary crossing when hugetlb is
         * enabled in order to not have to deal with two different
         * page table formats
         */
        if (addr < SLICE_LOW_TOP) {
                if (end > SLICE_LOW_TOP)
                        goto slow_irqon;

                if (unlikely(GET_LOW_SLICE_INDEX(addr) !=
                             GET_LOW_SLICE_INDEX(end - 1)))
                        goto slow_irqon;
        } else {
                if (unlikely(GET_HIGH_SLICE_INDEX(addr) !=
                             GET_HIGH_SLICE_INDEX(end - 1)))
                        goto slow_irqon;
        }
#endif /* CONFIG_HUGETLB_PAGE */

        /*
         * XXX: batch / limit 'nr', to avoid large irq off latency
         * needs some instrumenting to determine the common sizes used by
         * important workloads (eg. DB2), and whether limiting the batch size
         * will decrease performance.
         *
         * It seems like we're in the clear for the moment. Direct-IO is
         * the main guy that batches up lots of get_user_pages, and even
         * they are limited to 64-at-a-time which is not so many.
         */
        /*
         * This doesn't prevent pagetable teardown, but does prevent
         * the pagetables from being freed on powerpc.
         *
         * So long as we atomically load page table pointers versus teardown,
         * we can follow the address down to the page and take a ref on it.
         */
        local_irq_disable();

        psize = get_slice_psize(mm, addr);
        shift = mmu_psize_defs[psize].shift;

#ifdef CONFIG_HUGETLB_PAGE
        if (unlikely(mmu_huge_psizes[psize])) {
                pte_t *ptep;
                unsigned long a = addr;
                unsigned long sz = ((1UL) << shift);
                struct hstate *hstate = size_to_hstate(sz);

                BUG_ON(!hstate);
                /*
                 * XXX: could be optimized to avoid hstate
                 * lookup entirely (just use shift)
                 */

                do {
                        VM_BUG_ON(shift != mmu_psize_defs[get_slice_psize(mm, a)].shift);
                        ptep = huge_pte_offset(mm, a);
                        pr_debug(" %016lx: huge ptep %p\n", a, ptep);
                        if (!ptep || !gup_huge_pte(ptep, hstate, &a, end, write,
                                                   pages, &nr))
                                goto slow;
                } while (a != end);
        } else
#endif /* CONFIG_HUGETLB_PAGE */
        {
                pgdp = pgd_offset(mm, addr);
                do {
                        pgd_t pgd = *pgdp;

                        VM_BUG_ON(shift != mmu_psize_defs[get_slice_psize(mm, addr)].shift);
                        pr_debug(" %016lx: normal pgd %p\n", addr,
                                 (void *)pgd_val(pgd));
                        next = pgd_addr_end(addr, end);
                        if (pgd_none(pgd))
                                goto slow;
                        if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
                                goto slow;
                } while (pgdp++, addr = next, addr != end);
        }
        local_irq_enable();

        VM_BUG_ON(nr != (end - start) >> PAGE_SHIFT);
        return nr;

        {
                int ret;

slow:
                local_irq_enable();
slow_irqon:
                pr_debug(" slow path ! nr = %d\n", nr);

                /* Try to get the remaining pages with get_user_pages */
                start += nr << PAGE_SHIFT;
                pages += nr;

                down_read(&mm->mmap_sem);
                ret = get_user_pages(current, mm, start,
                        (end - start) >> PAGE_SHIFT, write, 0, pages, NULL);
                up_read(&mm->mmap_sem);

                /* Have to be a bit careful with return values */
                if (nr > 0) {
                        if (ret < 0)
                                ret = nr;
                        else
                                ret += nr;
                }

                return ret;
        }
}
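
/*
 * Illustrative sketch, not part of the original file: a typical caller
 * (e.g. a direct-I/O path) pins a bounded batch of pages, uses them, then
 * drops the references.  'user_addr' and the batch size are hypothetical;
 * get_user_pages_fast() may return fewer pages than requested, or a
 * negative errno when nothing could be pinned.
 *
 *      struct page *pages[64];
 *      int i, got;
 *
 *      got = get_user_pages_fast(user_addr, 64, 1, pages);
 *      if (got < 0)
 *              return got;
 *      ... do the I/O against pages[0] .. pages[got - 1] ...
 *      for (i = 0; i < got; i++)
 *              put_page(pages[i]);
 */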