gup.c

/*
 * Lockless get_user_pages_fast for powerpc
 *
 * Copyright (C) 2008 Nick Piggin
 * Copyright (C) 2008 Novell Inc.
 */
#undef DEBUG
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/vmstat.h>
#include <linux/pagemap.h>
#include <linux/rwsem.h>
#include <asm/pgtable.h>

#ifdef __HAVE_ARCH_PTE_SPECIAL
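
/*
 * The lockless fast path is built only when the architecture provides
 * pte_special(): the walk below relies on _PAGE_SPECIAL to reject ptes
 * for pages whose reference count must not be touched.
 */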
/*
 * The performance critical leaf functions are made noinline otherwise gcc
 * inlines everything into a single function which results in too much
 * register pressure.
 */
static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
                unsigned long end, int write, struct page **pages, int *nr)
{
        unsigned long mask, result;
        pte_t *ptep;

        result = _PAGE_PRESENT|_PAGE_USER;
        if (write)
                result |= _PAGE_RW;
        mask = result | _PAGE_SPECIAL;
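
        /*
         * A pte is accepted on the fast path only when (pte & mask) ==
         * result: all required bits (present, user, and RW when write
         * access was asked for) are set and _PAGE_SPECIAL is clear.
         */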
        ptep = pte_offset_kernel(&pmd, addr);
        do {
                pte_t pte = ACCESS_ONCE(*ptep);
                struct page *page;

                if ((pte_val(pte) & mask) != result)
                        return 0;
                VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
                page = pte_page(pte);
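                /*
                 * Take a speculative reference on the page, then recheck
                 * the pte.  If the pte changed under us (the page may have
                 * been unmapped or freed meanwhile), drop the reference
                 * and let the caller fall back to the slow path.
                 */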
                if (!page_cache_get_speculative(page))
                        return 0;
                if (unlikely(pte_val(pte) != pte_val(*ptep))) {
                        put_page(page);
                        return 0;
                }
                pages[*nr] = page;
                (*nr)++;

        } while (ptep++, addr += PAGE_SIZE, addr != end);

        return 1;
}
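
/*
 * Walk the pmd level below one pud entry.  Regular pmds are descended
 * into with gup_pte_range(); huge and hugepd entries are handed to the
 * powerpc hugepage helpers, and anything unstable (none, or a THP in
 * the middle of splitting) returns 0 so the caller takes the slow path.
 */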
static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
                int write, struct page **pages, int *nr)
{
        unsigned long next;
        pmd_t *pmdp;

        pmdp = pmd_offset(&pud, addr);
        do {
                pmd_t pmd = ACCESS_ONCE(*pmdp);

                next = pmd_addr_end(addr, end);
                /*
                 * If we find a splitting transparent hugepage we
                 * return zero. That will result in taking the slow
                 * path which will call wait_split_huge_page()
                 * if the pmd is still in splitting state
                 */
                if (pmd_none(pmd) || pmd_trans_splitting(pmd))
                        return 0;
                if (pmd_huge(pmd) || pmd_large(pmd)) {
                        if (!gup_hugepte((pte_t *)pmdp, PMD_SIZE, addr, next,
                                         write, pages, nr))
                                return 0;
                } else if (is_hugepd(pmdp)) {
                        if (!gup_hugepd((hugepd_t *)pmdp, PMD_SHIFT,
                                        addr, next, write, pages, nr))
                                return 0;
                } else if (!gup_pte_range(pmd, addr, next, write, pages, nr))
                        return 0;
        } while (pmdp++, addr = next, addr != end);

        return 1;
}
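
/*
 * Same pattern one level up: descend into each present pud, handing
 * huge entries to the hugepage helpers and everything else on to
 * gup_pmd_range().
 */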
static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
                int write, struct page **pages, int *nr)
{
        unsigned long next;
        pud_t *pudp;

        pudp = pud_offset(&pgd, addr);
        do {
                pud_t pud = ACCESS_ONCE(*pudp);

                next = pud_addr_end(addr, end);
                if (pud_none(pud))
                        return 0;
                if (pud_huge(pud)) {
                        if (!gup_hugepte((pte_t *)pudp, PUD_SIZE, addr, next,
                                         write, pages, nr))
                                return 0;
                } else if (is_hugepd(pudp)) {
                        if (!gup_hugepd((hugepd_t *)pudp, PUD_SHIFT,
                                        addr, next, write, pages, nr))
                                return 0;
                } else if (!gup_pmd_range(pud, addr, next, write, pages, nr))
                        return 0;
        } while (pudp++, addr = next, addr != end);

        return 1;
}
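
/*
 * get_user_pages_fast() - pin user pages in memory.  The lockless walk
 * above is tried first without taking mmap_sem; whatever cannot be
 * pinned that way is handed to get_user_pages() under mmap_sem.
 * Returns the number of pages pinned, which may be fewer than
 * requested, or a negative errno if no pages could be pinned at all.
 */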
int get_user_pages_fast(unsigned long start, int nr_pages, int write,
                        struct page **pages)
{
        struct mm_struct *mm = current->mm;
        unsigned long addr, len, end;
        unsigned long next;
        pgd_t *pgdp;
        int nr = 0;

        pr_devel("%s(%lx,%x,%s)\n", __func__, start, nr_pages, write ? "write" : "read");

        start &= PAGE_MASK;
        addr = start;
        len = (unsigned long) nr_pages << PAGE_SHIFT;
        end = start + len;

        if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
                                        start, len)))
                goto slow_irqon;

        pr_devel("  aligned: %lx .. %lx\n", start, end);

        /*
         * XXX: batch / limit 'nr', to avoid large irq off latency
         * needs some instrumenting to determine the common sizes used by
         * important workloads (eg. DB2), and whether limiting the batch size
         * will decrease performance.
         *
         * It seems like we're in the clear for the moment. Direct-IO is
         * the main guy that batches up lots of get_user_pages, and even
         * they are limited to 64-at-a-time which is not so many.
         */
        /*
         * This doesn't prevent pagetable teardown, but does prevent
         * the pagetables from being freed on powerpc.
         *
         * So long as we atomically load page table pointers versus teardown,
         * we can follow the address down to the page and take a ref on it.
         */
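        /*
         * Disabling interrupts also blocks the deferred, batched freeing
         * of page table pages that powerpc uses, so the tables we are
         * about to walk cannot be freed and reused while we are in here.
         */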
        local_irq_disable();

        pgdp = pgd_offset(mm, addr);
        do {
                pgd_t pgd = ACCESS_ONCE(*pgdp);

                pr_devel("  %016lx: normal pgd %p\n", addr,
                         (void *)pgd_val(pgd));
                next = pgd_addr_end(addr, end);
                if (pgd_none(pgd))
                        goto slow;
                if (pgd_huge(pgd)) {
                        if (!gup_hugepte((pte_t *)pgdp, PGDIR_SIZE, addr, next,
                                         write, pages, &nr))
                                goto slow;
                } else if (is_hugepd(pgdp)) {
                        if (!gup_hugepd((hugepd_t *)pgdp, PGDIR_SHIFT,
                                        addr, next, write, pages, &nr))
                                goto slow;
                } else if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
                        goto slow;
        } while (pgdp++, addr = next, addr != end);

        local_irq_enable();

        VM_BUG_ON(nr != (end - start) >> PAGE_SHIFT);
        return nr;
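
        /*
         * Not reached by falling through: the block below is entered only
         * via the slow/slow_irqon labels, when part of the range could not
         * be pinned locklessly.
         */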
        {
                int ret;

slow:
                local_irq_enable();
slow_irqon:
                pr_devel("  slow path ! nr = %d\n", nr);

                /* Try to get the remaining pages with get_user_pages */
                start += nr << PAGE_SHIFT;
                pages += nr;

                down_read(&mm->mmap_sem);
                ret = get_user_pages(current, mm, start,
                                     (end - start) >> PAGE_SHIFT, write, 0, pages, NULL);
                up_read(&mm->mmap_sem);

                /* Have to be a bit careful with return values */
                if (nr > 0) {
                        if (ret < 0)
                                ret = nr;
                        else
                                ret += nr;
                }

                return ret;
        }
}

#endif /* __HAVE_ARCH_PTE_SPECIAL */