gup.c

/*
 * Lockless get_user_pages_fast for s390
 *
 * Copyright IBM Corp. 2010
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/vmstat.h>
#include <linux/pagemap.h>
#include <linux/rwsem.h>
#include <asm/pgtable.h>

/*
 * The performance critical leaf functions are made noinline otherwise gcc
 * inlines everything into a single function which results in too much
 * register pressure.
 */
static inline int gup_pte_range(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	unsigned long mask, result;
	pte_t *ptep, pte;
	struct page *page;

	result = write ? 0 : _PAGE_RO;
	mask = result | _PAGE_INVALID | _PAGE_SPECIAL;
	ptep = ((pte_t *) pmd_deref(pmd)) + pte_index(addr);
	do {
		pte = *ptep;
		barrier();
		if ((pte_val(pte) & mask) != result)
			return 0;
		VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
		page = pte_page(pte);
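		/*
		 * Take a speculative reference: this fails if the page's
		 * refcount is already zero, i.e. the page is being freed,
		 * in which case we punt to the slow path.
		 */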
		if (!page_cache_get_speculative(page))
			return 0;
		if (unlikely(pte_val(pte) != pte_val(*ptep))) {
			put_page(page);
			return 0;
		}
		pages[*nr] = page;
		(*nr)++;
	} while (ptep++, addr += PAGE_SIZE, addr != end);

	return 1;
}

static inline int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	unsigned long mask, result;
	struct page *head, *page;
	int refs;

	result = write ? 0 : _SEGMENT_ENTRY_RO;
	mask = result | _SEGMENT_ENTRY_INV;
	if ((pmd_val(pmd) & mask) != result)
		return 0;
	VM_BUG_ON(!pfn_valid(pmd_val(pmd) >> PAGE_SHIFT));

	refs = 0;
	head = pmd_page(pmd);
	page = head + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
	do {
		VM_BUG_ON(compound_head(page) != head);
		pages[*nr] = page;
		(*nr)++;
		page++;
		refs++;
	} while (addr += PAGE_SIZE, addr != end);
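
	/*
	 * All references for a huge page are taken on the compound head,
	 * so the whole batch can be added with one speculative operation.
	 */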
	if (!page_cache_add_speculative(head, refs)) {
		*nr -= refs;
		return 0;
	}

	/* The pmd changed under us: undo the references and bail out. */
	if (unlikely(pmd_val(pmd) != pmd_val(*pmdp))) {
		*nr -= refs;
		while (refs--)
			put_page(head);
		return 0;
	}

	return 1;
}

static inline int gup_pmd_range(pud_t *pudp, pud_t pud, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	unsigned long next;
	pmd_t *pmdp, pmd;

	pmdp = (pmd_t *) pudp;
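	/*
	 * s390 allocates only as many page table levels as the address
	 * space needs. If the entry is a real region-third-table entry
	 * it points to a segment (pmd) table; otherwise the level is
	 * folded and pudp already addresses the segment table entry.
	 */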
#ifdef CONFIG_64BIT
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pmdp = (pmd_t *) pud_deref(pud);
	pmdp += pmd_index(addr);
#endif
	do {
		pmd = *pmdp;
		barrier();
		next = pmd_addr_end(addr, end);
		if (pmd_none(pmd))
			return 0;
		if (unlikely(pmd_huge(pmd))) {
			if (!gup_huge_pmd(pmdp, pmd, addr, next,
					  write, pages, nr))
				return 0;
		} else if (!gup_pte_range(pmdp, pmd, addr, next,
					  write, pages, nr))
			return 0;
	} while (pmdp++, addr = next, addr != end);

	return 1;
}

static inline int gup_pud_range(pgd_t *pgdp, pgd_t pgd, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	unsigned long next;
	pud_t *pudp, pud;

	pudp = (pud_t *) pgdp;
#ifdef CONFIG_64BIT
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		pudp = (pud_t *) pgd_deref(pgd);
	pudp += pud_index(addr);
#endif
	do {
		pud = *pudp;
		barrier();
		next = pud_addr_end(addr, end);
		if (pud_none(pud))
			return 0;
		if (!gup_pmd_range(pudp, pud, addr, next, write, pages, nr))
			return 0;
	} while (pudp++, addr = next, addr != end);

	return 1;
}

/**
 * get_user_pages_fast() - pin user pages in memory
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @write:	whether pages will be written to
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long.
 *
 * Attempt to pin user pages in memory without taking mm->mmap_sem.
 * If not successful, it will fall back to taking the lock and
 * calling get_user_pages().
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno.
 */
int get_user_pages_fast(unsigned long start, int nr_pages, int write,
			struct page **pages)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr, len, end;
	unsigned long next;
	pgd_t *pgdp, pgd;
	int nr = 0;

	start &= PAGE_MASK;
	addr = start;
	len = (unsigned long) nr_pages << PAGE_SHIFT;
	end = start + len;
	if (end < start)
		goto slow_irqon;

	/*
	 * local_irq_disable() doesn't prevent pagetable teardown, but does
	 * prevent the pagetables from being freed on s390.
	 *
	 * So long as we atomically load page table pointers versus teardown,
	 * we can follow the address down to the page and take a ref on it.
	 */
	local_irq_disable();
	pgdp = pgd_offset(mm, addr);
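	/*
	 * At each level the table entry is read once into a local copy;
	 * barrier() keeps gcc from re-reading it, so every check runs
	 * against a single consistent snapshot of the entry.
	 */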
	do {
		pgd = *pgdp;
		barrier();
		next = pgd_addr_end(addr, end);
		if (pgd_none(pgd))
			goto slow;
		if (!gup_pud_range(pgdp, pgd, addr, next, write, pages, &nr))
			goto slow;
	} while (pgdp++, addr = next, addr != end);
	local_irq_enable();

	VM_BUG_ON(nr != (end - start) >> PAGE_SHIFT);
	return nr;
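
	/*
	 * Slow path: reachable only through the goto labels below; the
	 * block exists just to scope 'ret'.
	 */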
	{
		int ret;
slow:
		local_irq_enable();
slow_irqon:
		/* Try to get the remaining pages with get_user_pages */
		start += nr << PAGE_SHIFT;
		pages += nr;

		down_read(&mm->mmap_sem);
		ret = get_user_pages(current, mm, start,
			(end - start) >> PAGE_SHIFT, write, 0, pages, NULL);
		up_read(&mm->mmap_sem);

		/* Have to be a bit careful with return values */
		if (nr > 0) {
			if (ret < 0)
				ret = nr;
			else
				ret += nr;
		}

		return ret;
	}
}
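
/*
 * Usage sketch: how a caller might pin a user buffer with
 * get_user_pages_fast() and drop the references again. The helper
 * example_pin_user_buffer() is hypothetical and only illustrates the
 * calling convention documented above; it is not part of this file's
 * interface.
 */
static inline int example_pin_user_buffer(unsigned long uaddr, int nr_pages,
					  struct page **pages)
{
	int i, pinned;

	/* Pin for write access; returns #pages pinned, or -errno. */
	pinned = get_user_pages_fast(uaddr, nr_pages, 1, pages);
	if (pinned < 0)
		return pinned;	/* no pages were pinned */

	/* ... access pages[0..pinned-1] here ... */

	/* Release the references taken by get_user_pages_fast(). */
	for (i = 0; i < pinned; i++)
		put_page(pages[i]);

	return pinned == nr_pages ? 0 : -EFAULT;
}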