subpage-prot.c

/*
 * Copyright 2007-2008 Paul Mackerras, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>

#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>
/*
 * Free all pages allocated for subpage protection maps and pointers.
 * Also makes sure that the subpage_prot_table structure is
 * reinitialized for the next user.
 */
void subpage_prot_free(struct mm_struct *mm)
{
	struct subpage_prot_table *spt = &mm->context.spt;
	unsigned long i, j, addr;
	u32 **p;

	/* The four low_prot[] pages map addresses below 4GB directly. */
	for (i = 0; i < 4; ++i) {
		if (spt->low_prot[i]) {
			free_page((unsigned long)spt->low_prot[i]);
			spt->low_prot[i] = NULL;
		}
	}
	/* Above 4GB there is an extra level of pointer pages to free. */
	addr = 0;
	for (i = 0; i < 2; ++i) {
		p = spt->protptrs[i];
		if (!p)
			continue;
		spt->protptrs[i] = NULL;
		for (j = 0; j < SBP_L2_COUNT && addr < spt->maxaddr;
		     ++j, addr += PAGE_SIZE)
			if (p[j])
				free_page((unsigned long)p[j]);
		free_page((unsigned long)p);
	}
	spt->maxaddr = 0;
}
void subpage_prot_init_new_context(struct mm_struct *mm)
{
	struct subpage_prot_table *spt = &mm->context.spt;

	memset(spt, 0, sizeof(*spt));
}
/*
 * Walk to the linux PTE for addr and touch npages consecutive PTEs.
 * pte_update() with a clear mask of 0 leaves each PTE unchanged but
 * still flushes any stale hashed (HPTE) copy of it, so that changed
 * subpage protections take effect on the next hash fault.
 */
static void hpte_flush_range(struct mm_struct *mm, unsigned long addr,
			     int npages)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	spinlock_t *ptl;

	pgd = pgd_offset(mm, addr);
	if (pgd_none(*pgd))
		return;
	pud = pud_offset(pgd, addr);
	if (pud_none(*pud))
		return;
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return;
	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	arch_enter_lazy_mmu_mode();
	for (; npages > 0; --npages) {
		pte_update(mm, addr, pte, 0, 0);
		addr += PAGE_SIZE;
		++pte;
	}
	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(pte - 1, ptl);
}
/*
 * Clear the subpage protection map for an address range, allowing
 * all accesses that are allowed by the pte permissions.
 */
static void subpage_prot_clear(unsigned long addr, unsigned long len)
{
	struct mm_struct *mm = current->mm;
	struct subpage_prot_table *spt = &mm->context.spt;
	u32 **spm, *spp;
	int i, nw;
	unsigned long next, limit;

	down_write(&mm->mmap_sem);
	limit = addr + len;
	if (limit > spt->maxaddr)
		limit = spt->maxaddr;
	for (; addr < limit; addr = next) {
		next = pmd_addr_end(addr, limit);
		if (addr < 0x100000000) {
			/* addresses below 4GB use spt->low_prot */
			spm = spt->low_prot;
		} else {
			spm = spt->protptrs[addr >> SBP_L3_SHIFT];
			if (!spm)
				continue;
		}
		spp = spm[(addr >> SBP_L2_SHIFT) & (SBP_L2_COUNT - 1)];
		if (!spp)
			continue;
		spp += (addr >> PAGE_SHIFT) & (SBP_L1_COUNT - 1);

		/* clear at most one PTE page's worth of map words at a time */
		i = (addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
		nw = PTRS_PER_PTE - i;
		if (addr + (nw << PAGE_SHIFT) > next)
			nw = (next - addr) >> PAGE_SHIFT;
		memset(spp, 0, nw * sizeof(u32));

		/* now flush any existing HPTEs for the range */
		hpte_flush_range(mm, addr, nw);
	}
	up_write(&mm->mmap_sem);
}
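
/*
 * Worked example of the three-level lookup used above and in
 * sys_subpage_prot() below.  This is a sketch assuming 64k pages
 * (PAGE_SHIFT = 16) and the usual SBP_* definitions, under which
 * SBP_L1_COUNT = 2^14, SBP_L2_COUNT = 2^13, SBP_L2_SHIFT = 30 and
 * SBP_L3_SHIFT = 43:
 *
 *	one u32 of map			covers one 64k page
 *	one page of u32s (spp)		covers 2^14 * 64k = 1GB
 *	one page of pointers (spm)	covers 2^13 * 1GB = 8TB
 *
 * so the four low_prot[] pages cover exactly the first 4GB.  For
 * addr = 0x12345678 the map word is then found as
 *
 *	spm  = spt->low_prot;			// addr < 4GB
 *	spp  = spm[(addr >> 30) & 0x1fff];	// = spm[0]
 *	word = spp[(addr >> 16) & 0x3fff];	// = spp[0x1234]
 *
 * while addresses at or above 4GB go through spt->protptrs[addr >> 43]
 * first.
 */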
/*
 * Copy in a subpage protection map for an address range.
 * The map has 2 bits per 4k subpage, so 32 bits per 64k page.
 * Each 2-bit field is 0 to allow any access, 1 to prevent writes,
 * 2 or 3 to prevent all accesses.
 * Note that the normal page protections also apply; the subpage
 * protection mechanism is an additional constraint, so putting 0
 * in a 2-bit field won't allow writes to a page that is otherwise
 * write-protected.
 * (A hypothetical userspace sketch follows at the end of this file.)
 */
long sys_subpage_prot(unsigned long addr, unsigned long len, u32 __user *map)
{
	struct mm_struct *mm = current->mm;
	struct subpage_prot_table *spt = &mm->context.spt;
	u32 **spm, *spp;
	int i, nw;
	unsigned long next, limit;
	int err;

	/* Check parameters */
	if ((addr & ~PAGE_MASK) || (len & ~PAGE_MASK) ||
	    addr >= TASK_SIZE || len >= TASK_SIZE || addr + len > TASK_SIZE)
		return -EINVAL;

	if (is_hugepage_only_range(mm, addr, len))
		return -EINVAL;

	if (!map) {
		/* Clear out the protection map for the address range */
		subpage_prot_clear(addr, len);
		return 0;
	}

	if (!access_ok(VERIFY_READ, map, (len >> PAGE_SHIFT) * sizeof(u32)))
		return -EFAULT;

	down_write(&mm->mmap_sem);
	for (limit = addr + len; addr < limit; addr = next) {
		next = pmd_addr_end(addr, limit);
		err = -ENOMEM;
		if (addr < 0x100000000) {
			spm = spt->low_prot;
		} else {
			/* allocate the pointer level on demand */
			spm = spt->protptrs[addr >> SBP_L3_SHIFT];
			if (!spm) {
				spm = (u32 **)get_zeroed_page(GFP_KERNEL);
				if (!spm)
					goto out;
				spt->protptrs[addr >> SBP_L3_SHIFT] = spm;
			}
		}
		spm += (addr >> SBP_L2_SHIFT) & (SBP_L2_COUNT - 1);
		spp = *spm;
		if (!spp) {
			spp = (u32 *)get_zeroed_page(GFP_KERNEL);
			if (!spp)
				goto out;
			*spm = spp;
		}
		spp += (addr >> PAGE_SHIFT) & (SBP_L1_COUNT - 1);

		/* demote the whole segment to 4k hash pages */
		local_irq_disable();
		demote_segment_4k(mm, addr);
		local_irq_enable();

		i = (addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
		nw = PTRS_PER_PTE - i;
		if (addr + (nw << PAGE_SHIFT) > next)
			nw = (next - addr) >> PAGE_SHIFT;

		/* drop mmap_sem while copying from userspace, which may fault */
		up_write(&mm->mmap_sem);
		err = -EFAULT;
		if (__copy_from_user(spp, map, nw * sizeof(u32)))
			goto out2;
		map += nw;
		down_write(&mm->mmap_sem);

		/* now flush any existing HPTEs for the range */
		hpte_flush_range(mm, addr, nw);
	}
	if (limit > spt->maxaddr)
		spt->maxaddr = limit;
	err = 0;
out:
	up_write(&mm->mmap_sem);
out2:
	return err;
}
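
/*
 * Hypothetical userspace usage sketch (an illustration, not part of this
 * file or of any kernel build).  The syscall number below is an assumption;
 * check asm/unistd.h for your kernel, and note the call only succeeds on
 * 64k-page kernels with subpage protection support.  The map takes one u32
 * per 64k page; 0x55555555 sets every 2-bit field to 1, making all sixteen
 * 4k subpages read-only regardless of which end of the u32 subpage 0
 * occupies.
 *
 *	#include <stdint.h>
 *	#include <stdio.h>
 *	#include <sys/mman.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	#ifndef __NR_subpage_prot
 *	#define __NR_subpage_prot 310	// assumed powerpc syscall number
 *	#endif
 *
 *	int main(void)
 *	{
 *		size_t len = 0x10000;		// one 64k page
 *		uint32_t prot = 0x55555555;	// all subpages: no writes
 *		char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *
 *		if (p == MAP_FAILED)
 *			return 1;
 *		p[0] = 'x';			// writable so far
 *		if (syscall(__NR_subpage_prot, (unsigned long)p, len, &prot))
 *			perror("subpage_prot");
 *		// writing any byte of p would now raise SIGSEGV
 *		syscall(__NR_subpage_prot, (unsigned long)p, len, NULL);
 *		p[0] = 'y';			// NULL map cleared it: writable again
 *		return 0;
 *	}
 */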