/* pgtable.c */

#include <linux/mm.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>
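
/*
 * Allocate a zeroed page to use as a kernel page table (PTE page).
 * __GFP_REPEAT asks the page allocator to retry harder before failing.
 */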
pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
	return (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
}
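
/*
 * Allocate a PTE page for user mappings. With CONFIG_HIGHPTE the page
 * may come from highmem; pgtable_page_ctor() initializes its struct
 * page for use as a page table (e.g. the split page table lock).
 */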
pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	struct page *pte;

#ifdef CONFIG_HIGHPTE
	pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT|__GFP_ZERO, 0);
#else
	pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
#endif
	if (pte)
		pgtable_page_ctor(pte);
	return pte;
}
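
/*
 * Free a PTE page through the mmu_gather: undo the ctor, tell any
 * paravirt backend that the pfn no longer holds a page table, and
 * queue the page so it is only freed after the TLB has been flushed.
 */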
void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
{
	pgtable_page_dtor(pte);
	paravirt_release_pte(page_to_pfn(pte));
	tlb_remove_page(tlb, pte);
}
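
/*
 * Deferred freeing of pmd and pud pages, compiled in only when the
 * kernel actually has three or four page table levels. As with ptes,
 * the pages are queued on the mmu_gather and freed after the TLB flush.
 */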
#if PAGETABLE_LEVELS > 2
void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
{
	paravirt_release_pmd(__pa(pmd) >> PAGE_SHIFT);
	tlb_remove_page(tlb, virt_to_page(pmd));
}

#if PAGETABLE_LEVELS > 3
void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud)
{
	paravirt_release_pud(__pa(pud) >> PAGE_SHIFT);
	tlb_remove_page(tlb, virt_to_page(pud));
}
#endif	/* PAGETABLE_LEVELS > 3 */
#endif	/* PAGETABLE_LEVELS > 2 */
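
/*
 * Non-shared pgds are linked onto pgd_list through the lru field of
 * their backing struct page, so that updates to the kernel portion of
 * the page tables can be propagated to every process; see the longer
 * comment further down.
 */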
static inline void pgd_list_add(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);

	list_add(&page->lru, &pgd_list);
}

static inline void pgd_list_del(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);

	list_del(&page->lru);
}
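
/*
 * Number of pgd entries private to each pgd (i.e. not shared with the
 * kernel's reference page tables): only the user portion when the
 * kernel pmd is shared, otherwise the whole pgd.
 */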
#define UNSHARED_PTRS_PER_PGD				\
	(SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
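
/*
 * Initialize a new pgd: copy the kernel portion from swapper_pg_dir
 * where it can simply be shared, and hook the pgd onto pgd_list when
 * the kernel pmd is not shared. Called under pgd_lock from pgd_alloc().
 */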
static void pgd_ctor(pgd_t *pgd)
{
	/* If the pgd points to a shared pagetable level (either the
	   ptes in non-PAE, or shared PMD in PAE), then just copy the
	   references from swapper_pg_dir. */
	if (PAGETABLE_LEVELS == 2 ||
	    (PAGETABLE_LEVELS == 3 && SHARED_KERNEL_PMD) ||
	    PAGETABLE_LEVELS == 4) {
		clone_pgd_range(pgd + KERNEL_PGD_BOUNDARY,
				swapper_pg_dir + KERNEL_PGD_BOUNDARY,
				KERNEL_PGD_PTRS);
		paravirt_alloc_pmd_clone(__pa(pgd) >> PAGE_SHIFT,
					 __pa(swapper_pg_dir) >> PAGE_SHIFT,
					 KERNEL_PGD_BOUNDARY,
					 KERNEL_PGD_PTRS);
	}

	/* list required to sync kernel mapping updates */
	if (!SHARED_KERNEL_PMD)
		pgd_list_add(pgd);
}
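
/*
 * Tear down a pgd: unhook it from pgd_list. Unlike pgd_ctor() this
 * takes pgd_lock itself, as it is not called with the lock held.
 */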
static void pgd_dtor(pgd_t *pgd)
{
	unsigned long flags; /* can be called from interrupt context */

	if (SHARED_KERNEL_PMD)
		return;

	spin_lock_irqsave(&pgd_lock, flags);
	pgd_list_del(pgd);
	spin_unlock_irqrestore(&pgd_lock, flags);
}

/*
 * List of all pgd's needed for non-PAE so it can invalidate entries
 * in both cached and uncached pgd's; not needed for PAE since the
 * kernel pmd is shared. If PAE were not to share the pmd a similar
 * tactic would be needed. This is essentially codepath-based locking
 * against pageattr.c; it is the unique case in which a valid change
 * of kernel pagetables can't be lazily synchronized by vmalloc faults.
 * vmalloc faults work because attached pagetables are never freed.
 * -- wli
 */

#ifdef CONFIG_X86_PAE
/*
 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
 * updating the top-level pagetable entries to guarantee the
 * processor notices the update. Since this is expensive, and
 * all 4 top-level entries are used almost immediately in a
 * new process's life, we just pre-populate them here.
 *
 * Also, if we're in a paravirt environment where the kernel pmd is
 * not shared between pagetables (!SHARED_KERNEL_PMD), we allocate
 * and initialize the kernel pmds here.
 */
#define PREALLOCATED_PMDS	UNSHARED_PTRS_PER_PGD
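
/*
 * Install a pmd page in the PAE top-level page directory (the PDPT),
 * reloading cr3 if the affected mm is currently active; see the
 * comments in the body for why both steps are needed.
 */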
void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
{
	paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);

	/* Note: almost everything apart from _PAGE_PRESENT is
	   reserved at the pmd (PDPT) level. */
	set_pud(pudp, __pud(__pa(pmd) | _PAGE_PRESENT));

	/*
	 * According to Intel App note "TLBs, Paging-Structure Caches,
	 * and Their Invalidation", April 2007, document 317080-001,
	 * section 8.1: in PAE mode we explicitly have to flush the
	 * TLB via cr3 if the top-level pgd is changed...
	 */
	if (mm == current->active_mm)
		write_cr3(read_cr3());
}

#else  /* !CONFIG_X86_PAE */

/* No need to prepopulate any pagetable entries in non-PAE modes. */
#define PREALLOCATED_PMDS	0

#endif	/* CONFIG_X86_PAE */
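
/* Free whatever pmd pages preallocate_pmds() managed to allocate. */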
static void free_pmds(pmd_t *pmds[])
{
	int i;

	for (i = 0; i < PREALLOCATED_PMDS; i++)
		if (pmds[i])
			free_page((unsigned long)pmds[i]);
}
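
/*
 * Allocate one zeroed pmd page per pgd slot that needs prepopulating.
 * On any failure everything allocated so far is freed and -ENOMEM is
 * returned, so the caller sees all-or-nothing behaviour.
 */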
static int preallocate_pmds(pmd_t *pmds[])
{
	int i;
	bool failed = false;

	for (i = 0; i < PREALLOCATED_PMDS; i++) {
		pmd_t *pmd = (pmd_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
		if (pmd == NULL)
			failed = true;
		pmds[i] = pmd;
	}

	if (failed) {
		free_pmds(pmds);
		return -ENOMEM;
	}

	return 0;
}

/*
 * Mop up any pmd pages which may still be attached to the pgd.
 * Normally they will be freed by munmap/exit_mmap, but any pmd we
 * preallocate which never got a corresponding vma will need to be
 * freed manually.
 */
static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
{
	int i;

	for (i = 0; i < PREALLOCATED_PMDS; i++) {
		pgd_t pgd = pgdp[i];

		if (pgd_val(pgd) != 0) {
			pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);

			pgdp[i] = native_make_pgd(0);

			paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
			pmd_free(mm, pmd);
		}
	}
}
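
/*
 * Wire the preallocated pmd pages into a new pgd. Slots at or above
 * KERNEL_PGD_BOUNDARY first get a copy of the kernel's pmd entries
 * from swapper_pg_dir, so the new mm sees the kernel mappings.
 */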
static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
{
	pud_t *pud;
	unsigned long addr;
	int i;

	if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
		return;

	pud = pud_offset(pgd, 0);

	for (addr = i = 0; i < PREALLOCATED_PMDS;
	     i++, pud++, addr += PUD_SIZE) {
		pmd_t *pmd = pmds[i];

		if (i >= KERNEL_PGD_BOUNDARY)
			memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
			       sizeof(pmd_t) * PTRS_PER_PMD);

		pud_populate(mm, pud, pmd);
	}
}
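
/*
 * Allocate and initialize a pgd for @mm: get the pgd page and any
 * preallocated pmds, then populate everything under pgd_lock so that
 * nothing walking pgd_list ever sees a half-initialized pgd.
 */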
pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd;
	pmd_t *pmds[PREALLOCATED_PMDS];
	unsigned long flags;

	pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);

	if (pgd == NULL)
		goto out;

	mm->pgd = pgd;

	if (preallocate_pmds(pmds) != 0)
		goto out_free_pgd;

	if (paravirt_pgd_alloc(mm) != 0)
		goto out_free_pmds;

	/*
	 * Make sure that pre-populating the pmds is atomic with
	 * respect to anything walking the pgd_list, so that they
	 * never see a partially populated pgd.
	 */
	spin_lock_irqsave(&pgd_lock, flags);

	pgd_ctor(pgd);
	pgd_prepopulate_pmd(mm, pgd, pmds);

	spin_unlock_irqrestore(&pgd_lock, flags);

	return pgd;

out_free_pmds:
	free_pmds(pmds);
out_free_pgd:
	free_page((unsigned long)pgd);
out:
	return NULL;
}
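
/*
 * Release a pgd: free any leftover preallocated pmds, drop the pgd
 * from pgd_list, let the paravirt backend clean up, free the page.
 */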
void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	pgd_mop_up_pmds(mm, pgd);
	pgd_dtor(pgd);
	paravirt_pgd_free(mm, pgd);
	free_page((unsigned long)pgd);
}
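
/*
 * Used by the generic fault path to set access/dirty bits in a pte.
 * The pte is rewritten and its TLB entry flushed only if it actually
 * changed and the access was a write; returns whether it changed.
 */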
int ptep_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pte_t *ptep,
			  pte_t entry, int dirty)
{
	int changed = !pte_same(*ptep, entry);

	if (changed && dirty) {
		*ptep = entry;
		pte_update_defer(vma->vm_mm, address, ptep);
		flush_tlb_page(vma, address);
	}

	return changed;
}
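
/*
 * Atomically test and clear the accessed bit of a pte, as used by
 * page reclaim to age mappings. Returns nonzero if the pte was young.
 */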
int ptep_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long addr, pte_t *ptep)
{
	int ret = 0;

	if (pte_young(*ptep))
		ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,
					 (unsigned long *) &ptep->pte);

	if (ret)
		pte_update(vma->vm_mm, addr, ptep);

	return ret;
}
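
/*
 * As above, but also flush the TLB entry when the accessed bit was
 * set, so the hardware will mark the pte young again on the next use.
 */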
int ptep_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pte_t *ptep)
{
	int young;

	young = ptep_test_and_clear_young(vma, address, ptep);
	if (young)
		flush_tlb_page(vma, address);

	return young;
}

/**
 * reserve_top_address - reserves a hole in the top of kernel address space
 * @reserve: size of hole to reserve
 *
 * Can be used to relocate the fixmap area and poke a hole in the top
 * of kernel address space to make room for a hypervisor.
 */
void __init reserve_top_address(unsigned long reserve)
{
#ifdef CONFIG_X86_32
	BUG_ON(fixmaps_set > 0);
	printk(KERN_INFO "Reserving virtual address space above 0x%08x\n",
	       (int)-reserve);
	__FIXADDR_TOP = -reserve - PAGE_SIZE;
	__VMALLOC_RESERVE += reserve;
#endif
}

int fixmaps_set;
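
/*
 * Install a pte in a fixmap slot. The index determines the fixed
 * virtual address; an out-of-range index is a hard bug.
 */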
void __native_set_fixmap(enum fixed_addresses idx, pte_t pte)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	set_pte_vaddr(address, pte);
	fixmaps_set++;
}
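
/*
 * Convenience wrapper: map a physical address into a fixmap slot
 * with the given protection flags.
 */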
void native_set_fixmap(enum fixed_addresses idx, phys_addr_t phys,
		       pgprot_t flags)
{
	__native_set_fixmap(idx, pfn_pte(phys >> PAGE_SHIFT, flags));
}