pageattr.c

/*
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Thanks to Ben LaHaise for precious feedback.
 */
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>
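
/*
 * cpa_lock serialises all attribute changes; df_list collects page-table
 * pages that are no longer needed once a large mapping has been restored,
 * so global_flush_tlb() can free them after the flush.
 */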
static DEFINE_SPINLOCK(cpa_lock);
static struct list_head df_list = LIST_HEAD_INIT(df_list);
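
/*
 * Walk the kernel page tables for a lowmem virtual address. Returns the
 * pte, the pmd itself if the address is mapped by a large page, or NULL
 * if there is no mapping.
 */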
pte_t *lookup_address(unsigned long address)
{
	pgd_t *pgd = pgd_offset_k(address);
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_none(*pgd))
		return NULL;
	pud = pud_offset(pgd, address);
	if (pud_none(*pud))
		return NULL;
	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		return NULL;
	if (pmd_large(*pmd))
		return (pte_t *)pmd;
	return pte_offset_kernel(pmd, address);
}
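
/*
 * Replace one large mapping with a page-table page of 4k entries: the
 * entry for the target address gets 'prot', every other entry in the
 * 2/4MB range keeps 'ref_prot'. cpa_lock is dropped around the
 * allocation because alloc_pages() may sleep.
 */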
static struct page *split_large_page(unsigned long address, pgprot_t prot,
				     pgprot_t ref_prot)
{
	int i;
	unsigned long addr;
	struct page *base;
	pte_t *pbase;

	spin_unlock_irq(&cpa_lock);
	base = alloc_pages(GFP_KERNEL, 0);
	spin_lock_irq(&cpa_lock);
	if (!base)
		return NULL;

	/*
	 * page_private is used to track the number of entries in
	 * the page table page that have non standard attributes.
	 */
	SetPagePrivate(base);
	page_private(base) = 0;

	address = __pa(address);
	addr = address & LARGE_PAGE_MASK;
	pbase = (pte_t *)page_address(base);
	for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) {
		set_pte(&pbase[i], pfn_pte(addr >> PAGE_SHIFT,
					   addr == address ? prot : ref_prot));
	}
	return base;
}
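
/*
 * Run on every CPU by flush_map(): write back the caches and flush the
 * TLBs so no stale translations of the changed pages remain.
 */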
static void flush_kernel_map(void *dummy)
{
	/* Could use CLFLUSH here if the CPU supports it (Hammer, P4) */
	if (boot_cpu_data.x86_model >= 4)
		wbinvd();
	/*
	 * Flush everything to work around errata in early Athlons
	 * regarding large page flushing.
	 */
	__flush_tlb_all();
}
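
/*
 * Set a pmd/pte entry in init_mm. Without PAE (PTRS_PER_PMD == 1) the pmd
 * level lives inside each process pgd, so the change also has to be
 * propagated to every pgd on pgd_list.
 */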
static void set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
{
	struct page *page;
	unsigned long flags;

	set_pte_atomic(kpte, pte);		/* change init_mm */
	if (PTRS_PER_PMD > 1)
		return;

	spin_lock_irqsave(&pgd_lock, flags);
	for (page = pgd_list; page; page = (struct page *)page->index) {
		pgd_t *pgd;
		pud_t *pud;
		pmd_t *pmd;

		pgd = (pgd_t *)page_address(page) + pgd_index(address);
		pud = pud_offset(pgd, address);
		pmd = pmd_offset(pud, address);
		set_pte_atomic((pte_t *)pmd, pte);
	}
	spin_unlock_irqrestore(&pgd_lock, flags);
}

/*
 * No more special protections in this 2/4MB area - revert to a
 * large page again.
 */
static inline void revert_page(struct page *kpte_page, unsigned long address)
{
	pgprot_t ref_prot;
	pte_t *linear;

	ref_prot = ((address & LARGE_PAGE_MASK) < (unsigned long)&_etext)
		? PAGE_KERNEL_LARGE_EXEC : PAGE_KERNEL_LARGE;
	linear = (pte_t *)
		pmd_offset(pud_offset(pgd_offset_k(address), address), address);
	set_pmd_pte(linear, address,
		    pfn_pte((__pa(address) & LARGE_PAGE_MASK) >> PAGE_SHIFT,
			    ref_prot));
}
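
/*
 * Core of change_page_attr(), called with cpa_lock held for a single
 * lowmem page: split a covering large page if necessary, track the number
 * of non-standard entries in the page-table page via page_private(), and
 * queue the page-table page for reversion to a large mapping once that
 * count drops back to zero.
 */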
static int
__change_page_attr(struct page *page, pgprot_t prot)
{
	pte_t *kpte;
	unsigned long address;
	struct page *kpte_page;

	BUG_ON(PageHighMem(page));
	address = (unsigned long)page_address(page);

	kpte = lookup_address(address);
	if (!kpte)
		return -EINVAL;
	kpte_page = virt_to_page(kpte);
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL)) {
		if ((pte_val(*kpte) & _PAGE_PSE) == 0) {
			set_pte_atomic(kpte, mk_pte(page, prot));
		} else {
			pgprot_t ref_prot;
			struct page *split;

			ref_prot = ((address & LARGE_PAGE_MASK) < (unsigned long)&_etext)
				? PAGE_KERNEL_EXEC : PAGE_KERNEL;
			split = split_large_page(address, prot, ref_prot);
			if (!split)
				return -ENOMEM;
			set_pmd_pte(kpte, address, mk_pte(split, ref_prot));
			kpte_page = split;
		}
		page_private(kpte_page)++;
	} else if ((pte_val(*kpte) & _PAGE_PSE) == 0) {
		set_pte_atomic(kpte, mk_pte(page, PAGE_KERNEL));
		BUG_ON(page_private(kpte_page) == 0);
		page_private(kpte_page)--;
	} else
		BUG();

	/*
	 * If the pte was reserved, it means it was created at boot
	 * time (not via split_large_page) and in turn we must not
	 * replace it with a largepage.
	 */
	if (!PageReserved(kpte_page)) {
		if (cpu_has_pse && (page_private(kpte_page) == 0)) {
			ClearPagePrivate(kpte_page);
			list_add(&kpte_page->lru, &df_list);
			revert_page(kpte_page, address);
		}
	}
	return 0;
}
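
/* Flush caches and TLBs on every CPU after attribute changes. */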
static inline void flush_map(void)
{
	on_each_cpu(flush_kernel_map, NULL, 1, 1);
}

/*
 * Change the page attributes of a page in the linear mapping.
 *
 * This should be used when a page is mapped with a different caching policy
 * than write-back somewhere - some CPUs do not like it when mappings with
 * different caching policies exist. This changes the page attributes of the
 * in-kernel linear mapping too.
 *
 * The caller needs to ensure that there are no conflicting mappings elsewhere.
 * This function only deals with the kernel linear map.
 *
 * Caller must call global_flush_tlb() after this.
 */
int change_page_attr(struct page *page, int numpages, pgprot_t prot)
{
	int err = 0;
	int i;
	unsigned long flags;

	spin_lock_irqsave(&cpa_lock, flags);
	for (i = 0; i < numpages; i++, page++) {
		err = __change_page_attr(page, prot);
		if (err)
			break;
	}
	spin_unlock_irqrestore(&cpa_lock, flags);
	return err;
}
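
/*
 * Typical usage, as required by the comment above (a sketch only;
 * PAGE_KERNEL_NOCACHE is just one example protection and is not used in
 * this file):
 *
 *	change_page_attr(page, 1, PAGE_KERNEL_NOCACHE);
 *	global_flush_tlb();
 *
 * global_flush_tlb() flushes all CPUs and then frees the page-table pages
 * that __change_page_attr() queued on df_list.
 */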
void global_flush_tlb(void)
{
	struct list_head l;
	struct page *pg, *next;

	BUG_ON(irqs_disabled());

	spin_lock_irq(&cpa_lock);
	list_replace_init(&df_list, &l);
	spin_unlock_irq(&cpa_lock);
	flush_map();
	list_for_each_entry_safe(pg, next, &l, lru)
		__free_page(pg);
}

#ifdef CONFIG_DEBUG_PAGEALLOC
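/*
 * For CONFIG_DEBUG_PAGEALLOC: map or unmap pages in the kernel linear
 * mapping so that stray accesses to freed pages fault immediately.
 */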
void kernel_map_pages(struct page *page, int numpages, int enable)
{
	if (PageHighMem(page))
		return;
	if (!enable)
		debug_check_no_locks_freed(page_address(page),
					   numpages * PAGE_SIZE);

	/*
	 * The return value is ignored - these calls cannot fail because
	 * large pages are disabled at boot time.
	 */
	change_page_attr(page, numpages, enable ? PAGE_KERNEL : __pgprot(0));
	/*
	 * We should send an IPI and flush all TLBs, but that can deadlock,
	 * so flush only the current CPU.
	 */
	__flush_tlb_all();
}
#endif

EXPORT_SYMBOL(change_page_attr);
EXPORT_SYMBOL(global_flush_tlb);