pageattr_32.c

/*
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Thanks to Ben LaHaise for precious feedback.
 */
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>

#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
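
/*
 * cpa_lock serializes all page attribute changes. df_list collects the
 * page table pages touched by those changes so that the deferred flush
 * in global_flush_tlb() can flush them and free the ones whose large
 * page has been restored.
 */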
static DEFINE_SPINLOCK(cpa_lock);
static struct list_head df_list = LIST_HEAD_INIT(df_list);
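
/*
 * Walk the kernel page tables for a virtual address. Returns a pointer
 * to the pte (or to the pmd, cast to a pte_t *, for a large page) and
 * sets *level to 2 for a large-page pmd or 3 for a 4k pte; returns NULL
 * if no mapping exists.
 */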
pte_t *lookup_address(unsigned long address, int *level)
{
        pgd_t *pgd = pgd_offset_k(address);
        pud_t *pud;
        pmd_t *pmd;

        if (pgd_none(*pgd))
                return NULL;
        pud = pud_offset(pgd, address);
        if (pud_none(*pud))
                return NULL;
        pmd = pmd_offset(pud, address);
        if (pmd_none(*pmd))
                return NULL;
        *level = 2;
        if (pmd_large(*pmd))
                return (pte_t *)pmd;
        *level = 3;

        return pte_offset_kernel(pmd, address);
}
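
/*
 * Split one 2/4MB kernel mapping into PTRS_PER_PTE 4k entries. The entry
 * covering @address gets @prot, all others get @ref_prot. cpa_lock is
 * dropped around the allocation because alloc_pages() may sleep.
 */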
static struct page *
split_large_page(unsigned long address, pgprot_t prot, pgprot_t ref_prot)
{
        unsigned long addr;
        struct page *base;
        pte_t *pbase;
        int i;

        spin_unlock_irq(&cpa_lock);
        base = alloc_pages(GFP_KERNEL, 0);
        spin_lock_irq(&cpa_lock);
        if (!base)
                return NULL;

        /*
         * page_private is used to track the number of entries in
         * the page table page that have non-standard attributes.
         */
        SetPagePrivate(base);
        page_private(base) = 0;

        address = __pa(address);
        addr = address & LARGE_PAGE_MASK;
        pbase = (pte_t *)page_address(base);
        paravirt_alloc_pt(&init_mm, page_to_pfn(base));

        for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) {
                set_pte(&pbase[i], pfn_pte(addr >> PAGE_SHIFT,
                                           addr == address ? prot : ref_prot));
        }
        return base;
}
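
/* Flush one page out of the CPU caches, one cache line at a time. */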
static void cache_flush_page(struct page *p)
{
        void *addr = page_address(p);
        int i;

        for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
                clflush(addr + i);
}

static void flush_kernel_map(void *arg)
{
        struct list_head *lh = (struct list_head *)arg;
        struct page *p;

        /* High level code is not ready for clflush yet */
        if (0 && cpu_has_clflush) {
                list_for_each_entry(p, lh, lru)
                        cache_flush_page(p);
        } else {
                if (boot_cpu_data.x86_model >= 4)
                        wbinvd();
        }

        /*
         * Flush all TLB entries to work around an erratum in early
         * Athlons regarding large page flushing.
         */
        __flush_tlb_all();
}
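
/*
 * Set a kernel pte/pmd entry. Without a shared kernel pmd the change
 * must also be propagated to the pmd of every pgd in the system, hence
 * the walk of pgd_list under pgd_lock.
 */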
static void set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
{
        unsigned long flags;
        struct page *page;

        /* change init_mm */
        set_pte_atomic(kpte, pte);
        if (SHARED_KERNEL_PMD)
                return;

        spin_lock_irqsave(&pgd_lock, flags);
        for (page = pgd_list; page; page = (struct page *)page->index) {
                pgd_t *pgd;
                pud_t *pud;
                pmd_t *pmd;

                pgd = (pgd_t *)page_address(page) + pgd_index(address);
                pud = pud_offset(pgd, address);
                pmd = pmd_offset(pud, address);
                set_pte_atomic((pte_t *)pmd, pte);
        }
        spin_unlock_irqrestore(&pgd_lock, flags);
}

/*
 * No more special protections in this 2/4MB area - revert to a large
 * page again.
 */
static inline void revert_page(struct page *kpte_page, unsigned long address)
{
        pgprot_t ref_prot;
        pte_t *linear;

        ref_prot =
        ((address & LARGE_PAGE_MASK) < (unsigned long)&_etext)
                ? PAGE_KERNEL_LARGE_EXEC : PAGE_KERNEL_LARGE;

        linear = (pte_t *)
                pmd_offset(pud_offset(pgd_offset_k(address), address), address);
        set_pmd_pte(linear, address,
                    pfn_pte((__pa(address) & LARGE_PAGE_MASK) >> PAGE_SHIFT,
                            ref_prot));
}
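
/*
 * Queue a page table page for the next global_flush_tlb(); PG_arch_1
 * guards against queueing the same page twice.
 */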
static inline void save_page(struct page *kpte_page)
{
        if (!test_and_set_bit(PG_arch_1, &kpte_page->flags))
                list_add(&kpte_page->lru, &df_list);
}
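
/*
 * Change the attributes of a single page in the kernel linear mapping.
 * Setting a non-default protection splits the covering large page if
 * necessary; restoring PAGE_KERNEL decrements the count of non-standard
 * entries, and once it hits zero the 2/4MB area is reverted to a large
 * page again.
 */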
static int __change_page_attr(struct page *page, pgprot_t prot)
{
        struct page *kpte_page;
        unsigned long address;
        pte_t *kpte;
        int level;

        BUG_ON(PageHighMem(page));
        address = (unsigned long)page_address(page);

        kpte = lookup_address(address, &level);
        if (!kpte)
                return -EINVAL;

        kpte_page = virt_to_page(kpte);
        BUG_ON(PageLRU(kpte_page));
        BUG_ON(PageCompound(kpte_page));

        if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL)) {
                if (!pte_huge(*kpte)) {
                        set_pte_atomic(kpte, mk_pte(page, prot));
                } else {
                        struct page *split;
                        pgprot_t ref_prot;

                        ref_prot =
                        ((address & LARGE_PAGE_MASK) < (unsigned long)&_etext)
                                ? PAGE_KERNEL_EXEC : PAGE_KERNEL;
                        split = split_large_page(address, prot, ref_prot);
                        if (!split)
                                return -ENOMEM;

                        set_pmd_pte(kpte, address, mk_pte(split, ref_prot));
                        kpte_page = split;
                }
                page_private(kpte_page)++;
        } else {
                if (!pte_huge(*kpte)) {
                        set_pte_atomic(kpte, mk_pte(page, PAGE_KERNEL));
                        BUG_ON(page_private(kpte_page) == 0);
                        page_private(kpte_page)--;
                } else
                        BUG();
        }

        /*
         * If the pte was reserved, it means it was created at boot
         * time (not via split_large_page) and in turn we must not
         * replace it with a large page.
         */
        save_page(kpte_page);
        if (!PageReserved(kpte_page)) {
                if (cpu_has_pse && (page_private(kpte_page) == 0)) {
                        paravirt_release_pt(page_to_pfn(kpte_page));
                        revert_page(kpte_page, address);
                }
        }
        return 0;
}
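
/* Run flush_kernel_map() on every CPU and wait for all of them. */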
static inline void flush_map(struct list_head *l)
{
        on_each_cpu(flush_kernel_map, l, 1, 1);
}

/*
 * Change the page attributes of a page in the linear mapping.
 *
 * This should be used when a page is mapped with a different caching policy
 * than write-back somewhere - some CPUs do not like it when mappings with
 * different caching policies exist. This changes the page attributes of the
 * in-kernel linear mapping too.
 *
 * The caller needs to ensure that there are no conflicting mappings elsewhere.
 * This function only deals with the kernel linear map.
 *
 * Caller must call global_flush_tlb() after this.
 */
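
/*
 * A typical call sequence (hypothetical caller; PAGE_KERNEL_NOCACHE just
 * stands in for whatever pgprot the caller actually needs):
 *
 *        err = change_page_attr(page, 1, PAGE_KERNEL_NOCACHE);
 *        if (!err)
 *                global_flush_tlb();
 */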
int change_page_attr(struct page *page, int numpages, pgprot_t prot)
{
        unsigned long flags;
        int err = 0, i;

        spin_lock_irqsave(&cpa_lock, flags);
        for (i = 0; i < numpages; i++, page++) {
                err = __change_page_attr(page, prot);
                if (err)
                        break;
        }
        spin_unlock_irqrestore(&cpa_lock, flags);

        return err;
}
EXPORT_SYMBOL(change_page_attr);
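
/*
 * Process the deferred list: run the TLB/cache flush on all CPUs, then
 * free the split page table pages whose large page has been restored.
 */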
void global_flush_tlb(void)
{
        struct page *pg, *next;
        struct list_head l;

        BUG_ON(irqs_disabled());

        spin_lock_irq(&cpa_lock);
        list_replace_init(&df_list, &l);
        spin_unlock_irq(&cpa_lock);
        flush_map(&l);
        list_for_each_entry_safe(pg, next, &l, lru) {
                list_del(&pg->lru);
                clear_bit(PG_arch_1, &pg->flags);
                if (PageReserved(pg) || !cpu_has_pse || page_private(pg) != 0)
                        continue;
                ClearPagePrivate(pg);
                __free_page(pg);
        }
}
EXPORT_SYMBOL(global_flush_tlb);

#ifdef CONFIG_DEBUG_PAGEALLOC
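/*
 * For CONFIG_DEBUG_PAGEALLOC: map freed pages out of (and allocated
 * pages back into) the kernel linear mapping, so that stray accesses
 * to freed memory fault immediately.
 */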
void kernel_map_pages(struct page *page, int numpages, int enable)
{
        if (PageHighMem(page))
                return;
        if (!enable) {
                debug_check_no_locks_freed(page_address(page),
                                           numpages * PAGE_SIZE);
        }

        /*
         * The return value is ignored - the calls cannot fail,
         * large pages are disabled at boot time.
         */
        change_page_attr(page, numpages, enable ? PAGE_KERNEL : __pgprot(0));

        /*
         * We should perform an IPI and flush all TLBs,
         * but that can deadlock -> flush only the current CPU.
         */
        __flush_tlb_all();
}
#endif