pageattr.c

/*
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Thanks to Ben LaHaise for precious feedback.
 */

#include <linux/config.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>
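
/*
 * cpa_lock serializes all page attribute changes.  df_list collects
 * page-table pages whose large mapping has been restored; they are
 * freed in global_flush_tlb() once the TLBs have been flushed.
 */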
static DEFINE_SPINLOCK(cpa_lock);
static struct list_head df_list = LIST_HEAD_INIT(df_list);
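
/*
 * Walk the kernel page tables for a virtual address.  Returns the pte
 * for the address, the pmd cast to a pte for a large-page mapping, or
 * NULL if no mapping exists.
 */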
pte_t *lookup_address(unsigned long address)
{
        pgd_t *pgd = pgd_offset_k(address);
        pud_t *pud;
        pmd_t *pmd;

        if (pgd_none(*pgd))
                return NULL;
        pud = pud_offset(pgd, address);
        if (pud_none(*pud))
                return NULL;
        pmd = pmd_offset(pud, address);
        if (pmd_none(*pmd))
                return NULL;
        if (pmd_large(*pmd))
                return (pte_t *)pmd;
        return pte_offset_kernel(pmd, address);
}
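
/*
 * Replace one 2/4MB mapping by a page table covering the same range
 * with 4KB ptes: the page at 'address' gets 'prot', all others keep
 * 'ref_prot'.  cpa_lock is dropped around the allocation because
 * alloc_pages(GFP_KERNEL, ...) may sleep.
 */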
static struct page *split_large_page(unsigned long address, pgprot_t prot,
                                     pgprot_t ref_prot)
{
        int i;
        unsigned long addr;
        struct page *base;
        pte_t *pbase;

        spin_unlock_irq(&cpa_lock);
        base = alloc_pages(GFP_KERNEL, 0);
        spin_lock_irq(&cpa_lock);
        if (!base)
                return NULL;

        /*
         * page_private is used to track the number of entries in
         * the page table page that have non-standard attributes.
         */
        SetPagePrivate(base);
        page_private(base) = 0;

        address = __pa(address);
        addr = address & LARGE_PAGE_MASK;
        pbase = (pte_t *)page_address(base);
        for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) {
                set_pte(&pbase[i], pfn_pte(addr >> PAGE_SHIFT,
                                           addr == address ? prot : ref_prot));
        }
        return base;
}
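
/* Runs on every CPU via on_each_cpu() from flush_map(). */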
static void flush_kernel_map(void *dummy)
{
        /* Could use CLFLUSH here if the CPU supports it (Hammer, P4) */
        if (boot_cpu_data.x86_model >= 4)
                wbinvd();
        /* Flush everything to work around an erratum in early Athlons
         * regarding large-page flushing.
         */
        __flush_tlb_all();
}
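
/*
 * Update a kernel pmd/pte entry.  With PAE (PTRS_PER_PMD > 1) the
 * kernel pmds are shared, so updating init_mm is enough.  Without PAE
 * every pgd carries its own copy of the kernel entries, so the change
 * must be propagated to all pgds on pgd_list.
 */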
static void set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
{
        struct page *page;
        unsigned long flags;

        set_pte_atomic(kpte, pte);      /* change init_mm */
        if (PTRS_PER_PMD > 1)
                return;

        spin_lock_irqsave(&pgd_lock, flags);
        for (page = pgd_list; page; page = (struct page *)page->index) {
                pgd_t *pgd;
                pud_t *pud;
                pmd_t *pmd;

                pgd = (pgd_t *)page_address(page) + pgd_index(address);
                pud = pud_offset(pgd, address);
                pmd = pmd_offset(pud, address);
                set_pte_atomic((pte_t *)pmd, pte);
        }
        spin_unlock_irqrestore(&pgd_lock, flags);
}

/*
 * No more special protections in this 2/4MB area - revert to a
 * large page again.
 */
static inline void revert_page(struct page *kpte_page, unsigned long address)
{
        pgprot_t ref_prot;
        pte_t *linear;

        ref_prot =
        ((address & LARGE_PAGE_MASK) < (unsigned long)&_etext)
                ? PAGE_KERNEL_LARGE_EXEC : PAGE_KERNEL_LARGE;

        linear = (pte_t *)
                pmd_offset(pud_offset(pgd_offset_k(address), address), address);
        set_pmd_pte(linear, address,
                    pfn_pte((__pa(address) & LARGE_PAGE_MASK) >> PAGE_SHIFT,
                            ref_prot));
}
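
/*
 * Set 'prot' on the pte that maps 'page' in the linear mapping.  A
 * covering large page is first split via split_large_page(); the count
 * of non-standard entries is kept in page_private() of the page-table
 * page, and when it drops back to zero the large mapping is restored
 * and the now-unused page table is queued on df_list for freeing.
 */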
static int
__change_page_attr(struct page *page, pgprot_t prot)
{
        pte_t *kpte;
        unsigned long address;
        struct page *kpte_page;

        BUG_ON(PageHighMem(page));
        address = (unsigned long)page_address(page);

        kpte = lookup_address(address);
        if (!kpte)
                return -EINVAL;
        kpte_page = virt_to_page(kpte);
        if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL)) {
                if ((pte_val(*kpte) & _PAGE_PSE) == 0) {
                        set_pte_atomic(kpte, mk_pte(page, prot));
                } else {
                        pgprot_t ref_prot;
                        struct page *split;

                        ref_prot =
                        ((address & LARGE_PAGE_MASK) < (unsigned long)&_etext)
                                ? PAGE_KERNEL_EXEC : PAGE_KERNEL;
                        split = split_large_page(address, prot, ref_prot);
                        if (!split)
                                return -ENOMEM;
                        set_pmd_pte(kpte, address, mk_pte(split, ref_prot));
                        kpte_page = split;
                }
                page_private(kpte_page)++;
        } else if ((pte_val(*kpte) & _PAGE_PSE) == 0) {
                set_pte_atomic(kpte, mk_pte(page, PAGE_KERNEL));
                BUG_ON(page_private(kpte_page) == 0);
                page_private(kpte_page)--;
        } else
                BUG();

        /*
         * If the pte was reserved, it means it was created at boot
         * time (not via split_large_page) and in turn we must not
         * replace it with a large page.
         */
        if (!PageReserved(kpte_page)) {
                if (cpu_has_pse && (page_private(kpte_page) == 0)) {
                        ClearPagePrivate(kpte_page);
                        list_add(&kpte_page->lru, &df_list);
                        revert_page(kpte_page, address);
                }
        }
        return 0;
}
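
/* Make the attribute changes visible on all CPUs. */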
static inline void flush_map(void)
{
        on_each_cpu(flush_kernel_map, NULL, 1, 1);
}

/*
 * Change the page attributes of a page in the linear mapping.
 *
 * This should be used when a page is mapped with a different caching policy
 * than write-back somewhere - some CPUs do not like it when mappings with
 * different caching policies exist.  This changes the page attributes of the
 * kernel linear mapping too.
 *
 * The caller needs to ensure that there are no conflicting mappings elsewhere.
 * This function only deals with the kernel linear map.
 *
 * Caller must call global_flush_tlb() after this.
 */
int change_page_attr(struct page *page, int numpages, pgprot_t prot)
{
        int err = 0;
        int i;
        unsigned long flags;

        spin_lock_irqsave(&cpa_lock, flags);
        for (i = 0; i < numpages; i++, page++) {
                err = __change_page_attr(page, prot);
                if (err)
                        break;
        }
        spin_unlock_irqrestore(&cpa_lock, flags);
        return err;
}
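
/*
 * Flush all CPUs, then free any page-table pages that were queued on
 * df_list when their large mapping was restored.  Must not be called
 * with interrupts disabled, since flush_map() IPIs all CPUs.
 */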
void global_flush_tlb(void)
{
        LIST_HEAD(l);
        struct page *pg, *next;

        BUG_ON(irqs_disabled());

        spin_lock_irq(&cpa_lock);
        list_splice_init(&df_list, &l);
        spin_unlock_irq(&cpa_lock);
        flush_map();
        list_for_each_entry_safe(pg, next, &l, lru)
                __free_page(pg);
}
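
/*
 * With CONFIG_DEBUG_PAGEALLOC, pages are unmapped from the linear
 * mapping while they are free, so stray references trap immediately.
 */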
#ifdef CONFIG_DEBUG_PAGEALLOC
void kernel_map_pages(struct page *page, int numpages, int enable)
{
        if (PageHighMem(page))
                return;
        if (!enable)
                mutex_debug_check_no_locks_freed(page_address(page),
                                                 numpages * PAGE_SIZE);

        /* The return value is ignored - the calls cannot fail,
         * because large pages are disabled at boot time.
         */
        change_page_attr(page, numpages, enable ? PAGE_KERNEL : __pgprot(0));

        /* We should perform an IPI and flush all TLBs,
         * but that can deadlock, so flush only the current CPU.
         */
        __flush_tlb_all();
}
#endif

EXPORT_SYMBOL(change_page_attr);
EXPORT_SYMBOL(global_flush_tlb);
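
/*
 * Usage sketch (illustrative only, not part of this file): a caller
 * that needs an uncached view of a kernel buffer would pair
 * change_page_attr() with global_flush_tlb() roughly like this.  The
 * function name and buffer are hypothetical.
 */
#if 0
static int example_make_uncached(void *buf, int numpages)
{
        int err;

        /* buf must lie in the kernel linear mapping (no highmem/vmalloc) */
        err = change_page_attr(virt_to_page(buf), numpages,
                               PAGE_KERNEL_NOCACHE);
        if (err)
                return err;
        global_flush_tlb();     /* required after change_page_attr() */
        return 0;
}
#endif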