pageattr_64.c
/*
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Thanks to Ben LaHaise for precious feedback.
 */
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#include <asm/io.h>
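
/*
 * Walk the kernel page tables for @address. Returns a pointer to the
 * pte (or to the pmd, cast to pte_t *, for a 2MB large page) and sets
 * *level to 3 for a large-page mapping or 4 for a regular 4K pte.
 * Returns NULL if no mapping exists.
 */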
pte_t *lookup_address(unsigned long address, int *level)
{
        pgd_t *pgd = pgd_offset_k(address);
        pud_t *pud;
        pmd_t *pmd;

        if (pgd_none(*pgd))
                return NULL;
        pud = pud_offset(pgd, address);
        if (pud_none(*pud))
                return NULL;
        pmd = pmd_offset(pud, address);
        if (pmd_none(*pmd))
                return NULL;
        *level = 3;
        if (pmd_large(*pmd))
                return (pte_t *)pmd;
        *level = 4;
        return pte_offset_kernel(pmd, address);
}
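
/*
 * Replace one 2MB mapping with a freshly allocated page table full of
 * 4K ptes. The pte covering @address gets @prot; all the others
 * inherit @ref_prot. The caller installs the returned page table page
 * into the pmd.
 */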
static struct page *
split_large_page(unsigned long address, pgprot_t prot, pgprot_t ref_prot)
{
        unsigned long addr;
        struct page *base;
        pte_t *pbase;
        int i;

        base = alloc_pages(GFP_KERNEL, 0);
        if (!base)
                return NULL;
        /*
         * page_private is used to track the number of entries in
         * the page table page that have non-standard attributes.
         */
        SetPagePrivate(base);
        page_private(base) = 0;

        address = __pa(address);
        addr = address & LARGE_PAGE_MASK;
        pbase = (pte_t *)page_address(base);
        for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) {
                pbase[i] = pfn_pte(addr >> PAGE_SHIFT,
                                   addr == address ? prot : ref_prot);
        }
        return base;
}
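
/*
 * Flush the cache lines covering [addr, addr + size), stepping by the
 * CPU's reported clflush stride.
 */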
void clflush_cache_range(void *addr, int size)
{
        int i;

        for (i = 0; i < size; i += boot_cpu_data.x86_clflush_size)
                clflush(addr + i);
}
static void flush_kernel_map(void *arg)
{
        struct list_head *l = (struct list_head *)arg;
        struct page *pg;

        __flush_tlb_all();

        /* When clflush is available always use it because it is
           much cheaper than WBINVD. */
        /* clflush is still broken. Disable for now. */
        if (1 || !cpu_has_clflush) {
                wbinvd();
        } else {
                list_for_each_entry(pg, l, lru) {
                        void *addr = page_address(pg);
                        clflush_cache_range(addr, PAGE_SIZE);
                }
        }
}
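
/*
 * Run flush_kernel_map() on every CPU. The two trailing 1s in the old
 * four-argument on_each_cpu() are "retry" and "wait until all CPUs
 * have finished" respectively.
 */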
static inline void flush_map(struct list_head *l)
{
        on_each_cpu(flush_kernel_map, l, 1, 1);
}
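
/*
 * Page table pages touched by an attribute change are queued here
 * until global_flush_tlb() processes them. PG_arch_1 guards against
 * queueing the same page twice.
 */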
static LIST_HEAD(deferred_pages); /* protected by init_mm.mmap_sem */

static inline void save_page(struct page *fpage)
{
        if (!test_and_set_bit(PG_arch_1, &fpage->flags))
                list_add(&fpage->lru, &deferred_pages);
}
/*
 * No more special protections in this 2/4MB area - revert to a
 * large page again.
 */
static void revert_page(unsigned long address, pgprot_t ref_prot)
{
        unsigned long pfn;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t large_pte;

        pgd = pgd_offset_k(address);
        BUG_ON(pgd_none(*pgd));
        pud = pud_offset(pgd, address);
        BUG_ON(pud_none(*pud));
        pmd = pmd_offset(pud, address);
        BUG_ON(pmd_val(*pmd) & _PAGE_PSE);
        pfn = (__pa(address) & LARGE_PAGE_MASK) >> PAGE_SHIFT;
        large_pte = pfn_pte(pfn, ref_prot);
        large_pte = pte_mkhuge(large_pte);
        set_pte((pte_t *)pmd, large_pte);
}
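
/*
 * Apply @prot to the single page at @address. A 2MB mapping is split
 * first if necessary; page_private() of the page table page counts how
 * many ptes in it still carry non-standard attributes, and once that
 * count drops back to zero the large page is restored.
 */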
static int
__change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot,
                   pgprot_t ref_prot)
{
        struct page *kpte_page;
        pgprot_t ref_prot2;
        pte_t *kpte;
        int level;

        kpte = lookup_address(address, &level);
        if (!kpte)
                return 0;

        kpte_page = virt_to_page(kpte);
        BUG_ON(PageLRU(kpte_page));
        BUG_ON(PageCompound(kpte_page));
        if (pgprot_val(prot) != pgprot_val(ref_prot)) {
                if (level == 4) {
                        set_pte(kpte, pfn_pte(pfn, prot));
                } else {
                        /*
                         * split_large_page will take the reference for this
                         * change_page_attr on the split page.
                         */
                        struct page *split;

                        ref_prot2 = pte_pgprot(pte_clrhuge(*kpte));
                        split = split_large_page(address, prot, ref_prot2);
                        if (!split)
                                return -ENOMEM;
                        pgprot_val(ref_prot2) &= ~_PAGE_NX;
                        set_pte(kpte, mk_pte(split, ref_prot2));
                        kpte_page = split;
                }
                page_private(kpte_page)++;
        } else {
                if (level == 4) {
                        set_pte(kpte, pfn_pte(pfn, ref_prot));
                        BUG_ON(page_private(kpte_page) == 0);
                        page_private(kpte_page)--;
                } else
                        BUG();
        }

        /* on x86-64 the direct mapping set at boot is not using 4k pages */
        BUG_ON(PageReserved(kpte_page));

        save_page(kpte_page);
        if (page_private(kpte_page) == 0)
                revert_page(address, ref_prot);
        return 0;
}
/*
 * Change the page attributes of a page in the linear mapping.
 *
 * This should be used when a page is mapped with a different caching policy
 * than write-back somewhere - some CPUs do not like it when mappings with
 * different caching policies exist. This changes the page attributes of the
 * in-kernel linear mapping too.
 *
 * The caller needs to ensure that there are no conflicting mappings elsewhere.
 * This function only deals with the kernel linear map.
 *
 * Caller must call global_flush_tlb() after this.
 */
int change_page_attr_addr(unsigned long address, int numpages, pgprot_t prot)
{
        int err = 0, kernel_map = 0, i;

        if (address >= __START_KERNEL_map &&
            address < __START_KERNEL_map + KERNEL_TEXT_SIZE) {
                address = (unsigned long)__va(__pa(address));
                kernel_map = 1;
        }

        down_write(&init_mm.mmap_sem);
        for (i = 0; i < numpages; i++, address += PAGE_SIZE) {
                unsigned long pfn = __pa(address) >> PAGE_SHIFT;

                if (!kernel_map || pte_present(pfn_pte(0, prot))) {
                        err = __change_page_attr(address, pfn, prot,
                                                 PAGE_KERNEL);
                        if (err)
                                break;
                }
                /* Handle the kernel mapping too, which aliases part of
                 * the lowmem */
                if (__pa(address) < KERNEL_TEXT_SIZE) {
                        unsigned long addr2;
                        pgprot_t prot2;

                        addr2 = __START_KERNEL_map + __pa(address);
                        /* Make sure the kernel mappings stay executable */
                        prot2 = pte_pgprot(pte_mkexec(pfn_pte(0, prot)));
                        err = __change_page_attr(addr2, pfn, prot2,
                                                 PAGE_KERNEL_EXEC);
                }
        }
        up_write(&init_mm.mmap_sem);

        return err;
}
/* Don't call this for MMIO areas that may not have a mem_map entry */
int change_page_attr(struct page *page, int numpages, pgprot_t prot)
{
        unsigned long addr = (unsigned long)page_address(page);

        return change_page_attr_addr(addr, numpages, prot);
}
EXPORT_SYMBOL(change_page_attr);
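
/*
 * Typical usage (a sketch, not taken from this file): mark one page of
 * the linear map read-only, then flush the stale translations:
 *
 *      change_page_attr(page, 1, PAGE_KERNEL_RO);
 *      global_flush_tlb();
 */

/*
 * Flush all deferred attribute changes: detach the deferred_pages list
 * under the semaphore, flush TLBs and caches on every CPU, then free
 * any page table pages whose large page has been fully restored.
 */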
void global_flush_tlb(void)
{
        struct page *pg, *next;
        struct list_head l;

        /*
         * Write-protect the semaphore, to exclude two contexts
         * doing a list_replace_init() call in parallel and to
         * exclude new additions to the deferred_pages list:
         */
        down_write(&init_mm.mmap_sem);
        list_replace_init(&deferred_pages, &l);
        up_write(&init_mm.mmap_sem);

        flush_map(&l);

        list_for_each_entry_safe(pg, next, &l, lru) {
                list_del(&pg->lru);
                clear_bit(PG_arch_1, &pg->flags);
                if (page_private(pg) != 0)
                        continue;
                ClearPagePrivate(pg);
                __free_page(pg);
        }
}
EXPORT_SYMBOL(global_flush_tlb);