pageattr_64.c

/*
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Thanks to Ben LaHaise for precious feedback.
 */
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/io.h>

/*
 * Walk the kernel page tables and return the pte mapping @address,
 * or NULL if the address is not mapped. For a 2MB large page the
 * pmd entry itself is returned, cast to a pte.
 */
pte_t *lookup_address(unsigned long address)
{
        pgd_t *pgd = pgd_offset_k(address);
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        if (pgd_none(*pgd))
                return NULL;
        pud = pud_offset(pgd, address);
        if (!pud_present(*pud))
                return NULL;
        pmd = pmd_offset(pud, address);
        if (!pmd_present(*pmd))
                return NULL;
        if (pmd_large(*pmd))
                return (pte_t *)pmd;
        pte = pte_offset_kernel(pmd, address);
        if (pte && !pte_present(*pte))
                pte = NULL;
        return pte;
}

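/*
 * Usage sketch (hypothetical, not part of the original file): a caller
 * can use lookup_address() to check how a kernel virtual address is
 * currently mapped; pte_huge() distinguishes the 2MB large-page case.
 */
#if 0
static int example_is_mapped_large(unsigned long address)
{
        pte_t *pte = lookup_address(address);

        if (!pte)
                return 0;               /* not mapped */
        return pte_huge(*pte) != 0;     /* 1 if covered by a 2MB mapping */
}
#endif
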
static struct page *split_large_page(unsigned long address, pgprot_t prot,
                                     pgprot_t ref_prot)
{
        int i;
        unsigned long addr;
        struct page *base = alloc_pages(GFP_KERNEL, 0);
        pte_t *pbase;

        if (!base)
                return NULL;
        /*
         * page_private is used to track the number of entries in
         * the page table page that have non-standard attributes.
         */
        SetPagePrivate(base);
        page_private(base) = 0;

        address = __pa(address);
        addr = address & LARGE_PAGE_MASK;
        pbase = (pte_t *)page_address(base);
        for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) {
                pbase[i] = pfn_pte(addr >> PAGE_SHIFT,
                                   addr == address ? prot : ref_prot);
        }
        return base;
}

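/*
 * Worked example (illustration only, assuming 4KB base pages and 2MB
 * large pages, so PTRS_PER_PTE == 512): the single pte that receives
 * @prot in the loop above sits at the page's index within its 2MB frame.
 */
#if 0
static unsigned long example_split_index(unsigned long address)
{
        unsigned long phys = __pa(address);

        /* offset of the 4KB page inside its 2MB large page, in ptes */
        return (phys & ~LARGE_PAGE_MASK) >> PAGE_SHIFT;  /* 0..511 */
}
#endif
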
static void cache_flush_page(void *adr)
{
        int i;

        for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
                asm volatile("clflush (%0)" :: "r" (adr + i));
}

static void flush_kernel_map(void *arg)
{
        struct list_head *l = (struct list_head *)arg;
        struct page *pg;

        /* When clflush is available always use it because it is
           much cheaper than WBINVD. */
        /* clflush is still broken. Disable for now. */
        if (1 || !cpu_has_clflush)
                asm volatile("wbinvd" ::: "memory");
        else list_for_each_entry(pg, l, lru) {
                void *adr = page_address(pg);
                cache_flush_page(adr);
        }
        __flush_tlb_all();
}

static inline void flush_map(struct list_head *l)
{
        on_each_cpu(flush_kernel_map, l, 1, 1);
}

static LIST_HEAD(deferred_pages); /* protected by init_mm.mmap_sem */

static inline void save_page(struct page *fpage)
{
        if (!test_and_set_bit(PG_arch_1, &fpage->flags))
                list_add(&fpage->lru, &deferred_pages);
}

/*
 * No more special protections in this 2/4MB area - revert to a
 * large page again.
 */
static void revert_page(unsigned long address, pgprot_t ref_prot)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t large_pte;
        unsigned long pfn;

        pgd = pgd_offset_k(address);
        BUG_ON(pgd_none(*pgd));
        pud = pud_offset(pgd, address);
        BUG_ON(pud_none(*pud));
        pmd = pmd_offset(pud, address);
        BUG_ON(pmd_val(*pmd) & _PAGE_PSE);
        pfn = (__pa(address) & LARGE_PAGE_MASK) >> PAGE_SHIFT;
        large_pte = pfn_pte(pfn, ref_prot);
        large_pte = pte_mkhuge(large_pte);
        set_pte((pte_t *)pmd, large_pte);
}

/*
 * page_private() on the page-table page counts the ptes with
 * non-standard protections. When it drops back to zero the whole
 * 2MB region reverts to a single large page and the now unused
 * page-table page is freed from the deferred list in
 * global_flush_tlb().
 */
static int
__change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot,
                   pgprot_t ref_prot)
{
        pte_t *kpte;
        struct page *kpte_page;
        pgprot_t ref_prot2;

        kpte = lookup_address(address);
        if (!kpte) return 0;
        kpte_page = virt_to_page(((unsigned long)kpte) & PAGE_MASK);
        BUG_ON(PageLRU(kpte_page));
        BUG_ON(PageCompound(kpte_page));
        if (pgprot_val(prot) != pgprot_val(ref_prot)) {
                if (!pte_huge(*kpte)) {
                        set_pte(kpte, pfn_pte(pfn, prot));
                } else {
                        /*
                         * split_large_page will take the reference for this
                         * change_page_attr on the split page.
                         */
                        struct page *split;

                        ref_prot2 = pte_pgprot(pte_clrhuge(*kpte));
                        split = split_large_page(address, prot, ref_prot2);
                        if (!split)
                                return -ENOMEM;
                        set_pte(kpte, mk_pte(split, ref_prot2));
                        kpte_page = split;
                }
                page_private(kpte_page)++;
        } else if (!pte_huge(*kpte)) {
                set_pte(kpte, pfn_pte(pfn, ref_prot));
                BUG_ON(page_private(kpte_page) == 0);
                page_private(kpte_page)--;
        } else
                BUG();

        /* on x86-64 the direct mapping set at boot is not using 4k pages */
        BUG_ON(PageReserved(kpte_page));

        save_page(kpte_page);
        if (page_private(kpte_page) == 0)
                revert_page(address, ref_prot);
        return 0;
}

/*
 * Change the page attributes of a page in the linear mapping.
 *
 * This should be used when a page is mapped with a different caching policy
 * than write-back somewhere - some CPUs do not like it when mappings with
 * different caching policies exist. This changes the page attributes of the
 * in-kernel linear mapping too.
 *
 * The caller needs to ensure that there are no conflicting mappings elsewhere.
 * This function only deals with the kernel linear map.
 *
 * Caller must call global_flush_tlb() after this; a usage sketch appears
 * at the end of this file.
 */
int change_page_attr_addr(unsigned long address, int numpages, pgprot_t prot)
{
        int err = 0, kernel_map = 0;
        int i;

        if (address >= __START_KERNEL_map
            && address < __START_KERNEL_map + KERNEL_TEXT_SIZE) {
                address = (unsigned long)__va(__pa(address));
                kernel_map = 1;
        }

        down_write(&init_mm.mmap_sem);
        for (i = 0; i < numpages; i++, address += PAGE_SIZE) {
                unsigned long pfn = __pa(address) >> PAGE_SHIFT;

                if (!kernel_map || pte_present(pfn_pte(0, prot))) {
                        err = __change_page_attr(address, pfn, prot, PAGE_KERNEL);
                        if (err)
                                break;
                }
                /* Handle the kernel mapping too, which aliases part of
                 * the lowmem */
                if (__pa(address) < KERNEL_TEXT_SIZE) {
                        unsigned long addr2;
                        pgprot_t prot2;

                        addr2 = __START_KERNEL_map + __pa(address);
                        /* Make sure the kernel mappings stay executable */
                        prot2 = pte_pgprot(pte_mkexec(pfn_pte(0, prot)));
                        err = __change_page_attr(addr2, pfn, prot2,
                                                 PAGE_KERNEL_EXEC);
                }
        }
        up_write(&init_mm.mmap_sem);
        return err;
}

/* Don't call this for MMIO areas that may not have a mem_map entry */
int change_page_attr(struct page *page, int numpages, pgprot_t prot)
{
        unsigned long addr = (unsigned long)page_address(page);

        return change_page_attr_addr(addr, numpages, prot);
}

void global_flush_tlb(void)
{
        struct page *pg, *next;
        struct list_head l;

        down_read(&init_mm.mmap_sem);
        list_replace_init(&deferred_pages, &l);
        up_read(&init_mm.mmap_sem);

        flush_map(&l);

        list_for_each_entry_safe(pg, next, &l, lru) {
                list_del(&pg->lru);
                clear_bit(PG_arch_1, &pg->flags);
                if (page_private(pg) != 0)
                        continue;
                ClearPagePrivate(pg);
                __free_page(pg);
        }
}

EXPORT_SYMBOL(change_page_attr);
EXPORT_SYMBOL(global_flush_tlb);
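
/*
 * Usage sketch (hypothetical, not part of the original file): a driver
 * wanting an uncached view of a page it owns could change the linear
 * mapping to PAGE_KERNEL_NOCACHE and, as required by the comment above
 * change_page_attr_addr(), follow up with global_flush_tlb().
 */
#if 0
static int example_make_uncached(struct page *page)
{
        int err = change_page_attr(page, 1, PAGE_KERNEL_NOCACHE);

        if (!err)
                global_flush_tlb();
        return err;
}

static void example_restore_cached(struct page *page)
{
        change_page_attr(page, 1, PAGE_KERNEL); /* back to write-back */
        global_flush_tlb();
}
#endif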