pageattr.c

/*
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Thanks to Ben LaHaise for precious feedback.
 */
#include <linux/config.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/io.h>
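
/*
 * Walk the kernel page tables for @address. Returns the pte for a 4k
 * mapping, the pmd cast to a pte for a 2MB large page, or NULL when
 * the address is not mapped.
 */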
static inline pte_t *lookup_address(unsigned long address)
{
	pgd_t *pgd = pgd_offset_k(address);
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (pgd_none(*pgd))
		return NULL;
	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		return NULL;
	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return NULL;
	if (pmd_large(*pmd))
		return (pte_t *)pmd;
	pte = pte_offset_kernel(pmd, address);
	if (pte && !pte_present(*pte))
		pte = NULL;
	return pte;
}
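
/*
 * Split one 2MB kernel mapping into a page of 512 4k ptes: the pte
 * covering @address gets @prot, all the others get @ref_prot. Returns
 * the new page table page, or NULL if the allocation fails.
 */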
static struct page *split_large_page(unsigned long address, pgprot_t prot,
				     pgprot_t ref_prot)
{
	int i;
	unsigned long addr;
	struct page *base = alloc_pages(GFP_KERNEL, 0);
	pte_t *pbase;

	if (!base)
		return NULL;
	/*
	 * page_private is used to track the number of entries in
	 * the page table page that have non-standard attributes.
	 */
	SetPagePrivate(base);
	page_private(base) = 0;

	address = __pa(address);
	addr = address & LARGE_PAGE_MASK;
	pbase = (pte_t *)page_address(base);
	for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) {
		pbase[i] = pfn_pte(addr >> PAGE_SHIFT,
				   addr == address ? prot : ref_prot);
	}
	return base;
}
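
/*
 * Flush the caches and the TLB for one address, or for everything when
 * called with a NULL address. The clflush path is currently disabled
 * (the "0 &&" below), so the cache flush is always a full wbinvd.
 */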
static void flush_kernel_map(void *address)
{
	if (0 && address && cpu_has_clflush) {
		/* is this worth it? */
		int i;
		for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
			asm volatile("clflush (%0)" :: "r" (address + i));
	} else
		asm volatile("wbinvd":::"memory");
	if (address)
		__flush_tlb_one(address);
	else
		__flush_tlb_all();
}

static inline void flush_map(unsigned long address)
{
	on_each_cpu(flush_kernel_map, (void *)address, 1, 1);
}

static struct page *deferred_pages; /* protected by init_mm.mmap_sem */
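
/*
 * Queue a page table page that is no longer needed; it is freed in
 * global_flush_tlb() once the TLB has been flushed on all CPUs.
 */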
static inline void save_page(struct page *fpage)
{
	fpage->lru.next = (struct list_head *)deferred_pages;
	deferred_pages = fpage;
}

/*
 * No more special protections in this 2/4MB area - revert to a
 * large page again.
 */
static void revert_page(unsigned long address, pgprot_t ref_prot)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t large_pte;

	pgd = pgd_offset_k(address);
	BUG_ON(pgd_none(*pgd));
	pud = pud_offset(pgd, address);
	BUG_ON(pud_none(*pud));
	pmd = pmd_offset(pud, address);
	BUG_ON(pmd_val(*pmd) & _PAGE_PSE);
	pgprot_val(ref_prot) |= _PAGE_PSE;
	large_pte = mk_pte_phys(__pa(address) & LARGE_PAGE_MASK, ref_prot);
	set_pte((pte_t *)pmd, large_pte);
}
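
/*
 * Change the attributes of a single kernel page. If the page still
 * sits in a large mapping it is split first; when the last
 * non-standard pte in a split page reverts to @ref_prot, the page
 * table page is queued for freeing and the large mapping is restored.
 */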
static int
__change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot,
		   pgprot_t ref_prot)
{
	pte_t *kpte;
	struct page *kpte_page;
	unsigned kpte_flags;
	pgprot_t ref_prot2;

	kpte = lookup_address(address);
	if (!kpte)
		return 0;
	kpte_page = virt_to_page(((unsigned long)kpte) & PAGE_MASK);
	kpte_flags = pte_val(*kpte);
	if (pgprot_val(prot) != pgprot_val(ref_prot)) {
		if ((kpte_flags & _PAGE_PSE) == 0) {
			set_pte(kpte, pfn_pte(pfn, prot));
		} else {
			/*
			 * split_large_page will take the reference for this
			 * change_page_attr on the split page.
			 */
			struct page *split;
			ref_prot2 = __pgprot(pgprot_val(pte_pgprot(*kpte)) &
					     ~(1 << _PAGE_BIT_PSE));
			split = split_large_page(address, prot, ref_prot2);
			if (!split)
				return -ENOMEM;
			set_pte(kpte, mk_pte(split, ref_prot2));
			kpte_page = split;
		}
		page_private(kpte_page)++;
	} else if ((kpte_flags & _PAGE_PSE) == 0) {
		set_pte(kpte, pfn_pte(pfn, ref_prot));
		BUG_ON(page_private(kpte_page) == 0);
		page_private(kpte_page)--;
	} else
		BUG();

	/* on x86-64 the direct mapping set at boot does not use 4k pages */
	BUG_ON(PageReserved(kpte_page));

	if (page_private(kpte_page) == 0) {
		save_page(kpte_page);
		revert_page(address, ref_prot);
	}
	return 0;
}

/*
 * Change the page attributes of a page in the kernel linear mapping.
 *
 * This should be used when a page is mapped with a different caching policy
 * than write-back somewhere - some CPUs do not like it when mappings with
 * different caching policies exist. This also changes the page attributes
 * of the kernel linear mapping.
 *
 * The caller needs to ensure that there are no conflicting mappings elsewhere.
 * This function only deals with the kernel linear map.
 *
 * Caller must call global_flush_tlb() after this.
 */
int change_page_attr_addr(unsigned long address, int numpages, pgprot_t prot)
{
	int err = 0;
	int i;

	down_write(&init_mm.mmap_sem);
	for (i = 0; i < numpages; i++, address += PAGE_SIZE) {
		unsigned long pfn = __pa(address) >> PAGE_SHIFT;

		err = __change_page_attr(address, pfn, prot, PAGE_KERNEL);
		if (err)
			break;
		/* Handle the kernel text mapping too; it aliases part of
		 * lowmem */
		if (__pa(address) < KERNEL_TEXT_SIZE) {
			unsigned long addr2;
			pgprot_t prot2 = prot;

			addr2 = __START_KERNEL_map + __pa(address);
			pgprot_val(prot2) &= ~_PAGE_NX;
			err = __change_page_attr(addr2, pfn, prot2,
						 PAGE_KERNEL_EXEC);
		}
	}
	up_write(&init_mm.mmap_sem);
	return err;
}

/* Don't call this for MMIO areas that may not have a mem_map entry */
int change_page_attr(struct page *page, int numpages, pgprot_t prot)
{
	unsigned long addr = (unsigned long)page_address(page);
	return change_page_attr_addr(addr, numpages, prot);
}
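
/*
 * Flush the caches and TLBs on all CPUs and free the page table pages
 * queued by save_page(). A single queued page gets a targeted flush;
 * more than one flushes everything.
 */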
void global_flush_tlb(void)
{
	struct page *dpage;

	down_read(&init_mm.mmap_sem);
	dpage = xchg(&deferred_pages, NULL);
	up_read(&init_mm.mmap_sem);

	flush_map((dpage && !dpage->lru.next) ?
		  (unsigned long)page_address(dpage) : 0);
	while (dpage) {
		struct page *tmp = dpage;
		dpage = (struct page *)dpage->lru.next;
		ClearPagePrivate(tmp);
		__free_page(tmp);
	}
}

EXPORT_SYMBOL(change_page_attr);
EXPORT_SYMBOL(global_flush_tlb);
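
/*
 * Typical usage (a sketch, not part of this file): a driver that maps
 * a page uncached elsewhere changes the kernel linear mapping to match
 * and then flushes, e.g.:
 *
 *	change_page_attr(page, 1, PAGE_KERNEL_NOCACHE);
 *	global_flush_tlb();
 *
 * PAGE_KERNEL_NOCACHE is assumed to be provided by the architecture's
 * pgtable.h here.
 */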