pageattr.c
/*
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Thanks to Ben LaHaise for precious feedback.
 */
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>

#include <asm/e820.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>

static inline int
within(unsigned long addr, unsigned long start, unsigned long end)
{
        return addr >= start && addr < end;
}
/*
 * Flushing functions
 */
void clflush_cache_range(void *addr, int size)
{
        int i;

        for (i = 0; i < size; i += boot_cpu_data.x86_clflush_size)
                clflush(addr + i);
}
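
/*
 * Example (illustrative sketch, not part of the original file): flushing a
 * 256-byte, cache-line-aligned descriptor before handing it to a device.
 * On a CPU with 64-byte cache lines (boot_cpu_data.x86_clflush_size == 64)
 * the loop issues four clflush instructions. "desc" is a hypothetical
 * buffer:
 *
 *      clflush_cache_range(desc, 256);
 */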
static void flush_kernel_map(void *arg)
{
        /*
         * Flush all to work around an erratum in early Athlons regarding
         * large page flushing.
         */
        __flush_tlb_all();

        if (boot_cpu_data.x86_model >= 4)
                wbinvd();
}

static void global_flush_tlb(void)
{
        BUG_ON(irqs_disabled());

        on_each_cpu(flush_kernel_map, NULL, 1, 1);
}

struct clflush_data {
        unsigned long addr;
        int numpages;
};

static void __cpa_flush_range(void *arg)
{
        struct clflush_data *cld = arg;

        /*
         * We could optimize this further and do individual per-page
         * TLB invalidates for a low number of pages. Caveat: we must
         * flush the high aliases on 64-bit as well.
         */
        __flush_tlb_all();

        clflush_cache_range((void *) cld->addr, cld->numpages * PAGE_SIZE);
}

static void cpa_flush_range(unsigned long addr, int numpages)
{
        struct clflush_data cld;

        BUG_ON(irqs_disabled());

        cld.addr = addr;
        cld.numpages = numpages;

        on_each_cpu(__cpa_flush_range, &cld, 1, 1);
}
/*
 * Certain areas of memory on x86 require very specific protection flags,
 * for example the BIOS area or kernel text. Callers don't always get this
 * right (again, ioremap() on BIOS memory is not uncommon) so this function
 * checks and fixes these known static required protection bits.
 */
static inline pgprot_t static_protections(pgprot_t prot, unsigned long address)
{
        pgprot_t forbidden = __pgprot(0);

        /*
         * The BIOS area between 640k and 1Mb needs to be executable for
         * PCI BIOS based config access (CONFIG_PCI_GOBIOS) support.
         */
        if (within(__pa(address), BIOS_BEGIN, BIOS_END))
                pgprot_val(forbidden) |= _PAGE_NX;

        /*
         * The kernel text needs to be executable for obvious reasons.
         * This does not cover __inittext, since that is gone after boot.
         */
        if (within(address, (unsigned long)_text, (unsigned long)_etext))
                pgprot_val(forbidden) |= _PAGE_NX;

#ifdef CONFIG_DEBUG_RODATA
        /* The .rodata section needs to be read-only */
        if (within(address, (unsigned long)__start_rodata,
                   (unsigned long)__end_rodata))
                pgprot_val(forbidden) |= _PAGE_RW;
#endif

        prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));

        return prot;
}
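
/*
 * Example (illustrative sketch, not part of the original file): a caller
 * that asks for a writable mapping of an address inside .rodata gets the
 * RW bit silently stripped:
 *
 *      pgprot_t prot = PAGE_KERNEL;    // has _PAGE_RW set
 *
 *      prot = static_protections(prot, (unsigned long)__start_rodata);
 *      // with CONFIG_DEBUG_RODATA, pgprot_val(prot) now lacks _PAGE_RW
 */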
pte_t *lookup_address(unsigned long address, int *level)
{
        pgd_t *pgd = pgd_offset_k(address);
        pud_t *pud;
        pmd_t *pmd;

        *level = PG_LEVEL_NONE;

        if (pgd_none(*pgd))
                return NULL;
        pud = pud_offset(pgd, address);
        if (pud_none(*pud))
                return NULL;
        pmd = pmd_offset(pud, address);
        if (pmd_none(*pmd))
                return NULL;

        *level = PG_LEVEL_2M;
        if (pmd_large(*pmd))
                return (pte_t *)pmd;

        *level = PG_LEVEL_4K;
        return pte_offset_kernel(pmd, address);
}
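
/*
 * Example (illustrative sketch, not part of the original file): a typical
 * caller checks the reported level before interpreting the returned entry,
 * since for PG_LEVEL_2M the returned pte_t pointer actually points at a pmd:
 *
 *      int level;
 *      pte_t *kpte = lookup_address(address, &level);
 *
 *      if (kpte && level == PG_LEVEL_4K && pte_present(*kpte)) {
 *              // safe to treat *kpte as a normal 4k pte
 *      }
 */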
static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
{
        /* change init_mm */
        set_pte_atomic(kpte, pte);
#ifdef CONFIG_X86_32
        if (!SHARED_KERNEL_PMD) {
                struct page *page;

                for (page = pgd_list; page; page = (struct page *)page->index) {
                        pgd_t *pgd;
                        pud_t *pud;
                        pmd_t *pmd;

                        pgd = (pgd_t *)page_address(page) + pgd_index(address);
                        pud = pud_offset(pgd, address);
                        pmd = pmd_offset(pud, address);
                        set_pte_atomic((pte_t *)pmd, pte);
                }
        }
#endif
}
static int split_large_page(pte_t *kpte, unsigned long address)
{
        pgprot_t ref_prot = pte_pgprot(pte_clrhuge(*kpte));
        gfp_t gfp_flags = GFP_KERNEL;
        unsigned long flags;
        unsigned long addr;
        pte_t *pbase, *tmp;
        struct page *base;
        int i, level;

#ifdef CONFIG_DEBUG_PAGEALLOC
        gfp_flags = GFP_ATOMIC;
#endif
        base = alloc_pages(gfp_flags, 0);
        if (!base)
                return -ENOMEM;

        spin_lock_irqsave(&pgd_lock, flags);
        /*
         * Check for races, another CPU might have split this page
         * up for us already:
         */
        tmp = lookup_address(address, &level);
        if (tmp != kpte) {
                WARN_ON_ONCE(1);
                goto out_unlock;
        }

        address = __pa(address);
        addr = address & LARGE_PAGE_MASK;
        pbase = (pte_t *)page_address(base);
#ifdef CONFIG_X86_32
        paravirt_alloc_pt(&init_mm, page_to_pfn(base));
#endif

        for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE)
                set_pte(&pbase[i], pfn_pte(addr >> PAGE_SHIFT, ref_prot));

        /*
         * Install the new, split up pagetable. Important detail here:
         *
         * On Intel the NX bit of all levels must be cleared to make a
         * page executable. See section 4.13.2 of the Intel 64 and IA-32
         * Architectures Software Developer's Manual.
         */
        ref_prot = pte_pgprot(pte_mkexec(pte_clrhuge(*kpte)));
        __set_pmd_pte(kpte, address, mk_pte(base, ref_prot));
        base = NULL;

out_unlock:
        spin_unlock_irqrestore(&pgd_lock, flags);

        if (base)
                __free_pages(base, 0);

        return 0;
}
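
/*
 * Example (illustrative sketch, not part of the original file): how the
 * split computes the 512 small-page frames. For a 2M mapping covering
 * physical address 0x00212345:
 *
 *      addr = 0x00212345 & LARGE_PAGE_MASK;    // 0x00200000
 *      // the loop then installs PTRS_PER_PTE (512) 4k ptes covering
 *      // pfns 0x200, 0x201, ..., 0x3ff, all with ref_prot
 */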
static int
__change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot)
{
        struct page *kpte_page;
        int level, err = 0;
        pte_t *kpte;

#ifdef CONFIG_X86_32
        BUG_ON(pfn > max_low_pfn);
#endif

repeat:
        kpte = lookup_address(address, &level);
        if (!kpte)
                return -EINVAL;

        kpte_page = virt_to_page(kpte);
        BUG_ON(PageLRU(kpte_page));
        BUG_ON(PageCompound(kpte_page));

        prot = static_protections(prot, address);

        if (level == PG_LEVEL_4K) {
                WARN_ON_ONCE(pgprot_val(prot) & _PAGE_PSE);
                set_pte_atomic(kpte, pfn_pte(pfn, canon_pgprot(prot)));
        } else {
                /* Clear the PSE bit for the 4k level pages ! */
                pgprot_val(prot) = pgprot_val(prot) & ~_PAGE_PSE;

                err = split_large_page(kpte, address);
                if (!err)
                        goto repeat;
        }
        return err;
}
/**
 * change_page_attr_addr - Change page table attributes in linear mapping
 * @address: Virtual address in linear mapping.
 * @prot:    New page table attribute (PAGE_*)
 *
 * Change page attributes of a page in the direct mapping. This is a variant
 * of change_page_attr() that also works on memory holes that do not have
 * a mem_map entry (pfn_valid() is false).
 *
 * See change_page_attr() documentation for more details.
 *
 * Modules and drivers should use the set_memory_* APIs instead.
 */
static int change_page_attr_addr(unsigned long address, pgprot_t prot)
{
        int err = 0, kernel_map = 0;
        unsigned long pfn = __pa(address) >> PAGE_SHIFT;

#ifdef CONFIG_X86_64
        if (address >= __START_KERNEL_map &&
                        address < __START_KERNEL_map + KERNEL_TEXT_SIZE) {

                address = (unsigned long)__va(__pa(address));
                kernel_map = 1;
        }
#endif

        if (!kernel_map || pte_present(pfn_pte(0, prot))) {
                err = __change_page_attr(address, pfn, prot);
                if (err)
                        return err;
        }

#ifdef CONFIG_X86_64
        /*
         * Handle the kernel mapping too, which aliases part of
         * lowmem:
         */
        if (__pa(address) < KERNEL_TEXT_SIZE) {
                unsigned long addr2;
                pgprot_t prot2;

                addr2 = __START_KERNEL_map + __pa(address);
                /* Make sure the kernel mappings stay executable */
                prot2 = pte_pgprot(pte_mkexec(pfn_pte(0, prot)));
                err = __change_page_attr(addr2, pfn, prot2);
        }
#endif

        return err;
}
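
/*
 * Example (illustrative sketch, not part of the original file): on 64-bit,
 * a single call may have to touch two virtual aliases of the same physical
 * page. For a page of kernel text at physical address P:
 *
 *      change_page_attr_addr(__START_KERNEL_map + P, PAGE_KERNEL_RO);
 *      // the address is first rewritten to __va(P) in the direct mapping,
 *      // and then the __START_KERNEL_map alias is updated as well, kept
 *      // executable via pte_mkexec()
 */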
static int __change_page_attr_set_clr(unsigned long addr, int numpages,
                                      pgprot_t mask_set, pgprot_t mask_clr)
{
        pgprot_t new_prot;
        int level;
        pte_t *pte;
        int i, ret;

        for (i = 0; i < numpages; i++) {
                pte = lookup_address(addr, &level);
                if (!pte)
                        return -EINVAL;

                new_prot = pte_pgprot(*pte);

                pgprot_val(new_prot) &= ~pgprot_val(mask_clr);
                pgprot_val(new_prot) |= pgprot_val(mask_set);

                ret = change_page_attr_addr(addr, new_prot);
                if (ret)
                        return ret;
                addr += PAGE_SIZE;
        }

        return 0;
}

static int change_page_attr_set_clr(unsigned long addr, int numpages,
                                    pgprot_t mask_set, pgprot_t mask_clr)
{
        int ret = __change_page_attr_set_clr(addr, numpages, mask_set,
                                             mask_clr);

        /*
         * On success we use clflush, when the CPU supports it, to
         * avoid the wbinvd. If the CPU does not support it, and in the
         * error case, we fall back to global_flush_tlb (which uses
         * wbinvd):
         */
        if (!ret && cpu_has_clflush)
                cpa_flush_range(addr, numpages);
        else
                global_flush_tlb();

        return ret;
}

static inline int change_page_attr_set(unsigned long addr, int numpages,
                                       pgprot_t mask)
{
        return change_page_attr_set_clr(addr, numpages, mask, __pgprot(0));
}

static inline int change_page_attr_clear(unsigned long addr, int numpages,
                                         pgprot_t mask)
{
        return change_page_attr_set_clr(addr, numpages, __pgprot(0), mask);
}
int set_memory_uc(unsigned long addr, int numpages)
{
        return change_page_attr_set(addr, numpages,
                                    __pgprot(_PAGE_PCD | _PAGE_PWT));
}
EXPORT_SYMBOL(set_memory_uc);

int set_memory_wb(unsigned long addr, int numpages)
{
        return change_page_attr_clear(addr, numpages,
                                      __pgprot(_PAGE_PCD | _PAGE_PWT));
}
EXPORT_SYMBOL(set_memory_wb);
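
/*
 * Example (illustrative sketch, not part of the original file): a driver
 * sharing a lowmem buffer with a device that is not cache-coherent could
 * mark the pages uncached while the device owns them, then restore
 * write-back caching. "buf" and "nrpages" are hypothetical:
 *
 *      unsigned long addr = (unsigned long)buf;
 *
 *      set_memory_uc(addr, nrpages);
 *      // ... device accesses the buffer ...
 *      set_memory_wb(addr, nrpages);
 */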
int set_memory_x(unsigned long addr, int numpages)
{
        return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_NX));
}
EXPORT_SYMBOL(set_memory_x);

int set_memory_nx(unsigned long addr, int numpages)
{
        return change_page_attr_set(addr, numpages, __pgprot(_PAGE_NX));
}
EXPORT_SYMBOL(set_memory_nx);

int set_memory_ro(unsigned long addr, int numpages)
{
        return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_RW));
}

int set_memory_rw(unsigned long addr, int numpages)
{
        return change_page_attr_set(addr, numpages, __pgprot(_PAGE_RW));
}

int set_memory_np(unsigned long addr, int numpages)
{
        return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_PRESENT));
}
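
/*
 * Example (illustrative sketch, not part of the original file): code that
 * must patch otherwise read-only kernel data can temporarily lift the write
 * protection around the update. "table", "entry" and "new_value" are
 * hypothetical, and the object is assumed to fit within one page:
 *
 *      set_memory_rw((unsigned long)table & PAGE_MASK, 1);
 *      table->entry = new_value;
 *      set_memory_ro((unsigned long)table & PAGE_MASK, 1);
 */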
int set_pages_uc(struct page *page, int numpages)
{
        unsigned long addr = (unsigned long)page_address(page);

        return set_memory_uc(addr, numpages);
}
EXPORT_SYMBOL(set_pages_uc);

int set_pages_wb(struct page *page, int numpages)
{
        unsigned long addr = (unsigned long)page_address(page);

        return set_memory_wb(addr, numpages);
}
EXPORT_SYMBOL(set_pages_wb);

int set_pages_x(struct page *page, int numpages)
{
        unsigned long addr = (unsigned long)page_address(page);

        return set_memory_x(addr, numpages);
}
EXPORT_SYMBOL(set_pages_x);

int set_pages_nx(struct page *page, int numpages)
{
        unsigned long addr = (unsigned long)page_address(page);

        return set_memory_nx(addr, numpages);
}
EXPORT_SYMBOL(set_pages_nx);

int set_pages_ro(struct page *page, int numpages)
{
        unsigned long addr = (unsigned long)page_address(page);

        return set_memory_ro(addr, numpages);
}

int set_pages_rw(struct page *page, int numpages)
{
        unsigned long addr = (unsigned long)page_address(page);

        return set_memory_rw(addr, numpages);
}
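
/*
 * Example (illustrative sketch, not part of the original file): the
 * set_pages_* helpers are the struct page flavour of the same calls; the
 * pages must come from lowmem so that page_address() is valid:
 *
 *      struct page *pg = alloc_pages(GFP_KERNEL, 2);   // 4 contiguous pages
 *
 *      if (pg)
 *              set_pages_uc(pg, 4);
 */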
#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_CPA_DEBUG)
static inline int __change_page_attr_set(unsigned long addr, int numpages,
                                         pgprot_t mask)
{
        return __change_page_attr_set_clr(addr, numpages, mask, __pgprot(0));
}

static inline int __change_page_attr_clear(unsigned long addr, int numpages,
                                           pgprot_t mask)
{
        return __change_page_attr_set_clr(addr, numpages, __pgprot(0), mask);
}
#endif

#ifdef CONFIG_DEBUG_PAGEALLOC

static int __set_pages_p(struct page *page, int numpages)
{
        unsigned long addr = (unsigned long)page_address(page);

        return __change_page_attr_set(addr, numpages,
                                      __pgprot(_PAGE_PRESENT | _PAGE_RW));
}

static int __set_pages_np(struct page *page, int numpages)
{
        unsigned long addr = (unsigned long)page_address(page);

        return __change_page_attr_clear(addr, numpages,
                                        __pgprot(_PAGE_PRESENT));
}

void kernel_map_pages(struct page *page, int numpages, int enable)
{
        if (PageHighMem(page))
                return;
        if (!enable) {
                debug_check_no_locks_freed(page_address(page),
                                           numpages * PAGE_SIZE);
        }

        /*
         * If the page allocator is not up yet then do not call c_p_a():
         */
        if (!debug_pagealloc_enabled)
                return;

        /*
         * The return value is ignored - the calls cannot fail,
         * large pages are disabled at boot time:
         */
        if (enable)
                __set_pages_p(page, numpages);
        else
                __set_pages_np(page, numpages);

        /*
         * We should perform an IPI and flush all tlbs,
         * but that can deadlock -> flush only the current cpu:
         */
        __flush_tlb_all();
}
#endif

/*
 * The testcases use internal knowledge of the implementation that shouldn't
 * be exposed to the rest of the kernel. Include these directly here.
 */
#ifdef CONFIG_CPA_DEBUG
#include "pageattr-test.c"
#endif