/*
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Thanks to Ben LaHaise for precious feedback.
 */
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>

#include <asm/e820.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
static inline int
within(unsigned long addr, unsigned long start, unsigned long end)
{
	return addr >= start && addr < end;
}
/*
 * Flushing functions
 */

/**
 * clflush_cache_range - flush a cache range with clflush
 * @vaddr: virtual start address
 * @size: number of bytes to flush
 *
 * clflush is an unordered instruction which needs fencing with mfence
 * to avoid ordering issues.
 */
void clflush_cache_range(void *vaddr, unsigned int size)
{
	void *vend = vaddr + size - 1;

	mb();

	for (; vaddr < vend; vaddr += boot_cpu_data.x86_clflush_size)
		clflush(vaddr);
	/*
	 * Flush any possible final partial cacheline:
	 */
	clflush(vend);

	mb();
}
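/*
 * Illustrative sketch (hypothetical helper, not part of this file's logic):
 * a caller that has just written to a buffer it also accesses through an
 * uncached alias could flush it with:
 *
 *	void flush_my_buffer(void *buf, unsigned int len)
 *	{
 *		clflush_cache_range(buf, len);
 *	}
 *
 * The mfence pair inside clflush_cache_range() orders the flushes against
 * surrounding loads and stores, so the caller needs no extra barriers.
 */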
static void __cpa_flush_all(void *arg)
{
	/*
	 * Flush all to work around Errata in early athlons regarding
	 * large page flushing.
	 */
	__flush_tlb_all();

	if (boot_cpu_data.x86_model >= 4)
		wbinvd();
}

static void cpa_flush_all(void)
{
	BUG_ON(irqs_disabled());

	on_each_cpu(__cpa_flush_all, NULL, 1, 1);
}

static void __cpa_flush_range(void *arg)
{
	/*
	 * We could optimize that further and do individual per page
	 * tlb invalidates for a low number of pages. Caveat: we must
	 * flush the high aliases on 64bit as well.
	 */
	__flush_tlb_all();
}

static void cpa_flush_range(unsigned long start, int numpages)
{
	unsigned int i;
	unsigned long addr;
	int level;

	BUG_ON(irqs_disabled());
	WARN_ON(PAGE_ALIGN(start) != start);

	on_each_cpu(__cpa_flush_range, NULL, 1, 1);

	/*
	 * We only need to flush on one CPU,
	 * clflush is a MESI-coherent instruction that
	 * will cause all other CPUs to flush the same
	 * cachelines:
	 */
	for (i = 0, addr = start; i < numpages; i++, addr += PAGE_SIZE) {
		pte_t *pte = lookup_address(addr, &level);

		/*
		 * Only flush present addresses:
		 */
		if (pte && pte_present(*pte))
			clflush_cache_range((void *) addr, PAGE_SIZE);
	}
}
/*
 * Certain areas of memory on x86 require very specific protection flags,
 * for example the BIOS area or kernel text. Callers don't always get this
 * right (again, ioremap() on BIOS memory is not uncommon) so this function
 * checks and fixes these known static required protection bits.
 */
static inline pgprot_t static_protections(pgprot_t prot, unsigned long address)
{
	pgprot_t forbidden = __pgprot(0);

	/*
	 * The BIOS area between 640k and 1Mb needs to be executable for
	 * PCI BIOS based config access (CONFIG_PCI_GOBIOS) support.
	 */
	if (within(__pa(address), BIOS_BEGIN, BIOS_END))
		pgprot_val(forbidden) |= _PAGE_NX;

	/*
	 * The kernel text needs to be executable for obvious reasons
	 * Does not cover __inittext since that is gone later on
	 */
	if (within(address, (unsigned long)_text, (unsigned long)_etext))
		pgprot_val(forbidden) |= _PAGE_NX;

#ifdef CONFIG_DEBUG_RODATA
	/* The .rodata section needs to be read-only */
	if (within(address, (unsigned long)__start_rodata,
		   (unsigned long)__end_rodata))
		pgprot_val(forbidden) |= _PAGE_RW;
#endif

	prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));

	return prot;
}
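/*
 * Illustrative sketch of the effect (hypothetical values, not part of this
 * file's logic): a request such as
 *
 *	pgprot_t prot = __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_NX);
 *	prot = static_protections(prot, (unsigned long)_text);
 *
 * comes back without _PAGE_NX set, because kernel text must stay
 * executable; all other requested bits are left untouched.
 */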
pte_t *lookup_address(unsigned long address, int *level)
{
	pgd_t *pgd = pgd_offset_k(address);
	pud_t *pud;
	pmd_t *pmd;

	*level = PG_LEVEL_NONE;

	if (pgd_none(*pgd))
		return NULL;
	pud = pud_offset(pgd, address);
	if (pud_none(*pud))
		return NULL;
	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		return NULL;

	*level = PG_LEVEL_2M;
	if (pmd_large(*pmd))
		return (pte_t *)pmd;

	*level = PG_LEVEL_4K;
	return pte_offset_kernel(pmd, address);
}
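/*
 * Illustrative sketch (hypothetical helper, not part of this file): a caller
 * can probe how a kernel address is currently mapped:
 *
 *	int mapped_with_large_page(unsigned long addr)
 *	{
 *		int level;
 *		pte_t *pte = lookup_address(addr, &level);
 *
 *		return pte && pte_present(*pte) && level == PG_LEVEL_2M;
 *	}
 *
 * cpa_flush_range() above uses the same pattern to skip non-present pages.
 */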
static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
{
	/* change init_mm */
	set_pte_atomic(kpte, pte);
#ifdef CONFIG_X86_32
	if (!SHARED_KERNEL_PMD) {
		struct page *page;

		list_for_each_entry(page, &pgd_list, lru) {
			pgd_t *pgd;
			pud_t *pud;
			pmd_t *pmd;

			pgd = (pgd_t *)page_address(page) + pgd_index(address);
			pud = pud_offset(pgd, address);
			pmd = pmd_offset(pud, address);
			set_pte_atomic((pte_t *)pmd, pte);
		}
	}
#endif
}
static int split_large_page(pte_t *kpte, unsigned long address)
{
	pgprot_t ref_prot = pte_pgprot(pte_clrhuge(*kpte));
	gfp_t gfp_flags = GFP_KERNEL;
	unsigned long flags;
	unsigned long addr;
	pte_t *pbase, *tmp;
	struct page *base;
	unsigned int i;
	int level;

#ifdef CONFIG_DEBUG_PAGEALLOC
	gfp_flags = GFP_ATOMIC | __GFP_NOWARN;
#endif
	base = alloc_pages(gfp_flags, 0);
	if (!base)
		return -ENOMEM;

	spin_lock_irqsave(&pgd_lock, flags);
	/*
	 * Check for races, another CPU might have split this page
	 * up for us already:
	 */
	tmp = lookup_address(address, &level);
	if (tmp != kpte) {
		WARN_ON_ONCE(1);
		goto out_unlock;
	}

	address = __pa(address);
	addr = address & LARGE_PAGE_MASK;
	pbase = (pte_t *)page_address(base);
#ifdef CONFIG_X86_32
	paravirt_alloc_pt(&init_mm, page_to_pfn(base));
#endif

	for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE)
		set_pte(&pbase[i], pfn_pte(addr >> PAGE_SHIFT, ref_prot));

	/*
	 * Install the new, split up pagetable. Important detail here:
	 *
	 * On Intel the NX bit of all levels must be cleared to make a
	 * page executable. See section 4.13.2 of the Intel 64 and IA-32
	 * Architectures Software Developer's Manual.
	 */
	ref_prot = pte_pgprot(pte_mkexec(pte_clrhuge(*kpte)));
	__set_pmd_pte(kpte, address, mk_pte(base, ref_prot));
	base = NULL;

out_unlock:
	spin_unlock_irqrestore(&pgd_lock, flags);

	if (base)
		__free_pages(base, 0);

	return 0;
}
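/*
 * Worked illustration of the split (assuming 4K base pages and a 2M large
 * page, i.e. 64-bit or PAE page tables): the single large mapping is
 * replaced by a page table of PTRS_PER_PTE entries, and
 *
 *	512 entries * 4 KB per entry = 2 MB
 *
 * so the new table covers exactly the range the large page used to map,
 * with unchanged protections. The caller can then retry and modify just
 * the one 4K entry it cares about via the PG_LEVEL_4K path.
 */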
static int
__change_page_attr(unsigned long address, unsigned long pfn,
		   pgprot_t mask_set, pgprot_t mask_clr)
{
	struct page *kpte_page;
	int level, err = 0;
	pte_t *kpte;

#ifdef CONFIG_X86_32
	BUG_ON(pfn > max_low_pfn);
#endif

repeat:
	kpte = lookup_address(address, &level);
	if (!kpte)
		return -EINVAL;

	kpte_page = virt_to_page(kpte);
	BUG_ON(PageLRU(kpte_page));
	BUG_ON(PageCompound(kpte_page));

	if (level == PG_LEVEL_4K) {
		pgprot_t new_prot = pte_pgprot(*kpte);
		pte_t new_pte, old_pte = *kpte;

		pgprot_val(new_prot) &= ~pgprot_val(mask_clr);
		pgprot_val(new_prot) |= pgprot_val(mask_set);

		new_prot = static_protections(new_prot, address);

		new_pte = pfn_pte(pfn, canon_pgprot(new_prot));
		BUG_ON(pte_pfn(new_pte) != pte_pfn(old_pte));

		set_pte_atomic(kpte, new_pte);
	} else {
		err = split_large_page(kpte, address);
		if (!err)
			goto repeat;
	}
	return err;
}
/**
 * change_page_attr_addr - Change page table attributes in linear mapping
 * @address: Virtual address in linear mapping.
 * @mask_set: Page table attribute bits to set.
 * @mask_clr: Page table attribute bits to clear.
 *
 * Change page attributes of a page in the direct mapping. This is a variant
 * of change_page_attr() that also works on memory holes that do not have
 * mem_map entry (pfn_valid() is false).
 *
 * See change_page_attr() documentation for more details.
 *
 * Modules and drivers should use the set_memory_* APIs instead.
 */

#define HIGH_MAP_START	__START_KERNEL_map
#define HIGH_MAP_END	(__START_KERNEL_map + KERNEL_TEXT_SIZE)

static int
change_page_attr_addr(unsigned long address, pgprot_t mask_set,
		      pgprot_t mask_clr)
{
	unsigned long phys_addr = __pa(address);
	unsigned long pfn = phys_addr >> PAGE_SHIFT;
	int err;

#ifdef CONFIG_X86_64
	/*
	 * If we are inside the high mapped kernel range, then we
	 * fixup the low mapping first. __va() returns the virtual
	 * address in the linear mapping:
	 */
	if (within(address, HIGH_MAP_START, HIGH_MAP_END))
		address = (unsigned long) __va(phys_addr);
#endif

	err = __change_page_attr(address, pfn, mask_set, mask_clr);
	if (err)
		return err;

#ifdef CONFIG_X86_64
	/*
	 * If the physical address is inside the kernel map, we need
	 * to touch the high mapped kernel as well:
	 */
	if (within(phys_addr, 0, KERNEL_TEXT_SIZE)) {
		/*
		 * Calc the high mapping address. See __phys_addr()
		 * for the non obvious details.
		 */
		address = phys_addr + HIGH_MAP_START - phys_base;

		/* Make sure the kernel mappings stay executable */
		pgprot_val(mask_clr) |= _PAGE_NX;

		/*
		 * Our high aliases are imprecise, because we check
		 * everything between 0 and KERNEL_TEXT_SIZE, so do
		 * not propagate lookup failures back to users:
		 */
		__change_page_attr(address, pfn, mask_set, mask_clr);
	}
#endif

	return err;
}
static int __change_page_attr_set_clr(unsigned long addr, int numpages,
				      pgprot_t mask_set, pgprot_t mask_clr)
{
	unsigned int i;
	int ret;

	for (i = 0; i < numpages; i++, addr += PAGE_SIZE) {
		ret = change_page_attr_addr(addr, mask_set, mask_clr);
		if (ret)
			return ret;
	}

	return 0;
}

static int change_page_attr_set_clr(unsigned long addr, int numpages,
				    pgprot_t mask_set, pgprot_t mask_clr)
{
	int ret = __change_page_attr_set_clr(addr, numpages, mask_set,
					     mask_clr);

	/*
	 * On success we use clflush, when the CPU supports it, to
	 * avoid the wbinvd. If the CPU does not support it and in the
	 * error case we fall back to cpa_flush_all (which uses
	 * wbinvd):
	 */
	if (!ret && cpu_has_clflush)
		cpa_flush_range(addr, numpages);
	else
		cpa_flush_all();

	return ret;
}
static inline int change_page_attr_set(unsigned long addr, int numpages,
				       pgprot_t mask)
{
	return change_page_attr_set_clr(addr, numpages, mask, __pgprot(0));
}

static inline int change_page_attr_clear(unsigned long addr, int numpages,
					 pgprot_t mask)
{
	return change_page_attr_set_clr(addr, numpages, __pgprot(0), mask);
}
int set_memory_uc(unsigned long addr, int numpages)
{
	return change_page_attr_set(addr, numpages,
				    __pgprot(_PAGE_PCD | _PAGE_PWT));
}
EXPORT_SYMBOL(set_memory_uc);

int set_memory_wb(unsigned long addr, int numpages)
{
	return change_page_attr_clear(addr, numpages,
				      __pgprot(_PAGE_PCD | _PAGE_PWT));
}
EXPORT_SYMBOL(set_memory_wb);
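/*
 * Illustrative driver-side sketch (hypothetical names, assuming a
 * page-aligned buffer in the direct mapping): switch a buffer to uncached
 * before handing it to a device, and back to write-back before freeing it:
 *
 *	int my_map_uncached(void *buf, int numpages)
 *	{
 *		return set_memory_uc((unsigned long)buf, numpages);
 *	}
 *
 *	void my_unmap_uncached(void *buf, int numpages)
 *	{
 *		set_memory_wb((unsigned long)buf, numpages);
 *	}
 */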
int set_memory_x(unsigned long addr, int numpages)
{
	return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_NX));
}
EXPORT_SYMBOL(set_memory_x);

int set_memory_nx(unsigned long addr, int numpages)
{
	return change_page_attr_set(addr, numpages, __pgprot(_PAGE_NX));
}
EXPORT_SYMBOL(set_memory_nx);

int set_memory_ro(unsigned long addr, int numpages)
{
	return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_RW));
}

int set_memory_rw(unsigned long addr, int numpages)
{
	return change_page_attr_set(addr, numpages, __pgprot(_PAGE_RW));
}
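/*
 * Illustrative sketch (hypothetical helpers, not part of this file):
 * write-protect a data structure after initialization and re-enable
 * writes before tearing it down:
 *
 *	int protect_table(void *table, int numpages)
 *	{
 *		return set_memory_ro((unsigned long)table, numpages);
 *	}
 *
 *	int unprotect_table(void *table, int numpages)
 *	{
 *		return set_memory_rw((unsigned long)table, numpages);
 *	}
 *
 * The page-based set_pages_ro()/set_pages_rw() wrappers below do the same
 * for callers that start from a struct page.
 */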
int set_memory_np(unsigned long addr, int numpages)
{
	return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_PRESENT));
}

int set_pages_uc(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_uc(addr, numpages);
}
EXPORT_SYMBOL(set_pages_uc);

int set_pages_wb(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_wb(addr, numpages);
}
EXPORT_SYMBOL(set_pages_wb);

int set_pages_x(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_x(addr, numpages);
}
EXPORT_SYMBOL(set_pages_x);

int set_pages_nx(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_nx(addr, numpages);
}
EXPORT_SYMBOL(set_pages_nx);

int set_pages_ro(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_ro(addr, numpages);
}

int set_pages_rw(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_rw(addr, numpages);
}
#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_CPA_DEBUG)
static inline int __change_page_attr_set(unsigned long addr, int numpages,
					 pgprot_t mask)
{
	return __change_page_attr_set_clr(addr, numpages, mask, __pgprot(0));
}

static inline int __change_page_attr_clear(unsigned long addr, int numpages,
					   pgprot_t mask)
{
	return __change_page_attr_set_clr(addr, numpages, __pgprot(0), mask);
}
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC

static int __set_pages_p(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return __change_page_attr_set(addr, numpages,
				      __pgprot(_PAGE_PRESENT | _PAGE_RW));
}

static int __set_pages_np(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return __change_page_attr_clear(addr, numpages,
					__pgprot(_PAGE_PRESENT));
}

void kernel_map_pages(struct page *page, int numpages, int enable)
{
	if (PageHighMem(page))
		return;

	if (!enable) {
		debug_check_no_locks_freed(page_address(page),
					   numpages * PAGE_SIZE);
	}

	/*
	 * If page allocator is not up yet then do not call c_p_a():
	 */
	if (!debug_pagealloc_enabled)
		return;

	/*
	 * The return value is ignored - the calls cannot fail,
	 * large pages are disabled at boot time:
	 */
	if (enable)
		__set_pages_p(page, numpages);
	else
		__set_pages_np(page, numpages);

	/*
	 * We should perform an IPI and flush all tlbs,
	 * but that can deadlock -> flush only current cpu:
	 */
	__flush_tlb_all();
}
#endif
/*
 * The testcases use internal knowledge of the implementation that shouldn't
 * be exposed to the rest of the kernel. Include these directly here.
 */
#ifdef CONFIG_CPA_DEBUG
#include "pageattr-test.c"
#endif