pageattr.c

/*
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Thanks to Ben LaHaise for precious feedback.
 */
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>

#include <asm/e820.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>

static inline int
within(unsigned long addr, unsigned long start, unsigned long end)
{
        return addr >= start && addr < end;
}
/*
 * Flushing functions
 */

/**
 * clflush_cache_range - flush a cache range with clflush
 * @vaddr: virtual start address
 * @size:  number of bytes to flush
 *
 * clflush is an unordered instruction which needs fencing with mfence
 * to avoid ordering issues.
 */
void clflush_cache_range(void *vaddr, unsigned int size)
{
        void *vend = vaddr + size - 1;

        mb();

        for (; vaddr < vend; vaddr += boot_cpu_data.x86_clflush_size)
                clflush(vaddr);
        /*
         * Flush any possible final partial cacheline:
         */
        clflush(vend);

        mb();
}
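
/*
 * Usage sketch (illustrative; "buf" and "len" are hypothetical names,
 * not from this file): a caller that has modified a buffer and needs
 * the affected cachelines written back to memory would do:
 *
 *      memcpy(buf, new_data, len);
 *      clflush_cache_range(buf, len);
 *
 * The mb() pair above makes the flush loop ordered with respect to the
 * surrounding stores.
 */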
static void __cpa_flush_all(void *arg)
{
        /*
         * Flush all to work around errata in early Athlons regarding
         * large page flushing.
         */
        __flush_tlb_all();

        if (boot_cpu_data.x86_model >= 4)
                wbinvd();
}

static void cpa_flush_all(void)
{
        BUG_ON(irqs_disabled());

        on_each_cpu(__cpa_flush_all, NULL, 1, 1);
}
static void __cpa_flush_range(void *arg)
{
        /*
         * We could optimize this further and do individual per-page
         * TLB invalidates for a low number of pages. Caveat: we must
         * flush the high aliases on 64-bit as well.
         */
        __flush_tlb_all();
}

static void cpa_flush_range(unsigned long start, int numpages)
{
        unsigned int i, level;
        unsigned long addr;

        BUG_ON(irqs_disabled());
        WARN_ON(PAGE_ALIGN(start) != start);

        on_each_cpu(__cpa_flush_range, NULL, 1, 1);

        /*
         * We only need to flush on one CPU,
         * clflush is a MESI-coherent instruction that
         * will cause all other CPUs to flush the same
         * cachelines:
         */
        for (i = 0, addr = start; i < numpages; i++, addr += PAGE_SIZE) {
                pte_t *pte = lookup_address(addr, &level);

                /*
                 * Only flush present addresses:
                 */
                if (pte && pte_present(*pte))
                        clflush_cache_range((void *) addr, PAGE_SIZE);
        }
}
/*
 * Certain areas of memory on x86 require very specific protection flags,
 * for example the BIOS area or kernel text. Callers don't always get this
 * right (again, ioremap() on BIOS memory is not uncommon) so this function
 * checks and fixes these known static required protection bits.
 */
static inline pgprot_t static_protections(pgprot_t prot, unsigned long address)
{
        pgprot_t forbidden = __pgprot(0);

        /*
         * The BIOS area between 640K and 1MB needs to be executable for
         * PCI BIOS based config access (CONFIG_PCI_GOBIOS) support.
         */
        if (within(__pa(address), BIOS_BEGIN, BIOS_END))
                pgprot_val(forbidden) |= _PAGE_NX;

        /*
         * The kernel text needs to be executable for obvious reasons.
         * This does not cover __inittext, since that is gone later on.
         */
        if (within(address, (unsigned long)_text, (unsigned long)_etext))
                pgprot_val(forbidden) |= _PAGE_NX;

#ifdef CONFIG_DEBUG_RODATA
        /* The .rodata section needs to be read-only */
        if (within(address, (unsigned long)__start_rodata,
                                (unsigned long)__end_rodata))
                pgprot_val(forbidden) |= _PAGE_RW;
#endif

        prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));

        return prot;
}
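
/*
 * Example of the effect (illustrative; "rodata_addr" is a hypothetical
 * address inside .rodata): with CONFIG_DEBUG_RODATA, a request to make
 * a .rodata page writable has _PAGE_RW masked back out, so
 *
 *      prot = static_protections(PAGE_KERNEL, rodata_addr);
 *
 * yields a read-only pgprot even though PAGE_KERNEL has _PAGE_RW set.
 */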
pte_t *lookup_address(unsigned long address, int *level)
{
        pgd_t *pgd = pgd_offset_k(address);
        pud_t *pud;
        pmd_t *pmd;

        *level = PG_LEVEL_NONE;

        if (pgd_none(*pgd))
                return NULL;
        pud = pud_offset(pgd, address);
        if (pud_none(*pud))
                return NULL;
        pmd = pmd_offset(pud, address);
        if (pmd_none(*pmd))
                return NULL;

        *level = PG_LEVEL_2M;
        if (pmd_large(*pmd))
                return (pte_t *)pmd;

        *level = PG_LEVEL_4K;

        return pte_offset_kernel(pmd, address);
}
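
/*
 * Usage sketch (illustrative): callers check the returned level to tell
 * a 2M mapping from a 4k one, e.g.:
 *
 *      int level;
 *      pte_t *pte = lookup_address(addr, &level);
 *
 *      if (pte && level != PG_LEVEL_4K)
 *              split_large_page(pte, addr);
 *
 * which is essentially how __change_page_attr() below uses it.
 */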
static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
{
        /* change init_mm */
        set_pte_atomic(kpte, pte);
#ifdef CONFIG_X86_32
        if (!SHARED_KERNEL_PMD) {
                struct page *page;

                for (page = pgd_list; page; page = (struct page *)page->index) {
                        pgd_t *pgd;
                        pud_t *pud;
                        pmd_t *pmd;

                        pgd = (pgd_t *)page_address(page) + pgd_index(address);
                        pud = pud_offset(pgd, address);
                        pmd = pmd_offset(pud, address);
                        set_pte_atomic((pte_t *)pmd, pte);
                }
        }
#endif
}
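
/*
 * split_large_page() replaces one large (2M/4M) mapping with a newly
 * allocated page table of PTRS_PER_PTE small (4k) entries covering the
 * same physical range with the same protections, so that a single 4k
 * page within it can then be given different attributes.
 */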
static int split_large_page(pte_t *kpte, unsigned long address)
{
        pgprot_t ref_prot = pte_pgprot(pte_clrhuge(*kpte));
        gfp_t gfp_flags = GFP_KERNEL;
        unsigned long flags;
        unsigned long addr;
        pte_t *pbase, *tmp;
        struct page *base;
        int i, level;

#ifdef CONFIG_DEBUG_PAGEALLOC
        gfp_flags = GFP_ATOMIC;
#endif
        base = alloc_pages(gfp_flags, 0);
        if (!base)
                return -ENOMEM;

        spin_lock_irqsave(&pgd_lock, flags);
        /*
         * Check for races, another CPU might have split this page
         * up for us already:
         */
        tmp = lookup_address(address, &level);
        if (tmp != kpte) {
                WARN_ON_ONCE(1);
                goto out_unlock;
        }

        address = __pa(address);
        addr = address & LARGE_PAGE_MASK;
        pbase = (pte_t *)page_address(base);
#ifdef CONFIG_X86_32
        paravirt_alloc_pt(&init_mm, page_to_pfn(base));
#endif

        for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE)
                set_pte(&pbase[i], pfn_pte(addr >> PAGE_SHIFT, ref_prot));

        /*
         * Install the new, split up pagetable. Important detail here:
         *
         * On Intel the NX bit of all levels must be cleared to make a
         * page executable. (See section 4.13.2 of the Intel 64 and IA-32
         * Architectures Software Developer's Manual.)
         */
        ref_prot = pte_pgprot(pte_mkexec(pte_clrhuge(*kpte)));
        __set_pmd_pte(kpte, address, mk_pte(base, ref_prot));
        base = NULL;

out_unlock:
        spin_unlock_irqrestore(&pgd_lock, flags);

        if (base)
                __free_pages(base, 0);

        return 0;
}
static int
__change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot)
{
        struct page *kpte_page;
        int level, err = 0;
        pte_t *kpte;

#ifdef CONFIG_X86_32
        BUG_ON(pfn > max_low_pfn);
#endif

repeat:
        kpte = lookup_address(address, &level);
        if (!kpte)
                return -EINVAL;

        kpte_page = virt_to_page(kpte);
        BUG_ON(PageLRU(kpte_page));
        BUG_ON(PageCompound(kpte_page));

        prot = static_protections(prot, address);

        if (level == PG_LEVEL_4K) {
                WARN_ON_ONCE(pgprot_val(prot) & _PAGE_PSE);
                set_pte_atomic(kpte, pfn_pte(pfn, canon_pgprot(prot)));
        } else {
                /* Clear the PSE bit for the 4k level pages! */
                pgprot_val(prot) = pgprot_val(prot) & ~_PAGE_PSE;

                err = split_large_page(kpte, address);
                if (!err)
                        goto repeat;
        }
        return err;
}
/**
 * change_page_attr_addr - Change page table attributes in linear mapping
 * @address: Virtual address in linear mapping.
 * @prot:    New page table attribute (PAGE_*)
 *
 * Change page attributes of a page in the direct mapping. This is a variant
 * of change_page_attr() that also works on memory holes that do not have a
 * mem_map entry (pfn_valid() is false).
 *
 * See change_page_attr() documentation for more details.
 *
 * Modules and drivers should use the set_memory_* APIs instead.
 */
static int change_page_attr_addr(unsigned long address, pgprot_t prot)
{
        int err = 0, kernel_map = 0;
        unsigned long pfn = __pa(address) >> PAGE_SHIFT;

#ifdef CONFIG_X86_64
        if (address >= __START_KERNEL_map &&
                        address < __START_KERNEL_map + KERNEL_TEXT_SIZE) {

                address = (unsigned long)__va(__pa(address));
                kernel_map = 1;
        }
#endif

        if (!kernel_map || pte_present(pfn_pte(0, prot))) {
                err = __change_page_attr(address, pfn, prot);
                if (err)
                        return err;
        }

#ifdef CONFIG_X86_64
        /*
         * Handle the kernel mapping too, which aliases part of
         * lowmem:
         */
        if (__pa(address) < KERNEL_TEXT_SIZE) {
                unsigned long addr2;
                pgprot_t prot2;

                addr2 = __START_KERNEL_map + __pa(address);
                /* Make sure the kernel mappings stay executable */
                prot2 = pte_pgprot(pte_mkexec(pfn_pte(0, prot)));
                err = __change_page_attr(addr2, pfn, prot2);
        }
#endif

        return err;
}
static int __change_page_attr_set_clr(unsigned long addr, int numpages,
                                      pgprot_t mask_set, pgprot_t mask_clr)
{
        pgprot_t new_prot;
        int level;
        pte_t *pte;
        int i, ret;

        for (i = 0; i < numpages; i++) {
                pte = lookup_address(addr, &level);
                if (!pte)
                        return -EINVAL;

                new_prot = pte_pgprot(*pte);

                pgprot_val(new_prot) &= ~pgprot_val(mask_clr);
                pgprot_val(new_prot) |= pgprot_val(mask_set);

                ret = change_page_attr_addr(addr, new_prot);
                if (ret)
                        return ret;
                addr += PAGE_SIZE;
        }

        return 0;
}
static int change_page_attr_set_clr(unsigned long addr, int numpages,
                                    pgprot_t mask_set, pgprot_t mask_clr)
{
        int ret = __change_page_attr_set_clr(addr, numpages, mask_set,
                                             mask_clr);

        /*
         * On success we use clflush, when the CPU supports it, to
         * avoid the wbinvd. If the CPU does not support it, and in
         * the error case, we fall back to cpa_flush_all (which uses
         * wbinvd):
         */
        if (!ret && cpu_has_clflush)
                cpa_flush_range(addr, numpages);
        else
                cpa_flush_all();

        return ret;
}
static inline int change_page_attr_set(unsigned long addr, int numpages,
                                       pgprot_t mask)
{
        return change_page_attr_set_clr(addr, numpages, mask, __pgprot(0));
}

static inline int change_page_attr_clear(unsigned long addr, int numpages,
                                         pgprot_t mask)
{
        return change_page_attr_set_clr(addr, numpages, __pgprot(0), mask);
}
int set_memory_uc(unsigned long addr, int numpages)
{
        return change_page_attr_set(addr, numpages,
                                    __pgprot(_PAGE_PCD | _PAGE_PWT));
}
EXPORT_SYMBOL(set_memory_uc);

int set_memory_wb(unsigned long addr, int numpages)
{
        return change_page_attr_clear(addr, numpages,
                                      __pgprot(_PAGE_PCD | _PAGE_PWT));
}
EXPORT_SYMBOL(set_memory_wb);
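
/*
 * Usage sketch (illustrative; "vaddr" and "npages" are hypothetical): a
 * driver that needs an uncached view of a kernel buffer pairs the two
 * calls, restoring write-back before freeing the pages:
 *
 *      if (set_memory_uc(vaddr, npages))
 *              goto err;
 *      ... use the buffer uncached ...
 *      set_memory_wb(vaddr, npages);
 */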
int set_memory_x(unsigned long addr, int numpages)
{
        return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_NX));
}
EXPORT_SYMBOL(set_memory_x);

int set_memory_nx(unsigned long addr, int numpages)
{
        return change_page_attr_set(addr, numpages, __pgprot(_PAGE_NX));
}
EXPORT_SYMBOL(set_memory_nx);

int set_memory_ro(unsigned long addr, int numpages)
{
        return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_RW));
}

int set_memory_rw(unsigned long addr, int numpages)
{
        return change_page_attr_set(addr, numpages, __pgprot(_PAGE_RW));
}

int set_memory_np(unsigned long addr, int numpages)
{
        return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_PRESENT));
}

int set_pages_uc(struct page *page, int numpages)
{
        unsigned long addr = (unsigned long)page_address(page);

        return set_memory_uc(addr, numpages);
}
EXPORT_SYMBOL(set_pages_uc);

int set_pages_wb(struct page *page, int numpages)
{
        unsigned long addr = (unsigned long)page_address(page);

        return set_memory_wb(addr, numpages);
}
EXPORT_SYMBOL(set_pages_wb);

int set_pages_x(struct page *page, int numpages)
{
        unsigned long addr = (unsigned long)page_address(page);

        return set_memory_x(addr, numpages);
}
EXPORT_SYMBOL(set_pages_x);

int set_pages_nx(struct page *page, int numpages)
{
        unsigned long addr = (unsigned long)page_address(page);

        return set_memory_nx(addr, numpages);
}
EXPORT_SYMBOL(set_pages_nx);

int set_pages_ro(struct page *page, int numpages)
{
        unsigned long addr = (unsigned long)page_address(page);

        return set_memory_ro(addr, numpages);
}

int set_pages_rw(struct page *page, int numpages)
{
        unsigned long addr = (unsigned long)page_address(page);

        return set_memory_rw(addr, numpages);
}
#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_CPA_DEBUG)
static inline int __change_page_attr_set(unsigned long addr, int numpages,
                                         pgprot_t mask)
{
        return __change_page_attr_set_clr(addr, numpages, mask, __pgprot(0));
}

static inline int __change_page_attr_clear(unsigned long addr, int numpages,
                                           pgprot_t mask)
{
        return __change_page_attr_set_clr(addr, numpages, __pgprot(0), mask);
}
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC

static int __set_pages_p(struct page *page, int numpages)
{
        unsigned long addr = (unsigned long)page_address(page);

        return __change_page_attr_set(addr, numpages,
                                      __pgprot(_PAGE_PRESENT | _PAGE_RW));
}

static int __set_pages_np(struct page *page, int numpages)
{
        unsigned long addr = (unsigned long)page_address(page);

        return __change_page_attr_clear(addr, numpages,
                                        __pgprot(_PAGE_PRESENT));
}

void kernel_map_pages(struct page *page, int numpages, int enable)
{
        if (PageHighMem(page))
                return;
        if (!enable) {
                debug_check_no_locks_freed(page_address(page),
                                           numpages * PAGE_SIZE);
        }

        /*
         * If the page allocator is not up yet then do not call c_p_a():
         */
        if (!debug_pagealloc_enabled)
                return;

        /*
         * The return value is ignored - the calls cannot fail,
         * large pages are disabled at boot time:
         */
        if (enable)
                __set_pages_p(page, numpages);
        else
                __set_pages_np(page, numpages);

        /*
         * We should perform an IPI and flush all TLBs,
         * but that can deadlock, so we flush only the current CPU:
         */
        __flush_tlb_all();
}
#endif
/*
 * The testcases use internal knowledge of the implementation that shouldn't
 * be exposed to the rest of the kernel. Include these directly here.
 */
#ifdef CONFIG_CPA_DEBUG
#include "pageattr-test.c"
#endif