pageattr.c

/*
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Thanks to Ben LaHaise for precious feedback.
 */
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>

#include <asm/e820.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>

static inline int
within(unsigned long addr, unsigned long start, unsigned long end)
{
        return addr >= start && addr < end;
}
/*
 * Flushing functions
 */

/**
 * clflush_cache_range - flush a cache range with clflush
 * @vaddr: virtual start address
 * @size: number of bytes to flush
 *
 * clflush is an unordered instruction which needs fencing with mfence
 * to avoid ordering issues.
 */
void clflush_cache_range(void *vaddr, unsigned int size)
{
        void *vend = vaddr + size - 1;

        mb();

        for (; vaddr < vend; vaddr += boot_cpu_data.x86_clflush_size)
                clflush(vaddr);
        /*
         * Flush any possible final partial cacheline:
         */
        clflush(vend);

        mb();
}
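
/*
 * Example (hypothetical caller, not part of this file): after modifying
 * a small buffer whose cachelines must reach memory, a caller could
 * flush just that range instead of paying for a full wbinvd:
 *
 *        clflush_cache_range(buf, len);
 *
 * The mb() fences above make this safe to call without additional
 * barriers around it. buf and len are placeholder names.
 */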
static void __cpa_flush_all(void *arg)
{
        /*
         * Flush all to work around an erratum in early Athlons
         * regarding large page flushing.
         */
        __flush_tlb_all();

        if (boot_cpu_data.x86_model >= 4)
                wbinvd();
}

static void cpa_flush_all(void)
{
        BUG_ON(irqs_disabled());

        on_each_cpu(__cpa_flush_all, NULL, 1, 1);
}

static void __cpa_flush_range(void *arg)
{
        /*
         * We could optimize this further and do individual per-page
         * tlb invalidates for a low number of pages. Caveat: we must
         * flush the high aliases on 64bit as well.
         */
        __flush_tlb_all();
}

static void cpa_flush_range(unsigned long start, int numpages)
{
        unsigned long addr;
        unsigned int i;
        int level;

        BUG_ON(irqs_disabled());
        WARN_ON(PAGE_ALIGN(start) != start);

        on_each_cpu(__cpa_flush_range, NULL, 1, 1);

        /*
         * We only need to flush on one CPU,
         * clflush is a MESI-coherent instruction that
         * will cause all other CPUs to flush the same
         * cachelines:
         */
        for (i = 0, addr = start; i < numpages; i++, addr += PAGE_SIZE) {
                pte_t *pte = lookup_address(addr, &level);

                /*
                 * Only flush present addresses:
                 */
                if (pte && pte_present(*pte))
                        clflush_cache_range((void *) addr, PAGE_SIZE);
        }
}
#define HIGH_MAP_START        __START_KERNEL_map
#define HIGH_MAP_END          (__START_KERNEL_map + KERNEL_TEXT_SIZE)

/*
 * Converts a virtual address to an x86-64 highmap address
 */
static unsigned long virt_to_highmap(void *address)
{
#ifdef CONFIG_X86_64
        return __pa((unsigned long)address) + HIGH_MAP_START - phys_base;
#else
        return (unsigned long)address;
#endif
}
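
/*
 * Worked example (illustrative values, not real addresses): if _text
 * sits at physical address 0x200000 and phys_base is 0, then
 * virt_to_highmap(_text) == 0x200000 + __START_KERNEL_map, i.e. the
 * alias of the same physical page inside the high kernel mapping.
 * Both aliases of a page must receive the same protection fixups.
 */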
/*
 * Certain areas of memory on x86 require very specific protection flags,
 * for example the BIOS area or kernel text. Callers don't always get this
 * right (again, ioremap() on BIOS memory is not uncommon), so this function
 * checks and fixes these known static required protection bits.
 */
static inline pgprot_t static_protections(pgprot_t prot, unsigned long address)
{
        pgprot_t forbidden = __pgprot(0);

        /*
         * The BIOS area between 640k and 1Mb needs to be executable for
         * PCI BIOS based config access (CONFIG_PCI_GOBIOS) support.
         */
        if (within(__pa(address), BIOS_BEGIN, BIOS_END))
                pgprot_val(forbidden) |= _PAGE_NX;

        /*
         * The kernel text needs to be executable for obvious reasons.
         * Does not cover __inittext, since that is gone later on.
         */
        if (within(address, (unsigned long)_text, (unsigned long)_etext))
                pgprot_val(forbidden) |= _PAGE_NX;
        /*
         * Do the same for the x86-64 high kernel mapping
         */
        if (within(address, virt_to_highmap(_text), virt_to_highmap(_etext)))
                pgprot_val(forbidden) |= _PAGE_NX;

#ifdef CONFIG_DEBUG_RODATA
        /* The .rodata section needs to be read-only */
        if (within(address, (unsigned long)__start_rodata,
                                (unsigned long)__end_rodata))
                pgprot_val(forbidden) |= _PAGE_RW;
        /*
         * Do the same for the x86-64 high kernel mapping
         */
        if (within(address, virt_to_highmap(__start_rodata),
                                virt_to_highmap(__end_rodata)))
                pgprot_val(forbidden) |= _PAGE_RW;
#endif

        prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));

        return prot;
}
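
/*
 * Example (hypothetical caller, not part of this file): a request to
 * make a .rodata page writable,
 *
 *        prot = static_protections(__pgprot(_PAGE_RW), rodata_addr);
 *
 * comes back with _PAGE_RW stripped when CONFIG_DEBUG_RODATA is set,
 * so the page stays read-only no matter what the caller asked for.
 * rodata_addr is a placeholder for an address inside .rodata.
 */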
/*
 * Lookup the page table entry for a virtual address. Return a pointer
 * to the entry and the level of the mapping.
 */
pte_t *lookup_address(unsigned long address, int *level)
{
        pgd_t *pgd = pgd_offset_k(address);
        pud_t *pud;
        pmd_t *pmd;

        *level = PG_LEVEL_NONE;

        if (pgd_none(*pgd))
                return NULL;
        pud = pud_offset(pgd, address);
        if (pud_none(*pud))
                return NULL;
        pmd = pmd_offset(pud, address);
        if (pmd_none(*pmd))
                return NULL;

        *level = PG_LEVEL_2M;
        if (pmd_large(*pmd))
                return (pte_t *)pmd;

        *level = PG_LEVEL_4K;
        return pte_offset_kernel(pmd, address);
}
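
/*
 * Example (hypothetical caller, not part of this file): checking
 * whether an address is backed by a large page:
 *
 *        int level;
 *        pte_t *pte = lookup_address(addr, &level);
 *
 *        if (pte && level == PG_LEVEL_2M)
 *                split_large_page(pte, addr);
 *
 * This is essentially the pattern __change_page_attr() below uses.
 */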
static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
{
        /* change init_mm */
        set_pte_atomic(kpte, pte);
#ifdef CONFIG_X86_32
        if (!SHARED_KERNEL_PMD) {
                struct page *page;

                list_for_each_entry(page, &pgd_list, lru) {
                        pgd_t *pgd;
                        pud_t *pud;
                        pmd_t *pmd;

                        pgd = (pgd_t *)page_address(page) + pgd_index(address);
                        pud = pud_offset(pgd, address);
                        pmd = pmd_offset(pud, address);
                        set_pte_atomic((pte_t *)pmd, pte);
                }
        }
#endif
}
static int split_large_page(pte_t *kpte, unsigned long address)
{
        pgprot_t ref_prot = pte_pgprot(pte_clrhuge(*kpte));
        gfp_t gfp_flags = GFP_KERNEL;
        unsigned long flags, addr, pfn;
        pte_t *pbase, *tmp;
        struct page *base;
        unsigned int i;
        int level;

#ifdef CONFIG_DEBUG_PAGEALLOC
        gfp_flags = GFP_ATOMIC | __GFP_NOWARN;
#endif
        base = alloc_pages(gfp_flags, 0);
        if (!base)
                return -ENOMEM;

        spin_lock_irqsave(&pgd_lock, flags);
        /*
         * Check for races, another CPU might have split this page
         * up for us already:
         */
        tmp = lookup_address(address, &level);
        if (tmp != kpte) {
                WARN_ON_ONCE(1);
                goto out_unlock;
        }

        address = __pa(address);
        addr = address & LARGE_PAGE_MASK;
        pbase = (pte_t *)page_address(base);
#ifdef CONFIG_X86_32
        paravirt_alloc_pt(&init_mm, page_to_pfn(base));
#endif

        /*
         * Get the target pfn from the original entry:
         */
        pfn = pte_pfn(*kpte);
        for (i = 0; i < PTRS_PER_PTE; i++, pfn++)
                set_pte(&pbase[i], pfn_pte(pfn, ref_prot));

        /*
         * Install the new, split up pagetable. Important detail here:
         *
         * On Intel the NX bit of all levels must be cleared to make a
         * page executable (see section 4.13.2 of the Intel 64 and
         * IA-32 Architectures Software Developer's Manual).
         */
        ref_prot = pte_pgprot(pte_mkexec(pte_clrhuge(*kpte)));
        __set_pmd_pte(kpte, address, mk_pte(base, ref_prot));
        base = NULL;

out_unlock:
        spin_unlock_irqrestore(&pgd_lock, flags);

        if (base)
                __free_pages(base, 0);

        return 0;
}
static int
__change_page_attr(unsigned long address, pgprot_t mask_set, pgprot_t mask_clr)
{
        struct page *kpte_page;
        int level, err = 0;
        pte_t *kpte;

repeat:
        kpte = lookup_address(address, &level);
        if (!kpte)
                return -EINVAL;

        kpte_page = virt_to_page(kpte);
        BUG_ON(PageLRU(kpte_page));
        BUG_ON(PageCompound(kpte_page));

        if (level == PG_LEVEL_4K) {
                pte_t new_pte, old_pte = *kpte;
                pgprot_t new_prot = pte_pgprot(old_pte);

                if (!pte_val(old_pte)) {
                        WARN_ON_ONCE(1);
                        return -EINVAL;
                }

                pgprot_val(new_prot) &= ~pgprot_val(mask_clr);
                pgprot_val(new_prot) |= pgprot_val(mask_set);

                new_prot = static_protections(new_prot, address);

                /*
                 * We need to keep the pfn from the existing PTE,
                 * after all we're only going to change its attributes,
                 * not the memory it points to.
                 */
                new_pte = pfn_pte(pte_pfn(old_pte), canon_pgprot(new_prot));
                set_pte_atomic(kpte, new_pte);
        } else {
                err = split_large_page(kpte, address);
                if (!err)
                        goto repeat;
        }
        return err;
}
/**
 * change_page_attr_addr - Change page table attributes in linear mapping
 * @address: Virtual address in linear mapping.
 * @mask_set: Protection bits to set (PAGE_*)
 * @mask_clr: Protection bits to clear (PAGE_*)
 *
 * Change page attributes of a page in the direct mapping. This is a variant
 * of change_page_attr() that also works on memory holes that do not have
 * a mem_map entry (pfn_valid() is false).
 *
 * See change_page_attr() documentation for more details.
 *
 * Modules and drivers should use the set_memory_* APIs instead.
 */
static int
change_page_attr_addr(unsigned long address, pgprot_t mask_set,
                      pgprot_t mask_clr)
{
        int err;

#ifdef CONFIG_X86_64
        unsigned long phys_addr = __pa(address);

        /*
         * If we are inside the high mapped kernel range, then we
         * fixup the low mapping first. __va() returns the virtual
         * address in the linear mapping:
         */
        if (within(address, HIGH_MAP_START, HIGH_MAP_END))
                address = (unsigned long) __va(phys_addr);
#endif

        err = __change_page_attr(address, mask_set, mask_clr);
        if (err)
                return err;

#ifdef CONFIG_X86_64
        /*
         * If the physical address is inside the kernel map, we need
         * to touch the high mapped kernel as well:
         */
        if (within(phys_addr, 0, KERNEL_TEXT_SIZE)) {
                /*
                 * Calc the high mapping address. See __phys_addr()
                 * for the non obvious details.
                 *
                 * Note that NX and other required permissions are
                 * checked in static_protections().
                 */
                address = phys_addr + HIGH_MAP_START - phys_base;

                /*
                 * Our high aliases are imprecise, because we check
                 * everything between 0 and KERNEL_TEXT_SIZE, so do
                 * not propagate lookup failures back to users:
                 */
                __change_page_attr(address, mask_set, mask_clr);
        }
#endif
        return err;
}
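
/*
 * Illustrative note (not from the original code): on x86-64 a kernel
 * text address is reachable both through the direct mapping, at
 * __va(__pa(address)), and through the high mapping starting at
 * __START_KERNEL_map. The function above deliberately changes both
 * aliases so their protections never diverge.
 */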
static int __change_page_attr_set_clr(unsigned long addr, int numpages,
                                      pgprot_t mask_set, pgprot_t mask_clr)
{
        unsigned int i;
        int ret;

        for (i = 0; i < numpages; i++, addr += PAGE_SIZE) {
                ret = change_page_attr_addr(addr, mask_set, mask_clr);
                if (ret)
                        return ret;
        }

        return 0;
}

static int change_page_attr_set_clr(unsigned long addr, int numpages,
                                    pgprot_t mask_set, pgprot_t mask_clr)
{
        int ret;

        /*
         * Check whether we are asked to change an unsupported feature:
         */
        mask_set = canon_pgprot(mask_set);
        mask_clr = canon_pgprot(mask_clr);
        if (!pgprot_val(mask_set) && !pgprot_val(mask_clr))
                return 0;

        ret = __change_page_attr_set_clr(addr, numpages, mask_set, mask_clr);

        /*
         * On success we use clflush, when the CPU supports it, to
         * avoid the wbinvd. If the CPU does not support clflush, or
         * in the error case, we fall back to cpa_flush_all (which
         * uses wbinvd):
         */
        if (!ret && cpu_has_clflush)
                cpa_flush_range(addr, numpages);
        else
                cpa_flush_all();

        return ret;
}

static inline int change_page_attr_set(unsigned long addr, int numpages,
                                       pgprot_t mask)
{
        return change_page_attr_set_clr(addr, numpages, mask, __pgprot(0));
}

static inline int change_page_attr_clear(unsigned long addr, int numpages,
                                         pgprot_t mask)
{
        return change_page_attr_set_clr(addr, numpages, __pgprot(0), mask);
}
int set_memory_uc(unsigned long addr, int numpages)
{
        return change_page_attr_set(addr, numpages,
                                    __pgprot(_PAGE_PCD | _PAGE_PWT));
}
EXPORT_SYMBOL(set_memory_uc);

int set_memory_wb(unsigned long addr, int numpages)
{
        return change_page_attr_clear(addr, numpages,
                                      __pgprot(_PAGE_PCD | _PAGE_PWT));
}
EXPORT_SYMBOL(set_memory_wb);
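
/*
 * Example (hypothetical driver code, not part of this file): making a
 * kernel buffer uncached for device access and restoring write-back
 * caching before freeing it:
 *
 *        if (set_memory_uc((unsigned long)buf, nr_pages))
 *                goto fail;
 *        ...
 *        set_memory_wb((unsigned long)buf, nr_pages);
 *
 * buf is assumed to be page aligned; buf and nr_pages are placeholder
 * names supplied by the caller.
 */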
int set_memory_x(unsigned long addr, int numpages)
{
        return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_NX));
}
EXPORT_SYMBOL(set_memory_x);

int set_memory_nx(unsigned long addr, int numpages)
{
        return change_page_attr_set(addr, numpages, __pgprot(_PAGE_NX));
}
EXPORT_SYMBOL(set_memory_nx);

int set_memory_ro(unsigned long addr, int numpages)
{
        return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_RW));
}

int set_memory_rw(unsigned long addr, int numpages)
{
        return change_page_attr_set(addr, numpages, __pgprot(_PAGE_RW));
}
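
/*
 * Example (hypothetical caller, not part of this file): write
 * protecting a page that holds data which must not be modified, and
 * lifting the protection again for an intentional update:
 *
 *        set_memory_ro(addr, 1);
 *        ...
 *        set_memory_rw(addr, 1);
 *        (update the data)
 *        set_memory_ro(addr, 1);
 *
 * The same RW-clearing mechanism backs the CONFIG_DEBUG_RODATA
 * protection of the kernel .rodata section.
 */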
int set_memory_np(unsigned long addr, int numpages)
{
        return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_PRESENT));
}

int set_pages_uc(struct page *page, int numpages)
{
        unsigned long addr = (unsigned long)page_address(page);

        return set_memory_uc(addr, numpages);
}
EXPORT_SYMBOL(set_pages_uc);

int set_pages_wb(struct page *page, int numpages)
{
        unsigned long addr = (unsigned long)page_address(page);

        return set_memory_wb(addr, numpages);
}
EXPORT_SYMBOL(set_pages_wb);
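
/*
 * Example (hypothetical caller, not part of this file): the set_pages_*
 * variants are just struct page based wrappers around set_memory_*:
 *
 *        struct page *page = alloc_page(GFP_KERNEL);
 *
 *        set_pages_uc(page, 1);
 *        ...
 *        set_pages_wb(page, 1);
 *
 * Since page_address() is used to find the linear mapping address,
 * these only make sense for lowmem pages.
 */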
int set_pages_x(struct page *page, int numpages)
{
        unsigned long addr = (unsigned long)page_address(page);

        return set_memory_x(addr, numpages);
}
EXPORT_SYMBOL(set_pages_x);

int set_pages_nx(struct page *page, int numpages)
{
        unsigned long addr = (unsigned long)page_address(page);

        return set_memory_nx(addr, numpages);
}
EXPORT_SYMBOL(set_pages_nx);

int set_pages_ro(struct page *page, int numpages)
{
        unsigned long addr = (unsigned long)page_address(page);

        return set_memory_ro(addr, numpages);
}

int set_pages_rw(struct page *page, int numpages)
{
        unsigned long addr = (unsigned long)page_address(page);

        return set_memory_rw(addr, numpages);
}
#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_CPA_DEBUG)

static inline int __change_page_attr_set(unsigned long addr, int numpages,
                                         pgprot_t mask)
{
        return __change_page_attr_set_clr(addr, numpages, mask, __pgprot(0));
}

static inline int __change_page_attr_clear(unsigned long addr, int numpages,
                                           pgprot_t mask)
{
        return __change_page_attr_set_clr(addr, numpages, __pgprot(0), mask);
}

#endif
#ifdef CONFIG_DEBUG_PAGEALLOC

static int __set_pages_p(struct page *page, int numpages)
{
        unsigned long addr = (unsigned long)page_address(page);

        return __change_page_attr_set(addr, numpages,
                                      __pgprot(_PAGE_PRESENT | _PAGE_RW));
}

static int __set_pages_np(struct page *page, int numpages)
{
        unsigned long addr = (unsigned long)page_address(page);

        return __change_page_attr_clear(addr, numpages,
                                        __pgprot(_PAGE_PRESENT));
}

void kernel_map_pages(struct page *page, int numpages, int enable)
{
        if (PageHighMem(page))
                return;

        if (!enable) {
                debug_check_no_locks_freed(page_address(page),
                                           numpages * PAGE_SIZE);
        }

        /*
         * If the page allocator is not up yet, do not call c_p_a():
         */
        if (!debug_pagealloc_enabled)
                return;

        /*
         * The return value is ignored - the calls cannot fail,
         * large pages are disabled at boot time:
         */
        if (enable)
                __set_pages_p(page, numpages);
        else
                __set_pages_np(page, numpages);

        /*
         * We should perform an IPI and flush all tlbs,
         * but that can deadlock, so we only flush the current CPU's:
         */
        __flush_tlb_all();
}
#endif
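
/*
 * Usage note (not from the original code): with CONFIG_DEBUG_PAGEALLOC
 * enabled, the page allocator calls kernel_map_pages(page, n, 0) when
 * pages are freed and kernel_map_pages(page, n, 1) when they are
 * allocated again, so a use-after-free through the linear mapping
 * faults immediately instead of silently corrupting memory.
 */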
/*
 * The testcases use internal knowledge of the implementation that shouldn't
 * be exposed to the rest of the kernel. Include these directly here.
 */
#ifdef CONFIG_CPA_DEBUG
#include "pageattr-test.c"
#endif