pageattr.c

/*
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Thanks to Ben LaHaise for precious feedback.
 */
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/interrupt.h>

#include <asm/e820.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/proto.h>

/*
 * The current flushing context - we pass it instead of 5 arguments:
 */
struct cpa_data {
	unsigned long	vaddr;
	pgprot_t	mask_set;
	pgprot_t	mask_clr;
	int		numpages;
	int		flushtlb;
	unsigned long	pfn;
};

#ifdef CONFIG_X86_64

static inline unsigned long highmap_start_pfn(void)
{
	return __pa(_text) >> PAGE_SHIFT;
}

static inline unsigned long highmap_end_pfn(void)
{
	return __pa(round_up((unsigned long)_end, PMD_SIZE)) >> PAGE_SHIFT;
}

#endif

#ifdef CONFIG_DEBUG_PAGEALLOC
# define debug_pagealloc 1
#else
# define debug_pagealloc 0
#endif

static inline int
within(unsigned long addr, unsigned long start, unsigned long end)
{
	return addr >= start && addr < end;
}

/*
 * Flushing functions
 */

/**
 * clflush_cache_range - flush a cache range with clflush
 * @addr:	virtual start address
 * @size:	number of bytes to flush
 *
 * clflush is an unordered instruction which needs fencing with mfence
 * to avoid ordering issues.
 */
void clflush_cache_range(void *vaddr, unsigned int size)
{
	void *vend = vaddr + size - 1;

	mb();

	for (; vaddr < vend; vaddr += boot_cpu_data.x86_clflush_size)
		clflush(vaddr);
	/*
	 * Flush any possible final partial cacheline:
	 */
	clflush(vend);

	mb();
}
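
/*
 * Illustrative example (not part of any code path in this file): flushing
 * a 100 byte buffer that starts 32 bytes into a 64 byte cacheline spans
 * three lines; the loop above flushes the first two, and the trailing
 * clflush(vend) covers the final partial line that the stride stepped
 * over.
 */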

static void __cpa_flush_all(void *arg)
{
	unsigned long cache = (unsigned long)arg;

	/*
	 * Flush all to work around errata in early Athlons regarding
	 * large page flushing.
	 */
	__flush_tlb_all();

	if (cache && boot_cpu_data.x86_model >= 4)
		wbinvd();
}

static void cpa_flush_all(unsigned long cache)
{
	BUG_ON(irqs_disabled());

	on_each_cpu(__cpa_flush_all, (void *) cache, 1, 1);
}

static void __cpa_flush_range(void *arg)
{
	/*
	 * We could optimize that further and do individual per page
	 * tlb invalidates for a low number of pages. Caveat: we must
	 * flush the high aliases on 64bit as well.
	 */
	__flush_tlb_all();
}

static void cpa_flush_range(unsigned long start, int numpages, int cache)
{
	unsigned int i, level;
	unsigned long addr;

	BUG_ON(irqs_disabled());
	WARN_ON(PAGE_ALIGN(start) != start);

	on_each_cpu(__cpa_flush_range, NULL, 1, 1);

	if (!cache)
		return;

	/*
	 * We only need to flush on one CPU,
	 * clflush is a MESI-coherent instruction that
	 * will cause all other CPUs to flush the same
	 * cachelines:
	 */
	for (i = 0, addr = start; i < numpages; i++, addr += PAGE_SIZE) {
		pte_t *pte = lookup_address(addr, &level);

		/*
		 * Only flush present addresses:
		 */
		if (pte && (pte_val(*pte) & _PAGE_PRESENT))
			clflush_cache_range((void *) addr, PAGE_SIZE);
	}
}

/*
 * Certain areas of memory on x86 require very specific protection flags,
 * for example the BIOS area or kernel text. Callers don't always get this
 * right (again, ioremap() on BIOS memory is not uncommon) so this function
 * checks and fixes these known static required protection bits.
 */
static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
					  unsigned long pfn)
{
	pgprot_t forbidden = __pgprot(0);

	/*
	 * The BIOS area between 640k and 1Mb needs to be executable for
	 * PCI BIOS based config access (CONFIG_PCI_GOBIOS) support.
	 */
	if (within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
		pgprot_val(forbidden) |= _PAGE_NX;

	/*
	 * The kernel text needs to be executable for obvious reasons.
	 * Does not cover __inittext since that is gone later on. On
	 * 64bit we do not enforce !NX on the low mapping.
	 */
	if (within(address, (unsigned long)_text, (unsigned long)_etext))
		pgprot_val(forbidden) |= _PAGE_NX;

	/*
	 * The .rodata section needs to be read-only. Using the pfn
	 * catches all aliases.
	 */
	if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
		   __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
		pgprot_val(forbidden) |= _PAGE_RW;

	prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));

	return prot;
}
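
/*
 * Example of the effect (illustrative): a set_memory_rw() request that
 * happens to alias .rodata has _PAGE_RW filtered out of the new pgprot
 * here, and a request covering the 640k-1Mb BIOS window has _PAGE_NX
 * stripped so PCI BIOS config accesses stay executable.
 */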

/*
 * Lookup the page table entry for a virtual address. Return a pointer
 * to the entry and the level of the mapping.
 *
 * Note: We return pud and pmd either when the entry is marked large
 * or when the present bit is not set. Otherwise we would return a
 * pointer to a nonexisting mapping.
 */
pte_t *lookup_address(unsigned long address, unsigned int *level)
{
	pgd_t *pgd = pgd_offset_k(address);
	pud_t *pud;
	pmd_t *pmd;

	*level = PG_LEVEL_NONE;

	if (pgd_none(*pgd))
		return NULL;

	pud = pud_offset(pgd, address);
	if (pud_none(*pud))
		return NULL;

	*level = PG_LEVEL_1G;
	if (pud_large(*pud) || !pud_present(*pud))
		return (pte_t *)pud;

	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		return NULL;

	*level = PG_LEVEL_2M;
	if (pmd_large(*pmd) || !pmd_present(*pmd))
		return (pte_t *)pmd;

	*level = PG_LEVEL_4K;

	return pte_offset_kernel(pmd, address);
}
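
/*
 * Usage sketch (mirrors cpa_flush_range() above; illustrative only):
 *
 *	unsigned int level;
 *	pte_t *pte = lookup_address(addr, &level);
 *
 *	if (pte && (pte_val(*pte) & _PAGE_PRESENT))
 *		...	addr is mapped at PG_LEVEL_4K, _2M or _1G	...
 */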

/*
 * Set the new pmd in all the pgds we know about:
 */
static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
{
	/* change init_mm */
	set_pte_atomic(kpte, pte);
#ifdef CONFIG_X86_32
	if (!SHARED_KERNEL_PMD) {
		struct page *page;

		list_for_each_entry(page, &pgd_list, lru) {
			pgd_t *pgd;
			pud_t *pud;
			pmd_t *pmd;

			pgd = (pgd_t *)page_address(page) + pgd_index(address);
			pud = pud_offset(pgd, address);
			pmd = pmd_offset(pud, address);
			set_pte_atomic((pte_t *)pmd, pte);
		}
	}
#endif
}

static int
try_preserve_large_page(pte_t *kpte, unsigned long address,
			struct cpa_data *cpa)
{
	unsigned long nextpage_addr, numpages, pmask, psize, flags, addr, pfn;
	pte_t new_pte, old_pte, *tmp;
	pgprot_t old_prot, new_prot;
	int i, do_split = 1;
	unsigned int level;

	spin_lock_irqsave(&pgd_lock, flags);
	/*
	 * Check for races, another CPU might have split this page
	 * up already:
	 */
	tmp = lookup_address(address, &level);
	if (tmp != kpte)
		goto out_unlock;

	switch (level) {
	case PG_LEVEL_2M:
		psize = PMD_PAGE_SIZE;
		pmask = PMD_PAGE_MASK;
		break;
#ifdef CONFIG_X86_64
	case PG_LEVEL_1G:
		psize = PUD_PAGE_SIZE;
		pmask = PUD_PAGE_MASK;
		break;
#endif
	default:
		do_split = -EINVAL;
		goto out_unlock;
	}

	/*
	 * Calculate the number of pages, which fit into this large
	 * page starting at address:
	 */
	nextpage_addr = (address + psize) & pmask;
	numpages = (nextpage_addr - address) >> PAGE_SHIFT;
	if (numpages < cpa->numpages)
		cpa->numpages = numpages;

	/*
	 * We are safe now. Check whether the new pgprot is the same:
	 */
	old_pte = *kpte;
	old_prot = new_prot = pte_pgprot(old_pte);

	pgprot_val(new_prot) &= ~pgprot_val(cpa->mask_clr);
	pgprot_val(new_prot) |= pgprot_val(cpa->mask_set);

	/*
	 * old_pte points to the large page base address. So we need
	 * to add the offset of the virtual address:
	 */
	pfn = pte_pfn(old_pte) + ((address & (psize - 1)) >> PAGE_SHIFT);
	cpa->pfn = pfn;

	new_prot = static_protections(new_prot, address, pfn);

	/*
	 * We need to check the full range, whether
	 * static_protections() requires a different pgprot for one of
	 * the pages in the range we try to preserve:
	 */
	addr = address + PAGE_SIZE;
	pfn++;
	for (i = 1; i < cpa->numpages; i++, addr += PAGE_SIZE, pfn++) {
		pgprot_t chk_prot = static_protections(new_prot, addr, pfn);

		if (pgprot_val(chk_prot) != pgprot_val(new_prot))
			goto out_unlock;
	}

	/*
	 * If there are no changes, return. cpa->numpages has been updated
	 * above:
	 */
	if (pgprot_val(new_prot) == pgprot_val(old_prot)) {
		do_split = 0;
		goto out_unlock;
	}

	/*
	 * We need to change the attributes. Check whether we can
	 * change the large page in one go. We request a split when
	 * the address is not aligned or the number of pages is
	 * smaller than the number of pages in the large page. Note
	 * that we limited the number of possible pages already to
	 * the number of pages in the large page.
	 */
	if (address == (nextpage_addr - psize) && cpa->numpages == numpages) {
		/*
		 * The address is aligned and the number of pages
		 * covers the full page.
		 */
		new_pte = pfn_pte(pte_pfn(old_pte), canon_pgprot(new_prot));
		__set_pmd_pte(kpte, address, new_pte);
		cpa->flushtlb = 1;
		do_split = 0;
	}

out_unlock:
	spin_unlock_irqrestore(&pgd_lock, flags);

	return do_split;
}

static LIST_HEAD(page_pool);
static unsigned long pool_size, pool_pages, pool_low;
static unsigned long pool_used, pool_failed;

static void cpa_fill_pool(struct page **ret)
{
	gfp_t gfp = GFP_KERNEL;
	unsigned long flags;
	struct page *p;

	/*
	 * Avoid recursion (on debug-pagealloc) and also signal
	 * our priority to get to these pagetables:
	 */
	if (current->flags & PF_MEMALLOC)
		return;
	current->flags |= PF_MEMALLOC;

	/*
	 * Allocate atomically from atomic contexts:
	 */
	if (in_atomic() || irqs_disabled() || debug_pagealloc)
		gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;

	while (pool_pages < pool_size || (ret && !*ret)) {
		p = alloc_pages(gfp, 0);
		if (!p) {
			pool_failed++;
			break;
		}
		/*
		 * If the call site needs a page right now, provide it:
		 */
		if (ret && !*ret) {
			*ret = p;
			continue;
		}
		spin_lock_irqsave(&pgd_lock, flags);
		list_add(&p->lru, &page_pool);
		pool_pages++;
		spin_unlock_irqrestore(&pgd_lock, flags);
	}

	current->flags &= ~PF_MEMALLOC;
}

#define SHIFT_MB		(20 - PAGE_SHIFT)
#define ROUND_MB_GB		((1 << 10) - 1)
#define SHIFT_MB_GB		10
#define POOL_PAGES_PER_GB	16

void __init cpa_init(void)
{
	struct sysinfo si;
	unsigned long gb;

	si_meminfo(&si);
	/*
	 * Calculate the number of pool pages:
	 *
	 * Convert totalram (nr of pages) to MiB and round to the next
	 * GiB. Shift MiB to GiB and multiply the result by
	 * POOL_PAGES_PER_GB:
	 */
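	/*
	 * Worked example (illustrative, debug_pagealloc case): with 4k
	 * pages and 2 GiB of RAM, totalram = 524288 pages; shifting by
	 * SHIFT_MB (8) gives 2048 MiB, adding ROUND_MB_GB (1023) and
	 * shifting by SHIFT_MB_GB rounds that to 2 GiB, so pool_size
	 * becomes 2 * POOL_PAGES_PER_GB = 32 pages.
	 */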
	if (debug_pagealloc) {
		gb = ((si.totalram >> SHIFT_MB) + ROUND_MB_GB) >> SHIFT_MB_GB;
		pool_size = POOL_PAGES_PER_GB * gb;
	} else {
		pool_size = 1;
	}
	pool_low = pool_size;

	cpa_fill_pool(NULL);
	printk(KERN_DEBUG
	       "CPA: page pool initialized %lu of %lu pages preallocated\n",
	       pool_pages, pool_size);
}

static int split_large_page(pte_t *kpte, unsigned long address)
{
	unsigned long flags, pfn, pfninc = 1;
	unsigned int i, level;
	pte_t *pbase, *tmp;
	pgprot_t ref_prot;
	struct page *base;

	/*
	 * Get a page from the pool. The pool list is protected by the
	 * pgd_lock, which we have to take anyway for the split
	 * operation:
	 */
	spin_lock_irqsave(&pgd_lock, flags);
	if (list_empty(&page_pool)) {
		spin_unlock_irqrestore(&pgd_lock, flags);
		base = NULL;
		cpa_fill_pool(&base);
		if (!base)
			return -ENOMEM;
		spin_lock_irqsave(&pgd_lock, flags);
	} else {
		base = list_first_entry(&page_pool, struct page, lru);
		list_del(&base->lru);
		pool_pages--;

		if (pool_pages < pool_low)
			pool_low = pool_pages;
	}

	/*
	 * Check for races, another CPU might have split this page
	 * up for us already:
	 */
	tmp = lookup_address(address, &level);
	if (tmp != kpte)
		goto out_unlock;

	pbase = (pte_t *)page_address(base);
#ifdef CONFIG_X86_32
	paravirt_alloc_pt(&init_mm, page_to_pfn(base));
#endif
	ref_prot = pte_pgprot(pte_clrhuge(*kpte));

#ifdef CONFIG_X86_64
	if (level == PG_LEVEL_1G) {
		pfninc = PMD_PAGE_SIZE >> PAGE_SHIFT;
		pgprot_val(ref_prot) |= _PAGE_PSE;
	}
#endif

	/*
	 * Get the target pfn from the original entry:
	 */
	pfn = pte_pfn(*kpte);
	for (i = 0; i < PTRS_PER_PTE; i++, pfn += pfninc)
		set_pte(&pbase[i], pfn_pte(pfn, ref_prot));

	/*
	 * Install the new, split up pagetable. Important details here:
	 *
	 * On Intel the NX bit of all levels must be cleared to make a
	 * page executable (see section 4.13.2 of the Intel 64 and IA-32
	 * Architectures Software Developer's Manual).
	 *
	 * Mark the entry present. The current mapping might be
	 * set to not present, which we preserved above.
	 */
	ref_prot = pte_pgprot(pte_mkexec(pte_clrhuge(*kpte)));
	pgprot_val(ref_prot) |= _PAGE_PRESENT;
	__set_pmd_pte(kpte, address, mk_pte(base, ref_prot));
	base = NULL;

out_unlock:
	/*
	 * If we dropped out via the lookup_address check under
	 * pgd_lock then stick the page back into the pool:
	 */
	if (base) {
		list_add(&base->lru, &page_pool);
		pool_pages++;
	} else
		pool_used++;
	spin_unlock_irqrestore(&pgd_lock, flags);

	return 0;
}
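
/*
 * Illustrative note on the split above: a 2M mapping is replaced by
 * PTRS_PER_PTE (512 with 4k pages) 4k entries whose pfns run from
 * pte_pfn(*kpte) upwards in steps of one; for a 1G mapping on 64-bit,
 * pfninc is PMD_PAGE_SIZE >> PAGE_SHIFT, so each of the 512 new
 * entries is itself a 2M (PSE) mapping.
 */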

static int __change_page_attr(struct cpa_data *cpa, int primary)
{
	unsigned long address = cpa->vaddr;
	int do_split, err;
	unsigned int level;
	pte_t *kpte, old_pte;

repeat:
	kpte = lookup_address(address, &level);
	if (!kpte)
		return primary ? -EINVAL : 0;

	old_pte = *kpte;
	if (!pte_val(old_pte)) {
		if (!primary)
			return 0;
		printk(KERN_WARNING "CPA: called for zero pte. "
		       "vaddr = %lx cpa->vaddr = %lx\n", address,
		       cpa->vaddr);
		WARN_ON(1);
		return -EINVAL;
	}

	if (level == PG_LEVEL_4K) {
		pte_t new_pte;
		pgprot_t new_prot = pte_pgprot(old_pte);
		unsigned long pfn = pte_pfn(old_pte);

		pgprot_val(new_prot) &= ~pgprot_val(cpa->mask_clr);
		pgprot_val(new_prot) |= pgprot_val(cpa->mask_set);

		new_prot = static_protections(new_prot, address, pfn);

		/*
		 * We need to keep the pfn from the existing PTE,
		 * after all we're only going to change its attributes,
		 * not the memory it points to.
		 */
		new_pte = pfn_pte(pfn, canon_pgprot(new_prot));
		cpa->pfn = pfn;

		/*
		 * Do we really change anything ?
		 */
		if (pte_val(old_pte) != pte_val(new_pte)) {
			set_pte_atomic(kpte, new_pte);
			cpa->flushtlb = 1;
		}
		cpa->numpages = 1;
		return 0;
	}

	/*
	 * Check, whether we can keep the large page intact
	 * and just change the pte:
	 */
	do_split = try_preserve_large_page(kpte, address, cpa);

	/*
	 * When the range fits into the existing large page,
	 * return. cpa->numpages and cpa->flushtlb have been updated in
	 * try_preserve_large_page():
	 */
	if (do_split <= 0)
		return do_split;

	/*
	 * We have to split the large page:
	 */
	err = split_large_page(kpte, address);
	if (!err) {
		cpa->flushtlb = 1;
		goto repeat;
	}

	return err;
}

static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias);

static int cpa_process_alias(struct cpa_data *cpa)
{
	struct cpa_data alias_cpa;
	int ret = 0;

	if (cpa->pfn > max_pfn_mapped)
		return 0;

	/*
	 * No need to redo, when the primary call touched the direct
	 * mapping already:
	 */
	if (!within(cpa->vaddr, PAGE_OFFSET,
		    PAGE_OFFSET + (max_pfn_mapped << PAGE_SHIFT))) {

		alias_cpa = *cpa;
		alias_cpa.vaddr = (unsigned long) __va(cpa->pfn << PAGE_SHIFT);

		ret = __change_page_attr_set_clr(&alias_cpa, 0);
	}

#ifdef CONFIG_X86_64
	if (ret)
		return ret;
	/*
	 * No need to redo, when the primary call touched the high
	 * mapping already:
	 */
	if (within(cpa->vaddr, (unsigned long) _text, (unsigned long) _end))
		return 0;

	/*
	 * If the physical address is inside the kernel map, we need
	 * to touch the high mapped kernel as well:
	 */
	if (!within(cpa->pfn, highmap_start_pfn(), highmap_end_pfn()))
		return 0;

	alias_cpa = *cpa;
	alias_cpa.vaddr =
		(cpa->pfn << PAGE_SHIFT) + __START_KERNEL_map - phys_base;

	/*
	 * The high mapping range is imprecise, so ignore the return value.
	 */
	__change_page_attr_set_clr(&alias_cpa, 0);
#endif
	return ret;
}

static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias)
{
	int ret, numpages = cpa->numpages;

	while (numpages) {
		/*
		 * Store the remaining nr of pages for the large page
		 * preservation check.
		 */
		cpa->numpages = numpages;

		ret = __change_page_attr(cpa, checkalias);
		if (ret)
			return ret;

		if (checkalias) {
			ret = cpa_process_alias(cpa);
			if (ret)
				return ret;
		}

		/*
		 * Adjust the number of pages with the result of the
		 * CPA operation. Either a large page has been
		 * preserved or a single page update happened.
		 */
		BUG_ON(cpa->numpages > numpages);
		numpages -= cpa->numpages;
		cpa->vaddr += cpa->numpages * PAGE_SIZE;
	}
	return 0;
}

static inline int cache_attr(pgprot_t attr)
{
	return pgprot_val(attr) &
		(_PAGE_PAT | _PAGE_PAT_LARGE | _PAGE_PWT | _PAGE_PCD);
}
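
/*
 * For example (illustrative): set_memory_uc() sets _PAGE_PCD | _PAGE_PWT,
 * so cache_attr() is non-zero and the flush path below also flushes the
 * caches; set_memory_nx() touches none of these bits, so only the TLBs
 * need flushing.
 */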

static int change_page_attr_set_clr(unsigned long addr, int numpages,
				    pgprot_t mask_set, pgprot_t mask_clr)
{
	struct cpa_data cpa;
	int ret, cache, checkalias;

	/*
	 * Check whether we are requested to change an unsupported
	 * feature:
	 */
	mask_set = canon_pgprot(mask_set);
	mask_clr = canon_pgprot(mask_clr);
	if (!pgprot_val(mask_set) && !pgprot_val(mask_clr))
		return 0;

	/* Ensure we are PAGE_SIZE aligned */
	if (addr & ~PAGE_MASK) {
		addr &= PAGE_MASK;
		/*
		 * People should not be passing in unaligned addresses:
		 */
		WARN_ON_ONCE(1);
	}

	cpa.vaddr = addr;
	cpa.numpages = numpages;
	cpa.mask_set = mask_set;
	cpa.mask_clr = mask_clr;
	cpa.flushtlb = 0;

	/* No alias checking for _NX bit modifications */
	checkalias = (pgprot_val(mask_set) | pgprot_val(mask_clr)) != _PAGE_NX;

	ret = __change_page_attr_set_clr(&cpa, checkalias);

	/*
	 * Check whether we really changed something:
	 */
	if (!cpa.flushtlb)
		goto out;

	/*
	 * No need to flush, when we did not set any of the caching
	 * attributes:
	 */
	cache = cache_attr(mask_set);

	/*
	 * On success we use clflush, when the CPU supports it, to
	 * avoid the wbinvd. If the CPU does not support it, and in the
	 * error case, we fall back to cpa_flush_all (which uses
	 * wbinvd):
	 */
	if (!ret && cpu_has_clflush)
		cpa_flush_range(addr, numpages, cache);
	else
		cpa_flush_all(cache);

out:
	cpa_fill_pool(NULL);

	return ret;
}

static inline int change_page_attr_set(unsigned long addr, int numpages,
				       pgprot_t mask)
{
	return change_page_attr_set_clr(addr, numpages, mask, __pgprot(0));
}

static inline int change_page_attr_clear(unsigned long addr, int numpages,
					 pgprot_t mask)
{
	return change_page_attr_set_clr(addr, numpages, __pgprot(0), mask);
}

int set_memory_uc(unsigned long addr, int numpages)
{
	return change_page_attr_set(addr, numpages,
				    __pgprot(_PAGE_PCD | _PAGE_PWT));
}
EXPORT_SYMBOL(set_memory_uc);

int set_memory_wb(unsigned long addr, int numpages)
{
	return change_page_attr_clear(addr, numpages,
				      __pgprot(_PAGE_PCD | _PAGE_PWT));
}
EXPORT_SYMBOL(set_memory_wb);
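
/*
 * Typical driver-side usage (a sketch, not called from this file;
 * "order" and the buffer are hypothetical):
 *
 *	unsigned long buf = __get_free_pages(GFP_KERNEL, order);
 *
 *	set_memory_uc(buf, 1 << order);
 *	...	let the device work on the now uncached buffer	...
 *	set_memory_wb(buf, 1 << order);
 *	free_pages(buf, order);
 */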

int set_memory_x(unsigned long addr, int numpages)
{
	return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_NX));
}
EXPORT_SYMBOL(set_memory_x);

int set_memory_nx(unsigned long addr, int numpages)
{
	return change_page_attr_set(addr, numpages, __pgprot(_PAGE_NX));
}
EXPORT_SYMBOL(set_memory_nx);

int set_memory_ro(unsigned long addr, int numpages)
{
	return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_RW));
}

int set_memory_rw(unsigned long addr, int numpages)
{
	return change_page_attr_set(addr, numpages, __pgprot(_PAGE_RW));
}

int set_memory_np(unsigned long addr, int numpages)
{
	return change_page_attr_clear(addr, numpages, __pgprot(_PAGE_PRESENT));
}

int set_pages_uc(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_uc(addr, numpages);
}
EXPORT_SYMBOL(set_pages_uc);

int set_pages_wb(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_wb(addr, numpages);
}
EXPORT_SYMBOL(set_pages_wb);

int set_pages_x(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_x(addr, numpages);
}
EXPORT_SYMBOL(set_pages_x);

int set_pages_nx(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_nx(addr, numpages);
}
EXPORT_SYMBOL(set_pages_nx);

int set_pages_ro(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_ro(addr, numpages);
}

int set_pages_rw(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);

	return set_memory_rw(addr, numpages);
}

#ifdef CONFIG_DEBUG_PAGEALLOC

static int __set_pages_p(struct page *page, int numpages)
{
	struct cpa_data cpa = { .vaddr = (unsigned long) page_address(page),
				.numpages = numpages,
				.mask_set = __pgprot(_PAGE_PRESENT | _PAGE_RW),
				.mask_clr = __pgprot(0)};

	return __change_page_attr_set_clr(&cpa, 1);
}

static int __set_pages_np(struct page *page, int numpages)
{
	struct cpa_data cpa = { .vaddr = (unsigned long) page_address(page),
				.numpages = numpages,
				.mask_set = __pgprot(0),
				.mask_clr = __pgprot(_PAGE_PRESENT | _PAGE_RW)};

	return __change_page_attr_set_clr(&cpa, 1);
}

void kernel_map_pages(struct page *page, int numpages, int enable)
{
	if (PageHighMem(page))
		return;

	if (!enable) {
		debug_check_no_locks_freed(page_address(page),
					   numpages * PAGE_SIZE);
	}

	/*
	 * If page allocator is not up yet then do not call c_p_a():
	 */
	if (!debug_pagealloc_enabled)
		return;

	/*
	 * The return value is ignored as the calls cannot fail.
	 * Large pages are kept enabled at boot time, and are
	 * split up quickly with DEBUG_PAGEALLOC. If a splitup
	 * fails here (due to temporary memory shortage) no damage
	 * is done because we just keep the largepage intact up
	 * to the next attempt when it will likely be split up:
	 */
	if (enable)
		__set_pages_p(page, numpages);
	else
		__set_pages_np(page, numpages);

	/*
	 * We should perform an IPI and flush all tlbs,
	 * but that can deadlock, so flush only the current cpu:
	 */
	__flush_tlb_all();

	/*
	 * Try to refill the page pool here. We can do this only after
	 * the tlb flush.
	 */
	cpa_fill_pool(NULL);
}

#ifdef CONFIG_HIBERNATION

bool kernel_page_present(struct page *page)
{
	unsigned int level;
	pte_t *pte;

	if (PageHighMem(page))
		return false;

	pte = lookup_address((unsigned long)page_address(page), &level);
	return (pte_val(*pte) & _PAGE_PRESENT);
}

#endif /* CONFIG_HIBERNATION */

#endif /* CONFIG_DEBUG_PAGEALLOC */

/*
 * The testcases use internal knowledge of the implementation that shouldn't
 * be exposed to the rest of the kernel. Include these directly here.
 */
#ifdef CONFIG_CPA_DEBUG
#include "pageattr-test.c"
#endif