pageattr.c

/*
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Thanks to Ben LaHaise for precious feedback.
 */
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/pfn.h>
#include <linux/percpu.h>
#include <linux/gfp.h>
#include <linux/pci.h>

#include <asm/e820.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/proto.h>
#include <asm/pat.h>

/*
 * The current flushing context - we pass it instead of 5 arguments:
 */
struct cpa_data {
        unsigned long   *vaddr;
        pgprot_t        mask_set;
        pgprot_t        mask_clr;
        int             numpages;
        int             flags;
        unsigned long   pfn;
        unsigned        force_split : 1;
        int             curpage;
        struct page     **pages;
};

/*
 * Serialize cpa() (for !DEBUG_PAGEALLOC which uses large identity mappings)
 * using cpa_lock, so that we don't allow any other CPU, with stale large TLB
 * entries, to change the page attributes in parallel while another CPU is
 * splitting a large page entry and changing its attributes.
 */
static DEFINE_SPINLOCK(cpa_lock);

#define CPA_FLUSHTLB 1
#define CPA_ARRAY 2
#define CPA_PAGES_ARRAY 4

#ifdef CONFIG_PROC_FS
static unsigned long direct_pages_count[PG_LEVEL_NUM];

void update_page_count(int level, unsigned long pages)
{
        /* Protect against CPA */
        spin_lock(&pgd_lock);
        direct_pages_count[level] += pages;
        spin_unlock(&pgd_lock);
}

static void split_page_count(int level)
{
        direct_pages_count[level]--;
        direct_pages_count[level - 1] += PTRS_PER_PTE;
}

void arch_report_meminfo(struct seq_file *m)
{
        seq_printf(m, "DirectMap4k: %8lu kB\n",
                        direct_pages_count[PG_LEVEL_4K] << 2);
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
        seq_printf(m, "DirectMap2M: %8lu kB\n",
                        direct_pages_count[PG_LEVEL_2M] << 11);
#else
        seq_printf(m, "DirectMap4M: %8lu kB\n",
                        direct_pages_count[PG_LEVEL_2M] << 12);
#endif
#ifdef CONFIG_X86_64
        if (direct_gbpages)
                seq_printf(m, "DirectMap1G: %8lu kB\n",
                        direct_pages_count[PG_LEVEL_1G] << 20);
#endif
}
#else
static inline void split_page_count(int level) { }
#endif

#ifdef CONFIG_X86_64

static inline unsigned long highmap_start_pfn(void)
{
        return __pa_symbol(_text) >> PAGE_SHIFT;
}

static inline unsigned long highmap_end_pfn(void)
{
        return __pa_symbol(roundup(_brk_end, PMD_SIZE)) >> PAGE_SHIFT;
}

#endif

#ifdef CONFIG_DEBUG_PAGEALLOC
# define debug_pagealloc 1
#else
# define debug_pagealloc 0
#endif

static inline int
within(unsigned long addr, unsigned long start, unsigned long end)
{
        return addr >= start && addr < end;
}

/*
 * Flushing functions
 */

/**
 * clflush_cache_range - flush a cache range with clflush
 * @vaddr: virtual start address
 * @size: number of bytes to flush
 *
 * clflush is an unordered instruction which needs fencing with mfence
 * to avoid ordering issues.
 */
void clflush_cache_range(void *vaddr, unsigned int size)
{
        void *vend = vaddr + size - 1;

        mb();

        for (; vaddr < vend; vaddr += boot_cpu_data.x86_clflush_size)
                clflush(vaddr);
        /*
         * Flush any possible final partial cacheline:
         */
        clflush(vend);

        mb();
}
EXPORT_SYMBOL_GPL(clflush_cache_range);

static void __cpa_flush_all(void *arg)
{
        unsigned long cache = (unsigned long)arg;

        /*
         * Flush all to work around Errata in early athlons regarding
         * large page flushing.
         */
        __flush_tlb_all();

        if (cache && boot_cpu_data.x86 >= 4)
                wbinvd();
}

static void cpa_flush_all(unsigned long cache)
{
        BUG_ON(irqs_disabled());

        on_each_cpu(__cpa_flush_all, (void *) cache, 1);
}

static void __cpa_flush_range(void *arg)
{
        /*
         * We could optimize that further and do individual per page
         * tlb invalidates for a low number of pages. Caveat: we must
         * flush the high aliases on 64bit as well.
         */
        __flush_tlb_all();
}

static void cpa_flush_range(unsigned long start, int numpages, int cache)
{
        unsigned int i, level;
        unsigned long addr;

        BUG_ON(irqs_disabled());
        WARN_ON(PAGE_ALIGN(start) != start);

        on_each_cpu(__cpa_flush_range, NULL, 1);

        if (!cache)
                return;

        /*
         * We only need to flush on one CPU,
         * clflush is a MESI-coherent instruction that
         * will cause all other CPUs to flush the same
         * cachelines:
         */
        for (i = 0, addr = start; i < numpages; i++, addr += PAGE_SIZE) {
                pte_t *pte = lookup_address(addr, &level);

                /*
                 * Only flush present addresses:
                 */
                if (pte && (pte_val(*pte) & _PAGE_PRESENT))
                        clflush_cache_range((void *) addr, PAGE_SIZE);
        }
}
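
/*
 * Flush the caches and TLBs for an array of pages, given either as a virtual
 * address array or a struct page array. For large arrays a single wbinvd()
 * is used instead of per-page clflush.
 */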
static void cpa_flush_array(unsigned long *start, int numpages, int cache,
                            int in_flags, struct page **pages)
{
        unsigned int i, level;
        unsigned long do_wbinvd = cache && numpages >= 1024; /* 4M threshold */

        BUG_ON(irqs_disabled());

        on_each_cpu(__cpa_flush_all, (void *) do_wbinvd, 1);

        if (!cache || do_wbinvd)
                return;

        /*
         * We only need to flush on one CPU,
         * clflush is a MESI-coherent instruction that
         * will cause all other CPUs to flush the same
         * cachelines:
         */
        for (i = 0; i < numpages; i++) {
                unsigned long addr;
                pte_t *pte;

                if (in_flags & CPA_PAGES_ARRAY)
                        addr = (unsigned long)page_address(pages[i]);
                else
                        addr = start[i];

                pte = lookup_address(addr, &level);

                /*
                 * Only flush present addresses:
                 */
                if (pte && (pte_val(*pte) & _PAGE_PRESENT))
                        clflush_cache_range((void *)addr, PAGE_SIZE);
        }
}

/*
 * Certain areas of memory on x86 require very specific protection flags,
 * for example the BIOS area or kernel text. Callers don't always get this
 * right (again, ioremap() on BIOS memory is not uncommon) so this function
 * checks and fixes these known static required protection bits.
 */
static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
                                          unsigned long pfn)
{
        pgprot_t forbidden = __pgprot(0);

        /*
         * The BIOS area between 640k and 1Mb needs to be executable for
         * PCI BIOS based config access (CONFIG_PCI_GOBIOS) support.
         */
#ifdef CONFIG_PCI_BIOS
        if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
                pgprot_val(forbidden) |= _PAGE_NX;
#endif

        /*
         * The kernel text needs to be executable for obvious reasons.
         * This does not cover __inittext since that is gone later on. On
         * 64-bit we do not enforce !NX on the low mapping.
         */
        if (within(address, (unsigned long)_text, (unsigned long)_etext))
                pgprot_val(forbidden) |= _PAGE_NX;

        /*
         * The .rodata section needs to be read-only. Using the pfn
         * catches all aliases.
         */
        if (within(pfn, __pa_symbol(__start_rodata) >> PAGE_SHIFT,
                   __pa_symbol(__end_rodata) >> PAGE_SHIFT))
                pgprot_val(forbidden) |= _PAGE_RW;

#if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
        /*
         * Once the kernel maps the text as RO (kernel_set_to_readonly is set),
         * the kernel text mappings for the large-page-aligned text and rodata
         * sections will always be read-only. The kernel identity mappings
         * covering the holes caused by this alignment can be anything the
         * caller asks for.
         *
         * This will preserve the large page mappings for kernel text/data
         * at no extra cost.
         */
        if (kernel_set_to_readonly &&
            within(address, (unsigned long)_text,
                   (unsigned long)__end_rodata_hpage_align)) {
                unsigned int level;

                /*
                 * Don't enforce the !RW mapping for the kernel text mapping,
                 * if the current mapping is already using small page mapping.
                 * No need to work hard to preserve large page mappings in this
                 * case.
                 *
                 * This also fixes the Linux Xen paravirt guest boot failure
                 * (because of unexpected read-only mappings for kernel identity
                 * mappings). In this paravirt guest case, the kernel text
                 * mapping and the kernel identity mapping share the same
                 * page-table pages. Thus we can't really use different
                 * protections for the kernel text and identity mappings. Also,
                 * these shared mappings are made of small page mappings.
                 * Thus this "don't enforce !RW for small page kernel text
                 * mappings" logic also helps Linux Xen paravirt guests boot.
                 */
                if (lookup_address(address, &level) && (level != PG_LEVEL_4K))
                        pgprot_val(forbidden) |= _PAGE_RW;
        }
#endif

        prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));

        return prot;
}

/*
 * Lookup the page table entry for a virtual address. Return a pointer
 * to the entry and the level of the mapping.
 *
 * Note: We return pud and pmd either when the entry is marked large
 * or when the present bit is not set. Otherwise we would return a
 * pointer to a nonexisting mapping.
 */
pte_t *lookup_address(unsigned long address, unsigned int *level)
{
        pgd_t *pgd = pgd_offset_k(address);
        pud_t *pud;
        pmd_t *pmd;

        *level = PG_LEVEL_NONE;

        if (pgd_none(*pgd))
                return NULL;

        pud = pud_offset(pgd, address);
        if (pud_none(*pud))
                return NULL;

        *level = PG_LEVEL_1G;
        if (pud_large(*pud) || !pud_present(*pud))
                return (pte_t *)pud;

        pmd = pmd_offset(pud, address);
        if (pmd_none(*pmd))
                return NULL;

        *level = PG_LEVEL_2M;
        if (pmd_large(*pmd) || !pmd_present(*pmd))
                return (pte_t *)pmd;

        *level = PG_LEVEL_4K;

        return pte_offset_kernel(pmd, address);
}
EXPORT_SYMBOL_GPL(lookup_address);

/*
 * This is necessary because __pa() does not work on some
 * kinds of memory, like vmalloc() or the alloc_remap()
 * areas on 32-bit NUMA systems. The percpu areas can
 * end up in this kind of memory, for instance.
 *
 * This could be optimized, but it is only intended to be
 * used at initialization time, and keeping it
 * unoptimized should increase the testing coverage for
 * the more obscure platforms.
 */
phys_addr_t slow_virt_to_phys(void *__virt_addr)
{
        unsigned long virt_addr = (unsigned long)__virt_addr;
        phys_addr_t phys_addr;
        unsigned long offset;
        enum pg_level level;
        unsigned long psize;
        unsigned long pmask;
        pte_t *pte;

        pte = lookup_address(virt_addr, &level);
        BUG_ON(!pte);
        psize = page_level_size(level);
        pmask = page_level_mask(level);
        offset = virt_addr & ~pmask;
        phys_addr = pte_pfn(*pte) << PAGE_SHIFT;
        return (phys_addr | offset);
}
EXPORT_SYMBOL_GPL(slow_virt_to_phys);

/*
 * Set the new pmd in all the pgds we know about:
 */
static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
{
        /* change init_mm */
        set_pte_atomic(kpte, pte);
#ifdef CONFIG_X86_32
        if (!SHARED_KERNEL_PMD) {
                struct page *page;

                list_for_each_entry(page, &pgd_list, lru) {
                        pgd_t *pgd;
                        pud_t *pud;
                        pmd_t *pmd;

                        pgd = (pgd_t *)page_address(page) + pgd_index(address);
                        pud = pud_offset(pgd, address);
                        pmd = pmd_offset(pud, address);
                        set_pte_atomic((pte_t *)pmd, pte);
                }
        }
#endif
}
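
/*
 * Try to apply the requested protection change to a large (2M/1G) mapping
 * without splitting it. Returns 0 if the large page could be preserved (or
 * nothing had to change), 1 if it must be split, or a negative error code.
 */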
static int
try_preserve_large_page(pte_t *kpte, unsigned long address,
                        struct cpa_data *cpa)
{
        unsigned long nextpage_addr, numpages, pmask, psize, addr, pfn;
        pte_t new_pte, old_pte, *tmp;
        pgprot_t old_prot, new_prot, req_prot;
        int i, do_split = 1;
        enum pg_level level;

        if (cpa->force_split)
                return 1;

        spin_lock(&pgd_lock);
        /*
         * Check for races, another CPU might have split this page
         * up already:
         */
        tmp = lookup_address(address, &level);
        if (tmp != kpte)
                goto out_unlock;

        switch (level) {
        case PG_LEVEL_2M:
#ifdef CONFIG_X86_64
        case PG_LEVEL_1G:
#endif
                psize = page_level_size(level);
                pmask = page_level_mask(level);
                break;
        default:
                do_split = -EINVAL;
                goto out_unlock;
        }

        /*
         * Calculate the number of pages, which fit into this large
         * page starting at address:
         */
        nextpage_addr = (address + psize) & pmask;
        numpages = (nextpage_addr - address) >> PAGE_SHIFT;
        if (numpages < cpa->numpages)
                cpa->numpages = numpages;

        /*
         * We are safe now. Check whether the new pgprot is the same:
         */
        old_pte = *kpte;
        old_prot = req_prot = pte_pgprot(old_pte);

        pgprot_val(req_prot) &= ~pgprot_val(cpa->mask_clr);
        pgprot_val(req_prot) |= pgprot_val(cpa->mask_set);

        /*
         * Set the PSE and GLOBAL flags only if the PRESENT flag is
         * set otherwise pmd_present/pmd_huge will return true even on
         * a non present pmd. The canon_pgprot will clear _PAGE_GLOBAL
         * for the ancient hardware that doesn't support it.
         */
        if (pgprot_val(req_prot) & _PAGE_PRESENT)
                pgprot_val(req_prot) |= _PAGE_PSE | _PAGE_GLOBAL;
        else
                pgprot_val(req_prot) &= ~(_PAGE_PSE | _PAGE_GLOBAL);

        req_prot = canon_pgprot(req_prot);

        /*
         * old_pte points to the large page base address. So we need
         * to add the offset of the virtual address:
         */
        pfn = pte_pfn(old_pte) + ((address & (psize - 1)) >> PAGE_SHIFT);
        cpa->pfn = pfn;

        new_prot = static_protections(req_prot, address, pfn);

        /*
         * We need to check the full range, whether
         * static_protection() requires a different pgprot for one of
         * the pages in the range we try to preserve:
         */
        addr = address & pmask;
        pfn = pte_pfn(old_pte);
        for (i = 0; i < (psize >> PAGE_SHIFT); i++, addr += PAGE_SIZE, pfn++) {
                pgprot_t chk_prot = static_protections(req_prot, addr, pfn);

                if (pgprot_val(chk_prot) != pgprot_val(new_prot))
                        goto out_unlock;
        }

        /*
         * If there are no changes, return. cpa->numpages has been
         * updated above:
         */
        if (pgprot_val(new_prot) == pgprot_val(old_prot)) {
                do_split = 0;
                goto out_unlock;
        }

        /*
         * We need to change the attributes. Check, whether we can
         * change the large page in one go. We request a split, when
         * the address is not aligned and the number of pages is
         * smaller than the number of pages in the large page. Note
         * that we limited the number of possible pages already to
         * the number of pages in the large page.
         */
        if (address == (address & pmask) && cpa->numpages == (psize >> PAGE_SHIFT)) {
                /*
                 * The address is aligned and the number of pages
                 * covers the full page.
                 */
                new_pte = pfn_pte(pte_pfn(old_pte), new_prot);
                __set_pmd_pte(kpte, address, new_pte);
                cpa->flags |= CPA_FLUSHTLB;
                do_split = 0;
        }

out_unlock:
        spin_unlock(&pgd_lock);

        return do_split;
}
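
/*
 * Split the large mapping at @kpte into entries of the next smaller page
 * size, using @base (preallocated by the caller, as we cannot sleep under
 * pgd_lock) as the new page table. Returns 1 if another CPU already split
 * the page for us, 0 on success.
 */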
static int
__split_large_page(pte_t *kpte, unsigned long address, struct page *base)
{
        pte_t *pbase = (pte_t *)page_address(base);
        unsigned long pfn, pfninc = 1;
        unsigned int i, level;
        pte_t *tmp;
        pgprot_t ref_prot;

        spin_lock(&pgd_lock);
        /*
         * Check for races, another CPU might have split this page
         * up for us already:
         */
        tmp = lookup_address(address, &level);
        if (tmp != kpte) {
                spin_unlock(&pgd_lock);
                return 1;
        }

        paravirt_alloc_pte(&init_mm, page_to_pfn(base));
        ref_prot = pte_pgprot(pte_clrhuge(*kpte));
        /*
         * If we ever want to utilize the PAT bit, we need to
         * update this function to make sure it's converted from
         * bit 12 to bit 7 when we cross from the 2MB level to
         * the 4K level:
         */
        WARN_ON_ONCE(pgprot_val(ref_prot) & _PAGE_PAT_LARGE);

#ifdef CONFIG_X86_64
        if (level == PG_LEVEL_1G) {
                pfninc = PMD_PAGE_SIZE >> PAGE_SHIFT;
                /*
                 * Set the PSE flags only if the PRESENT flag is set
                 * otherwise pmd_present/pmd_huge will return true
                 * even on a non present pmd.
                 */
                if (pgprot_val(ref_prot) & _PAGE_PRESENT)
                        pgprot_val(ref_prot) |= _PAGE_PSE;
                else
                        pgprot_val(ref_prot) &= ~_PAGE_PSE;
        }
#endif

        /*
         * Set the GLOBAL flags only if the PRESENT flag is set
         * otherwise pmd/pte_present will return true even on a non
         * present pmd/pte. The canon_pgprot will clear _PAGE_GLOBAL
         * for the ancient hardware that doesn't support it.
         */
        if (pgprot_val(ref_prot) & _PAGE_PRESENT)
                pgprot_val(ref_prot) |= _PAGE_GLOBAL;
        else
                pgprot_val(ref_prot) &= ~_PAGE_GLOBAL;

        /*
         * Get the target pfn from the original entry:
         */
        pfn = pte_pfn(*kpte);
        for (i = 0; i < PTRS_PER_PTE; i++, pfn += pfninc)
                set_pte(&pbase[i], pfn_pte(pfn, canon_pgprot(ref_prot)));

        if (pfn_range_is_mapped(PFN_DOWN(__pa(address)),
                                PFN_DOWN(__pa(address)) + 1))
                split_page_count(level);

        /*
         * Install the new, split up pagetable.
         *
         * We use the standard kernel pagetable protections for the new
         * pagetable protections, the actual ptes set above control the
         * primary protection behavior:
         */
        __set_pmd_pte(kpte, address, mk_pte(base, __pgprot(_KERNPG_TABLE)));

        /*
         * Intel Atom errata AAH41 workaround.
         *
         * The real fix should be in hw or in a microcode update, but
         * we also probabilistically try to reduce the window of having
         * a large TLB mixed with 4K TLBs while instruction fetches are
         * going on.
         */
        __flush_tlb_all();

        spin_unlock(&pgd_lock);

        return 0;
}
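
/*
 * Drop cpa_lock around the page table allocation (which may sleep), then do
 * the actual split under pgd_lock in __split_large_page().
 */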
static int split_large_page(pte_t *kpte, unsigned long address)
{
        struct page *base;

        if (!debug_pagealloc)
                spin_unlock(&cpa_lock);
        base = alloc_pages(GFP_KERNEL | __GFP_NOTRACK, 0);
        if (!debug_pagealloc)
                spin_lock(&cpa_lock);
        if (!base)
                return -ENOMEM;

        if (__split_large_page(kpte, address, base))
                __free_page(base);

        return 0;
}

static int __cpa_process_fault(struct cpa_data *cpa, unsigned long vaddr,
                               int primary)
{
        /*
         * Ignore all non primary paths.
         */
        if (!primary)
                return 0;

        /*
         * Ignore the NULL PTE for kernel identity mapping, as it is expected
         * to have holes.
         * Also set numpages to '1' indicating that we processed cpa req for
         * one virtual address page and its pfn. TBD: numpages can be set based
         * on the initial value and the level returned by lookup_address().
         */
        if (within(vaddr, PAGE_OFFSET,
                   PAGE_OFFSET + (max_pfn_mapped << PAGE_SHIFT))) {
                cpa->numpages = 1;
                cpa->pfn = __pa(vaddr) >> PAGE_SHIFT;
                return 0;
        } else {
                WARN(1, KERN_WARNING "CPA: called for zero pte. "
                        "vaddr = %lx cpa->vaddr = %lx\n", vaddr,
                        *cpa->vaddr);

                return -EFAULT;
        }
}
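
/*
 * Change the attributes of the single page the current cpa request points
 * at. 4K mappings are updated in place; large mappings are preserved when
 * possible, otherwise they are split and the operation is retried.
 */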
static int __change_page_attr(struct cpa_data *cpa, int primary)
{
        unsigned long address;
        int do_split, err;
        unsigned int level;
        pte_t *kpte, old_pte;

        if (cpa->flags & CPA_PAGES_ARRAY) {
                struct page *page = cpa->pages[cpa->curpage];
                if (unlikely(PageHighMem(page)))
                        return 0;
                address = (unsigned long)page_address(page);
        } else if (cpa->flags & CPA_ARRAY)
                address = cpa->vaddr[cpa->curpage];
        else
                address = *cpa->vaddr;
repeat:
        kpte = lookup_address(address, &level);
        if (!kpte)
                return __cpa_process_fault(cpa, address, primary);

        old_pte = *kpte;
        if (!pte_val(old_pte))
                return __cpa_process_fault(cpa, address, primary);

        if (level == PG_LEVEL_4K) {
                pte_t new_pte;
                pgprot_t new_prot = pte_pgprot(old_pte);
                unsigned long pfn = pte_pfn(old_pte);

                pgprot_val(new_prot) &= ~pgprot_val(cpa->mask_clr);
                pgprot_val(new_prot) |= pgprot_val(cpa->mask_set);

                new_prot = static_protections(new_prot, address, pfn);

                /*
                 * Set the GLOBAL flags only if the PRESENT flag is
                 * set otherwise pte_present will return true even on
                 * a non present pte. The canon_pgprot will clear
                 * _PAGE_GLOBAL for the ancient hardware that doesn't
                 * support it.
                 */
                if (pgprot_val(new_prot) & _PAGE_PRESENT)
                        pgprot_val(new_prot) |= _PAGE_GLOBAL;
                else
                        pgprot_val(new_prot) &= ~_PAGE_GLOBAL;

                /*
                 * We need to keep the pfn from the existing PTE,
                 * after all we're only going to change its attributes,
                 * not the memory it points to.
                 */
                new_pte = pfn_pte(pfn, canon_pgprot(new_prot));
                cpa->pfn = pfn;
                /*
                 * Do we really change anything ?
                 */
                if (pte_val(old_pte) != pte_val(new_pte)) {
                        set_pte_atomic(kpte, new_pte);
                        cpa->flags |= CPA_FLUSHTLB;
                }
                cpa->numpages = 1;
                return 0;
        }

        /*
         * Check, whether we can keep the large page intact
         * and just change the pte:
         */
        do_split = try_preserve_large_page(kpte, address, cpa);
        /*
         * When the range fits into the existing large page,
         * return. cpa->numpages and the CPA_FLUSHTLB flag have been
         * updated in try_preserve_large_page():
         */
        if (do_split <= 0)
                return do_split;

        /*
         * We have to split the large page:
         */
        err = split_large_page(kpte, address);
        if (!err) {
                /*
                 * Do a global flush tlb after splitting the large page
                 * and before we do the actual change page attribute in the PTE.
                 *
                 * Without this, we violate the TLB application note, which says
                 * "The TLBs may contain both ordinary and large-page
                 *  translations for a 4-KByte range of linear addresses. This
                 *  may occur if software modifies the paging structures so that
                 *  the page size used for the address range changes. If the two
                 *  translations differ with respect to page frame or attributes
                 *  (e.g., permissions), processor behavior is undefined and may
                 *  be implementation-specific."
                 *
                 * We do this global tlb flush inside the cpa_lock, so that we
                 * don't allow any other CPU, with stale TLB entries, to change
                 * the page attributes in parallel for an address that also
                 * falls into the just split large page entry.
                 */
                flush_tlb_all();
                goto repeat;
        }

        return err;
}

static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias);
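
/*
 * Also change the aliases of the physical page: the kernel direct mapping
 * and, on 64-bit, the high kernel text mapping.
 */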
static int cpa_process_alias(struct cpa_data *cpa)
{
        struct cpa_data alias_cpa;
        unsigned long laddr = (unsigned long)__va(cpa->pfn << PAGE_SHIFT);
        unsigned long vaddr;
        int ret;

        if (!pfn_range_is_mapped(cpa->pfn, cpa->pfn + 1))
                return 0;

        /*
         * No need to redo, when the primary call touched the direct
         * mapping already:
         */
        if (cpa->flags & CPA_PAGES_ARRAY) {
                struct page *page = cpa->pages[cpa->curpage];
                if (unlikely(PageHighMem(page)))
                        return 0;
                vaddr = (unsigned long)page_address(page);
        } else if (cpa->flags & CPA_ARRAY)
                vaddr = cpa->vaddr[cpa->curpage];
        else
                vaddr = *cpa->vaddr;

        if (!(within(vaddr, PAGE_OFFSET,
                    PAGE_OFFSET + (max_pfn_mapped << PAGE_SHIFT)))) {

                alias_cpa = *cpa;
                alias_cpa.vaddr = &laddr;
                alias_cpa.flags &= ~(CPA_PAGES_ARRAY | CPA_ARRAY);

                ret = __change_page_attr_set_clr(&alias_cpa, 0);
                if (ret)
                        return ret;
        }

#ifdef CONFIG_X86_64
        /*
         * If the primary call didn't touch the high mapping already
         * and the physical address is inside the kernel map, we need
         * to touch the high mapped kernel as well:
         */
        if (!within(vaddr, (unsigned long)_text, _brk_end) &&
            within(cpa->pfn, highmap_start_pfn(), highmap_end_pfn())) {
                unsigned long temp_cpa_vaddr = (cpa->pfn << PAGE_SHIFT) +
                                               __START_KERNEL_map - phys_base;
                alias_cpa = *cpa;
                alias_cpa.vaddr = &temp_cpa_vaddr;
                alias_cpa.flags &= ~(CPA_PAGES_ARRAY | CPA_ARRAY);

                /*
                 * The high mapping range is imprecise, so ignore the
                 * return value.
                 */
                __change_page_attr_set_clr(&alias_cpa, 0);
        }
#endif

        return 0;
}
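
/*
 * Walk the whole request, one page (or one preserved large page) at a time,
 * and optionally fix up the aliases of each page afterwards.
 */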
static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias)
{
        int ret, numpages = cpa->numpages;

        while (numpages) {
                /*
                 * Store the remaining nr of pages for the large page
                 * preservation check.
                 */
                cpa->numpages = numpages;
                /* for array changes, we can't use large page */
                if (cpa->flags & (CPA_ARRAY | CPA_PAGES_ARRAY))
                        cpa->numpages = 1;

                if (!debug_pagealloc)
                        spin_lock(&cpa_lock);
                ret = __change_page_attr(cpa, checkalias);
                if (!debug_pagealloc)
                        spin_unlock(&cpa_lock);
                if (ret)
                        return ret;

                if (checkalias) {
                        ret = cpa_process_alias(cpa);
                        if (ret)
                                return ret;
                }

                /*
                 * Adjust the number of pages with the result of the
                 * CPA operation. Either a large page has been
                 * preserved or a single page update happened.
                 */
                BUG_ON(cpa->numpages > numpages);
                numpages -= cpa->numpages;
                if (cpa->flags & (CPA_PAGES_ARRAY | CPA_ARRAY))
                        cpa->curpage++;
                else
                        *cpa->vaddr += cpa->numpages * PAGE_SIZE;
        }
        return 0;
}

static inline int cache_attr(pgprot_t attr)
{
        return pgprot_val(attr) &
                (_PAGE_PAT | _PAGE_PAT_LARGE | _PAGE_PWT | _PAGE_PCD);
}
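
/*
 * Common entry point for the set_memory_*() / set_pages_*() helpers below:
 * applies mask_set/mask_clr to the given range and then does the required
 * TLB and cache flushing.
 */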
static int change_page_attr_set_clr(unsigned long *addr, int numpages,
                                    pgprot_t mask_set, pgprot_t mask_clr,
                                    int force_split, int in_flag,
                                    struct page **pages)
{
        struct cpa_data cpa;
        int ret, cache, checkalias;
        unsigned long baddr = 0;

        /*
         * Check, if we are requested to change a not supported
         * feature:
         */
        mask_set = canon_pgprot(mask_set);
        mask_clr = canon_pgprot(mask_clr);
        if (!pgprot_val(mask_set) && !pgprot_val(mask_clr) && !force_split)
                return 0;

        /* Ensure we are PAGE_SIZE aligned */
        if (in_flag & CPA_ARRAY) {
                int i;
                for (i = 0; i < numpages; i++) {
                        if (addr[i] & ~PAGE_MASK) {
                                addr[i] &= PAGE_MASK;
                                WARN_ON_ONCE(1);
                        }
                }
        } else if (!(in_flag & CPA_PAGES_ARRAY)) {
                /*
                 * in_flag of CPA_PAGES_ARRAY implies it is aligned.
                 * No need to check in that case.
                 */
                if (*addr & ~PAGE_MASK) {
                        *addr &= PAGE_MASK;
                        /*
                         * People should not be passing in unaligned addresses:
                         */
                        WARN_ON_ONCE(1);
                }
                /*
                 * Save address for cache flush. *addr is modified in the call
                 * to __change_page_attr_set_clr() below.
                 */
                baddr = *addr;
        }

        /* Must avoid aliasing mappings in the highmem code */
        kmap_flush_unused();

        vm_unmap_aliases();

        cpa.vaddr = addr;
        cpa.pages = pages;
        cpa.numpages = numpages;
        cpa.mask_set = mask_set;
        cpa.mask_clr = mask_clr;
        cpa.flags = 0;
        cpa.curpage = 0;
        cpa.force_split = force_split;

        if (in_flag & (CPA_ARRAY | CPA_PAGES_ARRAY))
                cpa.flags |= in_flag;

        /* No alias checking for _NX bit modifications */
        checkalias = (pgprot_val(mask_set) | pgprot_val(mask_clr)) != _PAGE_NX;

        ret = __change_page_attr_set_clr(&cpa, checkalias);

        /*
         * Check whether we really changed something:
         */
        if (!(cpa.flags & CPA_FLUSHTLB))
                goto out;

        /*
         * No need to flush, when we did not set any of the caching
         * attributes:
         */
        cache = cache_attr(mask_set);

        /*
         * On success we use clflush, when the CPU supports it, to
         * avoid the wbinvd. If the CPU does not support it and in the
         * error case we fall back to cpa_flush_all (which uses
         * wbinvd):
         */
        if (!ret && cpu_has_clflush) {
                if (cpa.flags & (CPA_PAGES_ARRAY | CPA_ARRAY)) {
                        cpa_flush_array(addr, numpages, cache,
                                        cpa.flags, pages);
                } else
                        cpa_flush_range(baddr, numpages, cache);
        } else
                cpa_flush_all(cache);

out:
        return ret;
}

static inline int change_page_attr_set(unsigned long *addr, int numpages,
                                       pgprot_t mask, int array)
{
        return change_page_attr_set_clr(addr, numpages, mask, __pgprot(0), 0,
                (array ? CPA_ARRAY : 0), NULL);
}

static inline int change_page_attr_clear(unsigned long *addr, int numpages,
                                         pgprot_t mask, int array)
{
        return change_page_attr_set_clr(addr, numpages, __pgprot(0), mask, 0,
                (array ? CPA_ARRAY : 0), NULL);
}

static inline int cpa_set_pages_array(struct page **pages, int numpages,
                                      pgprot_t mask)
{
        return change_page_attr_set_clr(NULL, numpages, mask, __pgprot(0), 0,
                CPA_PAGES_ARRAY, pages);
}

static inline int cpa_clear_pages_array(struct page **pages, int numpages,
                                        pgprot_t mask)
{
        return change_page_attr_set_clr(NULL, numpages, __pgprot(0), mask, 0,
                CPA_PAGES_ARRAY, pages);
}
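
/*
 * The set_memory_{uc,wc,wb}() variants below also keep the PAT memtype
 * tracking in sync via reserve_memtype()/free_memtype(); the _set_memory_*()
 * helpers only change the page attributes.
 */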
int _set_memory_uc(unsigned long addr, int numpages)
{
        /*
         * for now UC MINUS. see comments in ioremap_nocache()
         */
        return change_page_attr_set(&addr, numpages,
                                    __pgprot(_PAGE_CACHE_UC_MINUS), 0);
}

int set_memory_uc(unsigned long addr, int numpages)
{
        int ret;

        /*
         * for now UC MINUS. see comments in ioremap_nocache()
         */
        ret = reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
                              _PAGE_CACHE_UC_MINUS, NULL);
        if (ret)
                goto out_err;

        ret = _set_memory_uc(addr, numpages);
        if (ret)
                goto out_free;

        return 0;

out_free:
        free_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE);
out_err:
        return ret;
}
EXPORT_SYMBOL(set_memory_uc);

static int _set_memory_array(unsigned long *addr, int addrinarray,
                             unsigned long new_type)
{
        int i, j;
        int ret;

        /*
         * for now UC MINUS. see comments in ioremap_nocache()
         */
        for (i = 0; i < addrinarray; i++) {
                ret = reserve_memtype(__pa(addr[i]), __pa(addr[i]) + PAGE_SIZE,
                                        new_type, NULL);
                if (ret)
                        goto out_free;
        }

        ret = change_page_attr_set(addr, addrinarray,
                                   __pgprot(_PAGE_CACHE_UC_MINUS), 1);

        if (!ret && new_type == _PAGE_CACHE_WC)
                ret = change_page_attr_set_clr(addr, addrinarray,
                                               __pgprot(_PAGE_CACHE_WC),
                                               __pgprot(_PAGE_CACHE_MASK),
                                               0, CPA_ARRAY, NULL);
        if (ret)
                goto out_free;

        return 0;

out_free:
        for (j = 0; j < i; j++)
                free_memtype(__pa(addr[j]), __pa(addr[j]) + PAGE_SIZE);

        return ret;
}

int set_memory_array_uc(unsigned long *addr, int addrinarray)
{
        return _set_memory_array(addr, addrinarray, _PAGE_CACHE_UC_MINUS);
}
EXPORT_SYMBOL(set_memory_array_uc);

int set_memory_array_wc(unsigned long *addr, int addrinarray)
{
        return _set_memory_array(addr, addrinarray, _PAGE_CACHE_WC);
}
EXPORT_SYMBOL(set_memory_array_wc);

int _set_memory_wc(unsigned long addr, int numpages)
{
        int ret;
        unsigned long addr_copy = addr;

        ret = change_page_attr_set(&addr, numpages,
                                   __pgprot(_PAGE_CACHE_UC_MINUS), 0);
        if (!ret) {
                ret = change_page_attr_set_clr(&addr_copy, numpages,
                                               __pgprot(_PAGE_CACHE_WC),
                                               __pgprot(_PAGE_CACHE_MASK),
                                               0, 0, NULL);
        }
        return ret;
}

int set_memory_wc(unsigned long addr, int numpages)
{
        int ret;

        if (!pat_enabled)
                return set_memory_uc(addr, numpages);

        ret = reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
                _PAGE_CACHE_WC, NULL);
        if (ret)
                goto out_err;

        ret = _set_memory_wc(addr, numpages);
        if (ret)
                goto out_free;

        return 0;

out_free:
        free_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE);
out_err:
        return ret;
}
EXPORT_SYMBOL(set_memory_wc);

int _set_memory_wb(unsigned long addr, int numpages)
{
        return change_page_attr_clear(&addr, numpages,
                                      __pgprot(_PAGE_CACHE_MASK), 0);
}

int set_memory_wb(unsigned long addr, int numpages)
{
        int ret;

        ret = _set_memory_wb(addr, numpages);
        if (ret)
                return ret;

        free_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE);
        return 0;
}
EXPORT_SYMBOL(set_memory_wb);

int set_memory_array_wb(unsigned long *addr, int addrinarray)
{
        int i;
        int ret;

        ret = change_page_attr_clear(addr, addrinarray,
                                     __pgprot(_PAGE_CACHE_MASK), 1);
        if (ret)
                return ret;

        for (i = 0; i < addrinarray; i++)
                free_memtype(__pa(addr[i]), __pa(addr[i]) + PAGE_SIZE);

        return 0;
}
EXPORT_SYMBOL(set_memory_array_wb);

int set_memory_x(unsigned long addr, int numpages)
{
        if (!(__supported_pte_mask & _PAGE_NX))
                return 0;

        return change_page_attr_clear(&addr, numpages, __pgprot(_PAGE_NX), 0);
}
EXPORT_SYMBOL(set_memory_x);

int set_memory_nx(unsigned long addr, int numpages)
{
        if (!(__supported_pte_mask & _PAGE_NX))
                return 0;

        return change_page_attr_set(&addr, numpages, __pgprot(_PAGE_NX), 0);
}
EXPORT_SYMBOL(set_memory_nx);

int set_memory_ro(unsigned long addr, int numpages)
{
        return change_page_attr_clear(&addr, numpages, __pgprot(_PAGE_RW), 0);
}
EXPORT_SYMBOL_GPL(set_memory_ro);

int set_memory_rw(unsigned long addr, int numpages)
{
        return change_page_attr_set(&addr, numpages, __pgprot(_PAGE_RW), 0);
}
EXPORT_SYMBOL_GPL(set_memory_rw);

int set_memory_np(unsigned long addr, int numpages)
{
        return change_page_attr_clear(&addr, numpages, __pgprot(_PAGE_PRESENT), 0);
}

int set_memory_4k(unsigned long addr, int numpages)
{
        return change_page_attr_set_clr(&addr, numpages, __pgprot(0),
                                        __pgprot(0), 1, 0, NULL);
}

int set_pages_uc(struct page *page, int numpages)
{
        unsigned long addr = (unsigned long)page_address(page);

        return set_memory_uc(addr, numpages);
}
EXPORT_SYMBOL(set_pages_uc);
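
/*
 * Reserve the memtype for every lowmem page in the array, then apply the
 * requested caching attribute. Highmem pages are skipped as they have no
 * permanent kernel mapping. On failure the already reserved memtypes are
 * released again.
 */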
static int _set_pages_array(struct page **pages, int addrinarray,
                            unsigned long new_type)
{
        unsigned long start;
        unsigned long end;
        int i;
        int free_idx;
        int ret;

        for (i = 0; i < addrinarray; i++) {
                if (PageHighMem(pages[i]))
                        continue;
                start = page_to_pfn(pages[i]) << PAGE_SHIFT;
                end = start + PAGE_SIZE;
                if (reserve_memtype(start, end, new_type, NULL))
                        goto err_out;
        }

        ret = cpa_set_pages_array(pages, addrinarray,
                                  __pgprot(_PAGE_CACHE_UC_MINUS));
        if (!ret && new_type == _PAGE_CACHE_WC)
                ret = change_page_attr_set_clr(NULL, addrinarray,
                                               __pgprot(_PAGE_CACHE_WC),
                                               __pgprot(_PAGE_CACHE_MASK),
                                               0, CPA_PAGES_ARRAY, pages);
        if (ret)
                goto err_out;
        return 0; /* Success */
err_out:
        free_idx = i;
        for (i = 0; i < free_idx; i++) {
                if (PageHighMem(pages[i]))
                        continue;
                start = page_to_pfn(pages[i]) << PAGE_SHIFT;
                end = start + PAGE_SIZE;
                free_memtype(start, end);
        }
        return -EINVAL;
}

int set_pages_array_uc(struct page **pages, int addrinarray)
{
        return _set_pages_array(pages, addrinarray, _PAGE_CACHE_UC_MINUS);
}
EXPORT_SYMBOL(set_pages_array_uc);

int set_pages_array_wc(struct page **pages, int addrinarray)
{
        return _set_pages_array(pages, addrinarray, _PAGE_CACHE_WC);
}
EXPORT_SYMBOL(set_pages_array_wc);

int set_pages_wb(struct page *page, int numpages)
{
        unsigned long addr = (unsigned long)page_address(page);

        return set_memory_wb(addr, numpages);
}
EXPORT_SYMBOL(set_pages_wb);

int set_pages_array_wb(struct page **pages, int addrinarray)
{
        int retval;
        unsigned long start;
        unsigned long end;
        int i;

        retval = cpa_clear_pages_array(pages, addrinarray,
                                       __pgprot(_PAGE_CACHE_MASK));
        if (retval)
                return retval;

        for (i = 0; i < addrinarray; i++) {
                if (PageHighMem(pages[i]))
                        continue;
                start = page_to_pfn(pages[i]) << PAGE_SHIFT;
                end = start + PAGE_SIZE;
                free_memtype(start, end);
        }

        return 0;
}
EXPORT_SYMBOL(set_pages_array_wb);

int set_pages_x(struct page *page, int numpages)
{
        unsigned long addr = (unsigned long)page_address(page);

        return set_memory_x(addr, numpages);
}
EXPORT_SYMBOL(set_pages_x);

int set_pages_nx(struct page *page, int numpages)
{
        unsigned long addr = (unsigned long)page_address(page);

        return set_memory_nx(addr, numpages);
}
EXPORT_SYMBOL(set_pages_nx);

int set_pages_ro(struct page *page, int numpages)
{
        unsigned long addr = (unsigned long)page_address(page);

        return set_memory_ro(addr, numpages);
}

int set_pages_rw(struct page *page, int numpages)
{
        unsigned long addr = (unsigned long)page_address(page);

        return set_memory_rw(addr, numpages);
}

#ifdef CONFIG_DEBUG_PAGEALLOC

static int __set_pages_p(struct page *page, int numpages)
{
        unsigned long tempaddr = (unsigned long) page_address(page);
        struct cpa_data cpa = { .vaddr = &tempaddr,
                                .numpages = numpages,
                                .mask_set = __pgprot(_PAGE_PRESENT | _PAGE_RW),
                                .mask_clr = __pgprot(0),
                                .flags = 0};

        /*
         * No alias checking needed for setting present flag. Otherwise,
         * we may need to break large pages for 64-bit kernel text
         * mappings (this adds to complexity if we want to do this from
         * atomic context especially). Let's keep it simple!
         */
        return __change_page_attr_set_clr(&cpa, 0);
}

static int __set_pages_np(struct page *page, int numpages)
{
        unsigned long tempaddr = (unsigned long) page_address(page);
        struct cpa_data cpa = { .vaddr = &tempaddr,
                                .numpages = numpages,
                                .mask_set = __pgprot(0),
                                .mask_clr = __pgprot(_PAGE_PRESENT | _PAGE_RW),
                                .flags = 0};

        /*
         * No alias checking needed for setting not present flag. Otherwise,
         * we may need to break large pages for 64-bit kernel text
         * mappings (this adds to complexity if we want to do this from
         * atomic context especially). Let's keep it simple!
         */
        return __change_page_attr_set_clr(&cpa, 0);
}
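
/*
 * DEBUG_PAGEALLOC: map/unmap pages in the kernel direct mapping as they are
 * allocated/freed, so that stray accesses to free pages fault immediately.
 */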
void kernel_map_pages(struct page *page, int numpages, int enable)
{
        if (PageHighMem(page))
                return;

        if (!enable) {
                debug_check_no_locks_freed(page_address(page),
                                           numpages * PAGE_SIZE);
        }

        /*
         * The return value is ignored as the calls cannot fail.
         * Large pages for identity mappings are not used at boot time
         * and hence no memory allocations during large page split.
         */
        if (enable)
                __set_pages_p(page, numpages);
        else
                __set_pages_np(page, numpages);

        /*
         * We should perform an IPI and flush all tlbs,
         * but that can deadlock->flush only current cpu:
         */
        __flush_tlb_all();

        arch_flush_lazy_mmu_mode();
}

#ifdef CONFIG_HIBERNATION

bool kernel_page_present(struct page *page)
{
        unsigned int level;
        pte_t *pte;

        if (PageHighMem(page))
                return false;

        pte = lookup_address((unsigned long)page_address(page), &level);
        return (pte_val(*pte) & _PAGE_PRESENT);
}

#endif /* CONFIG_HIBERNATION */

#endif /* CONFIG_DEBUG_PAGEALLOC */

/*
 * The testcases use internal knowledge of the implementation that shouldn't
 * be exposed to the rest of the kernel. Include these directly here.
 */
#ifdef CONFIG_CPA_DEBUG
#include "pageattr-test.c"
#endif