pageattr.c

/*
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Thanks to Ben LaHaise for precious feedback.
 */
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>

#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
/*
 * We must allow the BIOS range to be executable:
 */
#define BIOS_BEGIN		0x000a0000
#define BIOS_END		0x00100000

static inline int
within(unsigned long addr, unsigned long start, unsigned long end)
{
	return addr >= start && addr < end;
}
/*
 * Certain areas of memory on x86 require very specific protection flags,
 * for example the BIOS area or kernel text. Callers don't always get this
 * right (again, ioremap() on BIOS memory is not uncommon) so this function
 * checks and fixes these known static required protection bits.
 */
static inline pgprot_t static_protections(pgprot_t prot, unsigned long address)
{
	pgprot_t forbidden = __pgprot(0);

	/*
	 * The BIOS area between 640k and 1MB needs to be executable for
	 * PCI BIOS based config access (CONFIG_PCI_GOBIOS) support.
	 */
	if (within(__pa(address), BIOS_BEGIN, BIOS_END))
		pgprot_val(forbidden) |= _PAGE_NX;

	/*
	 * The kernel text needs to be executable for obvious reasons.
	 * This does not cover __inittext, since that is gone later on.
	 */
	if (within(address, (unsigned long)_text, (unsigned long)_etext))
		pgprot_val(forbidden) |= _PAGE_NX;

#ifdef CONFIG_DEBUG_RODATA
	/* The .rodata section needs to be read-only: */
	if (within(address, (unsigned long)__start_rodata,
		   (unsigned long)__end_rodata))
		pgprot_val(forbidden) |= _PAGE_RW;
#endif

	prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));

	return prot;
}
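
/*
 * Look up the page table entry for a kernel virtual address and report
 * the level of the mapping: for a 2M/4M large page the pmd entry itself
 * is returned (cast to pte_t *), otherwise the 4k pte. *level is set to
 * PG_LEVEL_NONE when the address is not mapped at all.
 */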
pte_t *lookup_address(unsigned long address, int *level)
{
	pgd_t *pgd = pgd_offset_k(address);
	pud_t *pud;
	pmd_t *pmd;

	*level = PG_LEVEL_NONE;

	if (pgd_none(*pgd))
		return NULL;
	pud = pud_offset(pgd, address);
	if (pud_none(*pud))
		return NULL;
	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		return NULL;

	*level = PG_LEVEL_2M;
	if (pmd_large(*pmd))
		return (pte_t *)pmd;

	*level = PG_LEVEL_4K;
	return pte_offset_kernel(pmd, address);
}
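
/*
 * Set a pmd-level entry in init_mm. On 32-bit, when the kernel pmd is
 * not shared across processes (!SHARED_KERNEL_PMD, i.e. PAE), the update
 * must also be replicated into every pgd on the pgd_list so that all
 * page tables see the new mapping.
 */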
static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
{
	/* change init_mm */
	set_pte_atomic(kpte, pte);
#ifdef CONFIG_X86_32
	if (!SHARED_KERNEL_PMD) {
		struct page *page;

		for (page = pgd_list; page; page = (struct page *)page->index) {
			pgd_t *pgd;
			pud_t *pud;
			pmd_t *pmd;

			pgd = (pgd_t *)page_address(page) + pgd_index(address);
			pud = pud_offset(pgd, address);
			pmd = pmd_offset(pud, address);
			set_pte_atomic((pte_t *)pmd, pte);
		}
	}
#endif
}
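
/*
 * Split one large (2M/4M) kernel mapping into PTRS_PER_PTE individual
 * 4k mappings that carry the same protections, so that a single page
 * within the range can then be given different attributes.
 */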
static int split_large_page(pte_t *kpte, unsigned long address)
{
	pgprot_t ref_prot = pte_pgprot(pte_clrhuge(*kpte));
	gfp_t gfp_flags = GFP_KERNEL;
	unsigned long flags;
	unsigned long addr;
	pte_t *pbase, *tmp;
	struct page *base;
	int i, level;

#ifdef CONFIG_DEBUG_PAGEALLOC
	gfp_flags = GFP_ATOMIC;
#endif
	base = alloc_pages(gfp_flags, 0);
	if (!base)
		return -ENOMEM;

	spin_lock_irqsave(&pgd_lock, flags);
	/*
	 * Check for races, another CPU might have split this page
	 * up for us already:
	 */
	tmp = lookup_address(address, &level);
	if (tmp != kpte) {
		WARN_ON_ONCE(1);
		goto out_unlock;
	}

	address = __pa(address);
	addr = address & LARGE_PAGE_MASK;
	pbase = (pte_t *)page_address(base);
#ifdef CONFIG_X86_32
	paravirt_alloc_pt(&init_mm, page_to_pfn(base));
#endif

	for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE)
		set_pte(&pbase[i], pfn_pte(addr >> PAGE_SHIFT, ref_prot));

	/*
	 * Install the new, split up pagetable. Important detail here:
	 *
	 * On Intel the NX bit of all levels must be cleared to make a
	 * page executable. See section 4.13.2 of the Intel 64 and IA-32
	 * Architectures Software Developer's Manual.
	 */
	ref_prot = pte_pgprot(pte_mkexec(pte_clrhuge(*kpte)));
	__set_pmd_pte(kpte, address, mk_pte(base, ref_prot));
	base = NULL;

out_unlock:
	spin_unlock_irqrestore(&pgd_lock, flags);

	if (base)
		__free_pages(base, 0);

	return 0;
}
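
/*
 * Change the attributes of a single kernel page. If the page is currently
 * covered by a large mapping, split it first and retry, so that only the
 * 4k pte for this address is modified.
 */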
static int
__change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot)
{
	struct page *kpte_page;
	int level, err = 0;
	pte_t *kpte;

#ifdef CONFIG_X86_32
	BUG_ON(pfn > max_low_pfn);
#endif

repeat:
	kpte = lookup_address(address, &level);
	if (!kpte)
		return -EINVAL;

	kpte_page = virt_to_page(kpte);
	BUG_ON(PageLRU(kpte_page));
	BUG_ON(PageCompound(kpte_page));

	prot = static_protections(prot, address);

	if (level == PG_LEVEL_4K) {
		set_pte_atomic(kpte, pfn_pte(pfn, canon_pgprot(prot)));
	} else {
		err = split_large_page(kpte, address);
		if (!err)
			goto repeat;
	}
	return err;
}
/**
 * change_page_attr_addr - Change page table attributes in linear mapping
 * @address: Virtual address in linear mapping.
 * @numpages: Number of pages to change
 * @prot: New page table attribute (PAGE_*)
 *
 * Change page attributes of a page in the direct mapping. This is a variant
 * of change_page_attr() that also works on memory holes that do not have a
 * mem_map entry (pfn_valid() is false).
 *
 * See change_page_attr() documentation for more details.
 *
 * Modules and drivers should use the set_memory_* APIs instead.
 */
int change_page_attr_addr(unsigned long address, int numpages, pgprot_t prot)
{
	int err = 0, kernel_map = 0, i;

#ifdef CONFIG_X86_64
	if (address >= __START_KERNEL_map &&
	    address < __START_KERNEL_map + KERNEL_TEXT_SIZE) {

		address = (unsigned long)__va(__pa(address));
		kernel_map = 1;
	}
#endif

	for (i = 0; i < numpages; i++, address += PAGE_SIZE) {
		unsigned long pfn = __pa(address) >> PAGE_SHIFT;

		if (!kernel_map || pte_present(pfn_pte(0, prot))) {
			err = __change_page_attr(address, pfn, prot);
			if (err)
				break;
		}
#ifdef CONFIG_X86_64
		/*
		 * Handle the kernel mapping too, which aliases part of
		 * lowmem:
		 */
		if (__pa(address) < KERNEL_TEXT_SIZE) {
			unsigned long addr2;
			pgprot_t prot2;

			addr2 = __START_KERNEL_map + __pa(address);
			/* Make sure the kernel mappings stay executable */
			prot2 = pte_pgprot(pte_mkexec(pfn_pte(0, prot)));
			err = __change_page_attr(addr2, pfn, prot2);
		}
#endif
	}

	return err;
}
/**
 * change_page_attr - Change page table attributes in the linear mapping.
 * @page: First page to change
 * @numpages: Number of pages to change
 * @prot: New protection/caching type (PAGE_*)
 *
 * Returns 0 on success, otherwise a negated errno.
 *
 * This should be used when a page is mapped with a different caching policy
 * than write-back somewhere - some CPUs do not like it when mappings with
 * different caching policies exist. This changes the page attributes of the
 * in-kernel linear mapping too.
 *
 * Caller must call global_flush_tlb() later to make the changes active.
 *
 * The caller needs to ensure that there are no conflicting mappings elsewhere
 * (e.g. in user space). This function only deals with the kernel linear map.
 *
 * For MMIO areas without mem_map use change_page_attr_addr() instead.
 *
 * Modules and drivers should use the set_pages_* APIs instead.
 */
int change_page_attr(struct page *page, int numpages, pgprot_t prot)
{
	unsigned long addr = (unsigned long)page_address(page);

	return change_page_attr_addr(addr, numpages, prot);
}
EXPORT_SYMBOL(change_page_attr);
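
/*
 * Illustrative sketch only (hypothetical caller): mark one page as
 * uncached and make the change visible on all CPUs. PAGE_KERNEL_NOCACHE
 * is assumed to be the desired uncached protection here:
 *
 *	change_page_attr(page, 1, PAGE_KERNEL_NOCACHE);
 *	global_flush_tlb();
 */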
/**
 * change_page_attr_set - Change page table attributes in the linear mapping.
 * @addr: Virtual address in linear mapping.
 * @numpages: Number of pages to change
 * @prot: Protection/caching type bits to set (PAGE_*)
 *
 * Returns 0 on success, otherwise a negated errno.
 *
 * This should be used when a page is mapped with a different caching policy
 * than write-back somewhere - some CPUs do not like it when mappings with
 * different caching policies exist. This changes the page attributes of the
 * in-kernel linear mapping too.
 *
 * Caller must call global_flush_tlb() later to make the changes active.
 *
 * The caller needs to ensure that there are no conflicting mappings elsewhere
 * (e.g. in user space). This function only deals with the kernel linear map.
 *
 * This function is different from change_page_attr() in that only selected
 * bits are impacted; all other bits remain as is.
 */
int change_page_attr_set(unsigned long addr, int numpages, pgprot_t prot)
{
	pgprot_t current_prot;
	int level;
	pte_t *pte;

	pte = lookup_address(addr, &level);
	if (pte)
		current_prot = pte_pgprot(*pte);
	else
		pgprot_val(current_prot) = 0;

	pgprot_val(prot) = pgprot_val(current_prot) | pgprot_val(prot);

	return change_page_attr_addr(addr, numpages, prot);
}
/**
 * change_page_attr_clear - Change page table attributes in the linear mapping.
 * @addr: Virtual address in linear mapping.
 * @numpages: Number of pages to change
 * @prot: Protection/caching type bits to clear (PAGE_*)
 *
 * Returns 0 on success, otherwise a negated errno.
 *
 * This should be used when a page is mapped with a different caching policy
 * than write-back somewhere - some CPUs do not like it when mappings with
 * different caching policies exist. This changes the page attributes of the
 * in-kernel linear mapping too.
 *
 * Caller must call global_flush_tlb() later to make the changes active.
 *
 * The caller needs to ensure that there are no conflicting mappings elsewhere
 * (e.g. in user space). This function only deals with the kernel linear map.
 *
 * This function is different from change_page_attr() in that only selected
 * bits are impacted; all other bits remain as is.
 */
int change_page_attr_clear(unsigned long addr, int numpages, pgprot_t prot)
{
	pgprot_t current_prot;
	int level;
	pte_t *pte;

	pte = lookup_address(addr, &level);
	if (pte)
		current_prot = pte_pgprot(*pte);
	else
		pgprot_val(current_prot) = 0;

	pgprot_val(prot) = pgprot_val(current_prot) & ~pgprot_val(prot);

	return change_page_attr_addr(addr, numpages, prot);
}
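
/*
 * Convenience wrappers: each helper below sets or clears one specific
 * attribute (caching, NX, RW) on a range of the kernel linear mapping.
 * As with change_page_attr(), callers must invoke global_flush_tlb()
 * afterwards to make the changes active.
 */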
int set_memory_uc(unsigned long addr, int numpages)
{
	pgprot_t uncached;

	pgprot_val(uncached) = _PAGE_PCD | _PAGE_PWT;
	return change_page_attr_set(addr, numpages, uncached);
}
EXPORT_SYMBOL(set_memory_uc);

int set_memory_wb(unsigned long addr, int numpages)
{
	pgprot_t uncached;

	pgprot_val(uncached) = _PAGE_PCD | _PAGE_PWT;
	return change_page_attr_clear(addr, numpages, uncached);
}
EXPORT_SYMBOL(set_memory_wb);

int set_memory_x(unsigned long addr, int numpages)
{
	pgprot_t nx;

	pgprot_val(nx) = _PAGE_NX;
	return change_page_attr_clear(addr, numpages, nx);
}
EXPORT_SYMBOL(set_memory_x);

int set_memory_nx(unsigned long addr, int numpages)
{
	pgprot_t nx;

	pgprot_val(nx) = _PAGE_NX;
	return change_page_attr_set(addr, numpages, nx);
}
EXPORT_SYMBOL(set_memory_nx);

int set_memory_ro(unsigned long addr, int numpages)
{
	pgprot_t rw;

	pgprot_val(rw) = _PAGE_RW;
	return change_page_attr_clear(addr, numpages, rw);
}
EXPORT_SYMBOL(set_memory_ro);

int set_memory_rw(unsigned long addr, int numpages)
{
	pgprot_t rw;

	pgprot_val(rw) = _PAGE_RW;
	return change_page_attr_set(addr, numpages, rw);
}
EXPORT_SYMBOL(set_memory_rw);
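
/*
 * Illustrative sketch only (hypothetical caller): temporarily map a
 * kernel buffer uncached, then restore write-back caching:
 *
 *	set_memory_uc(addr, numpages);
 *	global_flush_tlb();
 *	...
 *	set_memory_wb(addr, numpages);
 *	global_flush_tlb();
 */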
int set_pages_uc(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);
	pgprot_t uncached;

	pgprot_val(uncached) = _PAGE_PCD | _PAGE_PWT;
	return change_page_attr_set(addr, numpages, uncached);
}
EXPORT_SYMBOL(set_pages_uc);

int set_pages_wb(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);
	pgprot_t uncached;

	pgprot_val(uncached) = _PAGE_PCD | _PAGE_PWT;
	return change_page_attr_clear(addr, numpages, uncached);
}
EXPORT_SYMBOL(set_pages_wb);

int set_pages_x(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);
	pgprot_t nx;

	pgprot_val(nx) = _PAGE_NX;
	return change_page_attr_clear(addr, numpages, nx);
}
EXPORT_SYMBOL(set_pages_x);

int set_pages_nx(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);
	pgprot_t nx;

	pgprot_val(nx) = _PAGE_NX;
	return change_page_attr_set(addr, numpages, nx);
}
EXPORT_SYMBOL(set_pages_nx);

int set_pages_ro(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);
	pgprot_t rw;

	pgprot_val(rw) = _PAGE_RW;
	return change_page_attr_clear(addr, numpages, rw);
}
EXPORT_SYMBOL(set_pages_ro);

int set_pages_rw(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);
	pgprot_t rw;

	pgprot_val(rw) = _PAGE_RW;
	return change_page_attr_set(addr, numpages, rw);
}
EXPORT_SYMBOL(set_pages_rw);
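
/*
 * Flush every cache line in [addr, addr + size) back to memory, stepping
 * by the CPU's reported clflush line size.
 */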
void clflush_cache_range(void *addr, int size)
{
	int i;

	for (i = 0; i < size; i += boot_cpu_data.x86_clflush_size)
		clflush(addr+i);
}
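
/*
 * SMP callback, run on each CPU by global_flush_tlb() below: flush the
 * local TLBs and, on affected CPU models, write back and invalidate the
 * caches as well.
 */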
static void flush_kernel_map(void *arg)
{
	/*
	 * Flush all TLBs to work around errata in early Athlons
	 * regarding large page flushing.
	 */
	__flush_tlb_all();

	if (boot_cpu_data.x86_model >= 4)
		wbinvd();
}
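
/*
 * Make prior change_page_attr*() calls take effect on all CPUs. This
 * sends IPIs, so it must not be called with interrupts disabled (hence
 * the BUG_ON below).
 */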
void global_flush_tlb(void)
{
	BUG_ON(irqs_disabled());

	on_each_cpu(flush_kernel_map, NULL, 1, 1);
}
EXPORT_SYMBOL(global_flush_tlb);
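
/*
 * With CONFIG_DEBUG_PAGEALLOC, the page allocator maps pages out of the
 * kernel linear mapping when they are freed (and back in when allocated),
 * so stray accesses to freed memory fault immediately.
 */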
#ifdef CONFIG_DEBUG_PAGEALLOC
void kernel_map_pages(struct page *page, int numpages, int enable)
{
	if (PageHighMem(page))
		return;
	if (!enable) {
		debug_check_no_locks_freed(page_address(page),
					   numpages * PAGE_SIZE);
	}

	/*
	 * If the page allocator is not up yet, do not call c_p_a():
	 */
	if (!debug_pagealloc_enabled)
		return;

	/*
	 * The return value is ignored - the calls cannot fail,
	 * large pages are disabled at boot time:
	 */
	change_page_attr(page, numpages, enable ? PAGE_KERNEL : __pgprot(0));

	/*
	 * We should perform an IPI and flush all TLBs,
	 * but that can deadlock -> flush only the current CPU:
	 */
	__flush_tlb_all();
}
#endif