pageattr.c

/*
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Thanks to Ben LaHaise for precious feedback.
 */
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>

#include <asm/e820.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>

static inline int
within(unsigned long addr, unsigned long start, unsigned long end)
{
	return addr >= start && addr < end;
}

/*
 * Certain areas of memory on x86 require very specific protection flags,
 * for example the BIOS area or kernel text. Callers don't always get this
 * right (again, ioremap() on BIOS memory is not uncommon), so this function
 * checks and fixes these known static required protection bits.
 */
static inline pgprot_t static_protections(pgprot_t prot, unsigned long address)
{
	pgprot_t forbidden = __pgprot(0);

	/*
	 * The BIOS area between 640k and 1MB needs to be executable for
	 * PCI BIOS based config access (CONFIG_PCI_GOBIOS) support.
	 */
	if (within(__pa(address), BIOS_BEGIN, BIOS_END))
		pgprot_val(forbidden) |= _PAGE_NX;

	/*
	 * The kernel text needs to be executable for obvious reasons.
	 * This does not cover __inittext, since that is gone after boot.
	 */
	if (within(address, (unsigned long)_text, (unsigned long)_etext))
		pgprot_val(forbidden) |= _PAGE_NX;

#ifdef CONFIG_DEBUG_RODATA
	/* The .rodata section needs to be read-only */
	if (within(address, (unsigned long)__start_rodata,
		   (unsigned long)__end_rodata))
		pgprot_val(forbidden) |= _PAGE_RW;
#endif

	prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));

	return prot;
}

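/*
 * A minimal illustrative sketch (not from the kernel tree) of how the
 * fixup above behaves: a request to map a .rodata address writable has
 * its RW bit stripped again. Assumes CONFIG_DEBUG_RODATA; the example_*
 * name is hypothetical.
 */
#if 0
static void example_static_protections(void)
{
	unsigned long addr = (unsigned long)__start_rodata;
	pgprot_t prot = static_protections(PAGE_KERNEL, addr);

	/* _PAGE_RW was masked out because addr lies in .rodata */
	BUG_ON(pgprot_val(prot) & _PAGE_RW);
}
#endif
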
pte_t *lookup_address(unsigned long address, int *level)
{
	pgd_t *pgd = pgd_offset_k(address);
	pud_t *pud;
	pmd_t *pmd;

	*level = PG_LEVEL_NONE;

	if (pgd_none(*pgd))
		return NULL;
	pud = pud_offset(pgd, address);
	if (pud_none(*pud))
		return NULL;
	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd))
		return NULL;

	*level = PG_LEVEL_2M;
	if (pmd_large(*pmd))
		return (pte_t *)pmd;

	*level = PG_LEVEL_4K;
	return pte_offset_kernel(pmd, address);
}

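/*
 * A minimal usage sketch (illustrative, not from the kernel tree) of
 * lookup_address(): check whether a kernel virtual address is still
 * covered by a large page. The example_* name and `addr` argument are
 * hypothetical.
 */
#if 0
static int example_is_large_mapping(unsigned long addr)
{
	int level;
	pte_t *pte = lookup_address(addr, &level);

	if (!pte)
		return 0;		/* not mapped at all */
	return level == PG_LEVEL_2M;	/* 2M/4M large page vs. 4K pte */
}
#endif
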
static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
{
	/* change init_mm */
	set_pte_atomic(kpte, pte);
#ifdef CONFIG_X86_32
	if (!SHARED_KERNEL_PMD) {
		struct page *page;

		for (page = pgd_list; page; page = (struct page *)page->index) {
			pgd_t *pgd;
			pud_t *pud;
			pmd_t *pmd;

			pgd = (pgd_t *)page_address(page) + pgd_index(address);
			pud = pud_offset(pgd, address);
			pmd = pmd_offset(pud, address);
			set_pte_atomic((pte_t *)pmd, pte);
		}
	}
#endif
}

static int split_large_page(pte_t *kpte, unsigned long address)
{
	pgprot_t ref_prot = pte_pgprot(pte_clrhuge(*kpte));
	gfp_t gfp_flags = GFP_KERNEL;
	unsigned long flags;
	unsigned long addr;
	pte_t *pbase, *tmp;
	struct page *base;
	int i, level;

#ifdef CONFIG_DEBUG_PAGEALLOC
	gfp_flags = GFP_ATOMIC;
#endif
	base = alloc_pages(gfp_flags, 0);
	if (!base)
		return -ENOMEM;

	spin_lock_irqsave(&pgd_lock, flags);
	/*
	 * Check for races, another CPU might have split this page
	 * up for us already:
	 */
	tmp = lookup_address(address, &level);
	if (tmp != kpte) {
		WARN_ON_ONCE(1);
		goto out_unlock;
	}

	address = __pa(address);
	addr = address & LARGE_PAGE_MASK;
	pbase = (pte_t *)page_address(base);
#ifdef CONFIG_X86_32
	paravirt_alloc_pt(&init_mm, page_to_pfn(base));
#endif

	for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE)
		set_pte(&pbase[i], pfn_pte(addr >> PAGE_SHIFT, ref_prot));

	/*
	 * Install the new, split up pagetable. Important detail here:
	 *
	 * On Intel the NX bit of all levels must be cleared to make a
	 * page executable (see section 4.13.2 of the Intel 64 and IA-32
	 * Architectures Software Developer's Manual).
	 */
	ref_prot = pte_pgprot(pte_mkexec(pte_clrhuge(*kpte)));
	__set_pmd_pte(kpte, address, mk_pte(base, ref_prot));
	base = NULL;

out_unlock:
	spin_unlock_irqrestore(&pgd_lock, flags);

	if (base)
		__free_pages(base, 0);

	return 0;
}

static int
__change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot)
{
	struct page *kpte_page;
	int level, err = 0;
	pte_t *kpte;

#ifdef CONFIG_X86_32
	BUG_ON(pfn > max_low_pfn);
#endif

repeat:
	kpte = lookup_address(address, &level);
	if (!kpte)
		return -EINVAL;

	kpte_page = virt_to_page(kpte);
	BUG_ON(PageLRU(kpte_page));
	BUG_ON(PageCompound(kpte_page));

	prot = static_protections(prot, address);

	if (level == PG_LEVEL_4K) {
		set_pte_atomic(kpte, pfn_pte(pfn, canon_pgprot(prot)));
	} else {
		err = split_large_page(kpte, address);
		if (!err)
			goto repeat;
	}
	return err;
}

/**
 * change_page_attr_addr - Change page table attributes in linear mapping
 * @address: Virtual address in linear mapping.
 * @numpages: Number of pages to change
 * @prot: New page table attribute (PAGE_*)
 *
 * Change page attributes of a page in the direct mapping. This is a variant
 * of change_page_attr() that also works on memory holes that do not have a
 * mem_map entry (pfn_valid() is false).
 *
 * See change_page_attr() documentation for more details.
 *
 * Modules and drivers should use the set_memory_* APIs instead.
 */
int change_page_attr_addr(unsigned long address, int numpages, pgprot_t prot)
{
	int err = 0, kernel_map = 0, i;

#ifdef CONFIG_X86_64
	if (address >= __START_KERNEL_map &&
	    address < __START_KERNEL_map + KERNEL_TEXT_SIZE) {

		address = (unsigned long)__va(__pa(address));
		kernel_map = 1;
	}
#endif

	for (i = 0; i < numpages; i++, address += PAGE_SIZE) {
		unsigned long pfn = __pa(address) >> PAGE_SHIFT;

		if (!kernel_map || pte_present(pfn_pte(0, prot))) {
			err = __change_page_attr(address, pfn, prot);
			if (err)
				break;
		}
#ifdef CONFIG_X86_64
		/*
		 * Handle the kernel mapping too, which aliases part of
		 * lowmem:
		 */
		if (__pa(address) < KERNEL_TEXT_SIZE) {
			unsigned long addr2;
			pgprot_t prot2;

			addr2 = __START_KERNEL_map + __pa(address);
			/* Make sure the kernel mappings stay executable */
			prot2 = pte_pgprot(pte_mkexec(pfn_pte(0, prot)));
			err = __change_page_attr(addr2, pfn, prot2);
		}
#endif
	}

	return err;
}

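/*
 * A minimal usage sketch (illustrative, not from the kernel tree):
 * make one page of an address that may lack a mem_map entry (e.g. an
 * MMIO hole) uncacheable, then flush. `addr` and the example_* name
 * are hypothetical.
 */
#if 0
static int example_cpa_addr(unsigned long addr)
{
	int err = change_page_attr_addr(addr, 1, PAGE_KERNEL_NOCACHE);

	if (!err)
		global_flush_tlb();	/* required to activate the change */
	return err;
}
#endif
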
/**
 * change_page_attr - Change page table attributes in the linear mapping.
 * @page: First page to change
 * @numpages: Number of pages to change
 * @prot: New protection/caching type (PAGE_*)
 *
 * Returns 0 on success, otherwise a negative errno.
 *
 * This should be used when a page is mapped with a different caching policy
 * than write-back somewhere - some CPUs do not like it when mappings with
 * different caching policies exist. This changes the page attributes of the
 * in-kernel linear mapping too.
 *
 * The caller must call global_flush_tlb() later to make the changes active.
 *
 * The caller needs to ensure that there are no conflicting mappings elsewhere
 * (e.g. in user space). This function only deals with the kernel linear map.
 *
 * For MMIO areas without a mem_map use change_page_attr_addr() instead.
 *
 * Modules and drivers should use the set_pages_* APIs instead.
 */
int change_page_attr(struct page *page, int numpages, pgprot_t prot)
{
	unsigned long addr = (unsigned long)page_address(page);

	return change_page_attr_addr(addr, numpages, prot);
}
EXPORT_UNUSED_SYMBOL(change_page_attr); /* to be removed in 2.6.27 */

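/*
 * A minimal sketch (illustrative, not from the kernel tree) of the
 * protect/restore pairing that the debug page-alloc code at the bottom
 * of this file relies on: __pgprot(0) unmaps the page, PAGE_KERNEL
 * restores the default mapping. The example_* name and `page` are
 * hypothetical.
 */
#if 0
static void example_protect_and_restore(struct page *page)
{
	change_page_attr(page, 1, __pgprot(0));	/* make not present */
	global_flush_tlb();
	/* ... any access through the linear map now faults ... */
	change_page_attr(page, 1, PAGE_KERNEL);	/* restore */
	global_flush_tlb();
}
#endif
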
/**
 * change_page_attr_set - Change page table attributes in the linear mapping.
 * @addr: Virtual address in linear mapping.
 * @numpages: Number of pages to change
 * @prot: Protection/caching type bits to set (PAGE_*)
 *
 * Returns 0 on success, otherwise a negative errno.
 *
 * This should be used when a page is mapped with a different caching policy
 * than write-back somewhere - some CPUs do not like it when mappings with
 * different caching policies exist. This changes the page attributes of the
 * in-kernel linear mapping too.
 *
 * The caller must call global_flush_tlb() later to make the changes active.
 *
 * The caller needs to ensure that there are no conflicting mappings elsewhere
 * (e.g. in user space). This function only deals with the kernel linear map.
 *
 * This function differs from change_page_attr() in that only the selected
 * bits are impacted; all other bits remain as-is.
 */
int change_page_attr_set(unsigned long addr, int numpages, pgprot_t prot)
{
	pgprot_t current_prot;
	int level;
	pte_t *pte;

	pte = lookup_address(addr, &level);
	if (pte)
		current_prot = pte_pgprot(*pte);
	else
		pgprot_val(current_prot) = 0;

	pgprot_val(prot) = pgprot_val(current_prot) | pgprot_val(prot);

	return change_page_attr_addr(addr, numpages, prot);
}

/**
 * change_page_attr_clear - Change page table attributes in the linear mapping.
 * @addr: Virtual address in linear mapping.
 * @numpages: Number of pages to change
 * @prot: Protection/caching type bits to clear (PAGE_*)
 *
 * Returns 0 on success, otherwise a negative errno.
 *
 * This should be used when a page is mapped with a different caching policy
 * than write-back somewhere - some CPUs do not like it when mappings with
 * different caching policies exist. This changes the page attributes of the
 * in-kernel linear mapping too.
 *
 * The caller must call global_flush_tlb() later to make the changes active.
 *
 * The caller needs to ensure that there are no conflicting mappings elsewhere
 * (e.g. in user space). This function only deals with the kernel linear map.
 *
 * This function differs from change_page_attr() in that only the selected
 * bits are impacted; all other bits remain as-is.
 */
int change_page_attr_clear(unsigned long addr, int numpages, pgprot_t prot)
{
	pgprot_t current_prot;
	int level;
	pte_t *pte;

	pte = lookup_address(addr, &level);
	if (pte)
		current_prot = pte_pgprot(*pte);
	else
		pgprot_val(current_prot) = 0;

	pgprot_val(prot) = pgprot_val(current_prot) & ~pgprot_val(prot);

	return change_page_attr_addr(addr, numpages, prot);
}

int set_memory_uc(unsigned long addr, int numpages)
{
	pgprot_t uncached;

	pgprot_val(uncached) = _PAGE_PCD | _PAGE_PWT;
	return change_page_attr_set(addr, numpages, uncached);
}
EXPORT_SYMBOL(set_memory_uc);

int set_memory_wb(unsigned long addr, int numpages)
{
	pgprot_t uncached;

	pgprot_val(uncached) = _PAGE_PCD | _PAGE_PWT;
	return change_page_attr_clear(addr, numpages, uncached);
}
EXPORT_SYMBOL(set_memory_wb);

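/*
 * A minimal driver-style sketch (illustrative, not from the kernel
 * tree) of the usual set_memory_uc()/set_memory_wb() window around
 * uncached use of a buffer. `buf`, `n` and the example_* name are
 * hypothetical.
 */
#if 0
static int example_uncached_window(void *buf, int n)
{
	unsigned long addr = (unsigned long)buf;
	int err = set_memory_uc(addr, n);

	if (err)
		return err;
	global_flush_tlb();
	/* ... touch the buffer with caching disabled ... */
	set_memory_wb(addr, n);
	global_flush_tlb();
	return 0;
}
#endif
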
int set_memory_x(unsigned long addr, int numpages)
{
	pgprot_t nx;

	pgprot_val(nx) = _PAGE_NX;
	return change_page_attr_clear(addr, numpages, nx);
}
EXPORT_SYMBOL(set_memory_x);

int set_memory_nx(unsigned long addr, int numpages)
{
	pgprot_t nx;

	pgprot_val(nx) = _PAGE_NX;
	return change_page_attr_set(addr, numpages, nx);
}
EXPORT_SYMBOL(set_memory_nx);

int set_memory_ro(unsigned long addr, int numpages)
{
	pgprot_t rw;

	pgprot_val(rw) = _PAGE_RW;
	return change_page_attr_clear(addr, numpages, rw);
}
EXPORT_SYMBOL(set_memory_ro);

int set_memory_rw(unsigned long addr, int numpages)
{
	pgprot_t rw;

	pgprot_val(rw) = _PAGE_RW;
	return change_page_attr_set(addr, numpages, rw);
}
EXPORT_SYMBOL(set_memory_rw);

int set_pages_uc(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);
	pgprot_t uncached;

	pgprot_val(uncached) = _PAGE_PCD | _PAGE_PWT;
	return change_page_attr_set(addr, numpages, uncached);
}
EXPORT_SYMBOL(set_pages_uc);

int set_pages_wb(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);
	pgprot_t uncached;

	pgprot_val(uncached) = _PAGE_PCD | _PAGE_PWT;
	return change_page_attr_clear(addr, numpages, uncached);
}
EXPORT_SYMBOL(set_pages_wb);

int set_pages_x(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);
	pgprot_t nx;

	pgprot_val(nx) = _PAGE_NX;
	return change_page_attr_clear(addr, numpages, nx);
}
EXPORT_SYMBOL(set_pages_x);

int set_pages_nx(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);
	pgprot_t nx;

	pgprot_val(nx) = _PAGE_NX;
	return change_page_attr_set(addr, numpages, nx);
}
EXPORT_SYMBOL(set_pages_nx);

int set_pages_ro(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);
	pgprot_t rw;

	pgprot_val(rw) = _PAGE_RW;
	return change_page_attr_clear(addr, numpages, rw);
}
EXPORT_SYMBOL(set_pages_ro);

int set_pages_rw(struct page *page, int numpages)
{
	unsigned long addr = (unsigned long)page_address(page);
	pgprot_t rw;

	pgprot_val(rw) = _PAGE_RW;
	return change_page_attr_set(addr, numpages, rw);
}
EXPORT_SYMBOL(set_pages_rw);

void clflush_cache_range(void *addr, int size)
{
	int i;

	for (i = 0; i < size; i += boot_cpu_data.x86_clflush_size)
		clflush(addr+i);
}

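/*
 * A minimal usage sketch (illustrative, not from the kernel tree):
 * write back and invalidate the cache lines covering a buffer, e.g.
 * before handing it to a device that bypasses the caches. `buf`, `len`
 * and the example_* name are hypothetical.
 */
#if 0
static void example_flush_buffer(void *buf, int len)
{
	clflush_cache_range(buf, len);
	mb();	/* order the flushes against later accesses */
}
#endif
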
static void flush_kernel_map(void *arg)
{
	/*
	 * Flush all to work around an erratum in early Athlons regarding
	 * large page flushing.
	 */
	__flush_tlb_all();

	if (boot_cpu_data.x86_model >= 4)
		wbinvd();
}

void global_flush_tlb(void)
{
	BUG_ON(irqs_disabled());

	on_each_cpu(flush_kernel_map, NULL, 1, 1);
}
EXPORT_SYMBOL(global_flush_tlb);

#ifdef CONFIG_DEBUG_PAGEALLOC
void kernel_map_pages(struct page *page, int numpages, int enable)
{
	if (PageHighMem(page))
		return;
	if (!enable) {
		debug_check_no_locks_freed(page_address(page),
					   numpages * PAGE_SIZE);
	}

	/*
	 * If the page allocator is not up yet then do not call c_p_a():
	 */
	if (!debug_pagealloc_enabled)
		return;

	/*
	 * The return value is ignored - the calls cannot fail,
	 * large pages are disabled at boot time:
	 */
	change_page_attr(page, numpages, enable ? PAGE_KERNEL : __pgprot(0));

	/*
	 * We should perform an IPI and flush all TLBs,
	 * but that can deadlock, so flush only the current CPU:
	 */
	__flush_tlb_all();
}
#endif