#ifndef _ASM_X86_PGTABLE_H
#define _ASM_X86_PGTABLE_H

#include <asm/page.h>
#include <asm/e820.h>

#include <asm/pgtable_types.h>

/*
 * Macro to mark a page protection value as UC-
 */
#define pgprot_noncached(prot)						\
	((boot_cpu_data.x86 > 3)					\
	 ? (__pgprot(pgprot_val(prot) | _PAGE_CACHE_UC_MINUS))		\
	 : (prot))

#ifndef __ASSEMBLY__

#include <asm/x86_init.h>

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
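
/*
 * Hedged usage sketch (illustrative, not part of this header): an
 * anonymous read fault can be satisfied by mapping the shared zero
 * page read-only, roughly:
 *
 *	pte_t pte = mk_pte(ZERO_PAGE(address), vma->vm_page_prot);
 *
 * Leaving the PTE write-protected is what lets a later write fault
 * allocate a private page instead.
 */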
extern spinlock_t pgd_lock;
extern struct list_head pgd_list;

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else  /* !CONFIG_PARAVIRT */
#define set_pte(ptep, pte)		native_set_pte(ptep, pte)
#define set_pte_at(mm, addr, ptep, pte)	native_set_pte_at(mm, addr, ptep, pte)

#define set_pte_atomic(ptep, pte)					\
	native_set_pte_atomic(ptep, pte)

#define set_pmd(pmdp, pmd)		native_set_pmd(pmdp, pmd)

#ifndef __PAGETABLE_PUD_FOLDED
#define set_pgd(pgdp, pgd)		native_set_pgd(pgdp, pgd)
#define pgd_clear(pgd)			native_pgd_clear(pgd)
#endif

#ifndef set_pud
# define set_pud(pudp, pud)		native_set_pud(pudp, pud)
#endif

#ifndef __PAGETABLE_PMD_FOLDED
#define pud_clear(pud)			native_pud_clear(pud)
#endif

#define pte_clear(mm, addr, ptep)	native_pte_clear(mm, addr, ptep)
#define pmd_clear(pmd)			native_pmd_clear(pmd)

#define pte_update(mm, addr, ptep)		do { } while (0)
#define pte_update_defer(mm, addr, ptep)	do { } while (0)

#define pgd_val(x)	native_pgd_val(x)
#define __pgd(x)	native_make_pgd(x)

#ifndef __PAGETABLE_PUD_FOLDED
#define pud_val(x)	native_pud_val(x)
#define __pud(x)	native_make_pud(x)
#endif

#ifndef __PAGETABLE_PMD_FOLDED
#define pmd_val(x)	native_pmd_val(x)
#define __pmd(x)	native_make_pmd(x)
#endif

#define pte_val(x)	native_pte_val(x)
#define __pte(x)	native_make_pte(x)

#define arch_end_context_switch(prev)	do { } while (0)

#endif	/* CONFIG_PARAVIRT */
/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_DIRTY;
}

static inline int pte_young(pte_t pte)
{
	return pte_flags(pte) & _PAGE_ACCESSED;
}

static inline int pte_write(pte_t pte)
{
	return pte_flags(pte) & _PAGE_RW;
}

static inline int pte_file(pte_t pte)
{
	return pte_flags(pte) & _PAGE_FILE;
}

static inline int pte_huge(pte_t pte)
{
	return pte_flags(pte) & _PAGE_PSE;
}

static inline int pte_global(pte_t pte)
{
	return pte_flags(pte) & _PAGE_GLOBAL;
}

static inline int pte_exec(pte_t pte)
{
	return !(pte_flags(pte) & _PAGE_NX);
}

static inline int pte_special(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SPECIAL;
}

static inline unsigned long pte_pfn(pte_t pte)
{
	return (pte_val(pte) & PTE_PFN_MASK) >> PAGE_SHIFT;
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	return (pmd_val(pmd) & PTE_PFN_MASK) >> PAGE_SHIFT;
}

#define pte_page(pte)	pfn_to_page(pte_pfn(pte))

static inline int pmd_large(pmd_t pmd)
{
	return (pmd_flags(pmd) & (_PAGE_PSE | _PAGE_PRESENT)) ==
		(_PAGE_PSE | _PAGE_PRESENT);
}

static inline pte_t pte_set_flags(pte_t pte, pteval_t set)
{
	pteval_t v = native_pte_val(pte);

	return native_make_pte(v | set);
}

static inline pte_t pte_clear_flags(pte_t pte, pteval_t clear)
{
	pteval_t v = native_pte_val(pte);

	return native_make_pte(v & ~clear);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_ACCESSED);
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_RW);
}

static inline pte_t pte_mkexec(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_NX);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_ACCESSED);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_RW);
}

static inline pte_t pte_mkhuge(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_PSE);
}

static inline pte_t pte_clrhuge(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_PSE);
}

static inline pte_t pte_mkglobal(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_GLOBAL);
}

static inline pte_t pte_clrglobal(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_GLOBAL);
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SPECIAL);
}
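
/*
 * Hedged example (illustrative only): these helpers are pure value
 * transformations, so write-protecting a present PTE while marking it
 * young and dirty can be composed as:
 *
 *	pte_t pte = *ptep;			// hypothetical caller context
 *	pte = pte_mkdirty(pte_mkyoung(pte_wrprotect(pte)));
 *	set_pte_at(mm, addr, ptep, pte);
 *
 * Nothing reaches the page table until set_pte_at() (or a similar
 * setter) is called.
 */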
/*
 * Mask out unsupported bits in a present pgprot.  Non-present pgprots
 * can use those bits for other purposes, so leave them be.
 */
static inline pgprotval_t massage_pgprot(pgprot_t pgprot)
{
	pgprotval_t protval = pgprot_val(pgprot);

	if (protval & _PAGE_PRESENT)
		protval &= __supported_pte_mask;

	return protval;
}

static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
{
	return __pte(((phys_addr_t)page_nr << PAGE_SHIFT) |
		     massage_pgprot(pgprot));
}

static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
{
	return __pmd(((phys_addr_t)page_nr << PAGE_SHIFT) |
		     massage_pgprot(pgprot));
}
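
/*
 * Hedged sketch: building an entry from a page frame number.  Because
 * massage_pgprot() filters a present pgprot through
 * __supported_pte_mask, something like
 *
 *	pte_t pte = pfn_pte(pfn, PAGE_KERNEL);	// pfn: hypothetical variable
 *
 * only keeps the NX bit when the CPU actually supports it, rather than
 * creating a reserved-bit fault on older hardware.
 */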
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pteval_t val = pte_val(pte);

	/*
	 * Chop off the NX bit (if present), and add the NX portion of
	 * the newprot (if present):
	 */
	val &= _PAGE_CHG_MASK;
	val |= massage_pgprot(newprot) & ~_PAGE_CHG_MASK;

	return __pte(val);
}

/* mprotect needs to preserve PAT bits when updating vm_page_prot */
#define pgprot_modify pgprot_modify
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
	pgprotval_t preservebits = pgprot_val(oldprot) & _PAGE_CHG_MASK;
	pgprotval_t addbits = pgprot_val(newprot);
	return __pgprot(preservebits | addbits);
}
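
/*
 * Hedged example: an mprotect-style permission change keeps the bits in
 * _PAGE_CHG_MASK (pfn, cache attributes, accessed, dirty, ...) and
 * takes everything else from the new protection, roughly:
 *
 *	pte_t old = *ptep;			// hypothetical caller context
 *	pte_t new = pte_modify(old, vma->vm_page_prot);
 *
 * so a dirty, accessed page stays dirty and accessed across the change.
 */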
#define pte_pgprot(x) __pgprot(pte_flags(x) & PTE_FLAGS_MASK)

#define canon_pgprot(p) __pgprot(massage_pgprot(p))

static inline int is_new_memtype_allowed(u64 paddr, unsigned long size,
					 unsigned long flags,
					 unsigned long new_flags)
{
	/*
	 * PAT type is always WB for untracked ranges, so no need to check.
	 */
	if (x86_platform.is_untracked_pat_range(paddr, paddr + size))
		return 1;

	/*
	 * Certain new memtypes are not allowed with certain
	 * requested memtype:
	 * - request is uncached, return cannot be write-back
	 * - request is write-combine, return cannot be write-back
	 */
	if ((flags == _PAGE_CACHE_UC_MINUS &&
	     new_flags == _PAGE_CACHE_WB) ||
	    (flags == _PAGE_CACHE_WC &&
	     new_flags == _PAGE_CACHE_WB)) {
		return 0;
	}

	return 1;
}
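
/*
 * Illustrative summary of the check above (derived from the code, not
 * an exhaustive PAT compatibility table):
 *
 *	requested (flags)	returned (new_flags)	allowed?
 *	_PAGE_CACHE_UC_MINUS	_PAGE_CACHE_WB		no
 *	_PAGE_CACHE_WC		_PAGE_CACHE_WB		no
 *	anything else		anything else		yes
 *
 * i.e. an uncached or write-combining request must never silently end
 * up with a write-back mapping.
 */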
pmd_t *populate_extra_pmd(unsigned long vaddr);
pte_t *populate_extra_pte(unsigned long vaddr);
#endif	/* __ASSEMBLY__ */

#ifdef CONFIG_X86_32
# include "pgtable_32.h"
#else
# include "pgtable_64.h"
#endif

#ifndef __ASSEMBLY__
#include <linux/mm_types.h>

static inline int pte_none(pte_t pte)
{
	return !pte.pte;
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
	return a.pte == b.pte;
}

static inline int pte_present(pte_t a)
{
	return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE);
}

static inline int pte_hidden(pte_t pte)
{
	return pte_flags(pte) & _PAGE_HIDDEN;
}

static inline int pmd_present(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_PRESENT;
}

static inline int pmd_none(pmd_t pmd)
{
	/* Only check low word on 32-bit platforms, since it might be
	   out of sync with upper half. */
	return (unsigned long)native_pmd_val(pmd) == 0;
}

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	return (unsigned long)__va(pmd_val(pmd) & PTE_PFN_MASK);
}
/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pmd_page(pmd)	pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)

/*
 * The pmd page can be thought of as an array like this: pmd_t[PTRS_PER_PMD]
 *
 * this function returns the index of the entry in the pmd page which would
 * control the given virtual address
 */
static inline unsigned long pmd_index(unsigned long address)
{
	return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
}
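
/*
 * Worked example (assuming x86_64 defaults, PMD_SHIFT == 21 and
 * PTRS_PER_PMD == 512): for the direct-map address 0xffff880012345678,
 *
 *	pmd_index() == (0x12345678 >> 21) & 511 == 0x91 (145)
 *
 * i.e. bits 21..29 of the address select entry 145 of the pmd page.
 */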
/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * (Currently stuck as a macro because of indirect forward reference
 * to linux/mm.h:page_to_nid())
 */
#define mk_pte(page, pgprot)   pfn_pte(page_to_pfn(page), (pgprot))

/*
 * The pte page can be thought of as an array like this: pte_t[PTRS_PER_PTE]
 *
 * this function returns the index of the entry in the pte page which would
 * control the given virtual address
 */
static inline unsigned long pte_index(unsigned long address)
{
	return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
}

static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
{
	return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address);
}

static inline int pmd_bad(pmd_t pmd)
{
	return (pmd_flags(pmd) & ~_PAGE_USER) != _KERNPG_TABLE;
}

static inline unsigned long pages_to_mb(unsigned long npg)
{
	return npg >> (20 - PAGE_SHIFT);
}
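
/*
 * Worked example: with PAGE_SHIFT == 12 (4 KiB pages) this is npg >> 8,
 * since one MiB holds 2^(20 - 12) == 256 pages; e.g.
 * pages_to_mb(1024) == 4.
 */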
#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)	\
	remap_pfn_range(vma, vaddr, pfn, size, prot)

#if PAGETABLE_LEVELS > 2
static inline int pud_none(pud_t pud)
{
	return native_pud_val(pud) == 0;
}

static inline int pud_present(pud_t pud)
{
	return pud_flags(pud) & _PAGE_PRESENT;
}

static inline unsigned long pud_page_vaddr(pud_t pud)
{
	return (unsigned long)__va((unsigned long)pud_val(pud) & PTE_PFN_MASK);
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pud_page(pud)	pfn_to_page(pud_val(pud) >> PAGE_SHIFT)

/* Find an entry in the second-level page table.. */
static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
	return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address);
}

static inline int pud_large(pud_t pud)
{
	return (pud_val(pud) & (_PAGE_PSE | _PAGE_PRESENT)) ==
		(_PAGE_PSE | _PAGE_PRESENT);
}

static inline int pud_bad(pud_t pud)
{
	return (pud_flags(pud) & ~(_KERNPG_TABLE | _PAGE_USER)) != 0;
}
#else
static inline int pud_large(pud_t pud)
{
	return 0;
}
#endif	/* PAGETABLE_LEVELS > 2 */

#if PAGETABLE_LEVELS > 3
static inline int pgd_present(pgd_t pgd)
{
	return pgd_flags(pgd) & _PAGE_PRESENT;
}

static inline unsigned long pgd_page_vaddr(pgd_t pgd)
{
	return (unsigned long)__va((unsigned long)pgd_val(pgd) & PTE_PFN_MASK);
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pgd_page(pgd)	pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT)

/* to find an entry in a page-table-directory. */
static inline unsigned long pud_index(unsigned long address)
{
	return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1);
}

static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
{
	return (pud_t *)pgd_page_vaddr(*pgd) + pud_index(address);
}

static inline int pgd_bad(pgd_t pgd)
{
	return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
}

static inline int pgd_none(pgd_t pgd)
{
	return !native_pgd_val(pgd);
}
#endif	/* PAGETABLE_LEVELS > 3 */
#endif	/* __ASSEMBLY__ */

/*
 * The pgd page can be thought of as an array like this: pgd_t[PTRS_PER_PGD]
 *
 * this macro returns the index of the entry in the pgd page which would
 * control the given virtual address
 */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

/*
 * pgd_offset() returns a (pgd_t *)
 * pgd_index() is used to get the offset into the pgd page's array of pgd_t's;
 */
#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))

/*
 * a shortcut which implies the use of the kernel's pgd, instead
 * of a process's
 */
#define pgd_offset_k(address) pgd_offset(&init_mm, (address))

#define KERNEL_PGD_BOUNDARY	pgd_index(PAGE_OFFSET)
#define KERNEL_PGD_PTRS		(PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
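
/*
 * Hedged sketch of a full four-level software walk using the helpers
 * above (assumes PAGETABLE_LEVELS > 3, and that each level is present
 * and not a large page; real callers must check pgd_none(), pud_large()
 * and friends at every step):
 *
 *	pgd_t *pgd = pgd_offset_k(addr);	// or pgd_offset(mm, addr)
 *	pud_t *pud = pud_offset(pgd, addr);
 *	pmd_t *pmd = pmd_offset(pud, addr);
 *	pte_t *pte = pte_offset_kernel(pmd, addr);
 */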
#ifndef __ASSEMBLY__

extern int direct_gbpages;

/* local pte updates need not use xchg for locking */
static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
{
	pte_t res = *ptep;

	/* Pure native function needs no input for mm, addr */
	native_pte_clear(NULL, 0, ptep);
	return res;
}

static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr,
				     pte_t *ptep, pte_t pte)
{
	native_set_pte(ptep, pte);
}

#ifndef CONFIG_PARAVIRT
/*
 * Rules for using pte_update - it must be called after any PTE update which
 * has not been done using the set_pte / clear_pte interfaces.  It is used by
 * shadow mode hypervisors to resynchronize the shadow page tables.  Kernel PTE
 * updates should either be sets, clears, or set_pte_atomic for P->P
 * transitions, which means this hook should only be called for user PTEs.
 * This hook implies a P->P protection or access change has taken place, which
 * requires a subsequent TLB flush.  The notification can optionally be delayed
 * until the TLB flush event by using the pte_update_defer form of the
 * interface, but care must be taken to assure that the flush happens while
 * still holding the same page table lock so that the shadow and primary pages
 * do not become out of sync on SMP.
 */
#define pte_update(mm, addr, ptep)		do { } while (0)
#define pte_update_defer(mm, addr, ptep)	do { } while (0)
#endif
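
/*
 * Hedged sketch of the pattern the rules above describe: a raw PTE
 * modification is followed by pte_update() and a TLB flush while the
 * page table lock is still held.  The caller context and the choice of
 * lock/flush helpers here are hypothetical:
 *
 *	spin_lock(&mm->page_table_lock);
 *	ptep->pte &= ~_PAGE_RW;		// raw update, not via set_pte()
 *	pte_update(mm, addr, ptep);	// let shadow hypervisors resync
 *	flush_tlb_page(vma, addr);	// flush before dropping the lock
 *	spin_unlock(&mm->page_table_lock);
 */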
/*
 * We only update the dirty/accessed state if we set
 * the dirty bit by hand in the kernel, since the hardware
 * will do the accessed bit for us, and we don't want to
 * race with other CPU's that might be updating the dirty
 * bit at the same time.
 */
struct vm_area_struct;

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pte_t *ptep,
				 pte_t entry, int dirty);

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
extern int ptep_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long addr, pte_t *ptep);

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
extern int ptep_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pte_t *ptep);

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pte_t *ptep)
{
	pte_t pte = native_ptep_get_and_clear(ptep);
	pte_update(mm, addr, ptep);
	return pte;
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep,
					    int full)
{
	pte_t pte;
	if (full) {
		/*
		 * Full address destruction in progress; paravirt does not
		 * care about updates and native needs no locking
		 */
		pte = native_local_ptep_get_and_clear(ptep);
	} else {
		pte = ptep_get_and_clear(mm, addr, ptep);
	}
	return pte;
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	clear_bit(_PAGE_BIT_RW, (unsigned long *)&ptep->pte);
	pte_update(mm, addr, ptep);
}

/*
 * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
 *
 *  dst - pointer to pgd range anywhere on a pgd page
 *  src - ""
 *  count - the number of pgds to copy.
 *
 * dst and src can be on the same page, but the range must not overlap,
 * and must not cross a page boundary.
 */
static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
{
	memcpy(dst, src, count * sizeof(pgd_t));
}
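
/*
 * Hedged usage sketch: pgd setup typically shares the kernel half of
 * the address space by copying the kernel's pgd entries into a fresh
 * pgd, along the lines of:
 *
 *	clone_pgd_range(new_pgd + KERNEL_PGD_BOUNDARY,
 *			swapper_pg_dir + KERNEL_PGD_BOUNDARY,
 *			KERNEL_PGD_PTRS);
 *
 * (new_pgd stands for a hypothetical freshly-allocated pgd page.)
 */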
#include <asm-generic/pgtable.h>

#endif	/* __ASSEMBLY__ */

#endif /* _ASM_X86_PGTABLE_H */