#ifndef _ASM_X86_PGTABLE_H
#define _ASM_X86_PGTABLE_H

#include <asm/page.h>
#include <asm/e820.h>

#include <asm/pgtable_types.h>

/*
 * Macro to mark a page protection value as UC-
 */
#define pgprot_noncached(prot)                                          \
        ((boot_cpu_data.x86 > 3)                                        \
         ? (__pgprot(pgprot_val(prot) | _PAGE_CACHE_UC_MINUS))          \
         : (prot))
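
/*
 * Editor's illustration (not part of the original header): a driver that
 * wants an uncached user mapping would typically apply this to the VMA's
 * protection before remapping, e.g.
 *
 *      vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *      remap_pfn_range(vma, vma->vm_start, pfn, size, vma->vm_page_prot);
 */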

#ifndef __ASSEMBLY__

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

extern spinlock_t pgd_lock;
extern struct list_head pgd_list;

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else  /* !CONFIG_PARAVIRT */
#define set_pte(ptep, pte)              native_set_pte(ptep, pte)
#define set_pte_at(mm, addr, ptep, pte) native_set_pte_at(mm, addr, ptep, pte)

#define set_pte_atomic(ptep, pte)                                       \
        native_set_pte_atomic(ptep, pte)

#define set_pmd(pmdp, pmd)              native_set_pmd(pmdp, pmd)

#ifndef __PAGETABLE_PUD_FOLDED
#define set_pgd(pgdp, pgd)              native_set_pgd(pgdp, pgd)
#define pgd_clear(pgd)                  native_pgd_clear(pgd)
#endif

#ifndef set_pud
# define set_pud(pudp, pud)             native_set_pud(pudp, pud)
#endif

#ifndef __PAGETABLE_PMD_FOLDED
#define pud_clear(pud)                  native_pud_clear(pud)
#endif

#define pte_clear(mm, addr, ptep)       native_pte_clear(mm, addr, ptep)
#define pmd_clear(pmd)                  native_pmd_clear(pmd)

#define pte_update(mm, addr, ptep)              do { } while (0)
#define pte_update_defer(mm, addr, ptep)        do { } while (0)

static inline void __init paravirt_pagetable_setup_start(pgd_t *base)
{
        native_pagetable_setup_start(base);
}

static inline void __init paravirt_pagetable_setup_done(pgd_t *base)
{
        native_pagetable_setup_done(base);
}

#define pgd_val(x)      native_pgd_val(x)
#define __pgd(x)        native_make_pgd(x)

#ifndef __PAGETABLE_PUD_FOLDED
#define pud_val(x)      native_pud_val(x)
#define __pud(x)        native_make_pud(x)
#endif

#ifndef __PAGETABLE_PMD_FOLDED
#define pmd_val(x)      native_pmd_val(x)
#define __pmd(x)        native_make_pmd(x)
#endif

#define pte_val(x)      native_pte_val(x)
#define __pte(x)        native_make_pte(x)

#define arch_end_context_switch(prev)   do {} while (0)

#endif  /* CONFIG_PARAVIRT */

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not.
 */
static inline int pte_dirty(pte_t pte)
{
        return pte_flags(pte) & _PAGE_DIRTY;
}

static inline int pte_young(pte_t pte)
{
        return pte_flags(pte) & _PAGE_ACCESSED;
}

static inline int pte_write(pte_t pte)
{
        return pte_flags(pte) & _PAGE_RW;
}

static inline int pte_file(pte_t pte)
{
        return pte_flags(pte) & _PAGE_FILE;
}

static inline int pte_huge(pte_t pte)
{
        return pte_flags(pte) & _PAGE_PSE;
}

static inline int pte_global(pte_t pte)
{
        return pte_flags(pte) & _PAGE_GLOBAL;
}

static inline int pte_exec(pte_t pte)
{
        return !(pte_flags(pte) & _PAGE_NX);
}

static inline int pte_special(pte_t pte)
{
        return pte_flags(pte) & _PAGE_SPECIAL;
}

static inline unsigned long pte_pfn(pte_t pte)
{
        return (pte_val(pte) & PTE_PFN_MASK) >> PAGE_SHIFT;
}

#define pte_page(pte)   pfn_to_page(pte_pfn(pte))

static inline int pmd_large(pmd_t pte)
{
        return (pmd_flags(pte) & (_PAGE_PSE | _PAGE_PRESENT)) ==
                (_PAGE_PSE | _PAGE_PRESENT);
}

static inline pte_t pte_set_flags(pte_t pte, pteval_t set)
{
        pteval_t v = native_pte_val(pte);

        return native_make_pte(v | set);
}

static inline pte_t pte_clear_flags(pte_t pte, pteval_t clear)
{
        pteval_t v = native_pte_val(pte);

        return native_make_pte(v & ~clear);
}
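
/*
 * Editor's note (added): the pte_mk*()/pte_clr*() helpers below are all
 * thin wrappers around pte_set_flags()/pte_clear_flags(). They compose,
 * so making a PTE clean and read-only can be written as:
 *
 *      pte = pte_wrprotect(pte_mkclean(pte));
 */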

static inline pte_t pte_mkclean(pte_t pte)
{
        return pte_clear_flags(pte, _PAGE_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
        return pte_clear_flags(pte, _PAGE_ACCESSED);
}

static inline pte_t pte_wrprotect(pte_t pte)
{
        return pte_clear_flags(pte, _PAGE_RW);
}

static inline pte_t pte_mkexec(pte_t pte)
{
        return pte_clear_flags(pte, _PAGE_NX);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
        return pte_set_flags(pte, _PAGE_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
        return pte_set_flags(pte, _PAGE_ACCESSED);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
        return pte_set_flags(pte, _PAGE_RW);
}

static inline pte_t pte_mkhuge(pte_t pte)
{
        return pte_set_flags(pte, _PAGE_PSE);
}

static inline pte_t pte_clrhuge(pte_t pte)
{
        return pte_clear_flags(pte, _PAGE_PSE);
}

static inline pte_t pte_mkglobal(pte_t pte)
{
        return pte_set_flags(pte, _PAGE_GLOBAL);
}

static inline pte_t pte_clrglobal(pte_t pte)
{
        return pte_clear_flags(pte, _PAGE_GLOBAL);
}

static inline pte_t pte_mkspecial(pte_t pte)
{
        return pte_set_flags(pte, _PAGE_SPECIAL);
}

/*
 * Mask out unsupported bits in a present pgprot. Non-present pgprots
 * can use those bits for other purposes, so leave them be.
 */
static inline pgprotval_t massage_pgprot(pgprot_t pgprot)
{
        pgprotval_t protval = pgprot_val(pgprot);

        if (protval & _PAGE_PRESENT)
                protval &= __supported_pte_mask;

        return protval;
}

static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
{
        return __pte(((phys_addr_t)page_nr << PAGE_SHIFT) |
                     massage_pgprot(pgprot));
}

static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
{
        return __pmd(((phys_addr_t)page_nr << PAGE_SHIFT) |
                     massage_pgprot(pgprot));
}
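
/*
 * Editor's sketch (assumption: PAGE_KERNEL is visible via
 * <asm/pgtable_types.h>): building a present, writable kernel PTE for
 * a page frame number looks like
 *
 *      pte_t pte = pfn_pte(pfn, PAGE_KERNEL);
 *
 * massage_pgprot() then drops bits such as _PAGE_NX on CPUs whose
 * __supported_pte_mask does not include them.
 */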

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
        pteval_t val = pte_val(pte);

        /*
         * Chop off the NX bit (if present), and add the NX portion of
         * the newprot (if present):
         */
        val &= _PAGE_CHG_MASK;
        val |= massage_pgprot(newprot) & ~_PAGE_CHG_MASK;

        return __pte(val);
}
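
/*
 * Editor's illustration: pte_modify() is the mprotect()-style primitive.
 * It keeps the pfn plus the dirty/accessed state and swaps only the
 * protection bits, e.g.
 *
 *      pte = pte_modify(pte, vma->vm_page_prot);
 */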

/* mprotect needs to preserve PAT bits when updating vm_page_prot */
#define pgprot_modify pgprot_modify
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
        pgprotval_t preservebits = pgprot_val(oldprot) & _PAGE_CHG_MASK;
        pgprotval_t addbits = pgprot_val(newprot);
        return __pgprot(preservebits | addbits);
}

#define pte_pgprot(x)   __pgprot(pte_flags(x) & PTE_FLAGS_MASK)

#define canon_pgprot(p) __pgprot(massage_pgprot(p))

static inline int is_new_memtype_allowed(u64 paddr, unsigned long size,
                                         unsigned long flags,
                                         unsigned long new_flags)
{
        /*
         * PAT type is always WB for ISA. So no need to check.
         */
        if (is_ISA_range(paddr, paddr + size - 1))
                return 1;

        /*
         * Certain new memtypes are not allowed with certain
         * requested memtype:
         * - request is uncached, return cannot be write-back
         * - request is write-combine, return cannot be write-back
         */
        if ((flags == _PAGE_CACHE_UC_MINUS &&
             new_flags == _PAGE_CACHE_WB) ||
            (flags == _PAGE_CACHE_WC &&
             new_flags == _PAGE_CACHE_WB)) {
                return 0;
        }

        return 1;
}
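
/*
 * Editor's summary of the check above, as a truth table:
 *
 *      requested (flags)       returned (new_flags)    allowed?
 *      _PAGE_CACHE_UC_MINUS    _PAGE_CACHE_WB          no
 *      _PAGE_CACHE_WC          _PAGE_CACHE_WB          no
 *      any other combination                           yes
 */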

pmd_t *populate_extra_pmd(unsigned long vaddr);
pte_t *populate_extra_pte(unsigned long vaddr);

#endif  /* __ASSEMBLY__ */

#ifdef CONFIG_X86_32
# include "pgtable_32.h"
#else
# include "pgtable_64.h"
#endif

#ifndef __ASSEMBLY__

#include <linux/mm_types.h>

static inline int pte_none(pte_t pte)
{
        return !pte.pte;
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
        return a.pte == b.pte;
}

static inline int pte_present(pte_t a)
{
        return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE);
}

static inline int pte_hidden(pte_t pte)
{
        return pte_flags(pte) & _PAGE_HIDDEN;
}

static inline int pmd_present(pmd_t pmd)
{
        return pmd_flags(pmd) & _PAGE_PRESENT;
}

static inline int pmd_none(pmd_t pmd)
{
        /* Only check the low word on 32-bit platforms, since it might be
           out of sync with the upper half. */
        return (unsigned long)native_pmd_val(pmd) == 0;
}

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
        return (unsigned long)__va(pmd_val(pmd) & PTE_PFN_MASK);
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pmd_page(pmd)   pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)

/*
 * the pmd page can be thought of as an array like this: pmd_t[PTRS_PER_PMD]
 *
 * this function returns the index of the entry in the pmd page which would
 * control the given virtual address
 */
static inline unsigned pmd_index(unsigned long address)
{
        return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * (Currently stuck as a macro because of indirect forward reference
 * to linux/mm.h:page_to_nid())
 */
#define mk_pte(page, pgprot)   pfn_pte(page_to_pfn(page), (pgprot))

/*
 * the pte page can be thought of as an array like this: pte_t[PTRS_PER_PTE]
 *
 * this function returns the index of the entry in the pte page which would
 * control the given virtual address
 */
static inline unsigned pte_index(unsigned long address)
{
        return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
}

static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
{
        return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address);
}
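
/*
 * Editor's sketch: a complete software walk from a kernel virtual
 * address down to its PTE, using only helpers from this header (assumes
 * a non-folded four-level layout; p*_none()/p*_bad() checks omitted):
 *
 *      pgd_t *pgd = pgd_offset_k(addr);
 *      pud_t *pud = pud_offset(pgd, addr);
 *      pmd_t *pmd = pmd_offset(pud, addr);
 *      pte_t *pte = pte_offset_kernel(pmd, addr);
 */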

static inline int pmd_bad(pmd_t pmd)
{
        return (pmd_flags(pmd) & ~_PAGE_USER) != _KERNPG_TABLE;
}

static inline unsigned long pages_to_mb(unsigned long npg)
{
        return npg >> (20 - PAGE_SHIFT);
}
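
/*
 * Editor's worked example: with 4 KiB pages (PAGE_SHIFT == 12) the
 * shift is 20 - 12 = 8, so pages_to_mb(256) == 1, i.e. 256 pages of
 * 4 KiB make up one megabyte.
 */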

#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
        remap_pfn_range(vma, vaddr, pfn, size, prot)

#if PAGETABLE_LEVELS > 2
static inline int pud_none(pud_t pud)
{
        return native_pud_val(pud) == 0;
}

static inline int pud_present(pud_t pud)
{
        return pud_flags(pud) & _PAGE_PRESENT;
}

static inline unsigned long pud_page_vaddr(pud_t pud)
{
        return (unsigned long)__va((unsigned long)pud_val(pud) & PTE_PFN_MASK);
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pud_page(pud)   pfn_to_page(pud_val(pud) >> PAGE_SHIFT)

/* Find an entry in the second-level page table. */
static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
        return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address);
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
        return (pmd_val(pmd) & PTE_PFN_MASK) >> PAGE_SHIFT;
}

static inline int pud_large(pud_t pud)
{
        return (pud_val(pud) & (_PAGE_PSE | _PAGE_PRESENT)) ==
                (_PAGE_PSE | _PAGE_PRESENT);
}

static inline int pud_bad(pud_t pud)
{
        return (pud_flags(pud) & ~(_KERNPG_TABLE | _PAGE_USER)) != 0;
}
#else
static inline int pud_large(pud_t pud)
{
        return 0;
}
#endif  /* PAGETABLE_LEVELS > 2 */

#if PAGETABLE_LEVELS > 3
static inline int pgd_present(pgd_t pgd)
{
        return pgd_flags(pgd) & _PAGE_PRESENT;
}

static inline unsigned long pgd_page_vaddr(pgd_t pgd)
{
        return (unsigned long)__va((unsigned long)pgd_val(pgd) & PTE_PFN_MASK);
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pgd_page(pgd)   pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT)

/* to find an entry in a page-table-directory. */
static inline unsigned pud_index(unsigned long address)
{
        return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1);
}

static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
{
        return (pud_t *)pgd_page_vaddr(*pgd) + pud_index(address);
}

static inline int pgd_bad(pgd_t pgd)
{
        return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
}

static inline int pgd_none(pgd_t pgd)
{
        return !native_pgd_val(pgd);
}
#endif  /* PAGETABLE_LEVELS > 3 */

#endif  /* __ASSEMBLY__ */

/*
 * the pgd page can be thought of as an array like this: pgd_t[PTRS_PER_PGD]
 *
 * this macro returns the index of the entry in the pgd page which would
 * control the given virtual address
 */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

/*
 * pgd_offset() returns a (pgd_t *)
 * pgd_index() is used to get the offset into the pgd page's array of pgd_t's;
 */
#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))

/*
 * a shortcut which implies the use of the kernel's pgd, instead
 * of a process's
 */
#define pgd_offset_k(address) pgd_offset(&init_mm, (address))

#define KERNEL_PGD_BOUNDARY     pgd_index(PAGE_OFFSET)
#define KERNEL_PGD_PTRS         (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
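
/*
 * Editor's worked example (assuming 32-bit non-PAE with the default
 * 3G/1G split): PAGE_OFFSET is 0xC0000000 and PGDIR_SHIFT is 22, so
 * KERNEL_PGD_BOUNDARY is 768 and KERNEL_PGD_PTRS is 1024 - 768 = 256
 * pgd slots reserved for the kernel mapping.
 */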

#ifndef __ASSEMBLY__

extern int direct_gbpages;

/* local pte updates need not use xchg for locking */
static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
{
        pte_t res = *ptep;

        /* Pure native function needs no input for mm, addr */
        native_pte_clear(NULL, 0, ptep);
        return res;
}

static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr,
                                     pte_t *ptep, pte_t pte)
{
        native_set_pte(ptep, pte);
}

#ifndef CONFIG_PARAVIRT
/*
 * Rules for using pte_update - it must be called after any PTE update which
 * has not been done using the set_pte / clear_pte interfaces. It is used by
 * shadow mode hypervisors to resynchronize the shadow page tables. Kernel PTE
 * updates should either be sets, clears, or set_pte_atomic for P->P
 * transitions, which means this hook should only be called for user PTEs.
 * This hook implies a P->P protection or access change has taken place, which
 * requires a subsequent TLB flush. The notification can optionally be delayed
 * until the TLB flush event by using the pte_update_defer form of the
 * interface, but care must be taken to assure that the flush happens while
 * still holding the same page table lock so that the shadow and primary pages
 * do not become out of sync on SMP.
 */
#define pte_update(mm, addr, ptep)              do { } while (0)
#define pte_update_defer(mm, addr, ptep)        do { } while (0)
#endif

/*
 * We only update the dirty/accessed state if we set
 * the dirty bit by hand in the kernel, since the hardware
 * will do the accessed bit for us, and we don't want to
 * race with other CPUs that might be updating the dirty
 * bit at the same time.
 */
struct vm_area_struct;

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
                                 unsigned long address, pte_t *ptep,
                                 pte_t entry, int dirty);

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
extern int ptep_test_and_clear_young(struct vm_area_struct *vma,
                                     unsigned long addr, pte_t *ptep);

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
extern int ptep_clear_flush_young(struct vm_area_struct *vma,
                                  unsigned long address, pte_t *ptep);

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
                                       pte_t *ptep)
{
        pte_t pte = native_ptep_get_and_clear(ptep);
        pte_update(mm, addr, ptep);
        return pte;
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
                                            unsigned long addr, pte_t *ptep,
                                            int full)
{
        pte_t pte;
        if (full) {
                /*
                 * Full address space destruction is in progress; paravirt
                 * does not care about updates and native needs no locking
                 */
                pte = native_local_ptep_get_and_clear(ptep);
        } else {
                pte = ptep_get_and_clear(mm, addr, ptep);
        }
        return pte;
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
                                      unsigned long addr, pte_t *ptep)
{
        clear_bit(_PAGE_BIT_RW, (unsigned long *)&ptep->pte);
        pte_update(mm, addr, ptep);
}

/*
 * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
 *
 *  dst - pointer to pgd range anywhere on a pgd page
 *  src - same, for the source range
 *  count - the number of pgds to copy.
 *
 * dst and src can be on the same page, but the range must not overlap,
 * and must not cross a page boundary.
 */
static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
{
        memcpy(dst, src, count * sizeof(pgd_t));
}
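
/*
 * Editor's illustration: pgd-constructor code typically uses
 * clone_pgd_range() to copy the kernel half of a reference pgd (e.g.
 * swapper_pg_dir) into a freshly allocated process pgd:
 *
 *      clone_pgd_range(pgd + KERNEL_PGD_BOUNDARY,
 *                      swapper_pg_dir + KERNEL_PGD_BOUNDARY,
 *                      KERNEL_PGD_PTRS);
 */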

#include <asm-generic/pgtable.h>

#endif  /* __ASSEMBLY__ */

#endif  /* _ASM_X86_PGTABLE_H */