#ifndef _ASM_X86_PGTABLE_H
#define _ASM_X86_PGTABLE_H

#include <asm/page.h>
#include <asm/e820.h>

#include <asm/pgtable_types.h>

/*
 * Macro to mark a page protection value as UC-
 */
#define pgprot_noncached(prot)					\
	((boot_cpu_data.x86 > 3)				\
	 ? (__pgprot(pgprot_val(prot) | _PAGE_CACHE_UC_MINUS))	\
	 : (prot))

#ifndef __ASSEMBLY__
#include <asm/x86_init.h>

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
	__visible;
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

extern spinlock_t pgd_lock;
extern struct list_head pgd_list;

extern struct mm_struct *pgd_page_get_mm(struct page *page);

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else  /* !CONFIG_PARAVIRT */
#define set_pte(ptep, pte)		native_set_pte(ptep, pte)
#define set_pte_at(mm, addr, ptep, pte)	native_set_pte_at(mm, addr, ptep, pte)
#define set_pmd_at(mm, addr, pmdp, pmd)	native_set_pmd_at(mm, addr, pmdp, pmd)

#define set_pte_atomic(ptep, pte)					\
	native_set_pte_atomic(ptep, pte)

#define set_pmd(pmdp, pmd)		native_set_pmd(pmdp, pmd)

#ifndef __PAGETABLE_PUD_FOLDED
#define set_pgd(pgdp, pgd)		native_set_pgd(pgdp, pgd)
#define pgd_clear(pgd)			native_pgd_clear(pgd)
#endif

#ifndef set_pud
# define set_pud(pudp, pud)		native_set_pud(pudp, pud)
#endif

#ifndef __PAGETABLE_PMD_FOLDED
#define pud_clear(pud)			native_pud_clear(pud)
#endif

#define pte_clear(mm, addr, ptep)	native_pte_clear(mm, addr, ptep)
#define pmd_clear(pmd)			native_pmd_clear(pmd)

#define pte_update(mm, addr, ptep)		do { } while (0)
#define pte_update_defer(mm, addr, ptep)	do { } while (0)
#define pmd_update(mm, addr, ptep)		do { } while (0)
#define pmd_update_defer(mm, addr, ptep)	do { } while (0)

#define pgd_val(x)	native_pgd_val(x)
#define __pgd(x)	native_make_pgd(x)

#ifndef __PAGETABLE_PUD_FOLDED
#define pud_val(x)	native_pud_val(x)
#define __pud(x)	native_make_pud(x)
#endif

#ifndef __PAGETABLE_PMD_FOLDED
#define pmd_val(x)	native_pmd_val(x)
#define __pmd(x)	native_make_pmd(x)
#endif

#define pte_val(x)	native_pte_val(x)
#define __pte(x)	native_make_pte(x)

#define arch_end_context_switch(prev)	do { } while (0)

#endif	/* CONFIG_PARAVIRT */

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_DIRTY;
}

static inline int pte_young(pte_t pte)
{
	return pte_flags(pte) & _PAGE_ACCESSED;
}

static inline int pmd_young(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_ACCESSED;
}

static inline int pte_write(pte_t pte)
{
	return pte_flags(pte) & _PAGE_RW;
}

static inline int pte_file(pte_t pte)
{
	return pte_flags(pte) & _PAGE_FILE;
}

static inline int pte_huge(pte_t pte)
{
	return pte_flags(pte) & _PAGE_PSE;
}

static inline int pte_global(pte_t pte)
{
	return pte_flags(pte) & _PAGE_GLOBAL;
}

static inline int pte_exec(pte_t pte)
{
	return !(pte_flags(pte) & _PAGE_NX);
}

static inline int pte_special(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SPECIAL;
}

static inline unsigned long pte_pfn(pte_t pte)
{
	return (pte_val(pte) & PTE_PFN_MASK) >> PAGE_SHIFT;
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	return (pmd_val(pmd) & PTE_PFN_MASK) >> PAGE_SHIFT;
}

static inline unsigned long pud_pfn(pud_t pud)
{
	return (pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT;
}

#define pte_page(pte)	pfn_to_page(pte_pfn(pte))
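
/*
 * Example (illustrative sketch, not part of the original header; pfn is a
 * caller-supplied page frame number): a pte packs the physical frame number
 * and the flag bits into a single word, so the two halves can be pulled
 * apart again with the helpers above:
 *
 *	pte_t pte = pfn_pte(pfn, PAGE_KERNEL);
 *	unsigned long frame = pte_pfn(pte);	(frame == pfn)
 *	pteval_t flags = pte_flags(pte);	(flag bits only, pfn masked out)
 */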
static inline int pmd_large(pmd_t pte)
{
	return pmd_flags(pte) & _PAGE_PSE;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_trans_splitting(pmd_t pmd)
{
	return pmd_val(pmd) & _PAGE_SPLITTING;
}

static inline int pmd_trans_huge(pmd_t pmd)
{
	return pmd_val(pmd) & _PAGE_PSE;
}

static inline int has_transparent_hugepage(void)
{
	return cpu_has_pse;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static inline pte_t pte_set_flags(pte_t pte, pteval_t set)
{
	pteval_t v = native_pte_val(pte);

	return native_make_pte(v | set);
}

static inline pte_t pte_clear_flags(pte_t pte, pteval_t clear)
{
	pteval_t v = native_pte_val(pte);

	return native_make_pte(v & ~clear);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_ACCESSED);
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_RW);
}

static inline pte_t pte_mkexec(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_NX);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_ACCESSED);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_RW);
}

static inline pte_t pte_mkhuge(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_PSE);
}

static inline pte_t pte_clrhuge(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_PSE);
}

static inline pte_t pte_mkglobal(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_GLOBAL);
}

static inline pte_t pte_clrglobal(pte_t pte)
{
	return pte_clear_flags(pte, _PAGE_GLOBAL);
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SPECIAL);
}
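
/*
 * Example (illustrative sketch, not part of the original header; mm, addr,
 * ptep and pfn are caller-supplied): the pte_mk* helpers are pure value
 * transformers, so they compose freely; each returns a new pte instead of
 * modifying one in place, and the result is published with one store:
 *
 *	pte_t pte = pfn_pte(pfn, PAGE_KERNEL_RO);
 *	pte = pte_mkwrite(pte_mkdirty(pte));	(now writable and dirty)
 *	set_pte_at(mm, addr, ptep, pte);
 */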
static inline pmd_t pmd_set_flags(pmd_t pmd, pmdval_t set)
{
	pmdval_t v = native_pmd_val(pmd);

	return __pmd(v | set);
}

static inline pmd_t pmd_clear_flags(pmd_t pmd, pmdval_t clear)
{
	pmdval_t v = native_pmd_val(pmd);

	return __pmd(v & ~clear);
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_ACCESSED);
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_RW);
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_PSE);
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_RW);
}

static inline pmd_t pmd_mknotpresent(pmd_t pmd)
{
	return pmd_clear_flags(pmd, _PAGE_PRESENT);
}

static inline int pte_soft_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SOFT_DIRTY;
}

static inline int pmd_soft_dirty(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_SOFT_DIRTY;
}

static inline pte_t pte_mksoft_dirty(pte_t pte)
{
	return pte_set_flags(pte, _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
{
	return pmd_set_flags(pmd, _PAGE_SOFT_DIRTY);
}

/*
 * Mask out unsupported bits in a present pgprot.  Non-present pgprots
 * can use those bits for other purposes, so leave them be.
 */
static inline pgprotval_t massage_pgprot(pgprot_t pgprot)
{
	pgprotval_t protval = pgprot_val(pgprot);

	if (protval & _PAGE_PRESENT)
		protval &= __supported_pte_mask;

	return protval;
}
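
/*
 * Example (illustrative sketch, not part of the original header): on a CPU
 * without NX support, __supported_pte_mask has _PAGE_NX cleared, so a
 * *present* pgprot that requests NX is silently stripped of that bit,
 * while a non-present pgprot passes through untouched:
 *
 *	pgprotval_t v = massage_pgprot(PAGE_KERNEL);
 *	((v & _PAGE_NX) is 0 if the hardware cannot enforce NX)
 */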
static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
{
	return __pte(((phys_addr_t)page_nr << PAGE_SHIFT) |
		     massage_pgprot(pgprot));
}

static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
{
	return __pmd(((phys_addr_t)page_nr << PAGE_SHIFT) |
		     massage_pgprot(pgprot));
}

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pteval_t val = pte_val(pte);

	/*
	 * Chop off the NX bit (if present), and add the NX portion of
	 * the newprot (if present):
	 */
	val &= _PAGE_CHG_MASK;
	val |= massage_pgprot(newprot) & ~_PAGE_CHG_MASK;

	return __pte(val);
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	pmdval_t val = pmd_val(pmd);

	val &= _HPAGE_CHG_MASK;
	val |= massage_pgprot(newprot) & ~_HPAGE_CHG_MASK;

	return __pmd(val);
}
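
/*
 * Example (illustrative sketch, not part of the original header; pfn is
 * caller-supplied): pte_modify() is the mprotect building block.
 * _PAGE_CHG_MASK covers the pfn plus the accessed/dirty bits, so changing
 * protections preserves the target frame and the hardware-maintained state:
 *
 *	pte_t pte = pfn_pte(pfn, PAGE_SHARED);	(read/write)
 *	pte = pte_modify(pte, PAGE_READONLY);	(drop write permission)
 *	(pte_pfn(pte) is still pfn; dirty/accessed bits survive)
 */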
/* mprotect needs to preserve PAT bits when updating vm_page_prot */
#define pgprot_modify pgprot_modify
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
	pgprotval_t preservebits = pgprot_val(oldprot) & _PAGE_CHG_MASK;
	pgprotval_t addbits = pgprot_val(newprot);
	return __pgprot(preservebits | addbits);
}

#define pte_pgprot(x) __pgprot(pte_flags(x) & PTE_FLAGS_MASK)

#define canon_pgprot(p) __pgprot(massage_pgprot(p))

static inline int is_new_memtype_allowed(u64 paddr, unsigned long size,
					 unsigned long flags,
					 unsigned long new_flags)
{
	/*
	 * PAT type is always WB for untracked ranges, so no need to check.
	 */
	if (x86_platform.is_untracked_pat_range(paddr, paddr + size))
		return 1;

	/*
	 * Certain new memtypes are not allowed with certain
	 * requested memtype:
	 * - request is uncached, return cannot be write-back
	 * - request is write-combine, return cannot be write-back
	 */
	if ((flags == _PAGE_CACHE_UC_MINUS &&
	     new_flags == _PAGE_CACHE_WB) ||
	    (flags == _PAGE_CACHE_WC &&
	     new_flags == _PAGE_CACHE_WB)) {
		return 0;
	}

	return 1;
}

pmd_t *populate_extra_pmd(unsigned long vaddr);
pte_t *populate_extra_pte(unsigned long vaddr);
#endif	/* __ASSEMBLY__ */

#ifdef CONFIG_X86_32
# include <asm/pgtable_32.h>
#else
# include <asm/pgtable_64.h>
#endif

#ifndef __ASSEMBLY__
#include <linux/mm_types.h>
#include <linux/log2.h>

static inline int pte_none(pte_t pte)
{
	return !pte.pte;
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
	return a.pte == b.pte;
}

static inline int pte_present(pte_t a)
{
	return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE |
			       _PAGE_NUMA);
}

#define pte_accessible pte_accessible
static inline int pte_accessible(pte_t a)
{
	return pte_flags(a) & _PAGE_PRESENT;
}

static inline int pte_hidden(pte_t pte)
{
	return pte_flags(pte) & _PAGE_HIDDEN;
}

static inline int pmd_present(pmd_t pmd)
{
	/*
	 * Checking for _PAGE_PSE is needed too because
	 * split_huge_page will temporarily clear the present bit (but
	 * the _PAGE_PSE flag will remain set at all times while the
	 * _PAGE_PRESENT bit is clear).
	 */
	return pmd_flags(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_PSE |
				 _PAGE_NUMA);
}

static inline int pmd_none(pmd_t pmd)
{
	/* Only check low word on 32-bit platforms, since it might be
	   out of sync with upper half. */
	return (unsigned long)native_pmd_val(pmd) == 0;
}

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	return (unsigned long)__va(pmd_val(pmd) & PTE_PFN_MASK);
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pmd_page(pmd)	pfn_to_page((pmd_val(pmd) & PTE_PFN_MASK) >> PAGE_SHIFT)

/*
 * the pmd page can be thought of as an array like this: pmd_t[PTRS_PER_PMD]
 *
 * this macro returns the index of the entry in the pmd page which would
 * control the given virtual address
 */
static inline unsigned long pmd_index(unsigned long address)
{
	return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * (Currently stuck as a macro because of indirect forward reference
 * to linux/mm.h:page_to_nid())
 */
#define mk_pte(page, pgprot)   pfn_pte(page_to_pfn(page), (pgprot))

/*
 * the pte page can be thought of as an array like this: pte_t[PTRS_PER_PTE]
 *
 * this function returns the index of the entry in the pte page which would
 * control the given virtual address
 */
static inline unsigned long pte_index(unsigned long address)
{
	return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
}
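
/*
 * Example (illustrative sketch, not part of the original header; addr is a
 * caller-supplied virtual address): an address is a packed set of table
 * indices plus a page offset, so the index helpers decompose it without
 * touching memory:
 *
 *	unsigned long i_pmd = pmd_index(addr);	(which pmd_t in the pmd page)
 *	unsigned long i_pte = pte_index(addr);	(which pte_t in the pte page)
 *	unsigned long off = addr & ~PAGE_MASK;	(byte offset within the page)
 */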
static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
{
	return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address);
}

static inline int pmd_bad(pmd_t pmd)
{
#ifdef CONFIG_NUMA_BALANCING
	/* pmd_numa check */
	if ((pmd_flags(pmd) & (_PAGE_NUMA|_PAGE_PRESENT)) == _PAGE_NUMA)
		return 0;
#endif
	return (pmd_flags(pmd) & ~_PAGE_USER) != _KERNPG_TABLE;
}

static inline unsigned long pages_to_mb(unsigned long npg)
{
	return npg >> (20 - PAGE_SHIFT);
}

#if PAGETABLE_LEVELS > 2
static inline int pud_none(pud_t pud)
{
	return native_pud_val(pud) == 0;
}

static inline int pud_present(pud_t pud)
{
	return pud_flags(pud) & _PAGE_PRESENT;
}

static inline unsigned long pud_page_vaddr(pud_t pud)
{
	return (unsigned long)__va((unsigned long)pud_val(pud) & PTE_PFN_MASK);
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pud_page(pud)		pfn_to_page(pud_val(pud) >> PAGE_SHIFT)

/* Find an entry in the second-level page table.. */
static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
	return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address);
}

static inline int pud_large(pud_t pud)
{
	return (pud_val(pud) & (_PAGE_PSE | _PAGE_PRESENT)) ==
		(_PAGE_PSE | _PAGE_PRESENT);
}

static inline int pud_bad(pud_t pud)
{
	return (pud_flags(pud) & ~(_KERNPG_TABLE | _PAGE_USER)) != 0;
}
#else
static inline int pud_large(pud_t pud)
{
	return 0;
}
#endif	/* PAGETABLE_LEVELS > 2 */

#if PAGETABLE_LEVELS > 3
static inline int pgd_present(pgd_t pgd)
{
	return pgd_flags(pgd) & _PAGE_PRESENT;
}

static inline unsigned long pgd_page_vaddr(pgd_t pgd)
{
	return (unsigned long)__va((unsigned long)pgd_val(pgd) & PTE_PFN_MASK);
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pgd_page(pgd)		pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT)

/* to find an entry in a page-table-directory. */
static inline unsigned long pud_index(unsigned long address)
{
	return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1);
}

static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
{
	return (pud_t *)pgd_page_vaddr(*pgd) + pud_index(address);
}

static inline int pgd_bad(pgd_t pgd)
{
	return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
}

static inline int pgd_none(pgd_t pgd)
{
	return !native_pgd_val(pgd);
}
#endif	/* PAGETABLE_LEVELS > 3 */

#endif	/* __ASSEMBLY__ */

/*
 * the pgd page can be thought of as an array like this: pgd_t[PTRS_PER_PGD]
 *
 * this macro returns the index of the entry in the pgd page which would
 * control the given virtual address
 */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

/*
 * pgd_offset() returns a (pgd_t *)
 * pgd_index() is used to get the offset into the pgd page's array of pgd_t's;
 */
#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))

/*
 * a shortcut which implies the use of the kernel's pgd, instead
 * of a process's
 */
#define pgd_offset_k(address) pgd_offset(&init_mm, (address))

#define KERNEL_PGD_BOUNDARY	pgd_index(PAGE_OFFSET)
#define KERNEL_PGD_PTRS		(PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
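
/*
 * Example (illustrative sketch, not part of the original header; addr is a
 * caller-supplied kernel virtual address): a manual software walk of the
 * kernel page tables on a 4-level configuration, using the offset helpers
 * above.  Each level must be validated before descending to the next:
 *
 *	pgd_t *pgd = pgd_offset_k(addr);
 *	if (!pgd_none(*pgd) && !pgd_bad(*pgd)) {
 *		pud_t *pud = pud_offset(pgd, addr);
 *		if (!pud_none(*pud) && !pud_bad(*pud)) {
 *			pmd_t *pmd = pmd_offset(pud, addr);
 *			if (!pmd_none(*pmd) && !pmd_bad(*pmd)) {
 *				pte_t *pte = pte_offset_kernel(pmd, addr);
 *				(then use pte_present(*pte), pte_pfn(*pte), ...)
 *			}
 *		}
 *	}
 */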
#ifndef __ASSEMBLY__

extern int direct_gbpages;
void init_mem_mapping(void);
void early_alloc_pgt_buf(void);

/* local pte updates need not use xchg for locking */
static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
{
	pte_t res = *ptep;

	/* Pure native function needs no input for mm, addr */
	native_pte_clear(NULL, 0, ptep);
	return res;
}

static inline pmd_t native_local_pmdp_get_and_clear(pmd_t *pmdp)
{
	pmd_t res = *pmdp;

	native_pmd_clear(pmdp);
	return res;
}

static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr,
				     pte_t *ptep, pte_t pte)
{
	native_set_pte(ptep, pte);
}

static inline void native_set_pmd_at(struct mm_struct *mm, unsigned long addr,
				     pmd_t *pmdp, pmd_t pmd)
{
	native_set_pmd(pmdp, pmd);
}

#ifndef CONFIG_PARAVIRT
/*
 * Rules for using pte_update - it must be called after any PTE update which
 * has not been done using the set_pte / clear_pte interfaces.  It is used by
 * shadow mode hypervisors to resynchronize the shadow page tables.  Kernel PTE
 * updates should either be sets, clears, or set_pte_atomic for P->P
 * transitions, which means this hook should only be called for user PTEs.
 * This hook implies a P->P protection or access change has taken place, which
 * requires a subsequent TLB flush.  The notification can optionally be delayed
 * until the TLB flush event by using the pte_update_defer form of the
 * interface, but care must be taken to assure that the flush happens while
 * still holding the same page table lock so that the shadow and primary pages
 * do not become out of sync on SMP.
 */
#define pte_update(mm, addr, ptep)		do { } while (0)
#define pte_update_defer(mm, addr, ptep)	do { } while (0)
#endif

/*
 * We only update the dirty/accessed state if we set
 * the dirty bit by hand in the kernel, since the hardware
 * will do the accessed bit for us, and we don't want to
 * race with other CPU's that might be updating the dirty
 * bit at the same time.
 */
struct vm_area_struct;

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pte_t *ptep,
				 pte_t entry, int dirty);

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
extern int ptep_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long addr, pte_t *ptep);

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
extern int ptep_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pte_t *ptep);

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pte_t *ptep)
{
	pte_t pte = native_ptep_get_and_clear(ptep);
	pte_update(mm, addr, ptep);
	return pte;
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep,
					    int full)
{
	pte_t pte;
	if (full) {
		/*
		 * Full address destruction in progress; paravirt does not
		 * care about updates and native needs no locking
		 */
		pte = native_local_ptep_get_and_clear(ptep);
	} else {
		pte = ptep_get_and_clear(mm, addr, ptep);
	}
	return pte;
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	clear_bit(_PAGE_BIT_RW, (unsigned long *)&ptep->pte);
	pte_update(mm, addr, ptep);
}

#define flush_tlb_fix_spurious_fault(vma, address) do { } while (0)

#define mk_pmd(page, pgprot)   pfn_pmd(page_to_pfn(page), (pgprot))

#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
extern int pmdp_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp,
				 pmd_t entry, int dirty);

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
extern int pmdp_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long addr, pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_SPLITTING_FLUSH
extern void pmdp_splitting_flush(struct vm_area_struct *vma,
				 unsigned long addr, pmd_t *pmdp);

#define __HAVE_ARCH_PMD_WRITE
static inline int pmd_write(pmd_t pmd)
{
	return pmd_flags(pmd) & _PAGE_RW;
}

#define __HAVE_ARCH_PMDP_GET_AND_CLEAR
static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pmd_t *pmdp)
{
	pmd_t pmd = native_pmdp_get_and_clear(pmdp);
	pmd_update(mm, addr, pmdp);
	return pmd;
}

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pmd_t *pmdp)
{
	clear_bit(_PAGE_BIT_RW, (unsigned long *)pmdp);
	pmd_update(mm, addr, pmdp);
}

/*
 * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
 *
 *  dst - pointer to pgd range anywhere on a pgd page
 *  src - ""
 *  count - the number of pgds to copy.
 *
 * dst and src can be on the same page, but the range must not overlap,
 * and must not cross a page boundary.
 */
static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
{
	memcpy(dst, src, count * sizeof(pgd_t));
}
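
/*
 * Example (illustrative sketch, not part of the original header; new_pgd is
 * a hypothetical freshly allocated pgd page): the kernel half of the
 * address space can be shared with an existing pgd by copying only the
 * entries above KERNEL_PGD_BOUNDARY:
 *
 *	clone_pgd_range(new_pgd + KERNEL_PGD_BOUNDARY,
 *			swapper_pg_dir + KERNEL_PGD_BOUNDARY,
 *			KERNEL_PGD_PTRS);
 */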
#define PTE_SHIFT ilog2(PTRS_PER_PTE)
static inline int page_level_shift(enum pg_level level)
{
	return (PAGE_SHIFT - PTE_SHIFT) + level * PTE_SHIFT;
}

static inline unsigned long page_level_size(enum pg_level level)
{
	return 1UL << page_level_shift(level);
}

static inline unsigned long page_level_mask(enum pg_level level)
{
	return ~(page_level_size(level) - 1);
}
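
/*
 * Example (illustrative sketch, not part of the original header; addr is a
 * caller-supplied address): with 512-entry tables PTE_SHIFT is 9, so
 * page_level_shift(PG_LEVEL_2M) is (12 - 9) + 2 * 9 = 21, and an address
 * can be rounded down to the 2 MB mapping that covers it:
 *
 *	unsigned long base = addr & page_level_mask(PG_LEVEL_2M);
 *	unsigned long size = page_level_size(PG_LEVEL_2M);	(2 MB)
 */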
/*
 * The x86 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
static inline void update_mmu_cache(struct vm_area_struct *vma,
		unsigned long addr, pte_t *ptep)
{
}

static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
		unsigned long addr, pmd_t *pmd)
{
}

#include <asm-generic/pgtable.h>

#endif	/* __ASSEMBLY__ */

#endif /* _ASM_X86_PGTABLE_H */