#ifndef _ASM_X86_PGTABLE_H
#define _ASM_X86_PGTABLE_H

#include <asm/page.h>
#include <asm/e820.h>

#include <asm/pgtable_types.h>

/*
 * Macro to mark a page protection value as UC-
 */
#define pgprot_noncached(prot)                                          \
        ((boot_cpu_data.x86 > 3)                                        \
         ? (__pgprot(pgprot_val(prot) | _PAGE_CACHE_UC_MINUS))          \
         : (prot))
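/*
 * Example (illustrative, not part of this header): a driver mapping
 * device registers would typically apply pgprot_noncached() to the VMA
 * protection before remapping, e.g. in its mmap handler; "pfn" here is
 * assumed to be the page frame of the device region:
 *
 *      vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 *      return io_remap_pfn_range(vma, vma->vm_start, pfn,
 *                                vma->vm_end - vma->vm_start,
 *                                vma->vm_page_prot);
 */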
#ifndef __ASSEMBLY__

#include <asm/x86_init.h>

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
extern spinlock_t pgd_lock;
extern struct list_head pgd_list;

extern struct mm_struct *pgd_page_get_mm(struct page *page);

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else  /* !CONFIG_PARAVIRT */
#define set_pte(ptep, pte)              native_set_pte(ptep, pte)
#define set_pte_at(mm, addr, ptep, pte) native_set_pte_at(mm, addr, ptep, pte)
#define set_pmd_at(mm, addr, pmdp, pmd) native_set_pmd_at(mm, addr, pmdp, pmd)

#define set_pte_atomic(ptep, pte)                                       \
        native_set_pte_atomic(ptep, pte)

#define set_pmd(pmdp, pmd)              native_set_pmd(pmdp, pmd)

#ifndef __PAGETABLE_PUD_FOLDED
#define set_pgd(pgdp, pgd)              native_set_pgd(pgdp, pgd)
#define pgd_clear(pgd)                  native_pgd_clear(pgd)
#endif

#ifndef set_pud
# define set_pud(pudp, pud)             native_set_pud(pudp, pud)
#endif

#ifndef __PAGETABLE_PMD_FOLDED
#define pud_clear(pud)                  native_pud_clear(pud)
#endif

#define pte_clear(mm, addr, ptep)       native_pte_clear(mm, addr, ptep)
#define pmd_clear(pmd)                  native_pmd_clear(pmd)

#define pte_update(mm, addr, ptep)              do { } while (0)
#define pte_update_defer(mm, addr, ptep)        do { } while (0)
#define pmd_update(mm, addr, ptep)              do { } while (0)
#define pmd_update_defer(mm, addr, ptep)        do { } while (0)

#define pgd_val(x)      native_pgd_val(x)
#define __pgd(x)        native_make_pgd(x)

#ifndef __PAGETABLE_PUD_FOLDED
#define pud_val(x)      native_pud_val(x)
#define __pud(x)        native_make_pud(x)
#endif

#ifndef __PAGETABLE_PMD_FOLDED
#define pmd_val(x)      native_pmd_val(x)
#define __pmd(x)        native_make_pmd(x)
#endif

#define pte_val(x)      native_pte_val(x)
#define __pte(x)        native_make_pte(x)

#define arch_end_context_switch(prev)   do {} while (0)

#endif  /* CONFIG_PARAVIRT */
/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not.
 */
static inline int pte_dirty(pte_t pte)
{
        return pte_flags(pte) & _PAGE_DIRTY;
}

static inline int pte_young(pte_t pte)
{
        return pte_flags(pte) & _PAGE_ACCESSED;
}

static inline int pmd_young(pmd_t pmd)
{
        return pmd_flags(pmd) & _PAGE_ACCESSED;
}

static inline int pte_write(pte_t pte)
{
        return pte_flags(pte) & _PAGE_RW;
}

static inline int pte_file(pte_t pte)
{
        return pte_flags(pte) & _PAGE_FILE;
}

static inline int pte_huge(pte_t pte)
{
        return pte_flags(pte) & _PAGE_PSE;
}

static inline int pte_global(pte_t pte)
{
        return pte_flags(pte) & _PAGE_GLOBAL;
}

static inline int pte_exec(pte_t pte)
{
        return !(pte_flags(pte) & _PAGE_NX);
}

static inline int pte_special(pte_t pte)
{
        return pte_flags(pte) & _PAGE_SPECIAL;
}

static inline unsigned long pte_pfn(pte_t pte)
{
        return (pte_val(pte) & PTE_PFN_MASK) >> PAGE_SHIFT;
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
        return (pmd_val(pmd) & PTE_PFN_MASK) >> PAGE_SHIFT;
}

static inline unsigned long pud_pfn(pud_t pud)
{
        return (pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT;
}

#define pte_page(pte)   pfn_to_page(pte_pfn(pte))
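/*
 * Example (illustrative): given a present hardware PTE, the helpers
 * above recover the backing page frame; the pte_special() check mirrors
 * what generic mm code does before touching the struct page:
 *
 *      if (pte_present(pte) && !pte_special(pte)) {
 *              unsigned long pfn = pte_pfn(pte);
 *              struct page *page = pte_page(pte);
 *              ...
 *      }
 */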
static inline int pmd_large(pmd_t pte)
{
        return pmd_flags(pte) & _PAGE_PSE;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_trans_splitting(pmd_t pmd)
{
        return pmd_val(pmd) & _PAGE_SPLITTING;
}

static inline int pmd_trans_huge(pmd_t pmd)
{
        return pmd_val(pmd) & _PAGE_PSE;
}

static inline int has_transparent_hugepage(void)
{
        return cpu_has_pse;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
static inline pte_t pte_set_flags(pte_t pte, pteval_t set)
{
        pteval_t v = native_pte_val(pte);

        return native_make_pte(v | set);
}

static inline pte_t pte_clear_flags(pte_t pte, pteval_t clear)
{
        pteval_t v = native_pte_val(pte);

        return native_make_pte(v & ~clear);
}

static inline pte_t pte_mkclean(pte_t pte)
{
        return pte_clear_flags(pte, _PAGE_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
        return pte_clear_flags(pte, _PAGE_ACCESSED);
}

static inline pte_t pte_wrprotect(pte_t pte)
{
        return pte_clear_flags(pte, _PAGE_RW);
}

static inline pte_t pte_mkexec(pte_t pte)
{
        return pte_clear_flags(pte, _PAGE_NX);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
        return pte_set_flags(pte, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
        return pte_set_flags(pte, _PAGE_ACCESSED);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
        return pte_set_flags(pte, _PAGE_RW);
}

static inline pte_t pte_mkhuge(pte_t pte)
{
        return pte_set_flags(pte, _PAGE_PSE);
}

static inline pte_t pte_clrhuge(pte_t pte)
{
        return pte_clear_flags(pte, _PAGE_PSE);
}

static inline pte_t pte_mkglobal(pte_t pte)
{
        return pte_set_flags(pte, _PAGE_GLOBAL);
}

static inline pte_t pte_clrglobal(pte_t pte)
{
        return pte_clear_flags(pte, _PAGE_GLOBAL);
}

static inline pte_t pte_mkspecial(pte_t pte)
{
        return pte_set_flags(pte, _PAGE_SPECIAL);
}
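/*
 * Example (illustrative): the set/clear helpers are pure functions on
 * pte_t values and compose naturally; a fault path wanting a writable,
 * dirty, young entry could build it as:
 *
 *      pte = pte_mkyoung(pte_mkdirty(pte_mkwrite(pte)));
 *
 * Nothing reaches the page table until the result is stored with
 * set_pte_at() or a similar accessor.
 */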
static inline pmd_t pmd_set_flags(pmd_t pmd, pmdval_t set)
{
        pmdval_t v = native_pmd_val(pmd);

        return __pmd(v | set);
}

static inline pmd_t pmd_clear_flags(pmd_t pmd, pmdval_t clear)
{
        pmdval_t v = native_pmd_val(pmd);

        return __pmd(v & ~clear);
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
        return pmd_clear_flags(pmd, _PAGE_ACCESSED);
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
        return pmd_clear_flags(pmd, _PAGE_RW);
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
        return pmd_set_flags(pmd, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
        return pmd_set_flags(pmd, _PAGE_PSE);
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
        return pmd_set_flags(pmd, _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
        return pmd_set_flags(pmd, _PAGE_RW);
}

static inline pmd_t pmd_mknotpresent(pmd_t pmd)
{
        return pmd_clear_flags(pmd, _PAGE_PRESENT);
}

static inline int pte_soft_dirty(pte_t pte)
{
        return pte_flags(pte) & _PAGE_SOFT_DIRTY;
}

static inline int pmd_soft_dirty(pmd_t pmd)
{
        return pmd_flags(pmd) & _PAGE_SOFT_DIRTY;
}

static inline pte_t pte_mksoft_dirty(pte_t pte)
{
        return pte_set_flags(pte, _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
{
        return pmd_set_flags(pmd, _PAGE_SOFT_DIRTY);
}
/*
 * Mask out unsupported bits in a present pgprot.  Non-present pgprots
 * can use those bits for other purposes, so leave them be.
 */
static inline pgprotval_t massage_pgprot(pgprot_t pgprot)
{
        pgprotval_t protval = pgprot_val(pgprot);

        if (protval & _PAGE_PRESENT)
                protval &= __supported_pte_mask;

        return protval;
}

static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
{
        return __pte(((phys_addr_t)page_nr << PAGE_SHIFT) |
                     massage_pgprot(pgprot));
}

static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
{
        return __pmd(((phys_addr_t)page_nr << PAGE_SHIFT) |
                     massage_pgprot(pgprot));
}
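/*
 * Example (illustrative): constructing an entry for page frame "pfn"
 * with kernel RW permissions (PAGE_KERNEL comes from pgtable_types.h):
 *
 *      pte_t pte = pfn_pte(pfn, PAGE_KERNEL);
 *
 * massage_pgprot() above ensures that bits the CPU does not support
 * (e.g. NX on hardware without it) are filtered out of present entries.
 */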
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
        pteval_t val = pte_val(pte);

        /*
         * Chop off the NX bit (if present), and add the NX portion of
         * the newprot (if present):
         */
        val &= _PAGE_CHG_MASK;
        val |= massage_pgprot(newprot) & ~_PAGE_CHG_MASK;

        return __pte(val);
}
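/*
 * Example (illustrative): this is what mprotect()-style protection
 * changes boil down to; the pfn and the bits in _PAGE_CHG_MASK survive,
 * only the protection bits are replaced:
 *
 *      pte = pte_modify(pte, vma->vm_page_prot);
 */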
static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
        pmdval_t val = pmd_val(pmd);

        val &= _HPAGE_CHG_MASK;
        val |= massage_pgprot(newprot) & ~_HPAGE_CHG_MASK;

        return __pmd(val);
}

/* mprotect needs to preserve PAT bits when updating vm_page_prot */
#define pgprot_modify pgprot_modify
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
        pgprotval_t preservebits = pgprot_val(oldprot) & _PAGE_CHG_MASK;
        pgprotval_t addbits = pgprot_val(newprot);
        return __pgprot(preservebits | addbits);
}

#define pte_pgprot(x) __pgprot(pte_flags(x) & PTE_FLAGS_MASK)

#define canon_pgprot(p) __pgprot(massage_pgprot(p))

static inline int is_new_memtype_allowed(u64 paddr, unsigned long size,
                                         unsigned long flags,
                                         unsigned long new_flags)
{
        /*
         * PAT type is always WB for untracked ranges, so no need to check.
         */
        if (x86_platform.is_untracked_pat_range(paddr, paddr + size))
                return 1;

        /*
         * Certain new memtypes are not allowed with certain
         * requested memtype:
         * - request is uncached, return cannot be write-back
         * - request is write-combine, return cannot be write-back
         */
        if ((flags == _PAGE_CACHE_UC_MINUS &&
             new_flags == _PAGE_CACHE_WB) ||
            (flags == _PAGE_CACHE_WC &&
             new_flags == _PAGE_CACHE_WB)) {
                return 0;
        }

        return 1;
}
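/*
 * Example (illustrative): a caller that already tracks a region as
 * write-combining may not weaken it to write-back:
 *
 *      is_new_memtype_allowed(paddr, size,
 *                             _PAGE_CACHE_WC, _PAGE_CACHE_WB);  (returns 0)
 *
 * whereas identical or otherwise compatible combinations return 1.
 */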
pmd_t *populate_extra_pmd(unsigned long vaddr);
pte_t *populate_extra_pte(unsigned long vaddr);
#endif  /* __ASSEMBLY__ */

#ifdef CONFIG_X86_32
# include <asm/pgtable_32.h>
#else
# include <asm/pgtable_64.h>
#endif

#ifndef __ASSEMBLY__
#include <linux/mm_types.h>
#include <linux/log2.h>

static inline int pte_none(pte_t pte)
{
        return !pte.pte;
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
        return a.pte == b.pte;
}

static inline int pte_present(pte_t a)
{
        return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE |
                               _PAGE_NUMA);
}
#define pte_accessible pte_accessible
static inline int pte_accessible(pte_t a)
{
        return pte_flags(a) & _PAGE_PRESENT;
}

static inline int pte_hidden(pte_t pte)
{
        return pte_flags(pte) & _PAGE_HIDDEN;
}

static inline int pmd_present(pmd_t pmd)
{
        /*
         * Checking for _PAGE_PSE is needed too because
         * split_huge_page will temporarily clear the present bit (but
         * the _PAGE_PSE flag will remain set at all times while the
         * _PAGE_PRESENT bit is clear).
         */
        return pmd_flags(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_PSE |
                                 _PAGE_NUMA);
}

static inline int pmd_none(pmd_t pmd)
{
        /* Only check low word on 32-bit platforms, since it might be
           out of sync with upper half. */
        return (unsigned long)native_pmd_val(pmd) == 0;
}

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
        return (unsigned long)__va(pmd_val(pmd) & PTE_PFN_MASK);
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pmd_page(pmd)   pfn_to_page((pmd_val(pmd) & PTE_PFN_MASK) >> PAGE_SHIFT)
/*
 * the pmd page can be thought of as an array like this: pmd_t[PTRS_PER_PMD]
 *
 * this function returns the index of the entry in the pmd page which would
 * control the given virtual address
 */
static inline unsigned long pmd_index(unsigned long address)
{
        return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
}
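/*
 * Worked example (assuming x86_64 defaults: PMD_SHIFT == 21,
 * PTRS_PER_PMD == 512): each pmd entry covers 2 MB, so
 *
 *      pmd_index(0x40200000) == (0x40200000 >> 21) & 511 == 1
 *
 * i.e. an address 2 MB past a 1 GB boundary lands in slot 1 of its
 * pmd page.
 */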
/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * (Currently stuck as a macro because of indirect forward reference
 * to linux/mm.h:page_to_nid())
 */
#define mk_pte(page, pgprot)   pfn_pte(page_to_pfn(page), (pgprot))

/*
 * the pte page can be thought of as an array like this: pte_t[PTRS_PER_PTE]
 *
 * this function returns the index of the entry in the pte page which would
 * control the given virtual address
 */
static inline unsigned long pte_index(unsigned long address)
{
        return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
}

static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
{
        return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address);
}
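/*
 * Example (illustrative): a full software walk of the kernel page
 * tables for a virtual address "addr", using the offset helpers
 * defined in this header:
 *
 *      pgd_t *pgd = pgd_offset_k(addr);
 *      pud_t *pud = pud_offset(pgd, addr);
 *      pmd_t *pmd = pmd_offset(pud, addr);
 *      pte_t *pte = pte_offset_kernel(pmd, addr);
 *
 * Real callers test pgd_none()/pud_none()/pmd_none() (and pmd_large()
 * for huge mappings) at each step before descending to the next level.
 */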
static inline int pmd_bad(pmd_t pmd)
{
#ifdef CONFIG_NUMA_BALANCING
        /* pmd_numa check */
        if ((pmd_flags(pmd) & (_PAGE_NUMA|_PAGE_PRESENT)) == _PAGE_NUMA)
                return 0;
#endif
        return (pmd_flags(pmd) & ~_PAGE_USER) != _KERNPG_TABLE;
}

static inline unsigned long pages_to_mb(unsigned long npg)
{
        return npg >> (20 - PAGE_SHIFT);
}
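/*
 * Worked example: with 4 KB pages (PAGE_SHIFT == 12) this is a right
 * shift by 8, i.e. 256 pages per megabyte, so
 *
 *      pages_to_mb(262144) == 1024     (262144 * 4 KB == 1 GB)
 */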
#if PAGETABLE_LEVELS > 2
static inline int pud_none(pud_t pud)
{
        return native_pud_val(pud) == 0;
}

static inline int pud_present(pud_t pud)
{
        return pud_flags(pud) & _PAGE_PRESENT;
}

static inline unsigned long pud_page_vaddr(pud_t pud)
{
        return (unsigned long)__va((unsigned long)pud_val(pud) & PTE_PFN_MASK);
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pud_page(pud)           pfn_to_page(pud_val(pud) >> PAGE_SHIFT)
/* Find an entry in the second-level page table. */
static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
        return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address);
}

static inline int pud_large(pud_t pud)
{
        return (pud_val(pud) & (_PAGE_PSE | _PAGE_PRESENT)) ==
                (_PAGE_PSE | _PAGE_PRESENT);
}

static inline int pud_bad(pud_t pud)
{
        return (pud_flags(pud) & ~(_KERNPG_TABLE | _PAGE_USER)) != 0;
}
#else
static inline int pud_large(pud_t pud)
{
        return 0;
}
#endif  /* PAGETABLE_LEVELS > 2 */

#if PAGETABLE_LEVELS > 3
static inline int pgd_present(pgd_t pgd)
{
        return pgd_flags(pgd) & _PAGE_PRESENT;
}

static inline unsigned long pgd_page_vaddr(pgd_t pgd)
{
        return (unsigned long)__va((unsigned long)pgd_val(pgd) & PTE_PFN_MASK);
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pgd_page(pgd)           pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT)

/* to find an entry in a page-table-directory. */
static inline unsigned long pud_index(unsigned long address)
{
        return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1);
}

static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
{
        return (pud_t *)pgd_page_vaddr(*pgd) + pud_index(address);
}

static inline int pgd_bad(pgd_t pgd)
{
        return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
}

static inline int pgd_none(pgd_t pgd)
{
        return !native_pgd_val(pgd);
}
#endif  /* PAGETABLE_LEVELS > 3 */
#endif  /* __ASSEMBLY__ */

/*
 * the pgd page can be thought of as an array like this: pgd_t[PTRS_PER_PGD]
 *
 * this macro returns the index of the entry in the pgd page which would
 * control the given virtual address
 */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

/*
 * pgd_offset() returns a (pgd_t *)
 * pgd_index() is used to get the offset into the pgd page's array of pgd_t's;
 */
#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))

/*
 * a shortcut which implies the use of the kernel's pgd, instead
 * of a process's
 */
#define pgd_offset_k(address) pgd_offset(&init_mm, (address))

#define KERNEL_PGD_BOUNDARY     pgd_index(PAGE_OFFSET)
#define KERNEL_PGD_PTRS         (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
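/*
 * Example (illustrative): resolving the top-level entry for a user
 * address in a given mm versus a kernel address:
 *
 *      pgd_t *upgd = pgd_offset(current->mm, uaddr);
 *      pgd_t *kpgd = pgd_offset_k(kaddr);
 *
 * KERNEL_PGD_BOUNDARY is the first pgd slot that maps kernel space;
 * the remaining KERNEL_PGD_PTRS entries cover the kernel portion of
 * the address space.
 */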
#ifndef __ASSEMBLY__

extern int direct_gbpages;
void init_mem_mapping(void);
void early_alloc_pgt_buf(void);

/* local pte updates need not use xchg for locking */
static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
{
        pte_t res = *ptep;

        /* Pure native function needs no input for mm, addr */
        native_pte_clear(NULL, 0, ptep);
        return res;
}

static inline pmd_t native_local_pmdp_get_and_clear(pmd_t *pmdp)
{
        pmd_t res = *pmdp;

        native_pmd_clear(pmdp);
        return res;
}

static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr,
                                     pte_t *ptep, pte_t pte)
{
        native_set_pte(ptep, pte);
}

static inline void native_set_pmd_at(struct mm_struct *mm, unsigned long addr,
                                     pmd_t *pmdp, pmd_t pmd)
{
        native_set_pmd(pmdp, pmd);
}

#ifndef CONFIG_PARAVIRT
/*
 * Rules for using pte_update - it must be called after any PTE update which
 * has not been done using the set_pte / clear_pte interfaces. It is used by
 * shadow mode hypervisors to resynchronize the shadow page tables. Kernel PTE
 * updates should either be sets, clears, or set_pte_atomic for P->P
 * transitions, which means this hook should only be called for user PTEs.
 * This hook implies a P->P protection or access change has taken place, which
 * requires a subsequent TLB flush. The notification can optionally be delayed
 * until the TLB flush event by using the pte_update_defer form of the
 * interface, but care must be taken to assure that the flush happens while
 * still holding the same page table lock so that the shadow and primary pages
 * do not become out of sync on SMP.
 */
#define pte_update(mm, addr, ptep)              do { } while (0)
#define pte_update_defer(mm, addr, ptep)        do { } while (0)
#endif
/*
 * We only update the dirty/accessed state if we set
 * the dirty bit by hand in the kernel, since the hardware
 * will do the accessed bit for us, and we don't want to
 * race with other CPU's that might be updating the dirty
 * bit at the same time.
 */
struct vm_area_struct;

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
                                 unsigned long address, pte_t *ptep,
                                 pte_t entry, int dirty);

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
extern int ptep_test_and_clear_young(struct vm_area_struct *vma,
                                     unsigned long addr, pte_t *ptep);

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
extern int ptep_clear_flush_young(struct vm_area_struct *vma,
                                  unsigned long address, pte_t *ptep);

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
                                       pte_t *ptep)
{
        pte_t pte = native_ptep_get_and_clear(ptep);
        pte_update(mm, addr, ptep);
        return pte;
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
                                            unsigned long addr, pte_t *ptep,
                                            int full)
{
        pte_t pte;
        if (full) {
                /*
                 * Full address destruction in progress; paravirt does not
                 * care about updates and native needs no locking
                 */
                pte = native_local_ptep_get_and_clear(ptep);
        } else {
                pte = ptep_get_and_clear(mm, addr, ptep);
        }
        return pte;
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
                                      unsigned long addr, pte_t *ptep)
{
        clear_bit(_PAGE_BIT_RW, (unsigned long *)&ptep->pte);
        pte_update(mm, addr, ptep);
}

#define flush_tlb_fix_spurious_fault(vma, address) do { } while (0)

#define mk_pmd(page, pgprot)   pfn_pmd(page_to_pfn(page), (pgprot))

#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
extern int pmdp_set_access_flags(struct vm_area_struct *vma,
                                 unsigned long address, pmd_t *pmdp,
                                 pmd_t entry, int dirty);

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
extern int pmdp_test_and_clear_young(struct vm_area_struct *vma,
                                     unsigned long addr, pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
                                  unsigned long address, pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_SPLITTING_FLUSH
extern void pmdp_splitting_flush(struct vm_area_struct *vma,
                                 unsigned long addr, pmd_t *pmdp);

#define __HAVE_ARCH_PMD_WRITE
static inline int pmd_write(pmd_t pmd)
{
        return pmd_flags(pmd) & _PAGE_RW;
}

#define __HAVE_ARCH_PMDP_GET_AND_CLEAR
static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm, unsigned long addr,
                                       pmd_t *pmdp)
{
        pmd_t pmd = native_pmdp_get_and_clear(pmdp);
        pmd_update(mm, addr, pmdp);
        return pmd;
}

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
                                      unsigned long addr, pmd_t *pmdp)
{
        clear_bit(_PAGE_BIT_RW, (unsigned long *)pmdp);
        pmd_update(mm, addr, pmdp);
}

/*
 * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
 *
 *  dst - pointer to pgd range anywhere on a pgd page
 *  src - ""
 *  count - the number of pgds to copy.
 *
 * dst and src can be on the same page, but the range must not overlap,
 * and must not cross a page boundary.
 */
static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
{
        memcpy(dst, src, count * sizeof(pgd_t));
}
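/*
 * Example (illustrative, resembling what pgd allocation does on some
 * configurations): when a new pgd page is set up, the kernel half of
 * the reference page tables can be copied into it with:
 *
 *      clone_pgd_range(new_pgd + KERNEL_PGD_BOUNDARY,
 *                      swapper_pg_dir + KERNEL_PGD_BOUNDARY,
 *                      KERNEL_PGD_PTRS);
 *
 * so that per-process page tables share the kernel mappings.
 */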
#define PTE_SHIFT ilog2(PTRS_PER_PTE)
static inline int page_level_shift(enum pg_level level)
{
        return (PAGE_SHIFT - PTE_SHIFT) + level * PTE_SHIFT;
}
static inline unsigned long page_level_size(enum pg_level level)
{
        return 1UL << page_level_shift(level);
}
static inline unsigned long page_level_mask(enum pg_level level)
{
        return ~(page_level_size(level) - 1);
}
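/*
 * Worked example (assuming x86_64: PAGE_SHIFT == 12, PTRS_PER_PTE ==
 * 512, so PTE_SHIFT == 9): for PG_LEVEL_2M (level 2) this gives
 *
 *      page_level_shift(PG_LEVEL_2M) == (12 - 9) + 2 * 9 == 21
 *      page_level_size(PG_LEVEL_2M)  == 1UL << 21 == 2 MB
 *      page_level_mask(PG_LEVEL_2M)  == ~(2 MB - 1)
 */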
/*
 * The x86 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
static inline void update_mmu_cache(struct vm_area_struct *vma,
                unsigned long addr, pte_t *ptep)
{
}
static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
                unsigned long addr, pmd_t *pmd)
{
}

#include <asm-generic/pgtable.h>
#endif  /* __ASSEMBLY__ */

#endif /* _ASM_X86_PGTABLE_H */