#ifndef _ASM_X86_PGTABLE_H
#define _ASM_X86_PGTABLE_H

#include <asm/page.h>
#include <asm/e820.h>
#include <asm/pgtable_types.h>

/*
 * Macro to mark a page protection value as UC-
 */
#define pgprot_noncached(prot)                                          \
        ((boot_cpu_data.x86 > 3)                                        \
         ? (__pgprot(pgprot_val(prot) | _PAGE_CACHE_UC_MINUS))          \
         : (prot))
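/*
 * The family check leaves the pgprot untouched on the 386 (family 3),
 * which predates the PCD/PWT cache-control PTE bits that the UC-
 * attribute is built from.
 */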
#ifndef __ASSEMBLY__

#include <asm/x86_init.h>

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

extern spinlock_t pgd_lock;
extern struct list_head pgd_list;

extern struct mm_struct *pgd_page_get_mm(struct page *page);

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else  /* !CONFIG_PARAVIRT */
#define set_pte(ptep, pte)              native_set_pte(ptep, pte)
#define set_pte_at(mm, addr, ptep, pte) native_set_pte_at(mm, addr, ptep, pte)
#define set_pmd_at(mm, addr, pmdp, pmd) native_set_pmd_at(mm, addr, pmdp, pmd)

#define set_pte_atomic(ptep, pte)                                       \
        native_set_pte_atomic(ptep, pte)

#define set_pmd(pmdp, pmd)              native_set_pmd(pmdp, pmd)

#ifndef __PAGETABLE_PUD_FOLDED
#define set_pgd(pgdp, pgd)              native_set_pgd(pgdp, pgd)
#define pgd_clear(pgd)                  native_pgd_clear(pgd)
#endif

#ifndef set_pud
# define set_pud(pudp, pud)             native_set_pud(pudp, pud)
#endif

#ifndef __PAGETABLE_PMD_FOLDED
#define pud_clear(pud)                  native_pud_clear(pud)
#endif

#define pte_clear(mm, addr, ptep)       native_pte_clear(mm, addr, ptep)
#define pmd_clear(pmd)                  native_pmd_clear(pmd)

#define pte_update(mm, addr, ptep)              do { } while (0)
#define pte_update_defer(mm, addr, ptep)        do { } while (0)
#define pmd_update(mm, addr, pmdp)              do { } while (0)
#define pmd_update_defer(mm, addr, pmdp)        do { } while (0)

#define pgd_val(x)      native_pgd_val(x)
#define __pgd(x)        native_make_pgd(x)

#ifndef __PAGETABLE_PUD_FOLDED
#define pud_val(x)      native_pud_val(x)
#define __pud(x)        native_make_pud(x)
#endif

#ifndef __PAGETABLE_PMD_FOLDED
#define pmd_val(x)      native_pmd_val(x)
#define __pmd(x)        native_make_pmd(x)
#endif

#define pte_val(x)      native_pte_val(x)
#define __pte(x)        native_make_pte(x)

#define arch_end_context_switch(prev)   do { } while (0)

#endif  /* CONFIG_PARAVIRT */

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_dirty(pte_t pte)
{
        return pte_flags(pte) & _PAGE_DIRTY;
}

static inline int pte_young(pte_t pte)
{
        return pte_flags(pte) & _PAGE_ACCESSED;
}

static inline int pmd_young(pmd_t pmd)
{
        return pmd_flags(pmd) & _PAGE_ACCESSED;
}

static inline int pte_write(pte_t pte)
{
        return pte_flags(pte) & _PAGE_RW;
}

static inline int pte_file(pte_t pte)
{
        return pte_flags(pte) & _PAGE_FILE;
}

static inline int pte_huge(pte_t pte)
{
        return pte_flags(pte) & _PAGE_PSE;
}

static inline int pte_global(pte_t pte)
{
        return pte_flags(pte) & _PAGE_GLOBAL;
}

static inline int pte_exec(pte_t pte)
{
        return !(pte_flags(pte) & _PAGE_NX);
}

static inline int pte_special(pte_t pte)
{
        return pte_flags(pte) & _PAGE_SPECIAL;
}
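/*
 * PFN accessors: mask the flag bits out of the entry with PTE_PFN_MASK
 * and shift the remaining physical address down to a page frame number.
 */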
static inline unsigned long pte_pfn(pte_t pte)
{
        return (pte_val(pte) & PTE_PFN_MASK) >> PAGE_SHIFT;
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
        return (pmd_val(pmd) & PTE_PFN_MASK) >> PAGE_SHIFT;
}

static inline unsigned long pud_pfn(pud_t pud)
{
        return (pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT;
}

#define pte_page(pte)   pfn_to_page(pte_pfn(pte))

static inline int pmd_large(pmd_t pmd)
{
        return pmd_flags(pmd) & _PAGE_PSE;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_trans_splitting(pmd_t pmd)
{
        return pmd_val(pmd) & _PAGE_SPLITTING;
}

static inline int pmd_trans_huge(pmd_t pmd)
{
        return pmd_val(pmd) & _PAGE_PSE;
}

static inline int has_transparent_hugepage(void)
{
        return cpu_has_pse;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static inline pte_t pte_set_flags(pte_t pte, pteval_t set)
{
        pteval_t v = native_pte_val(pte);

        return native_make_pte(v | set);
}

static inline pte_t pte_clear_flags(pte_t pte, pteval_t clear)
{
        pteval_t v = native_pte_val(pte);

        return native_make_pte(v & ~clear);
}
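/*
 * The pte_mk*()/pte_clr*() helpers below are thin wrappers around
 * pte_set_flags()/pte_clear_flags(), each toggling individual
 * protection or status bits in the entry.
 */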
static inline pte_t pte_mkclean(pte_t pte)
{
        return pte_clear_flags(pte, _PAGE_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
        return pte_clear_flags(pte, _PAGE_ACCESSED);
}

static inline pte_t pte_wrprotect(pte_t pte)
{
        return pte_clear_flags(pte, _PAGE_RW);
}

static inline pte_t pte_mkexec(pte_t pte)
{
        return pte_clear_flags(pte, _PAGE_NX);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
        return pte_set_flags(pte, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
        return pte_set_flags(pte, _PAGE_ACCESSED);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
        return pte_set_flags(pte, _PAGE_RW);
}

static inline pte_t pte_mkhuge(pte_t pte)
{
        return pte_set_flags(pte, _PAGE_PSE);
}

static inline pte_t pte_clrhuge(pte_t pte)
{
        return pte_clear_flags(pte, _PAGE_PSE);
}

static inline pte_t pte_mkglobal(pte_t pte)
{
        return pte_set_flags(pte, _PAGE_GLOBAL);
}

static inline pte_t pte_clrglobal(pte_t pte)
{
        return pte_clear_flags(pte, _PAGE_GLOBAL);
}

static inline pte_t pte_mkspecial(pte_t pte)
{
        return pte_set_flags(pte, _PAGE_SPECIAL);
}

static inline pmd_t pmd_set_flags(pmd_t pmd, pmdval_t set)
{
        pmdval_t v = native_pmd_val(pmd);

        return __pmd(v | set);
}

static inline pmd_t pmd_clear_flags(pmd_t pmd, pmdval_t clear)
{
        pmdval_t v = native_pmd_val(pmd);

        return __pmd(v & ~clear);
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
        return pmd_clear_flags(pmd, _PAGE_ACCESSED);
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
        return pmd_clear_flags(pmd, _PAGE_RW);
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
        return pmd_set_flags(pmd, _PAGE_DIRTY | _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
        return pmd_set_flags(pmd, _PAGE_PSE);
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
        return pmd_set_flags(pmd, _PAGE_ACCESSED);
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
        return pmd_set_flags(pmd, _PAGE_RW);
}

static inline pmd_t pmd_mknotpresent(pmd_t pmd)
{
        return pmd_clear_flags(pmd, _PAGE_PRESENT);
}

static inline int pte_soft_dirty(pte_t pte)
{
        return pte_flags(pte) & _PAGE_SOFT_DIRTY;
}

static inline int pmd_soft_dirty(pmd_t pmd)
{
        return pmd_flags(pmd) & _PAGE_SOFT_DIRTY;
}

static inline pte_t pte_mksoft_dirty(pte_t pte)
{
        return pte_set_flags(pte, _PAGE_SOFT_DIRTY);
}

static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
{
        return pmd_set_flags(pmd, _PAGE_SOFT_DIRTY);
}

static inline pte_t pte_swp_mksoft_dirty(pte_t pte)
{
        return pte_set_flags(pte, _PAGE_SWP_SOFT_DIRTY);
}

static inline int pte_swp_soft_dirty(pte_t pte)
{
        return pte_flags(pte) & _PAGE_SWP_SOFT_DIRTY;
}

static inline pte_t pte_swp_clear_soft_dirty(pte_t pte)
{
        return pte_clear_flags(pte, _PAGE_SWP_SOFT_DIRTY);
}
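/*
 * The soft-dirty bits above let user space track which pages a task has
 * written since the bits were last cleared; checkpoint/restore tooling
 * uses this via /proc/<pid>/clear_refs and pagemap. The _PAGE_SWP_*
 * variants preserve the information while a page is swapped out.
 */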
/*
 * Mask out unsupported bits in a present pgprot.  Non-present pgprots
 * can use those bits for other purposes, so leave them be.
 */
static inline pgprotval_t massage_pgprot(pgprot_t pgprot)
{
        pgprotval_t protval = pgprot_val(pgprot);

        if (protval & _PAGE_PRESENT)
                protval &= __supported_pte_mask;

        return protval;
}

static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
{
        return __pte(((phys_addr_t)page_nr << PAGE_SHIFT) |
                     massage_pgprot(pgprot));
}

static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
{
        return __pmd(((phys_addr_t)page_nr << PAGE_SHIFT) |
                     massage_pgprot(pgprot));
}
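/*
 * The mk_pte()/mk_pmd() macros further down build directly on these:
 * mk_pte(page, pgprot) is pfn_pte(page_to_pfn(page), pgprot).
 */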
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
        pteval_t val = pte_val(pte);

        /*
         * Chop off the NX bit (if present), and add the NX portion of
         * the newprot (if present):
         */
        val &= _PAGE_CHG_MASK;
        val |= massage_pgprot(newprot) & ~_PAGE_CHG_MASK;

        return __pte(val);
}
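/*
 * _PAGE_CHG_MASK keeps the PFN and the bits that must survive a
 * protection change (notably the accessed, dirty and cache-attribute
 * bits), so pte_modify()/pmd_modify() replace only the remaining
 * protection bits.
 */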
static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
        pmdval_t val = pmd_val(pmd);

        val &= _HPAGE_CHG_MASK;
        val |= massage_pgprot(newprot) & ~_HPAGE_CHG_MASK;

        return __pmd(val);
}

/* mprotect needs to preserve PAT bits when updating vm_page_prot */
#define pgprot_modify pgprot_modify
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
        pgprotval_t preservebits = pgprot_val(oldprot) & _PAGE_CHG_MASK;
        pgprotval_t addbits = pgprot_val(newprot);
        return __pgprot(preservebits | addbits);
}

#define pte_pgprot(x) __pgprot(pte_flags(x) & PTE_FLAGS_MASK)

#define canon_pgprot(p) __pgprot(massage_pgprot(p))

static inline int is_new_memtype_allowed(u64 paddr, unsigned long size,
                                         unsigned long flags,
                                         unsigned long new_flags)
{
        /*
         * PAT type is always WB for untracked ranges, so no need to check.
         */
        if (x86_platform.is_untracked_pat_range(paddr, paddr + size))
                return 1;

        /*
         * Certain new memtypes are not allowed with certain
         * requested memtype:
         * - request is uncached, return cannot be write-back
         * - request is write-combine, return cannot be write-back
         */
        if ((flags == _PAGE_CACHE_UC_MINUS &&
             new_flags == _PAGE_CACHE_WB) ||
            (flags == _PAGE_CACHE_WC &&
             new_flags == _PAGE_CACHE_WB)) {
                return 0;
        }

        return 1;
}

pmd_t *populate_extra_pmd(unsigned long vaddr);
pte_t *populate_extra_pte(unsigned long vaddr);
#endif  /* __ASSEMBLY__ */

#ifdef CONFIG_X86_32
# include <asm/pgtable_32.h>
#else
# include <asm/pgtable_64.h>
#endif

#ifndef __ASSEMBLY__
#include <linux/mm_types.h>
#include <linux/log2.h>

static inline int pte_none(pte_t pte)
{
        return !pte.pte;
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
        return a.pte == b.pte;
}

static inline int pte_present(pte_t a)
{
        return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE |
                               _PAGE_NUMA);
}

#define pte_accessible pte_accessible
static inline int pte_accessible(pte_t a)
{
        return pte_flags(a) & _PAGE_PRESENT;
}

static inline int pte_hidden(pte_t pte)
{
        return pte_flags(pte) & _PAGE_HIDDEN;
}

static inline int pmd_present(pmd_t pmd)
{
        /*
         * Checking for _PAGE_PSE is needed too because
         * split_huge_page will temporarily clear the present bit (but
         * the _PAGE_PSE flag will remain set at all times while the
         * _PAGE_PRESENT bit is clear).
         */
        return pmd_flags(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_PSE |
                                 _PAGE_NUMA);
}

static inline int pmd_none(pmd_t pmd)
{
        /* Only check low word on 32-bit platforms, since it might be
           out of sync with upper half. */
        return (unsigned long)native_pmd_val(pmd) == 0;
}

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
        return (unsigned long)__va(pmd_val(pmd) & PTE_PFN_MASK);
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pmd_page(pmd)   pfn_to_page((pmd_val(pmd) & PTE_PFN_MASK) >> PAGE_SHIFT)
/*
 * The pmd page can be thought of as an array like this: pmd_t[PTRS_PER_PMD]
 *
 * This function returns the index of the entry in the pmd page which would
 * control the given virtual address.
 */
static inline unsigned long pmd_index(unsigned long address)
{
        return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
}
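/*
 * For example, with 4-level x86-64 paging (PMD_SHIFT == 21,
 * PTRS_PER_PMD == 512) this extracts address bits 21..29.
 */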
/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * (Currently stuck as a macro because of indirect forward reference
 * to linux/mm.h:page_to_nid())
 */
#define mk_pte(page, pgprot)   pfn_pte(page_to_pfn(page), (pgprot))
/*
 * The pte page can be thought of as an array like this: pte_t[PTRS_PER_PTE]
 *
 * This function returns the index of the entry in the pte page which would
 * control the given virtual address.
 */
static inline unsigned long pte_index(unsigned long address)
{
        return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
}

static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
{
        return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address);
}
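/*
 * Likewise, with 4 KiB pages (PAGE_SHIFT == 12, PTRS_PER_PTE == 512)
 * pte_index() extracts address bits 12..20; pte_offset_kernel() then
 * adds that index to the virtual address of the pte page.
 */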
static inline int pmd_bad(pmd_t pmd)
{
#ifdef CONFIG_NUMA_BALANCING
        /* pmd_numa check */
        if ((pmd_flags(pmd) & (_PAGE_NUMA|_PAGE_PRESENT)) == _PAGE_NUMA)
                return 0;
#endif
        return (pmd_flags(pmd) & ~_PAGE_USER) != _KERNPG_TABLE;
}

static inline unsigned long pages_to_mb(unsigned long npg)
{
        return npg >> (20 - PAGE_SHIFT);
}
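/*
 * With PAGE_SHIFT == 12 this is npg >> 8, i.e. 256 4 KiB pages per MiB:
 * pages_to_mb(1024) == 4.
 */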
#if PAGETABLE_LEVELS > 2
static inline int pud_none(pud_t pud)
{
        return native_pud_val(pud) == 0;
}

static inline int pud_present(pud_t pud)
{
        return pud_flags(pud) & _PAGE_PRESENT;
}

static inline unsigned long pud_page_vaddr(pud_t pud)
{
        return (unsigned long)__va((unsigned long)pud_val(pud) & PTE_PFN_MASK);
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pud_page(pud)   pfn_to_page(pud_val(pud) >> PAGE_SHIFT)

/* Find an entry in the second-level page table.. */
static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
        return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(address);
}

static inline int pud_large(pud_t pud)
{
        return (pud_val(pud) & (_PAGE_PSE | _PAGE_PRESENT)) ==
                (_PAGE_PSE | _PAGE_PRESENT);
}

static inline int pud_bad(pud_t pud)
{
        return (pud_flags(pud) & ~(_KERNPG_TABLE | _PAGE_USER)) != 0;
}
#else
static inline int pud_large(pud_t pud)
{
        return 0;
}
#endif  /* PAGETABLE_LEVELS > 2 */

#if PAGETABLE_LEVELS > 3
static inline int pgd_present(pgd_t pgd)
{
        return pgd_flags(pgd) & _PAGE_PRESENT;
}

static inline unsigned long pgd_page_vaddr(pgd_t pgd)
{
        return (unsigned long)__va((unsigned long)pgd_val(pgd) & PTE_PFN_MASK);
}

/*
 * Currently stuck as a macro due to indirect forward reference to
 * linux/mmzone.h's __section_mem_map_addr() definition:
 */
#define pgd_page(pgd)   pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT)

/* to find an entry in a page-table-directory. */
static inline unsigned long pud_index(unsigned long address)
{
        return (address >> PUD_SHIFT) & (PTRS_PER_PUD - 1);
}

static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
{
        return (pud_t *)pgd_page_vaddr(*pgd) + pud_index(address);
}

static inline int pgd_bad(pgd_t pgd)
{
        return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
}

static inline int pgd_none(pgd_t pgd)
{
        return !native_pgd_val(pgd);
}
#endif  /* PAGETABLE_LEVELS > 3 */

#endif  /* __ASSEMBLY__ */
/*
 * The pgd page can be thought of as an array like this: pgd_t[PTRS_PER_PGD]
 *
 * This macro returns the index of the entry in the pgd page which would
 * control the given virtual address.
 */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

/*
 * pgd_offset() returns a (pgd_t *)
 * pgd_index() is used to get the offset into the pgd page's array of pgd_t's;
 */
#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))

/*
 * a shortcut which implies the use of the kernel's pgd, instead
 * of a process's
 */
#define pgd_offset_k(address) pgd_offset(&init_mm, (address))

#define KERNEL_PGD_BOUNDARY     pgd_index(PAGE_OFFSET)
#define KERNEL_PGD_PTRS         (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
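/*
 * Illustrative sketch only: a minimal software walk from a virtual
 * address down to its pte, using only the helpers defined in this file
 * and assuming all four levels are present with no huge pages. Real
 * walkers must also check p*d_none()/p*d_bad() and stop early on
 * pud_large()/pmd_large() mappings.
 *
 *      pgd_t *pgd = pgd_offset(mm, addr);
 *      pud_t *pud = pud_offset(pgd, addr);
 *      pmd_t *pmd = pmd_offset(pud, addr);
 *      pte_t *pte = pte_offset_kernel(pmd, addr);
 */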
#ifndef __ASSEMBLY__

extern int direct_gbpages;
void init_mem_mapping(void);
void early_alloc_pgt_buf(void);

/* local pte updates need not use xchg for locking */
static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
{
        pte_t res = *ptep;

        /* Pure native function needs no input for mm, addr */
        native_pte_clear(NULL, 0, ptep);
        return res;
}

static inline pmd_t native_local_pmdp_get_and_clear(pmd_t *pmdp)
{
        pmd_t res = *pmdp;

        native_pmd_clear(pmdp);
        return res;
}

static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr,
                                     pte_t *ptep, pte_t pte)
{
        native_set_pte(ptep, pte);
}

static inline void native_set_pmd_at(struct mm_struct *mm, unsigned long addr,
                                     pmd_t *pmdp, pmd_t pmd)
{
        native_set_pmd(pmdp, pmd);
}

#ifndef CONFIG_PARAVIRT
/*
 * Rules for using pte_update - it must be called after any PTE update which
 * has not been done using the set_pte / clear_pte interfaces. It is used by
 * shadow mode hypervisors to resynchronize the shadow page tables. Kernel PTE
 * updates should either be sets, clears, or set_pte_atomic for P->P
 * transitions, which means this hook should only be called for user PTEs.
 * This hook implies a P->P protection or access change has taken place, which
 * requires a subsequent TLB flush. The notification can optionally be delayed
 * until the TLB flush event by using the pte_update_defer form of the
 * interface, but care must be taken to assure that the flush happens while
 * still holding the same page table lock so that the shadow and primary pages
 * do not become out of sync on SMP.
 */
#define pte_update(mm, addr, ptep)              do { } while (0)
#define pte_update_defer(mm, addr, ptep)        do { } while (0)
#endif

/*
 * We only update the dirty/accessed state if we set
 * the dirty bit by hand in the kernel, since the hardware
 * will do the accessed bit for us, and we don't want to
 * race with other CPU's that might be updating the dirty
 * bit at the same time.
 */
struct vm_area_struct;

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
                                 unsigned long address, pte_t *ptep,
                                 pte_t entry, int dirty);

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
extern int ptep_test_and_clear_young(struct vm_area_struct *vma,
                                     unsigned long addr, pte_t *ptep);

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
extern int ptep_clear_flush_young(struct vm_area_struct *vma,
                                  unsigned long address, pte_t *ptep);

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
                                       pte_t *ptep)
{
        pte_t pte = native_ptep_get_and_clear(ptep);
        pte_update(mm, addr, ptep);
        return pte;
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
                                            unsigned long addr, pte_t *ptep,
                                            int full)
{
        pte_t pte;
        if (full) {
                /*
                 * Full address destruction in progress; paravirt does not
                 * care about updates and native needs no locking
                 */
                pte = native_local_ptep_get_and_clear(ptep);
        } else {
                pte = ptep_get_and_clear(mm, addr, ptep);
        }
        return pte;
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
                                      unsigned long addr, pte_t *ptep)
{
        clear_bit(_PAGE_BIT_RW, (unsigned long *)&ptep->pte);
        pte_update(mm, addr, ptep);
}
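/*
 * ptep_set_wrprotect() relies on clear_bit() being atomic, so the RW
 * bit can be dropped without a lock even while the CPU may be setting
 * the accessed/dirty bits in the same entry.
 */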
#define flush_tlb_fix_spurious_fault(vma, address) do { } while (0)

#define mk_pmd(page, pgprot)   pfn_pmd(page_to_pfn(page), (pgprot))

#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
extern int pmdp_set_access_flags(struct vm_area_struct *vma,
                                 unsigned long address, pmd_t *pmdp,
                                 pmd_t entry, int dirty);

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
extern int pmdp_test_and_clear_young(struct vm_area_struct *vma,
                                     unsigned long addr, pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
                                  unsigned long address, pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_SPLITTING_FLUSH
extern void pmdp_splitting_flush(struct vm_area_struct *vma,
                                 unsigned long addr, pmd_t *pmdp);

#define __HAVE_ARCH_PMD_WRITE
static inline int pmd_write(pmd_t pmd)
{
        return pmd_flags(pmd) & _PAGE_RW;
}

#define __HAVE_ARCH_PMDP_GET_AND_CLEAR
static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm, unsigned long addr,
                                       pmd_t *pmdp)
{
        pmd_t pmd = native_pmdp_get_and_clear(pmdp);
        pmd_update(mm, addr, pmdp);
        return pmd;
}

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
                                      unsigned long addr, pmd_t *pmdp)
{
        clear_bit(_PAGE_BIT_RW, (unsigned long *)pmdp);
        pmd_update(mm, addr, pmdp);
}
/*
 * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
 *
 *  dst - pointer to pgd range anywhere on a pgd page
 *  src - ""
 *  count - the number of pgds to copy.
 *
 * dst and src can be on the same page, but the range must not overlap,
 * and must not cross a page boundary.
 */
static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
{
        memcpy(dst, src, count * sizeof(pgd_t));
}

#define PTE_SHIFT ilog2(PTRS_PER_PTE)

static inline int page_level_shift(enum pg_level level)
{
        return (PAGE_SHIFT - PTE_SHIFT) + level * PTE_SHIFT;
}

static inline unsigned long page_level_size(enum pg_level level)
{
        return 1UL << page_level_shift(level);
}

static inline unsigned long page_level_mask(enum pg_level level)
{
        return ~(page_level_size(level) - 1);
}
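/*
 * With 4 KiB pages (PAGE_SHIFT == 12, PTE_SHIFT == 9) the shift is
 * 3 + 9 * level, so PG_LEVEL_2M gives shift 21, size 2 MiB and mask
 * ~(2 MiB - 1).
 */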
/*
 * The x86 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
static inline void update_mmu_cache(struct vm_area_struct *vma,
                                    unsigned long addr, pte_t *ptep)
{
}

static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
                                        unsigned long addr, pmd_t *pmd)
{
}

#include <asm-generic/pgtable.h>
#endif  /* __ASSEMBLY__ */

#endif /* _ASM_X86_PGTABLE_H */