pgtable.h
#ifndef ASM_X86__PGTABLE_H
#define ASM_X86__PGTABLE_H

#define FIRST_USER_ADDRESS	0

#define _PAGE_BIT_PRESENT	0	/* is present */
#define _PAGE_BIT_RW		1	/* writeable */
#define _PAGE_BIT_USER		2	/* userspace addressable */
#define _PAGE_BIT_PWT		3	/* page write through */
#define _PAGE_BIT_PCD		4	/* page cache disabled */
#define _PAGE_BIT_ACCESSED	5	/* was accessed (raised by CPU) */
#define _PAGE_BIT_DIRTY		6	/* was written to (raised by CPU) */
#define _PAGE_BIT_FILE		6
#define _PAGE_BIT_PSE		7	/* 4 MB (or 2 MB) page */
#define _PAGE_BIT_PAT		7	/* on 4 KB pages */
#define _PAGE_BIT_GLOBAL	8	/* Global TLB entry PPro+ */
#define _PAGE_BIT_UNUSED1	9	/* available for programmer */
#define _PAGE_BIT_UNUSED2	10
#define _PAGE_BIT_UNUSED3	11
#define _PAGE_BIT_PAT_LARGE	12	/* on 2 MB or 1 GB pages */
#define _PAGE_BIT_SPECIAL	_PAGE_BIT_UNUSED1
#define _PAGE_BIT_CPA_TEST	_PAGE_BIT_UNUSED1
#define _PAGE_BIT_NX		63	/* No execute: only valid after cpuid check */

#define _PAGE_PRESENT	(_AT(pteval_t, 1) << _PAGE_BIT_PRESENT)
#define _PAGE_RW	(_AT(pteval_t, 1) << _PAGE_BIT_RW)
#define _PAGE_USER	(_AT(pteval_t, 1) << _PAGE_BIT_USER)
#define _PAGE_PWT	(_AT(pteval_t, 1) << _PAGE_BIT_PWT)
#define _PAGE_PCD	(_AT(pteval_t, 1) << _PAGE_BIT_PCD)
#define _PAGE_ACCESSED	(_AT(pteval_t, 1) << _PAGE_BIT_ACCESSED)
#define _PAGE_DIRTY	(_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
#define _PAGE_PSE	(_AT(pteval_t, 1) << _PAGE_BIT_PSE)
#define _PAGE_GLOBAL	(_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
#define _PAGE_UNUSED1	(_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
#define _PAGE_UNUSED2	(_AT(pteval_t, 1) << _PAGE_BIT_UNUSED2)
#define _PAGE_UNUSED3	(_AT(pteval_t, 1) << _PAGE_BIT_UNUSED3)
#define _PAGE_PAT	(_AT(pteval_t, 1) << _PAGE_BIT_PAT)
#define _PAGE_PAT_LARGE	(_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
#define _PAGE_SPECIAL	(_AT(pteval_t, 1) << _PAGE_BIT_SPECIAL)
#define _PAGE_CPA_TEST	(_AT(pteval_t, 1) << _PAGE_BIT_CPA_TEST)
#define __HAVE_ARCH_PTE_SPECIAL

#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
#define _PAGE_NX	(_AT(pteval_t, 1) << _PAGE_BIT_NX)
#else
#define _PAGE_NX	(_AT(pteval_t, 0))
#endif
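
/*
 * Editor's note - worked example, not part of the original header:
 * each _PAGE_* mask is a single typed bit, so attributes compose with
 * bitwise OR.  A present, writable, user-visible page:
 *
 *	_PAGE_PRESENT | _PAGE_RW | _PAGE_USER
 *	  = (1 << 0)  + (1 << 1) + (1 << 2) = 0x7
 *
 * The _AT(pteval_t, 1) wrapper keeps the constant at pteval_t width
 * (64 bits under PAE/x86-64), so shifting by _PAGE_BIT_NX (63) is
 * well defined even on 32-bit PAE builds.
 */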

/* If _PAGE_PRESENT is clear, we use these: */
#define _PAGE_FILE	_PAGE_DIRTY	/* nonlinear file mapping,
					 * saved PTE; unset:swap */
#define _PAGE_PROTNONE	_PAGE_PSE	/* if the user mapped it with PROT_NONE;
					   pte_present gives true */

#define _PAGE_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER |	\
			 _PAGE_ACCESSED | _PAGE_DIRTY)
#define _KERNPG_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED |	\
			 _PAGE_DIRTY)

/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK	(PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT |		\
			 _PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY)

#define _PAGE_CACHE_MASK	(_PAGE_PCD | _PAGE_PWT)
#define _PAGE_CACHE_WB		(0)
#define _PAGE_CACHE_WC		(_PAGE_PWT)
#define _PAGE_CACHE_UC_MINUS	(_PAGE_PCD)
#define _PAGE_CACHE_UC		(_PAGE_PCD | _PAGE_PWT)
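
/*
 * Editor's sketch of how the PCD/PWT pair selects a memory type (this
 * assumes the kernel has reprogrammed PAT entry 1 to write-combining,
 * which the x86 PAT setup code does when PAT is available):
 *
 *	PCD PWT    mode
 *	 0   0     WB   write-back (the default)
 *	 0   1     WC   write-combining, e.g. for framebuffers
 *	 1   0     UC-  uncached unless an MTRR says otherwise
 *	 1   1     UC   strongly uncached
 */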

#define PAGE_NONE		__pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
#define PAGE_SHARED		__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | \
					 _PAGE_ACCESSED | _PAGE_NX)

#define PAGE_SHARED_EXEC	__pgprot(_PAGE_PRESENT | _PAGE_RW |	\
					 _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_COPY_NOEXEC	__pgprot(_PAGE_PRESENT | _PAGE_USER |	\
					 _PAGE_ACCESSED | _PAGE_NX)
#define PAGE_COPY_EXEC		__pgprot(_PAGE_PRESENT | _PAGE_USER |	\
					 _PAGE_ACCESSED)
#define PAGE_COPY		PAGE_COPY_NOEXEC
#define PAGE_READONLY		__pgprot(_PAGE_PRESENT | _PAGE_USER |	\
					 _PAGE_ACCESSED | _PAGE_NX)
#define PAGE_READONLY_EXEC	__pgprot(_PAGE_PRESENT | _PAGE_USER |	\
					 _PAGE_ACCESSED)

#define __PAGE_KERNEL_EXEC						\
	(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
#define __PAGE_KERNEL		(__PAGE_KERNEL_EXEC | _PAGE_NX)

#define __PAGE_KERNEL_RO		(__PAGE_KERNEL & ~_PAGE_RW)
#define __PAGE_KERNEL_RX		(__PAGE_KERNEL_EXEC & ~_PAGE_RW)
#define __PAGE_KERNEL_EXEC_NOCACHE	(__PAGE_KERNEL_EXEC | _PAGE_PCD | _PAGE_PWT)
#define __PAGE_KERNEL_WC		(__PAGE_KERNEL | _PAGE_CACHE_WC)
#define __PAGE_KERNEL_NOCACHE		(__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
#define __PAGE_KERNEL_UC_MINUS		(__PAGE_KERNEL | _PAGE_PCD)
#define __PAGE_KERNEL_VSYSCALL		(__PAGE_KERNEL_RX | _PAGE_USER)
#define __PAGE_KERNEL_VSYSCALL_NOCACHE	(__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT)
#define __PAGE_KERNEL_LARGE		(__PAGE_KERNEL | _PAGE_PSE)
#define __PAGE_KERNEL_LARGE_NOCACHE	(__PAGE_KERNEL | _PAGE_CACHE_UC | _PAGE_PSE)
#define __PAGE_KERNEL_LARGE_EXEC	(__PAGE_KERNEL_EXEC | _PAGE_PSE)

#define PAGE_KERNEL			__pgprot(__PAGE_KERNEL)
#define PAGE_KERNEL_RO			__pgprot(__PAGE_KERNEL_RO)
#define PAGE_KERNEL_EXEC		__pgprot(__PAGE_KERNEL_EXEC)
#define PAGE_KERNEL_RX			__pgprot(__PAGE_KERNEL_RX)
#define PAGE_KERNEL_WC			__pgprot(__PAGE_KERNEL_WC)
#define PAGE_KERNEL_NOCACHE		__pgprot(__PAGE_KERNEL_NOCACHE)
#define PAGE_KERNEL_UC_MINUS		__pgprot(__PAGE_KERNEL_UC_MINUS)
#define PAGE_KERNEL_EXEC_NOCACHE	__pgprot(__PAGE_KERNEL_EXEC_NOCACHE)
#define PAGE_KERNEL_LARGE		__pgprot(__PAGE_KERNEL_LARGE)
#define PAGE_KERNEL_LARGE_NOCACHE	__pgprot(__PAGE_KERNEL_LARGE_NOCACHE)
#define PAGE_KERNEL_LARGE_EXEC		__pgprot(__PAGE_KERNEL_LARGE_EXEC)
#define PAGE_KERNEL_VSYSCALL		__pgprot(__PAGE_KERNEL_VSYSCALL)
#define PAGE_KERNEL_VSYSCALL_NOCACHE	__pgprot(__PAGE_KERNEL_VSYSCALL_NOCACHE)

/*         xwr */
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY
#define __P100	PAGE_READONLY_EXEC
#define __P101	PAGE_READONLY_EXEC
#define __P110	PAGE_COPY_EXEC
#define __P111	PAGE_COPY_EXEC

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_READONLY_EXEC
#define __S101	PAGE_READONLY_EXEC
#define __S110	PAGE_SHARED_EXEC
#define __S111	PAGE_SHARED_EXEC
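
/*
 * Editor's example: the three index bits are execute/write/read, so a
 * PROT_READ|PROT_EXEC MAP_PRIVATE mapping looks up __P101 and gets
 * PAGE_READONLY_EXEC, while PROT_WRITE on a MAP_PRIVATE mapping
 * (__P010) falls back to PAGE_COPY: x86 page tables cannot express
 * write-without-read, and private writable pages must be COW anyway.
 * The __S table differs only in granting real write access, since
 * shared mappings need no COW.
 */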

/*
 * early identity mapping pte attrib macros.
 */
#ifdef CONFIG_X86_64
#define __PAGE_KERNEL_IDENT_LARGE_EXEC	__PAGE_KERNEL_LARGE_EXEC
#else
/*
 * For PDE_IDENT_ATTR include USER bit. As the PDE and PTE protection
 * bits are combined, this will allow the user to access the high address
 * mapped VDSO in the presence of CONFIG_COMPAT_VDSO
 */
#define PTE_IDENT_ATTR	 0x003		/* PRESENT+RW */
#define PDE_IDENT_ATTR	 0x067		/* PRESENT+RW+USER+DIRTY+ACCESSED */
#define PGD_IDENT_ATTR	 0x001		/* PRESENT (no other attributes) */
#endif

#ifndef __ASSEMBLY__

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

extern spinlock_t pgd_lock;
extern struct list_head pgd_list;

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_DIRTY;
}

static inline int pte_young(pte_t pte)
{
	return pte_flags(pte) & _PAGE_ACCESSED;
}

static inline int pte_write(pte_t pte)
{
	return pte_flags(pte) & _PAGE_RW;
}

static inline int pte_file(pte_t pte)
{
	return pte_flags(pte) & _PAGE_FILE;
}

static inline int pte_huge(pte_t pte)
{
	return pte_flags(pte) & _PAGE_PSE;
}

static inline int pte_global(pte_t pte)
{
	return pte_flags(pte) & _PAGE_GLOBAL;
}

static inline int pte_exec(pte_t pte)
{
	return !(pte_flags(pte) & _PAGE_NX);
}

static inline int pte_special(pte_t pte)
{
	return pte_val(pte) & _PAGE_SPECIAL;
}

static inline unsigned long pte_pfn(pte_t pte)
{
	return (pte_val(pte) & PTE_PFN_MASK) >> PAGE_SHIFT;
}

#define pte_page(pte)	pfn_to_page(pte_pfn(pte))
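
/*
 * Editor's worked example: with 4 KB pages (PAGE_SHIFT == 12), a
 * 64-bit pte value of 0x80000000dead1067 decodes as
 *
 *	flags : 0x067  (PRESENT|RW|USER|ACCESSED|DIRTY)
 *	NX    : bit 63 set, so pte_exec() is false
 *	pfn   : (val & PTE_PFN_MASK) >> 12 == 0xdead1
 *
 * and pte_page() is then just pfn_to_page(0xdead1).
 */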

static inline int pmd_large(pmd_t pte)
{
	return (pmd_val(pte) & (_PAGE_PSE | _PAGE_PRESENT)) ==
		(_PAGE_PSE | _PAGE_PRESENT);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_ACCESSED);
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_RW);
}

static inline pte_t pte_mkexec(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_NX);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_ACCESSED);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_RW);
}

static inline pte_t pte_mkhuge(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_PSE);
}

static inline pte_t pte_clrhuge(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_PSE);
}

static inline pte_t pte_mkglobal(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_GLOBAL);
}

static inline pte_t pte_clrglobal(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_GLOBAL);
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_SPECIAL);
}
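
/*
 * Editor's note: these helpers are pure value transformations - they
 * modify a copied pte value, not the page table itself - so they
 * compose freely before the result is stored back, e.g. (hypothetical
 * usage sketch):
 *
 *	pte = pte_mkyoung(pte_mkdirty(pte_mkwrite(pte)));
 *	set_pte_at(mm, addr, ptep, pte);
 */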

extern pteval_t __supported_pte_mask;

static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
{
	return __pte((((phys_addr_t)page_nr << PAGE_SHIFT) |
		      pgprot_val(pgprot)) & __supported_pte_mask);
}

static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
{
	return __pmd((((phys_addr_t)page_nr << PAGE_SHIFT) |
		      pgprot_val(pgprot)) & __supported_pte_mask);
}
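
/*
 * Editor's example: pfn_pte(0x1234, PAGE_KERNEL) builds the pte for a
 * kernel mapping of page frame 0x1234.  The phys_addr_t cast keeps
 * page_nr << PAGE_SHIFT from truncating on 32-bit PAE, and masking
 * with __supported_pte_mask strips _PAGE_NX on CPUs without NX
 * support, where bit 63 must stay clear.
 */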

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pteval_t val = pte_val(pte);

	/*
	 * Chop off the NX bit (if present), and add the NX portion of
	 * the newprot (if present):
	 */
	val &= _PAGE_CHG_MASK;
	val |= pgprot_val(newprot) & (~_PAGE_CHG_MASK) & __supported_pte_mask;

	return __pte(val);
}

/* mprotect needs to preserve PAT bits when updating vm_page_prot */
#define pgprot_modify pgprot_modify
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
	pgprotval_t preservebits = pgprot_val(oldprot) & _PAGE_CHG_MASK;
	pgprotval_t addbits = pgprot_val(newprot);
	return __pgprot(preservebits | addbits);
}
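
/*
 * Editor's worked example: pte_modify() keeps everything under
 * _PAGE_CHG_MASK (the pfn plus PCD/PWT/SPECIAL/ACCESSED/DIRTY) and
 * takes the remaining bits - RW, USER, NX, PRESENT - from newprot.
 * So, on an NX-capable CPU, mprotect(PROT_READ) on a dirty, writable
 * page clears _PAGE_RW and sets _PAGE_NX, but leaves the pfn and
 * _PAGE_DIRTY untouched.
 */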

#define pte_pgprot(x) __pgprot(pte_flags(x) & PTE_FLAGS_MASK)

#define canon_pgprot(p) __pgprot(pgprot_val(p) & __supported_pte_mask)

#ifndef __ASSEMBLY__
#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot);
int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
				 unsigned long size, pgprot_t *vma_prot);
#endif

/* Install a pte for a particular vaddr in kernel space. */
void set_pte_vaddr(unsigned long vaddr, pte_t pte);

#ifdef CONFIG_X86_32
extern void native_pagetable_setup_start(pgd_t *base);
extern void native_pagetable_setup_done(pgd_t *base);
#else
static inline void native_pagetable_setup_start(pgd_t *base) {}
static inline void native_pagetable_setup_done(pgd_t *base) {}
#endif

extern int arch_report_meminfo(char *page);

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else  /* !CONFIG_PARAVIRT */
#define set_pte(ptep, pte)		native_set_pte(ptep, pte)
#define set_pte_at(mm, addr, ptep, pte)	native_set_pte_at(mm, addr, ptep, pte)

#define set_pte_present(mm, addr, ptep, pte)				\
	native_set_pte_present(mm, addr, ptep, pte)
#define set_pte_atomic(ptep, pte)					\
	native_set_pte_atomic(ptep, pte)

#define set_pmd(pmdp, pmd)		native_set_pmd(pmdp, pmd)

#ifndef __PAGETABLE_PUD_FOLDED
#define set_pgd(pgdp, pgd)		native_set_pgd(pgdp, pgd)
#define pgd_clear(pgd)			native_pgd_clear(pgd)
#endif

#ifndef set_pud
# define set_pud(pudp, pud)		native_set_pud(pudp, pud)
#endif

#ifndef __PAGETABLE_PMD_FOLDED
#define pud_clear(pud)			native_pud_clear(pud)
#endif

#define pte_clear(mm, addr, ptep)	native_pte_clear(mm, addr, ptep)
#define pmd_clear(pmd)			native_pmd_clear(pmd)

#define pte_update(mm, addr, ptep)		do { } while (0)
#define pte_update_defer(mm, addr, ptep)	do { } while (0)

static inline void __init paravirt_pagetable_setup_start(pgd_t *base)
{
	native_pagetable_setup_start(base);
}

static inline void __init paravirt_pagetable_setup_done(pgd_t *base)
{
	native_pagetable_setup_done(base);
}
#endif	/* CONFIG_PARAVIRT */

#endif	/* __ASSEMBLY__ */

#ifdef CONFIG_X86_32
# include "pgtable_32.h"
#else
# include "pgtable_64.h"
#endif

/*
 * the pgd page can be thought of as an array like this: pgd_t[PTRS_PER_PGD]
 *
 * this macro returns the index of the entry in the pgd page which would
 * control the given virtual address
 */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

/*
 * pgd_offset() returns a (pgd_t *)
 * pgd_index() is used to get the offset into the pgd page's array of pgd_t's;
 */
#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))

/*
 * a shortcut which implies the use of the kernel's pgd, instead
 * of a process's
 */
#define pgd_offset_k(address) pgd_offset(&init_mm, (address))

#define KERNEL_PGD_BOUNDARY	pgd_index(PAGE_OFFSET)
#define KERNEL_PGD_PTRS		(PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
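
/*
 * Editor's worked example (x86-64 values: PGDIR_SHIFT == 39,
 * PTRS_PER_PGD == 512): for the direct-map base 0xffff880000000000,
 *
 *	pgd_index = (0xffff880000000000 >> 39) & 511 == 272
 *
 * so pgd_offset_k() for that address is &init_mm.pgd[272], and
 * KERNEL_PGD_BOUNDARY marks where the kernel's entries begin in
 * every pgd page.
 */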

#ifndef __ASSEMBLY__

enum {
	PG_LEVEL_NONE,
	PG_LEVEL_4K,
	PG_LEVEL_2M,
	PG_LEVEL_1G,
	PG_LEVEL_NUM
};

#ifdef CONFIG_PROC_FS
extern void update_page_count(int level, unsigned long pages);
#else
static inline void update_page_count(int level, unsigned long pages) { }
#endif

/*
 * Helper function that returns the kernel pagetable entry controlling
 * the virtual address 'address'. NULL means no pagetable entry present.
 * NOTE: the return type is pte_t but if the pmd is PSE then we return it
 * as a pte too.
 */
extern pte_t *lookup_address(unsigned long address, unsigned int *level);
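
/*
 * Editor's usage sketch: the level out-parameter tells the caller how
 * much memory the returned entry maps, e.g.
 *
 *	unsigned int level;
 *	pte_t *kpte = lookup_address(address, &level);
 *
 *	if (kpte && level == PG_LEVEL_2M)
 *		... "kpte" is really a PSE pmd covering 2 MB ...
 */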

/* local pte updates need not use xchg for locking */
static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
{
	pte_t res = *ptep;

	/* Pure native function needs no input for mm, addr */
	native_pte_clear(NULL, 0, ptep);
	return res;
}

static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr,
				     pte_t *ptep, pte_t pte)
{
	native_set_pte(ptep, pte);
}

#ifndef CONFIG_PARAVIRT
/*
 * Rules for using pte_update - it must be called after any PTE update which
 * has not been done using the set_pte / clear_pte interfaces. It is used by
 * shadow mode hypervisors to resynchronize the shadow page tables. Kernel PTE
 * updates should either be sets, clears, or set_pte_atomic for P->P
 * transitions, which means this hook should only be called for user PTEs.
 * This hook implies a P->P protection or access change has taken place, which
 * requires a subsequent TLB flush. The notification can optionally be delayed
 * until the TLB flush event by using the pte_update_defer form of the
 * interface, but care must be taken to assure that the flush happens while
 * still holding the same page table lock so that the shadow and primary pages
 * do not become out of sync on SMP.
 */
#define pte_update(mm, addr, ptep)		do { } while (0)
#define pte_update_defer(mm, addr, ptep)	do { } while (0)
#endif

/*
 * We only update the dirty/accessed state if we set
 * the dirty bit by hand in the kernel, since the hardware
 * will do the accessed bit for us, and we don't want to
 * race with other CPUs that might be updating the dirty
 * bit at the same time.
 */
struct vm_area_struct;

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pte_t *ptep,
				 pte_t entry, int dirty);

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
extern int ptep_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long addr, pte_t *ptep);

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
extern int ptep_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pte_t *ptep);

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pte_t *ptep)
{
	pte_t pte = native_ptep_get_and_clear(ptep);
	pte_update(mm, addr, ptep);
	return pte;
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep,
					    int full)
{
	pte_t pte;
	if (full) {
		/*
		 * Full address destruction in progress; paravirt does not
		 * care about updates and native needs no locking
		 */
		pte = native_local_ptep_get_and_clear(ptep);
	} else {
		pte = ptep_get_and_clear(mm, addr, ptep);
	}
	return pte;
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	clear_bit(_PAGE_BIT_RW, (unsigned long *)&ptep->pte);
	pte_update(mm, addr, ptep);
}
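
/*
 * Editor's note: clear_bit() is an atomic RMW on the pte word, so the
 * RW bit can be dropped without losing a concurrent hardware update of
 * ACCESSED/DIRTY - the fork() path relies on this when write-protecting
 * parent ptes while other threads are still running.
 */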

/*
 * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
 *
 *  dst - pointer to pgd range anywhere on a pgd page
 *  src - ""
 *  count - the number of pgds to copy.
 *
 *  dst and src can be on the same page, but the range must not overlap,
 *  and must not cross a page boundary.
 */
static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
{
	memcpy(dst, src, count * sizeof(pgd_t));
}
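
/*
 * Editor's usage sketch: pgd-constructor code typically uses this to
 * copy the kernel half of a new pgd from the reference page tables,
 * along the lines of
 *
 *	clone_pgd_range(pgd + KERNEL_PGD_BOUNDARY,
 *			swapper_pg_dir + KERNEL_PGD_BOUNDARY,
 *			KERNEL_PGD_PTRS);
 */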

#include <asm-generic/pgtable.h>

#endif	/* __ASSEMBLY__ */

#endif	/* ASM_X86__PGTABLE_H */