#ifndef _ASM_X86_PGTABLE_H
#define _ASM_X86_PGTABLE_H

#define FIRST_USER_ADDRESS	0

#define _PAGE_BIT_PRESENT	0	/* is present */
#define _PAGE_BIT_RW		1	/* writeable */
#define _PAGE_BIT_USER		2	/* userspace addressable */
#define _PAGE_BIT_PWT		3	/* page write through */
#define _PAGE_BIT_PCD		4	/* page cache disabled */
#define _PAGE_BIT_ACCESSED	5	/* was accessed (raised by CPU) */
#define _PAGE_BIT_DIRTY		6	/* was written to (raised by CPU) */
#define _PAGE_BIT_FILE		6
#define _PAGE_BIT_PSE		7	/* 4 MB (or 2MB) page */
#define _PAGE_BIT_PAT		7	/* on 4KB pages */
#define _PAGE_BIT_GLOBAL	8	/* Global TLB entry PPro+ */
#define _PAGE_BIT_UNUSED1	9	/* available for programmer */
#define _PAGE_BIT_IOMAP		10	/* flag used to indicate IO mapping */
#define _PAGE_BIT_UNUSED3	11
#define _PAGE_BIT_PAT_LARGE	12	/* On 2MB or 1GB pages */
#define _PAGE_BIT_SPECIAL	_PAGE_BIT_UNUSED1
#define _PAGE_BIT_CPA_TEST	_PAGE_BIT_UNUSED1
#define _PAGE_BIT_NX		63	/* No execute: only valid after cpuid check */

#define _PAGE_PRESENT	(_AT(pteval_t, 1) << _PAGE_BIT_PRESENT)
#define _PAGE_RW	(_AT(pteval_t, 1) << _PAGE_BIT_RW)
#define _PAGE_USER	(_AT(pteval_t, 1) << _PAGE_BIT_USER)
#define _PAGE_PWT	(_AT(pteval_t, 1) << _PAGE_BIT_PWT)
#define _PAGE_PCD	(_AT(pteval_t, 1) << _PAGE_BIT_PCD)
#define _PAGE_ACCESSED	(_AT(pteval_t, 1) << _PAGE_BIT_ACCESSED)
#define _PAGE_DIRTY	(_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
#define _PAGE_PSE	(_AT(pteval_t, 1) << _PAGE_BIT_PSE)
#define _PAGE_GLOBAL	(_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
#define _PAGE_UNUSED1	(_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
#define _PAGE_IOMAP	(_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
#define _PAGE_UNUSED3	(_AT(pteval_t, 1) << _PAGE_BIT_UNUSED3)
#define _PAGE_PAT	(_AT(pteval_t, 1) << _PAGE_BIT_PAT)
#define _PAGE_PAT_LARGE	(_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
#define _PAGE_SPECIAL	(_AT(pteval_t, 1) << _PAGE_BIT_SPECIAL)
#define _PAGE_CPA_TEST	(_AT(pteval_t, 1) << _PAGE_BIT_CPA_TEST)
#define __HAVE_ARCH_PTE_SPECIAL

#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
#define _PAGE_NX	(_AT(pteval_t, 1) << _PAGE_BIT_NX)
#else
#define _PAGE_NX	(_AT(pteval_t, 0))
#endif

/* If _PAGE_PRESENT is clear, we use these: */
#define _PAGE_FILE	_PAGE_DIRTY	/* nonlinear file mapping,
					 * saved PTE; unset:swap */
#define _PAGE_PROTNONE	_PAGE_PSE	/* if the user mapped it with PROT_NONE;
					   pte_present gives true */

#define _PAGE_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER |	\
			 _PAGE_ACCESSED | _PAGE_DIRTY)
#define _KERNPG_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED |	\
			 _PAGE_DIRTY)

/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK	(PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT |		\
			 _PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY)

#define _PAGE_CACHE_MASK	(_PAGE_PCD | _PAGE_PWT)
#define _PAGE_CACHE_WB		(0)
#define _PAGE_CACHE_WC		(_PAGE_PWT)
#define _PAGE_CACHE_UC_MINUS	(_PAGE_PCD)
#define _PAGE_CACHE_UC		(_PAGE_PCD | _PAGE_PWT)
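/*
 * With the PAT bit clear, the PCD/PWT pair above selects one of the first
 * four PAT MSR entries. The WC encoding assumes Linux has reprogrammed
 * PAT entry 1 from its boot default (write-through) to write-combining,
 * as done in arch/x86/mm/pat.c.
 */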
#define PAGE_NONE		__pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
#define PAGE_SHARED		__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | \
					 _PAGE_ACCESSED | _PAGE_NX)
#define PAGE_SHARED_EXEC	__pgprot(_PAGE_PRESENT | _PAGE_RW |	\
					 _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_COPY_NOEXEC	__pgprot(_PAGE_PRESENT | _PAGE_USER |	\
					 _PAGE_ACCESSED | _PAGE_NX)
#define PAGE_COPY_EXEC		__pgprot(_PAGE_PRESENT | _PAGE_USER |	\
					 _PAGE_ACCESSED)
#define PAGE_COPY		PAGE_COPY_NOEXEC
#define PAGE_READONLY		__pgprot(_PAGE_PRESENT | _PAGE_USER |	\
					 _PAGE_ACCESSED | _PAGE_NX)
#define PAGE_READONLY_EXEC	__pgprot(_PAGE_PRESENT | _PAGE_USER |	\
					 _PAGE_ACCESSED)

#define __PAGE_KERNEL_EXEC						\
	(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
#define __PAGE_KERNEL		(__PAGE_KERNEL_EXEC | _PAGE_NX)

#define __PAGE_KERNEL_RO		(__PAGE_KERNEL & ~_PAGE_RW)
#define __PAGE_KERNEL_RX		(__PAGE_KERNEL_EXEC & ~_PAGE_RW)
#define __PAGE_KERNEL_EXEC_NOCACHE	(__PAGE_KERNEL_EXEC | _PAGE_PCD | _PAGE_PWT)
#define __PAGE_KERNEL_WC		(__PAGE_KERNEL | _PAGE_CACHE_WC)
#define __PAGE_KERNEL_NOCACHE		(__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
#define __PAGE_KERNEL_UC_MINUS		(__PAGE_KERNEL | _PAGE_PCD)
#define __PAGE_KERNEL_VSYSCALL		(__PAGE_KERNEL_RX | _PAGE_USER)
#define __PAGE_KERNEL_VSYSCALL_NOCACHE	(__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT)
#define __PAGE_KERNEL_LARGE		(__PAGE_KERNEL | _PAGE_PSE)
#define __PAGE_KERNEL_LARGE_NOCACHE	(__PAGE_KERNEL | _PAGE_CACHE_UC | _PAGE_PSE)
#define __PAGE_KERNEL_LARGE_EXEC	(__PAGE_KERNEL_EXEC | _PAGE_PSE)

#define __PAGE_KERNEL_IO		(__PAGE_KERNEL | _PAGE_IOMAP)
#define __PAGE_KERNEL_IO_NOCACHE	(__PAGE_KERNEL_NOCACHE | _PAGE_IOMAP)
#define __PAGE_KERNEL_IO_UC_MINUS	(__PAGE_KERNEL_UC_MINUS | _PAGE_IOMAP)
#define __PAGE_KERNEL_IO_WC		(__PAGE_KERNEL_WC | _PAGE_IOMAP)

#define PAGE_KERNEL			__pgprot(__PAGE_KERNEL)
#define PAGE_KERNEL_RO			__pgprot(__PAGE_KERNEL_RO)
#define PAGE_KERNEL_EXEC		__pgprot(__PAGE_KERNEL_EXEC)
#define PAGE_KERNEL_RX			__pgprot(__PAGE_KERNEL_RX)
#define PAGE_KERNEL_WC			__pgprot(__PAGE_KERNEL_WC)
#define PAGE_KERNEL_NOCACHE		__pgprot(__PAGE_KERNEL_NOCACHE)
#define PAGE_KERNEL_UC_MINUS		__pgprot(__PAGE_KERNEL_UC_MINUS)
#define PAGE_KERNEL_EXEC_NOCACHE	__pgprot(__PAGE_KERNEL_EXEC_NOCACHE)
#define PAGE_KERNEL_LARGE		__pgprot(__PAGE_KERNEL_LARGE)
#define PAGE_KERNEL_LARGE_NOCACHE	__pgprot(__PAGE_KERNEL_LARGE_NOCACHE)
#define PAGE_KERNEL_LARGE_EXEC		__pgprot(__PAGE_KERNEL_LARGE_EXEC)
#define PAGE_KERNEL_VSYSCALL		__pgprot(__PAGE_KERNEL_VSYSCALL)
#define PAGE_KERNEL_VSYSCALL_NOCACHE	__pgprot(__PAGE_KERNEL_VSYSCALL_NOCACHE)
#define PAGE_KERNEL_IO			__pgprot(__PAGE_KERNEL_IO)
#define PAGE_KERNEL_IO_NOCACHE		__pgprot(__PAGE_KERNEL_IO_NOCACHE)
#define PAGE_KERNEL_IO_UC_MINUS		__pgprot(__PAGE_KERNEL_IO_UC_MINUS)
#define PAGE_KERNEL_IO_WC		__pgprot(__PAGE_KERNEL_IO_WC)

/*         xwr */
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY
#define __P100	PAGE_READONLY_EXEC
#define __P101	PAGE_READONLY_EXEC
#define __P110	PAGE_COPY_EXEC
#define __P111	PAGE_COPY_EXEC

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_READONLY_EXEC
#define __S101	PAGE_READONLY_EXEC
#define __S110	PAGE_SHARED_EXEC
#define __S111	PAGE_SHARED_EXEC
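/*
 * Illustration: a private PROT_READ|PROT_WRITE mapping indexes this table
 * as __P011, i.e. PAGE_COPY, which withholds _PAGE_RW so the first store
 * faults and triggers copy-on-write; the shared equivalent __S011 is
 * PAGE_SHARED and keeps _PAGE_RW set.
 */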
/*
 * early identity mapping pte attrib macros.
 */
#ifdef CONFIG_X86_64
#define __PAGE_KERNEL_IDENT_LARGE_EXEC	__PAGE_KERNEL_LARGE_EXEC
#else
/*
 * For PDE_IDENT_ATTR, include the USER bit. As the PDE and PTE protection
 * bits are combined, this will allow the user to access the high address
 * mapped VDSO in the presence of CONFIG_COMPAT_VDSO.
 */
#define PTE_IDENT_ATTR	 0x003		/* PRESENT+RW */
#define PDE_IDENT_ATTR	 0x067		/* PRESENT+RW+USER+DIRTY+ACCESSED */
#define PGD_IDENT_ATTR	 0x001		/* PRESENT (no other attributes) */
#endif
#ifndef __ASSEMBLY__

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

extern spinlock_t pgd_lock;
extern struct list_head pgd_list;

/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_dirty(pte_t pte)
{
	return pte_flags(pte) & _PAGE_DIRTY;
}

static inline int pte_young(pte_t pte)
{
	return pte_flags(pte) & _PAGE_ACCESSED;
}

static inline int pte_write(pte_t pte)
{
	return pte_flags(pte) & _PAGE_RW;
}

static inline int pte_file(pte_t pte)
{
	return pte_flags(pte) & _PAGE_FILE;
}

static inline int pte_huge(pte_t pte)
{
	return pte_flags(pte) & _PAGE_PSE;
}

static inline int pte_global(pte_t pte)
{
	return pte_flags(pte) & _PAGE_GLOBAL;
}

static inline int pte_exec(pte_t pte)
{
	return !(pte_flags(pte) & _PAGE_NX);
}

static inline int pte_special(pte_t pte)
{
	return pte_flags(pte) & _PAGE_SPECIAL;
}

static inline unsigned long pte_pfn(pte_t pte)
{
	return (pte_val(pte) & PTE_PFN_MASK) >> PAGE_SHIFT;
}

#define pte_page(pte)	pfn_to_page(pte_pfn(pte))
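/*
 * Usage sketch (hypothetical caller; assumes the pte was read under the
 * page table lock and that pte_present() has already been checked):
 *
 *	if (pte_write(pte) && pte_dirty(pte))
 *		handle_dirty_page(pte_page(pte));
 *
 * where handle_dirty_page() is a made-up helper, not part of this header.
 */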
static inline int pmd_large(pmd_t pte)
{
	return (pmd_val(pte) & (_PAGE_PSE | _PAGE_PRESENT)) ==
		(_PAGE_PSE | _PAGE_PRESENT);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_ACCESSED);
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_RW);
}

static inline pte_t pte_mkexec(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_NX);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_ACCESSED);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_RW);
}

static inline pte_t pte_mkhuge(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_PSE);
}

static inline pte_t pte_clrhuge(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_PSE);
}

static inline pte_t pte_mkglobal(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_GLOBAL);
}

static inline pte_t pte_clrglobal(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_GLOBAL);
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_SPECIAL);
}

extern pteval_t __supported_pte_mask;

static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
{
	return __pte((((phys_addr_t)page_nr << PAGE_SHIFT) |
		      pgprot_val(pgprot)) & __supported_pte_mask);
}

static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
{
	return __pmd((((phys_addr_t)page_nr << PAGE_SHIFT) |
		      pgprot_val(pgprot)) & __supported_pte_mask);
}
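/*
 * Sketch: building an entry from a page frame number. The final AND with
 * __supported_pte_mask strips bits the CPU cannot express, e.g. _PAGE_NX
 * on hardware without NX support:
 *
 *	pte_t pte = pfn_pte(pfn, PAGE_KERNEL);
 *
 * (pfn here is just an example variable.)
 */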
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pteval_t val = pte_val(pte);

	/*
	 * Chop off the NX bit (if present), and add the NX portion of
	 * the newprot (if present):
	 */
	val &= _PAGE_CHG_MASK;
	val |= pgprot_val(newprot) & (~_PAGE_CHG_MASK) & __supported_pte_mask;

	return __pte(val);
}
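/*
 * Worked example: pte_modify(pte, PAGE_READONLY) keeps the pfn plus the
 * PCD/PWT/SPECIAL/ACCESSED/DIRTY bits (everything in _PAGE_CHG_MASK),
 * discards the old PRESENT/RW/USER/NX state, and then ORs in the new
 * protection bits, filtered through __supported_pte_mask.
 */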
/* mprotect needs to preserve PAT bits when updating vm_page_prot */
#define pgprot_modify pgprot_modify
static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
{
	pgprotval_t preservebits = pgprot_val(oldprot) & _PAGE_CHG_MASK;
	pgprotval_t addbits = pgprot_val(newprot);
	return __pgprot(preservebits | addbits);
}

#define pte_pgprot(x) __pgprot(pte_flags(x) & PTE_FLAGS_MASK)

#define canon_pgprot(p) __pgprot(pgprot_val(p) & __supported_pte_mask)

#ifndef __ASSEMBLY__
#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot);
int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
				 unsigned long size, pgprot_t *vma_prot);
#endif

/* Install a pte for a particular vaddr in kernel space. */
void set_pte_vaddr(unsigned long vaddr, pte_t pte);

#ifdef CONFIG_X86_32
extern void native_pagetable_setup_start(pgd_t *base);
extern void native_pagetable_setup_done(pgd_t *base);
#else
static inline void native_pagetable_setup_start(pgd_t *base) {}
static inline void native_pagetable_setup_done(pgd_t *base) {}
#endif

struct seq_file;
extern void arch_report_meminfo(struct seq_file *m);

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else  /* !CONFIG_PARAVIRT */
#define set_pte(ptep, pte)		native_set_pte(ptep, pte)
#define set_pte_at(mm, addr, ptep, pte)	native_set_pte_at(mm, addr, ptep, pte)
#define set_pte_present(mm, addr, ptep, pte)				\
	native_set_pte_present(mm, addr, ptep, pte)
#define set_pte_atomic(ptep, pte)					\
	native_set_pte_atomic(ptep, pte)

#define set_pmd(pmdp, pmd)		native_set_pmd(pmdp, pmd)

#ifndef __PAGETABLE_PUD_FOLDED
#define set_pgd(pgdp, pgd)		native_set_pgd(pgdp, pgd)
#define pgd_clear(pgd)			native_pgd_clear(pgd)
#endif

#ifndef set_pud
# define set_pud(pudp, pud)		native_set_pud(pudp, pud)
#endif

#ifndef __PAGETABLE_PMD_FOLDED
#define pud_clear(pud)			native_pud_clear(pud)
#endif

#define pte_clear(mm, addr, ptep)	native_pte_clear(mm, addr, ptep)
#define pmd_clear(pmd)			native_pmd_clear(pmd)

#define pte_update(mm, addr, ptep)		do { } while (0)
#define pte_update_defer(mm, addr, ptep)	do { } while (0)

static inline void __init paravirt_pagetable_setup_start(pgd_t *base)
{
	native_pagetable_setup_start(base);
}

static inline void __init paravirt_pagetable_setup_done(pgd_t *base)
{
	native_pagetable_setup_done(base);
}
#endif	/* CONFIG_PARAVIRT */

#endif	/* __ASSEMBLY__ */

#ifdef CONFIG_X86_32
# include "pgtable_32.h"
#else
# include "pgtable_64.h"
#endif
/*
 * The pgd page can be thought of as an array like this:
 * pgd_t[PTRS_PER_PGD]
 *
 * This macro returns the index of the entry in the pgd page which would
 * control the given virtual address.
 */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))
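/*
 * Example: on x86_64 (PGDIR_SHIFT == 39, PTRS_PER_PGD == 512),
 * pgd_index(0xffff880000000000) == (0xffff880000000000 >> 39) & 511 == 272,
 * the first slot of the kernel direct mapping.
 */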
/*
 * pgd_offset() returns a (pgd_t *)
 * pgd_index() is used to get the offset into the pgd page's array of pgd_t's;
 */
#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))

/*
 * a shortcut which implies the use of the kernel's pgd, instead
 * of a process's
 */
#define pgd_offset_k(address) pgd_offset(&init_mm, (address))
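/*
 * Sketch: locating the kernel pgd entry for an address (addr is an
 * example variable):
 *
 *	pgd_t *pgd = pgd_offset_k(addr);
 */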
#define KERNEL_PGD_BOUNDARY	pgd_index(PAGE_OFFSET)
#define KERNEL_PGD_PTRS		(PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)

#ifndef __ASSEMBLY__

enum {
	PG_LEVEL_NONE,
	PG_LEVEL_4K,
	PG_LEVEL_2M,
	PG_LEVEL_1G,
	PG_LEVEL_NUM
};

#ifdef CONFIG_PROC_FS
extern void update_page_count(int level, unsigned long pages);
#else
static inline void update_page_count(int level, unsigned long pages) { }
#endif

/*
 * Helper function that returns the kernel pagetable entry controlling
 * the virtual address 'address'. NULL means no pagetable entry present.
 * NOTE: the return type is pte_t but if the pmd is PSE then we return it
 * as a pte too.
 */
extern pte_t *lookup_address(unsigned long address, unsigned int *level);
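/*
 * Usage sketch: 'level' reports the level at which the walk terminated,
 * letting the caller distinguish a 4K pte from a large-page entry:
 *
 *	unsigned int level;
 *	pte_t *kpte = lookup_address(address, &level);
 *	if (kpte && level == PG_LEVEL_2M)
 *		...	the "pte" is really a PSE pmd
 */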
/* local pte updates need not use xchg for locking */
static inline pte_t native_local_ptep_get_and_clear(pte_t *ptep)
{
	pte_t res = *ptep;

	/* Pure native function needs no input for mm, addr */
	native_pte_clear(NULL, 0, ptep);
	return res;
}

static inline void native_set_pte_at(struct mm_struct *mm, unsigned long addr,
				     pte_t *ptep, pte_t pte)
{
	native_set_pte(ptep, pte);
}

#ifndef CONFIG_PARAVIRT
/*
 * Rules for using pte_update - it must be called after any PTE update which
 * has not been done using the set_pte / clear_pte interfaces. It is used by
 * shadow mode hypervisors to resynchronize the shadow page tables. Kernel PTE
 * updates should either be sets, clears, or set_pte_atomic for P->P
 * transitions, which means this hook should only be called for user PTEs.
 * This hook implies a P->P protection or access change has taken place, which
 * requires a subsequent TLB flush. The notification can optionally be delayed
 * until the TLB flush event by using the pte_update_defer form of the
 * interface, but care must be taken to assure that the flush happens while
 * still holding the same page table lock so that the shadow and primary pages
 * do not become out of sync on SMP.
 */
#define pte_update(mm, addr, ptep)		do { } while (0)
#define pte_update_defer(mm, addr, ptep)	do { } while (0)
#endif
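/*
 * The pattern the rules above imply (sketch): any by-hand modification of
 * a live user PTE is followed by pte_update() before the page table lock
 * is dropped, exactly as ptep_set_wrprotect() below does after clear_bit().
 */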
/*
 * We only update the dirty/accessed state if we set
 * the dirty bit by hand in the kernel, since the hardware
 * will do the accessed bit for us, and we don't want to
 * race with other CPU's that might be updating the dirty
 * bit at the same time.
 */
struct vm_area_struct;

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pte_t *ptep,
				 pte_t entry, int dirty);

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
extern int ptep_test_and_clear_young(struct vm_area_struct *vma,
				     unsigned long addr, pte_t *ptep);

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
extern int ptep_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pte_t *ptep);

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pte_t *ptep)
{
	pte_t pte = native_ptep_get_and_clear(ptep);
	pte_update(mm, addr, ptep);
	return pte;
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long addr, pte_t *ptep,
					    int full)
{
	pte_t pte;
	if (full) {
		/*
		 * Full address destruction in progress; paravirt does not
		 * care about updates and native needs no locking
		 */
		pte = native_local_ptep_get_and_clear(ptep);
	} else {
		pte = ptep_get_and_clear(mm, addr, ptep);
	}
	return pte;
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	clear_bit(_PAGE_BIT_RW, (unsigned long *)&ptep->pte);
	pte_update(mm, addr, ptep);
}
/*
 * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
 *
 *  dst - pointer to pgd range anywhere on a pgd page
 *  src - ""
 *  count - the number of pgds to copy.
 *
 * dst and src can be on the same page, but the range must not overlap,
 * and must not cross a page boundary.
 */
static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
{
	memcpy(dst, src, count * sizeof(pgd_t));
}
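/*
 * Sketch: copying the kernel entries into a freshly allocated pgd,
 * roughly what the x86 pgd constructor does on configurations that share
 * kernel page tables:
 *
 *	clone_pgd_range(pgd + KERNEL_PGD_BOUNDARY,
 *			swapper_pg_dir + KERNEL_PGD_BOUNDARY,
 *			KERNEL_PGD_PTRS);
 */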
#include <asm-generic/pgtable.h>

#endif	/* __ASSEMBLY__ */

#endif /* _ASM_X86_PGTABLE_H */