#ifndef _ASM_IA64_PGTABLE_H
#define _ASM_IA64_PGTABLE_H

/*
 * This file contains the functions and defines necessary to modify and use
 * the IA-64 page table tree.
 *
 * This hopefully works with any (fixed) IA-64 page-size, as defined
 * in <asm/page.h>.
 *
 * Copyright (C) 1998-2005 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */

#include <linux/config.h>

#include <asm/mman.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/types.h>

#define IA64_MAX_PHYS_BITS 50 /* max. number of physical address bits (architected) */
/*
 * First, define the various bits in a PTE.  Note that the PTE format
 * matches the VHPT short format, the first doubleword of the VHPT long
 * format, and the first doubleword of the TLB insertion format.
 */
#define _PAGE_P_BIT 0
#define _PAGE_A_BIT 5
#define _PAGE_D_BIT 6

#define _PAGE_P (1 << _PAGE_P_BIT) /* page present bit */
#define _PAGE_MA_WB (0x0 << 2) /* write back memory attribute */
#define _PAGE_MA_UC (0x4 << 2) /* uncacheable memory attribute */
#define _PAGE_MA_UCE (0x5 << 2) /* UC exported attribute */
#define _PAGE_MA_WC (0x6 << 2) /* write coalescing memory attribute */
#define _PAGE_MA_NAT (0x7 << 2) /* not-a-thing attribute */
#define _PAGE_MA_MASK (0x7 << 2)
#define _PAGE_PL_0 (0 << 7) /* privilege level 0 (kernel) */
#define _PAGE_PL_1 (1 << 7) /* privilege level 1 (unused) */
#define _PAGE_PL_2 (2 << 7) /* privilege level 2 (unused) */
#define _PAGE_PL_3 (3 << 7) /* privilege level 3 (user) */
#define _PAGE_PL_MASK (3 << 7)
#define _PAGE_AR_R (0 << 9) /* read only */
#define _PAGE_AR_RX (1 << 9) /* read & execute */
#define _PAGE_AR_RW (2 << 9) /* read & write */
#define _PAGE_AR_RWX (3 << 9) /* read, write & execute */
#define _PAGE_AR_R_RW (4 << 9) /* read / read & write */
#define _PAGE_AR_RX_RWX (5 << 9) /* read & exec / read, write & exec */
#define _PAGE_AR_RWX_RW (6 << 9) /* read, write & exec / read & write */
#define _PAGE_AR_X_RX (7 << 9) /* exec & promote / read & exec */
#define _PAGE_AR_MASK (7 << 9)
#define _PAGE_AR_SHIFT 9
#define _PAGE_A (1 << _PAGE_A_BIT) /* page accessed bit */
#define _PAGE_D (1 << _PAGE_D_BIT) /* page dirty bit */
#define _PAGE_PPN_MASK (((__IA64_UL(1) << IA64_MAX_PHYS_BITS) - 1) & ~0xfffUL)
#define _PAGE_ED (__IA64_UL(1) << 52) /* exception deferral */
#define _PAGE_PROTNONE (__IA64_UL(1) << 63)

/* Valid only for a PTE with the present bit cleared: */
#define _PAGE_FILE (1 << 1) /* see swap & file pte remarks below */

#define _PFN_MASK _PAGE_PPN_MASK
/* Mask of bits which may be changed by pte_modify(); the odd bits are there for _PAGE_PROTNONE */
#define _PAGE_CHG_MASK (_PAGE_P | _PAGE_PROTNONE | _PAGE_PL_MASK | _PAGE_AR_MASK | _PAGE_ED)

#define _PAGE_SIZE_4K 12
#define _PAGE_SIZE_8K 13
#define _PAGE_SIZE_16K 14
#define _PAGE_SIZE_64K 16
#define _PAGE_SIZE_256K 18
#define _PAGE_SIZE_1M 20
#define _PAGE_SIZE_4M 22
#define _PAGE_SIZE_16M 24
#define _PAGE_SIZE_64M 26
#define _PAGE_SIZE_256M 28
#define _PAGE_SIZE_1G 30
#define _PAGE_SIZE_4G 32

#define __ACCESS_BITS _PAGE_ED | _PAGE_A | _PAGE_P | _PAGE_MA_WB
#define __DIRTY_BITS_NO_ED _PAGE_A | _PAGE_P | _PAGE_D | _PAGE_MA_WB
#define __DIRTY_BITS _PAGE_ED | __DIRTY_BITS_NO_ED
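/*
 * Worked example (editor's sketch): a present, accessed, writable user
 * page with write-back memory attribute is composed from the bits above
 * as
 *
 *	__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW
 *		= _PAGE_ED | _PAGE_A | _PAGE_P | _PAGE_MA_WB | (3 << 7) | (2 << 9)
 *		= 0x00100000000005a1
 *
 * i.e. bit 0 (P), bits 2-4 (MA = WB = 0), bit 5 (A), bits 7-8 (PL = 3),
 * bits 9-11 (AR = RW = 2), and bit 52 (ED); the physical page number
 * occupies bits 12-49 (see _PAGE_PPN_MASK).
 */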
/*
 * Definitions for first level:
 *
 * PGDIR_SHIFT determines what a first-level page table entry can map.
 */
#define PGDIR_SHIFT (PAGE_SHIFT + 2*(PAGE_SHIFT-3))
#define PGDIR_SIZE (__IA64_UL(1) << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE-1))
#define PTRS_PER_PGD (1UL << (PAGE_SHIFT-3))
#define USER_PTRS_PER_PGD (5*PTRS_PER_PGD/8) /* regions 0-4 are user regions */
#define FIRST_USER_ADDRESS 0

/*
 * Definitions for second level:
 *
 * PMD_SHIFT determines the size of the area a second-level page table
 * can map.
 */
#define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT-3))
#define PMD_SIZE (1UL << PMD_SHIFT)
#define PMD_MASK (~(PMD_SIZE-1))
#define PTRS_PER_PMD (1UL << (PAGE_SHIFT-3))

/*
 * Definitions for third level:
 */
#define PTRS_PER_PTE (__IA64_UL(1) << (PAGE_SHIFT-3))
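/*
 * Worked example (editor's note): with 16KB pages (PAGE_SHIFT == 14),
 * each level holds 1 << (14-3) == 2048 eight-byte entries, so
 *
 *	PMD_SHIFT   = 14 + 11 = 25	-> one PMD entry maps 32MB
 *	PGDIR_SHIFT = 14 + 22 = 36	-> one PGD entry maps 64GB
 *
 * and PTRS_PER_PGD == PTRS_PER_PMD == PTRS_PER_PTE == 2048.
 */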
/*
 * All the normal masks have the "page accessed" bits on, as any time
 * they are used, the page is accessed.  They are cleared only by the
 * page-out routines.
 */
#define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_A)
#define PAGE_SHARED __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
#define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
#define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
#define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
#define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
#define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
#define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)

# ifndef __ASSEMBLY__

#include <asm/bitops.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/processor.h>
/*
 * Next come the mappings that determine how mmap() protection bits
 * (PROT_EXEC, PROT_READ, PROT_WRITE, PROT_NONE) get implemented.  The
 * _P version gets used for a private mapping, the _S version for a
 * shared mapping created with MAP_SHARED.  In a private mapping, we do
 * a copy-on-write if a task attempts to write to the page.
 */
	/* xwr */
#define __P000 PAGE_NONE
#define __P001 PAGE_READONLY
#define __P010 PAGE_READONLY /* write to priv pg -> copy & make writable */
#define __P011 PAGE_READONLY /* ditto */
#define __P100 __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_X_RX)
#define __P101 __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
#define __P110 PAGE_COPY_EXEC
#define __P111 PAGE_COPY_EXEC

#define __S000 PAGE_NONE
#define __S001 PAGE_READONLY
#define __S010 PAGE_SHARED /* we don't have (and don't need) write-only */
#define __S011 PAGE_SHARED
#define __S100 __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_X_RX)
#define __S101 __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
#define __S110 __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RWX)
#define __S111 __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RWX)
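/*
 * Worked example (editor's note): a MAP_PRIVATE mapping created with
 * PROT_READ|PROT_WRITE selects __P011 == PAGE_READONLY, so the first
 * store to a page traps; the fault handler then performs the
 * copy-on-write and maps the private copy writable.
 */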
#define pgd_ERROR(e) printk("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))
#define pmd_ERROR(e) printk("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
#define pte_ERROR(e) printk("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))

/*
 * Some definitions to translate between mem_map, PTEs, and page addresses:
 */

/* Quick test to see if ADDR is a (potentially) valid physical address. */
static inline long
ia64_phys_addr_valid (unsigned long addr)
{
	return (addr & (local_cpu_data->unimpl_pa_mask)) == 0;
}
/*
 * kern_addr_valid(ADDR) tests if ADDR is pointing to valid kernel
 * memory.  For the return value to be meaningful, ADDR must be >=
 * PAGE_OFFSET.  This operation can be relatively expensive (e.g.,
 * require a hash-, or multi-level tree-lookup or something of that
 * sort) but it guarantees to return TRUE only if accessing the page
 * at that address does not cause an error.  Note that there may be
 * addresses for which kern_addr_valid() returns FALSE even though an
 * access would not cause an error (e.g., this is typically true for
 * memory mapped I/O regions).
 *
 * XXX Need to implement this for IA-64.
 */
#define kern_addr_valid(addr) (1)

/*
 * Now come the defines and routines to manage and access the three-level
 * page table.
 */
/*
 * On some architectures, special things need to be done when setting
 * the PTE in a page table.  Nothing special needs to be done on IA-64.
 */
#define set_pte(ptep, pteval) (*(ptep) = (pteval))
#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)

#define VMALLOC_START (RGN_BASE(RGN_GATE) + 0x200000000UL)
#ifdef CONFIG_VIRTUAL_MEM_MAP
# define VMALLOC_END_INIT (RGN_BASE(RGN_GATE) + (1UL << (4*PAGE_SHIFT - 9)))
# define VMALLOC_END vmalloc_end
extern unsigned long vmalloc_end;
#else
# define VMALLOC_END (RGN_BASE(RGN_GATE) + (1UL << (4*PAGE_SHIFT - 9)))
#endif

/* fs/proc/kcore.c */
#define kc_vaddr_to_offset(v) ((v) - RGN_BASE(RGN_GATE))
#define kc_offset_to_vaddr(o) ((o) + RGN_BASE(RGN_GATE))

/*
 * Conversion functions: convert page frame number (pfn) and a protection value to a page
 * table entry (pte).
 */
#define pfn_pte(pfn, pgprot) \
	({ pte_t __pte; pte_val(__pte) = ((pfn) << PAGE_SHIFT) | pgprot_val(pgprot); __pte; })

/* Extract pfn from pte. */
#define pte_pfn(_pte) ((pte_val(_pte) & _PFN_MASK) >> PAGE_SHIFT)

#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))

/* This takes a physical page address that is used by the remapping functions */
#define mk_pte_phys(physpage, pgprot) \
	({ pte_t __pte; pte_val(__pte) = physpage + pgprot_val(pgprot); __pte; })

#define pte_modify(_pte, newprot) \
	(__pte((pte_val(_pte) & ~_PAGE_CHG_MASK) | (pgprot_val(newprot) & _PAGE_CHG_MASK)))
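/*
 * Editor's note: pte_modify() preserves everything outside
 * _PAGE_CHG_MASK (the page frame number and the A/D/MA bits) and takes
 * the present/protection bits from the new pgprot; e.g.
 *
 *	pte = pte_modify(pte, PAGE_READONLY);
 *
 * downgrades the access rights without touching the frame number.
 */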
#define page_pte_prot(page,prot) mk_pte(page, prot)
#define page_pte(page) page_pte_prot(page, __pgprot(0))

#define pte_none(pte) (!pte_val(pte))
#define pte_present(pte) (pte_val(pte) & (_PAGE_P | _PAGE_PROTNONE))
#define pte_clear(mm,addr,pte) (pte_val(*(pte)) = 0UL)
/* pte_page() returns the "struct page *" corresponding to the PTE: */
#define pte_page(pte) virt_to_page(((pte_val(pte) & _PFN_MASK) + PAGE_OFFSET))

#define pmd_none(pmd) (!pmd_val(pmd))
#define pmd_bad(pmd) (!ia64_phys_addr_valid(pmd_val(pmd)))
#define pmd_present(pmd) (pmd_val(pmd) != 0UL)
#define pmd_clear(pmdp) (pmd_val(*(pmdp)) = 0UL)
#define pmd_page_kernel(pmd) ((unsigned long) __va(pmd_val(pmd) & _PFN_MASK))
#define pmd_page(pmd) virt_to_page((pmd_val(pmd) + PAGE_OFFSET))

#define pud_none(pud) (!pud_val(pud))
#define pud_bad(pud) (!ia64_phys_addr_valid(pud_val(pud)))
#define pud_present(pud) (pud_val(pud) != 0UL)
#define pud_clear(pudp) (pud_val(*(pudp)) = 0UL)
#define pud_page(pud) ((unsigned long) __va(pud_val(pud) & _PFN_MASK))
/*
 * The following have defined behavior only if pte_present() is true.
 */
#define pte_user(pte) ((pte_val(pte) & _PAGE_PL_MASK) == _PAGE_PL_3)
#define pte_read(pte) (((pte_val(pte) & _PAGE_AR_MASK) >> _PAGE_AR_SHIFT) < 6)
#define pte_write(pte) ((unsigned) (((pte_val(pte) & _PAGE_AR_MASK) >> _PAGE_AR_SHIFT) - 2) <= 4)
#define pte_exec(pte) ((pte_val(pte) & _PAGE_AR_RX) != 0)
#define pte_dirty(pte) ((pte_val(pte) & _PAGE_D) != 0)
#define pte_young(pte) ((pte_val(pte) & _PAGE_A) != 0)
#define pte_file(pte) ((pte_val(pte) & _PAGE_FILE) != 0)
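/*
 * Editor's note: pte_write() exploits the access-rights encoding above.
 * AR values 2 (RW), 3 (RWX), 4 (R/RW), 5 (RX/RWX), and 6 (RWX/RW) all
 * permit writes at some privilege level, so the unsigned comparison
 * (ar - 2) <= 4 tests membership in 2..6 in a single step (values 0, 1,
 * and 7 wrap around to large unsigned numbers and fail the test).
 */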
/*
 * Note: we convert AR_RWX to AR_RX and AR_RW to AR_R by clearing the 2nd bit in the
 * access rights:
 */
#define pte_wrprotect(pte) (__pte(pte_val(pte) & ~_PAGE_AR_RW))
#define pte_mkwrite(pte) (__pte(pte_val(pte) | _PAGE_AR_RW))
#define pte_mkexec(pte) (__pte(pte_val(pte) | _PAGE_AR_RX))
#define pte_mkold(pte) (__pte(pte_val(pte) & ~_PAGE_A))
#define pte_mkyoung(pte) (__pte(pte_val(pte) | _PAGE_A))
#define pte_mkclean(pte) (__pte(pte_val(pte) & ~_PAGE_D))
#define pte_mkdirty(pte) (__pte(pte_val(pte) | _PAGE_D))
#define pte_mkhuge(pte) (__pte(pte_val(pte) | _PAGE_P))
/*
 * Macro to mark a page protection value as "uncacheable".  Note that "protection" is
 * really a misnomer here as the protection value contains the memory attribute bits,
 * dirty bits, and various other bits as well.
 */
#define pgprot_noncached(prot) __pgprot((pgprot_val(prot) & ~_PAGE_MA_MASK) | _PAGE_MA_UC)
/*
 * Macro to mark a page protection value as "write-combining".
 * Note that "protection" is really a misnomer here as the protection
 * value contains the memory attribute bits, dirty bits, and various
 * other bits as well.  Accesses through a write-combining translation
 * bypass the caches, but do allow consecutive writes to be combined
 * into single (but larger) write transactions.
 */
#define pgprot_writecombine(prot) __pgprot((pgprot_val(prot) & ~_PAGE_MA_MASK) | _PAGE_MA_WC)
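/*
 * Usage sketch (editor's note, hypothetical driver code): a driver
 * mapping a frame buffer to user space might request write-combining
 * before calling remap_pfn_range():
 *
 *	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
 *	return remap_pfn_range(vma, vma->vm_start, pfn,
 *			       vma->vm_end - vma->vm_start,
 *			       vma->vm_page_prot);
 */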
static inline unsigned long
pgd_index (unsigned long address)
{
	unsigned long region = address >> 61;
	unsigned long l1index = (address >> PGDIR_SHIFT) & ((PTRS_PER_PGD >> 3) - 1);

	return (region << (PAGE_SHIFT - 6)) | l1index;
}
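/*
 * Worked example (editor's note, assuming 16KB pages): the 2048-entry
 * PGD is split into eight 256-entry chunks, one per region.  A region-5
 * address such as 0xa000000000000000 gives region == 5, so its entry
 * lands at (5 << 8) | l1index, i.e. somewhere in slots 1280..1535.
 */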
/* The offset in the 1-level directory is given by the 3 region bits
   (61..63) and the level-1 bits.  */
static inline pgd_t*
pgd_offset (struct mm_struct *mm, unsigned long address)
{
	return mm->pgd + pgd_index(address);
}

/* In the kernel's mapped region we completely ignore the region number
   (since we know it's in region number 5). */
#define pgd_offset_k(addr) \
	(init_mm.pgd + (((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1)))

/* Look up a pgd entry in the gate area.  On IA-64, the gate-area
   resides in the kernel-mapped segment, hence we use pgd_offset_k()
   here.  */
#define pgd_offset_gate(mm, addr) pgd_offset_k(addr)

/* Find an entry in the second-level page table.. */
#define pmd_offset(dir,addr) \
	((pmd_t *) pud_page(*(dir)) + (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)))

/*
 * Find an entry in the third-level page table.  This looks more complicated than it
 * should be because some platforms place page tables in high memory.
 */
#define pte_index(addr) (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
#define pte_offset_kernel(dir,addr) ((pte_t *) pmd_page_kernel(*(dir)) + pte_index(addr))
#define pte_offset_map(dir,addr) pte_offset_kernel(dir, addr)
#define pte_offset_map_nested(dir,addr) pte_offset_map(dir, addr)
#define pte_unmap(pte) do { } while (0)
#define pte_unmap_nested(pte) do { } while (0)
/* Atomic versions of some of the PTE manipulations: */
static inline int
ptep_test_and_clear_young (struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
#ifdef CONFIG_SMP
	if (!pte_young(*ptep))
		return 0;
	return test_and_clear_bit(_PAGE_A_BIT, ptep);
#else
	pte_t pte = *ptep;
	if (!pte_young(pte))
		return 0;
	set_pte_at(vma->vm_mm, addr, ptep, pte_mkold(pte));
	return 1;
#endif
}

static inline int
ptep_test_and_clear_dirty (struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
{
#ifdef CONFIG_SMP
	if (!pte_dirty(*ptep))
		return 0;
	return test_and_clear_bit(_PAGE_D_BIT, ptep);
#else
	pte_t pte = *ptep;
	if (!pte_dirty(pte))
		return 0;
	set_pte_at(vma->vm_mm, addr, ptep, pte_mkclean(pte));
	return 1;
#endif
}
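/*
 * Editor's note: on SMP the accessed/dirty bit is cleared with an
 * atomic test_and_clear_bit() on the PTE word itself (_PAGE_A_BIT and
 * _PAGE_D_BIT are plain bit numbers for exactly this reason), so a
 * concurrent A/D-bit update by the low-level fault handlers on another
 * CPU cannot be lost to a non-atomic read-modify-write.
 */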
static inline pte_t
ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
#ifdef CONFIG_SMP
	return __pte(xchg((long *) ptep, 0));
#else
	pte_t pte = *ptep;
	pte_clear(mm, addr, ptep);
	return pte;
#endif
}

static inline void
ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
#ifdef CONFIG_SMP
	unsigned long new, old;

	do {
		old = pte_val(*ptep);
		new = pte_val(pte_wrprotect(__pte (old)));
	} while (cmpxchg((unsigned long *) ptep, old, new) != old);
#else
	pte_t old_pte = *ptep;

	set_pte_at(mm, addr, ptep, pte_wrprotect(old_pte));
#endif
}

static inline int
pte_same (pte_t a, pte_t b)
{
	return pte_val(a) == pte_val(b);
}
#define update_mmu_cache(vma, address, pte) do { } while (0)

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern void paging_init (void);

/*
 * Note: The macros below rely on the fact that MAX_SWAPFILES_SHIFT <= number of
 *	 bits in the swap-type field of the swap pte.  It would be nice to
 *	 enforce that, but we can't easily include <linux/swap.h> here.
 *	 (Of course, better still would be to define MAX_SWAPFILES_SHIFT here...).
 *
 * Format of swap pte:
 *	bit   0   : present bit (must be zero)
 *	bit   1   : _PAGE_FILE (must be zero)
 *	bits  2- 8: swap-type
 *	bits  9-62: swap offset
 *	bit  63   : _PAGE_PROTNONE bit
 *
 * Format of file pte:
 *	bit   0   : present bit (must be zero)
 *	bit   1   : _PAGE_FILE (must be one)
 *	bits  2-62: file_offset/PAGE_SIZE
 *	bit  63   : _PAGE_PROTNONE bit
 */
#define __swp_type(entry) (((entry).val >> 2) & 0x7f)
#define __swp_offset(entry) (((entry).val << 1) >> 10)
#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << 2) | ((long) (offset) << 9) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })

#define PTE_FILE_MAX_BITS 61
#define pte_to_pgoff(pte) ((pte_val(pte) << 1) >> 3)
#define pgoff_to_pte(off) ((pte_t) { ((off) << 2) | _PAGE_FILE })
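/*
 * Worked example (editor's sketch): __swp_entry(3, 0x100) yields
 * val = (3 << 2) | (0x100 << 9) = 0x2000c.  __swp_type() recovers
 * (0x2000c >> 2) & 0x7f = 3, and __swp_offset() recovers
 * (0x2000c << 1) >> 10 = 0x100; the shift pair strips bit 63
 * (reserved for _PAGE_PROTNONE) as well as the low nine bits.
 */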
/* XXX is this right? */
#define io_remap_page_range(vma, vaddr, paddr, size, prot) \
	remap_pfn_range(vma, vaddr, (paddr) >> PAGE_SHIFT, size, prot)

#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
	remap_pfn_range(vma, vaddr, pfn, size, prot)

#define MK_IOSPACE_PFN(space, pfn) (pfn)
#define GET_IOSPACE(pfn) 0
#define GET_PFN(pfn) (pfn)

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
extern struct page *zero_page_memmap_ptr;
#define ZERO_PAGE(vaddr) (zero_page_memmap_ptr)

/* We provide our own get_unmapped_area to cope with VA holes for userland */
#define HAVE_ARCH_UNMAPPED_AREA

#ifdef CONFIG_HUGETLB_PAGE
#define HUGETLB_PGDIR_SHIFT (HPAGE_SHIFT + 2*(PAGE_SHIFT-3))
#define HUGETLB_PGDIR_SIZE (__IA64_UL(1) << HUGETLB_PGDIR_SHIFT)
#define HUGETLB_PGDIR_MASK (~(HUGETLB_PGDIR_SIZE-1))
struct mmu_gather;
void hugetlb_free_pgd_range(struct mmu_gather **tlb, unsigned long addr,
			    unsigned long end, unsigned long floor, unsigned long ceiling);
#endif
/*
 * IA-64 doesn't have any external MMU info: the page tables contain all the necessary
 * information.  However, we use this routine to take care of any (delayed) i-cache
 * flushing that may be necessary.
 */
extern void lazy_mmu_prot_update (pte_t pte);

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
/*
 * Update PTEP with ENTRY, which is guaranteed to be a less
 * restrictive PTE.  That is, ENTRY may have the ACCESSED, DIRTY, and
 * WRITABLE bits turned on, when the value at PTEP did not.  The
 * WRITABLE bit may only be turned on if SAFELY_WRITABLE is TRUE.
 *
 * SAFELY_WRITABLE is TRUE if we can update the value at PTEP without
 * having to worry about races.  On SMP machines, there are only two
 * cases where this is true:
 *
 *	(1) *PTEP has the PRESENT bit turned OFF
 *	(2) ENTRY has the DIRTY bit turned ON
 *
 * On ia64, we could implement this routine with a cmpxchg()-loop
 * which ORs in the _PAGE_A/_PAGE_D bit if they're set in ENTRY.
 * However, like on x86, we can get a more streamlined version by
 * observing that it is OK to drop ACCESSED bit updates when
 * SAFELY_WRITABLE is FALSE.  Besides being rare, all that would do is
 * result in an extra Access-bit fault, which would then turn on the
 * ACCESSED bit in the low-level fault handler (iaccess_bit or
 * daccess_bit in ivt.S).
 */
#ifdef CONFIG_SMP
# define ptep_set_access_flags(__vma, __addr, __ptep, __entry, __safely_writable) \
do {										\
	if (__safely_writable) {						\
		set_pte(__ptep, __entry);					\
		flush_tlb_page(__vma, __addr);					\
	}									\
} while (0)
#else
# define ptep_set_access_flags(__vma, __addr, __ptep, __entry, __safely_writable) \
	ptep_establish(__vma, __addr, __ptep, __entry)
#endif

# ifdef CONFIG_VIRTUAL_MEM_MAP
  /* arch mem_map init routine is needed due to holes in a virtual mem_map */
# define __HAVE_ARCH_MEMMAP_INIT
  extern void memmap_init (unsigned long size, int nid, unsigned long zone,
			   unsigned long start_pfn);
# endif /* CONFIG_VIRTUAL_MEM_MAP */

# endif /* !__ASSEMBLY__ */
/*
 * Identity-mapped regions use a large page size.  We'll call such large pages
 * "granules".  If you can think of a better name that's unambiguous, let me
 * know...
 */
#if defined(CONFIG_IA64_GRANULE_64MB)
# define IA64_GRANULE_SHIFT _PAGE_SIZE_64M
#elif defined(CONFIG_IA64_GRANULE_16MB)
# define IA64_GRANULE_SHIFT _PAGE_SIZE_16M
#endif
#define IA64_GRANULE_SIZE (1 << IA64_GRANULE_SHIFT)
/*
 * log2() of the page size we use to map the kernel image (IA64_TR_KERNEL):
 */
#define KERNEL_TR_PAGE_SHIFT _PAGE_SIZE_64M
#define KERNEL_TR_PAGE_SIZE (1 << KERNEL_TR_PAGE_SHIFT)

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init() do { } while (0)

/* These tell get_user_pages() that the first gate page is accessible from user-level. */
#define FIXADDR_USER_START GATE_ADDR
#ifdef HAVE_BUGGY_SEGREL
# define FIXADDR_USER_END (GATE_ADDR + 2*PAGE_SIZE)
#else
# define FIXADDR_USER_END (GATE_ADDR + 2*PERCPU_PAGE_SIZE)
#endif

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
#define __HAVE_ARCH_PTE_SAME
#define __HAVE_ARCH_PGD_OFFSET_GATE
#define __HAVE_ARCH_LAZY_MMU_PROT_UPDATE

#include <asm-generic/pgtable-nopud.h>
#include <asm-generic/pgtable.h>

#endif /* _ASM_IA64_PGTABLE_H */