/* include/asm-ia64/page.h */
  1. #ifndef _ASM_IA64_PAGE_H
  2. #define _ASM_IA64_PAGE_H
  3. /*
  4. * Pagetable related stuff.
  5. *
  6. * Copyright (C) 1998, 1999, 2002 Hewlett-Packard Co
  7. * David Mosberger-Tang <davidm@hpl.hp.com>
  8. */
  9. #include <linux/config.h>
  10. #include <asm/intrinsics.h>
  11. #include <asm/types.h>
  12. /*
  13. * The top three bits of an IA64 address are its Region Number.
  14. * Different regions are assigned to different purposes.
  15. */
  16. #define RGN_SHIFT (61)
  17. #define RGN_BASE(r) (__IA64_UL_CONST(r)<<RGN_SHIFT)
  18. #define RGN_BITS (RGN_BASE(-1))
  19. #define RGN_KERNEL 7 /* Identity mapped region */
  20. #define RGN_UNCACHED 6 /* Identity mapped I/O region */
  21. #define RGN_GATE 5 /* Gate page, Kernel text, etc */
  22. #define RGN_HPAGE 4 /* For Huge TLB pages */
  23. /*
  24. * PAGE_SHIFT determines the actual kernel page size.
  25. */
  26. #if defined(CONFIG_IA64_PAGE_SIZE_4KB)
  27. # define PAGE_SHIFT 12
  28. #elif defined(CONFIG_IA64_PAGE_SIZE_8KB)
  29. # define PAGE_SHIFT 13
  30. #elif defined(CONFIG_IA64_PAGE_SIZE_16KB)
  31. # define PAGE_SHIFT 14
  32. #elif defined(CONFIG_IA64_PAGE_SIZE_64KB)
  33. # define PAGE_SHIFT 16
  34. #else
  35. # error Unsupported page size!
  36. #endif
  37. #define PAGE_SIZE (__IA64_UL_CONST(1) << PAGE_SHIFT)
  38. #define PAGE_MASK (~(PAGE_SIZE - 1))
  39. #define PAGE_ALIGN(addr) (((addr) + PAGE_SIZE - 1) & PAGE_MASK)
  40. #define PERCPU_PAGE_SHIFT 16 /* log2() of max. size of per-CPU area */
  41. #define PERCPU_PAGE_SIZE (__IA64_UL_CONST(1) << PERCPU_PAGE_SHIFT)
  42. #ifdef CONFIG_HUGETLB_PAGE
  43. # define HPAGE_REGION_BASE RGN_BASE(RGN_HPAGE)
  44. # define HPAGE_SHIFT hpage_shift
  45. # define HPAGE_SHIFT_DEFAULT 28 /* check ia64 SDM for architecture supported size */
  46. # define HPAGE_SIZE (__IA64_UL_CONST(1) << HPAGE_SHIFT)
  47. # define HPAGE_MASK (~(HPAGE_SIZE - 1))
  48. # define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
  49. # define ARCH_HAS_HUGEPAGE_ONLY_RANGE
  50. # define ARCH_HAS_PREPARE_HUGEPAGE_RANGE
  51. # define ARCH_HAS_HUGETLB_FREE_PGD_RANGE
  52. #endif /* CONFIG_HUGETLB_PAGE */
  53. #ifdef __ASSEMBLY__
  54. # define __pa(x) ((x) - PAGE_OFFSET)
  55. # define __va(x) ((x) + PAGE_OFFSET)
  56. #else /* !__ASSEMBLY */
  57. # ifdef __KERNEL__
  58. # define STRICT_MM_TYPECHECKS
  59. extern void clear_page (void *page);
  60. extern void copy_page (void *to, void *from);
  61. /*
  62. * clear_user_page() and copy_user_page() can't be inline functions because
  63. * flush_dcache_page() can't be defined until later...
  64. */
  65. #define clear_user_page(addr, vaddr, page) \
  66. do { \
  67. clear_page(addr); \
  68. flush_dcache_page(page); \
  69. } while (0)
  70. #define copy_user_page(to, from, vaddr, page) \
  71. do { \
  72. copy_page((to), (from)); \
  73. flush_dcache_page(page); \
  74. } while (0)
  75. #define alloc_zeroed_user_highpage(vma, vaddr) \
  76. ({ \
  77. struct page *page = alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vma, vaddr); \
  78. if (page) \
  79. flush_dcache_page(page); \
  80. page; \
  81. })
  82. #define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
  83. #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
  84. #ifdef CONFIG_VIRTUAL_MEM_MAP
  85. extern int ia64_pfn_valid (unsigned long pfn);
  86. #elif defined(CONFIG_FLATMEM)
  87. # define ia64_pfn_valid(pfn) 1
  88. #endif
  89. #ifdef CONFIG_FLATMEM
  90. # define pfn_valid(pfn) (((pfn) < max_mapnr) && ia64_pfn_valid(pfn))
  91. # define page_to_pfn(page) ((unsigned long) (page - mem_map))
  92. # define pfn_to_page(pfn) (mem_map + (pfn))
  93. #elif defined(CONFIG_DISCONTIGMEM)
  94. extern struct page *vmem_map;
  95. extern unsigned long min_low_pfn;
  96. extern unsigned long max_low_pfn;
  97. # define pfn_valid(pfn) (((pfn) >= min_low_pfn) && ((pfn) < max_low_pfn) && ia64_pfn_valid(pfn))
  98. # define page_to_pfn(page) ((unsigned long) (page - vmem_map))
  99. # define pfn_to_page(pfn) (vmem_map + (pfn))
  100. #endif
  101. #define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
  102. #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
  103. #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
  104. typedef union ia64_va {
  105. struct {
  106. unsigned long off : 61; /* intra-region offset */
  107. unsigned long reg : 3; /* region number */
  108. } f;
  109. unsigned long l;
  110. void *p;
  111. } ia64_va;
  112. /*
  113. * Note: These macros depend on the fact that PAGE_OFFSET has all
  114. * region bits set to 1 and all other bits set to zero. They are
  115. * expressed in this way to ensure they result in a single "dep"
  116. * instruction.
  117. */
  118. #define __pa(x) ({ia64_va _v; _v.l = (long) (x); _v.f.reg = 0; _v.l;})
  119. #define __va(x) ({ia64_va _v; _v.l = (long) (x); _v.f.reg = -1; _v.p;})
  120. #define REGION_NUMBER(x) ({ia64_va _v; _v.l = (long) (x); _v.f.reg;})
  121. #define REGION_OFFSET(x) ({ia64_va _v; _v.l = (long) (x); _v.f.off;})
  122. #ifdef CONFIG_HUGETLB_PAGE
  123. # define htlbpage_to_page(x) (((unsigned long) REGION_NUMBER(x) << 61) \
  124. | (REGION_OFFSET(x) >> (HPAGE_SHIFT-PAGE_SHIFT)))
  125. # define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
  126. # define is_hugepage_only_range(mm, addr, len) \
  127. (REGION_NUMBER(addr) == RGN_HPAGE && \
  128. REGION_NUMBER((addr)+(len)-1) == RGN_HPAGE)
  129. extern unsigned int hpage_shift;
  130. #endif
  131. static __inline__ int
  132. get_order (unsigned long size)
  133. {
  134. long double d = size - 1;
  135. long order;
  136. order = ia64_getf_exp(d);
  137. order = order - PAGE_SHIFT - 0xffff + 1;
  138. if (order < 0)
  139. order = 0;
  140. return order;
  141. }
  142. # endif /* __KERNEL__ */
  143. #endif /* !__ASSEMBLY__ */
  144. #ifdef STRICT_MM_TYPECHECKS
  145. /*
  146. * These are used to make use of C type-checking..
  147. */
  148. typedef struct { unsigned long pte; } pte_t;
  149. typedef struct { unsigned long pmd; } pmd_t;
  150. #ifdef CONFIG_PGTABLE_4
  151. typedef struct { unsigned long pud; } pud_t;
  152. #endif
  153. typedef struct { unsigned long pgd; } pgd_t;
  154. typedef struct { unsigned long pgprot; } pgprot_t;
  155. # define pte_val(x) ((x).pte)
  156. # define pmd_val(x) ((x).pmd)
  157. #ifdef CONFIG_PGTABLE_4
  158. # define pud_val(x) ((x).pud)
  159. #endif
  160. # define pgd_val(x) ((x).pgd)
  161. # define pgprot_val(x) ((x).pgprot)
  162. # define __pte(x) ((pte_t) { (x) } )
  163. # define __pgprot(x) ((pgprot_t) { (x) } )
  164. #else /* !STRICT_MM_TYPECHECKS */
  165. /*
  166. * .. while these make it easier on the compiler
  167. */
  168. # ifndef __ASSEMBLY__
  169. typedef unsigned long pte_t;
  170. typedef unsigned long pmd_t;
  171. typedef unsigned long pgd_t;
  172. typedef unsigned long pgprot_t;
  173. # endif
  174. # define pte_val(x) (x)
  175. # define pmd_val(x) (x)
  176. # define pgd_val(x) (x)
  177. # define pgprot_val(x) (x)
  178. # define __pte(x) (x)
  179. # define __pgd(x) (x)
  180. # define __pgprot(x) (x)
  181. #endif /* !STRICT_MM_TYPECHECKS */
  182. #define PAGE_OFFSET RGN_BASE(RGN_KERNEL)
  183. #define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | \
  184. VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC | \
  185. (((current->personality & READ_IMPLIES_EXEC) != 0) \
  186. ? VM_EXEC : 0))
  187. #endif /* _ASM_IA64_PAGE_H */