#ifndef _PPC64_PAGE_H
#define _PPC64_PAGE_H

/*
 * Copyright (C) 2001 PPC64 Team, IBM Corp
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/config.h>

#ifdef __ASSEMBLY__
#define ASM_CONST(x) x
#else
#define __ASM_CONST(x) x##UL
#define ASM_CONST(x) __ASM_CONST(x)
#endif

/* PAGE_SHIFT determines the page size */
#define PAGE_SHIFT	12
#define PAGE_SIZE	(ASM_CONST(1) << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE-1))
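/*
 * Worked example: with PAGE_SHIFT == 12 these evaluate to
 *   PAGE_SIZE == 1UL << 12 == 0x1000 (4 KB pages)
 *   PAGE_MASK == ~0xfffUL  == 0xfffffffffffff000UL
 * so e.g. (0x12345678UL & PAGE_MASK) == 0x12345000UL rounds an address
 * down to its page base.
 */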

#define SID_SHIFT	28
#define SID_MASK	0xfffffffffUL
#define ESID_MASK	0xfffffffff0000000UL
#define GET_ESID(x)	(((x) >> SID_SHIFT) & SID_MASK)
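/*
 * Illustrative: with SID_SHIFT == 28 each segment spans 256 MB, so
 * GET_ESID(0x0000000030000000UL) == 0x3, and every address inside that
 * 256 MB segment yields the same ESID.
 */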

#define HPAGE_SHIFT	24
#define HPAGE_SIZE	((1UL) << HPAGE_SHIFT)
#define HPAGE_MASK	(~(HPAGE_SIZE - 1))

#ifdef CONFIG_HUGETLB_PAGE

#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
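/*
 * Illustrative: HPAGE_SHIFT == 24 gives HPAGE_SIZE == 0x1000000 (16 MB)
 * and HUGETLB_PAGE_ORDER == 24 - 12 == 12, i.e. one huge page covers
 * 4096 normal 4 KB pages.
 */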

/* For 64-bit processes the hugepage range is 1T-1.5T */
#define TASK_HPAGE_BASE	ASM_CONST(0x0000010000000000)
#define TASK_HPAGE_END	ASM_CONST(0x0000018000000000)

#define LOW_ESID_MASK(addr, len)	(((1U << (GET_ESID(addr+len-1)+1)) \
					- (1U << GET_ESID(addr))) & 0xffff)
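/*
 * Worked example: for addr == 0x10000000 and len == 0x20000000 the range
 * spans ESIDs 1 and 2, so LOW_ESID_MASK() == (1U << 3) - (1U << 1) == 0x6,
 * one bit per 256 MB segment touched below 4 GB.
 */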

#define ARCH_HAS_HUGEPAGE_ONLY_RANGE
#define ARCH_HAS_PREPARE_HUGEPAGE_RANGE

#define touches_hugepage_low_range(mm, addr, len) \
	(LOW_ESID_MASK((addr), (len)) & mm->context.htlb_segs)
#define touches_hugepage_high_range(addr, len) \
	(((addr) > (TASK_HPAGE_BASE-(len))) && ((addr) < TASK_HPAGE_END))

#define __within_hugepage_low_range(addr, len, segmask) \
	((LOW_ESID_MASK((addr), (len)) | (segmask)) == (segmask))
#define within_hugepage_low_range(addr, len) \
	__within_hugepage_low_range((addr), (len), \
				    current->mm->context.htlb_segs)
#define within_hugepage_high_range(addr, len) (((addr) >= TASK_HPAGE_BASE) \
	&& ((addr)+(len) <= TASK_HPAGE_END) && ((addr)+(len) >= (addr)))
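/*
 * Note the final ((addr)+(len) >= (addr)) test above: it rejects ranges
 * whose end wraps past the top of the address space, which would
 * otherwise satisfy the first two comparisons.
 */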

#define is_hugepage_only_range(mm, addr, len) \
	(touches_hugepage_high_range((addr), (len)) || \
	 touches_hugepage_low_range((mm), (addr), (len)))
#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA

#define in_hugepage_area(context, addr) \
	(cpu_has_feature(CPU_FTR_16M_PAGE) && \
	 ( (((addr) >= TASK_HPAGE_BASE) && ((addr) < TASK_HPAGE_END)) || \
	   ( ((addr) < 0x100000000L) && \
	     ((1 << GET_ESID(addr)) & (context).htlb_segs) ) ) )

#else /* !CONFIG_HUGETLB_PAGE */

#define in_hugepage_area(mm, addr)	0

#endif /* !CONFIG_HUGETLB_PAGE */

/* align addr on a size boundary - adjust address up/down if needed */
#define _ALIGN_UP(addr,size)	(((addr)+((size)-1))&(~((size)-1)))
#define _ALIGN_DOWN(addr,size)	((addr)&(~((size)-1)))

/* align addr on a size boundary - adjust address up if needed */
#define _ALIGN(addr,size)	_ALIGN_UP(addr,size)

/* to align the pointer to the (next) page boundary */
#define PAGE_ALIGN(addr)	_ALIGN(addr, PAGE_SIZE)
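/*
 * Worked example (size must be a power of two):
 *   _ALIGN_UP(0x1234, 0x1000)   == 0x2000
 *   _ALIGN_DOWN(0x1234, 0x1000) == 0x1000
 * so PAGE_ALIGN(0x1234) rounds up to the next 4 KB boundary.
 */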

#ifdef __KERNEL__
#ifndef __ASSEMBLY__
#include <asm/cache.h>

#undef STRICT_MM_TYPECHECKS

#define REGION_SIZE	4UL
#define REGION_SHIFT	60UL
#define REGION_MASK	(((1UL<<REGION_SIZE)-1UL)<<REGION_SHIFT)
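/*
 * clear_page() zeroes one page a cache line at a time: the number of
 * lines per page is loaded into the CTR register, and each iteration
 * issues a dcbz (data cache block zero) on the next line, advancing the
 * pointer by the cache line size until the counter runs out.
 */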
static __inline__ void clear_page(void *addr)
{
	unsigned long lines, line_size;

	line_size = ppc64_caches.dline_size;
	lines = ppc64_caches.dlines_per_page;

	__asm__ __volatile__(
	"mtctr	%1	# clear_page\n\
1:      dcbz	0,%0\n\
	add	%0,%0,%3\n\
	bdnz+	1b"
	: "=r" (addr)
	: "r" (lines), "0" (addr), "r" (line_size)
	: "ctr", "memory");
}

extern void copy_page(void *to, void *from);
struct page;
extern void clear_user_page(void *page, unsigned long vaddr, struct page *pg);
extern void copy_user_page(void *to, void *from, unsigned long vaddr, struct page *p);

#ifdef STRICT_MM_TYPECHECKS
/*
 * These are used to make use of C type-checking.
 * Entries in the pte table are 64b, while entries in the pgd & pmd are 32b.
 */
typedef struct { unsigned long pte; } pte_t;
typedef struct { unsigned int  pmd; } pmd_t;
typedef struct { unsigned int  pgd; } pgd_t;
typedef struct { unsigned long pgprot; } pgprot_t;

#define pte_val(x)	((x).pte)
#define pmd_val(x)	((x).pmd)
#define pgd_val(x)	((x).pgd)
#define pgprot_val(x)	((x).pgprot)

#define __pte(x)	((pte_t) { (x) } )
#define __pmd(x)	((pmd_t) { (x) } )
#define __pgd(x)	((pgd_t) { (x) } )
#define __pgprot(x)	((pgprot_t) { (x) } )

#else
/*
 * .. while these make it easier on the compiler
 */
typedef unsigned long pte_t;
typedef unsigned int  pmd_t;
typedef unsigned int  pgd_t;
typedef unsigned long pgprot_t;

#define pte_val(x)	(x)
#define pmd_val(x)	(x)
#define pgd_val(x)	(x)
#define pgprot_val(x)	(x)

#define __pte(x)	(x)
#define __pmd(x)	(x)
#define __pgd(x)	(x)
#define __pgprot(x)	(x)

#endif
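/*
 * Either way the accessors compose as inverses, e.g.
 * pte_val(__pte(0x123UL)) == 0x123UL; with STRICT_MM_TYPECHECKS the
 * wrapper structs additionally stop a pte_t from being passed where a
 * pmd_t is expected.
 */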

/* Pure 2^n version of get_order */
static inline int get_order(unsigned long size)
{
	int order;

	size = (size-1) >> (PAGE_SHIFT-1);
	order = -1;
	do {
		size >>= 1;
		order++;
	} while (size);
	return order;
}
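/*
 * Illustrative: get_order(0x1000) == 0 (one 4 KB page),
 * get_order(0x1001) == 1, and get_order(0x4000) == 2, i.e. the smallest
 * power-of-two number of pages covering the requested size.
 */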

#define __pa(x) ((unsigned long)(x)-PAGE_OFFSET)

extern int page_is_ram(unsigned long pfn);

extern u64 ppc64_pft_size;	/* Log 2 of page table size */

/* We do define AT_SYSINFO_EHDR but don't use the gate mechanism */
#define __HAVE_ARCH_GATE_AREA	1

#endif /* __ASSEMBLY__ */

#ifdef MODULE
#define __page_aligned __attribute__((__aligned__(PAGE_SIZE)))
#else
#define __page_aligned \
	__attribute__((__aligned__(PAGE_SIZE), \
		__section__(".data.page_aligned")))
#endif

/* This must match the -Ttext linker address            */
/* Note: tophys & tovirt make assumptions about how     */
/* KERNELBASE is defined for performance reasons.       */
/* When KERNELBASE moves, those macros may have         */
/*   to change!                                         */
#define PAGE_OFFSET	ASM_CONST(0xC000000000000000)
#define KERNELBASE	PAGE_OFFSET
#define VMALLOCBASE	ASM_CONST(0xD000000000000000)

#define VMALLOC_REGION_ID	(VMALLOCBASE >> REGION_SHIFT)
#define KERNEL_REGION_ID	(KERNELBASE >> REGION_SHIFT)
#define USER_REGION_ID		(0UL)
#define REGION_ID(ea)		(((unsigned long)(ea)) >> REGION_SHIFT)
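/*
 * Illustrative: with REGION_SHIFT == 60 the top nibble of an effective
 * address selects the region, so REGION_ID(0xC000000000000000UL) == 0xC
 * (KERNEL_REGION_ID), REGION_ID(0xD000000000000000UL) == 0xD
 * (VMALLOC_REGION_ID), and any user address gives USER_REGION_ID == 0.
 */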

#define __bpn_to_ba(x) ((((unsigned long)(x)) << PAGE_SHIFT) + KERNELBASE)
#define __ba_to_bpn(x) ((((unsigned long)(x)) & ~REGION_MASK) >> PAGE_SHIFT)

#define __va(x) ((void *)((unsigned long)(x) + KERNELBASE))
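/*
 * Illustrative: the linear mapping is a fixed offset, so
 * __va(0x1000) == (void *)0xC000000000001000UL and
 * __pa(0xC000000000001000UL) == 0x1000, i.e. __pa(__va(x)) == x for any
 * physical address in the direct map.
 */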

#ifdef CONFIG_DISCONTIGMEM
#define page_to_pfn(page)	discontigmem_page_to_pfn(page)
#define pfn_to_page(pfn)	discontigmem_pfn_to_page(pfn)
#define pfn_valid(pfn)		discontigmem_pfn_valid(pfn)
#endif

#ifdef CONFIG_FLATMEM
#define pfn_to_page(pfn)	(mem_map + (pfn))
#define page_to_pfn(page)	((unsigned long)((page) - mem_map))
#define pfn_valid(pfn)		((pfn) < max_mapnr)
#endif

#define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
#define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)

#define virt_addr_valid(kaddr)	pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
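/*
 * Illustrative: pfn_to_kaddr(1) == __va(0x1000), and virt_to_page()
 * composes __pa() with pfn_to_page(), so for a valid linear-map address
 * page_to_pfn(virt_to_page(kaddr)) == __pa(kaddr) >> PAGE_SHIFT.
 */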

/*
 * Unfortunately the PLT is in the BSS in the PPC32 ELF ABI,
 * and needs to be executable. This means the whole heap ends
 * up being executable.
 */
#define VM_DATA_DEFAULT_FLAGS32	(VM_READ | VM_WRITE | VM_EXEC | \
				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
#define VM_DATA_DEFAULT_FLAGS64	(VM_READ | VM_WRITE | \
				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

#define VM_DATA_DEFAULT_FLAGS \
	(test_thread_flag(TIF_32BIT) ? \
	 VM_DATA_DEFAULT_FLAGS32 : VM_DATA_DEFAULT_FLAGS64)

/*
 * This is the default if a program doesn't have a PT_GNU_STACK
 * program header entry. The PPC64 ELF ABI has a non-executable
 * stack by default, so in the absence of a PT_GNU_STACK program header
 * we turn execute permission off.
 */
#define VM_STACK_DEFAULT_FLAGS32	(VM_READ | VM_WRITE | VM_EXEC | \
					 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
#define VM_STACK_DEFAULT_FLAGS64	(VM_READ | VM_WRITE | \
					 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

#define VM_STACK_DEFAULT_FLAGS \
	(test_thread_flag(TIF_32BIT) ? \
	 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)

#endif /* __KERNEL__ */
#endif /* _PPC64_PAGE_H */