/* include/asm-powerpc/page_64.h */
  1. #ifndef _ASM_POWERPC_PAGE_64_H
  2. #define _ASM_POWERPC_PAGE_64_H
  3. #ifdef __KERNEL__
  4. /*
  5. * Copyright (C) 2001 PPC64 Team, IBM Corp
  6. *
  7. * This program is free software; you can redistribute it and/or
  8. * modify it under the terms of the GNU General Public License
  9. * as published by the Free Software Foundation; either version
  10. * 2 of the License, or (at your option) any later version.
  11. */
  12. /*
  13. * We always define HW_PAGE_SHIFT to 12 as use of 64K pages remains Linux
  14. * specific, every notion of page number shared with the firmware, TCEs,
  15. * iommu, etc... still uses a page size of 4K.
  16. */
/*
 * We always define HW_PAGE_SHIFT to 12 as use of 64K pages remains Linux
 * specific, every notion of page number shared with the firmware, TCEs,
 * iommu, etc... still uses a page size of 4K.
 */
#define HW_PAGE_SHIFT		12
#define HW_PAGE_SIZE		(ASM_CONST(1) << HW_PAGE_SHIFT)
#define HW_PAGE_MASK		(~(HW_PAGE_SIZE-1))

/*
 * PAGE_FACTOR is the number of bits factor between PAGE_SHIFT and
 * HW_PAGE_SHIFT, that is 4K pages.
 */
#define PAGE_FACTOR		(PAGE_SHIFT - HW_PAGE_SHIFT)

/* Segment size: 1 << 28 = 256MB per segment. */
#define SID_SHIFT		28
#define SID_MASK		0xfffffffffUL	/* 36-bit segment id */
#define ESID_MASK		0xfffffffff0000000UL
/* Segment id containing effective address x. */
#define GET_ESID(x)		(((x) >> SID_SHIFT) & SID_MASK)
  30. #ifndef __ASSEMBLY__
  31. #include <asm/cache.h>
/* Underlying integer type used to hold a PTE value. */
typedef unsigned long pte_basic_t;
/*
 * Zero one page by issuing dcbz (data cache block zero) on every
 * data cache line in the page, instead of a byte-wise memset.
 *
 * Line size and line count come from ppc64_caches (asm/cache.h);
 * the loop count is loaded into CTR and stepped with bdnz.
 */
static __inline__ void clear_page(void *addr)
{
	unsigned long lines, line_size;

	line_size = ppc64_caches.dline_size;
	lines = ppc64_caches.dlines_per_page;

	__asm__ __volatile__(
	"mtctr %1 # clear_page\n\
1: dcbz 0,%0\n\
 add %0,%0,%3\n\
 bdnz+ 1b"
	: "=r" (addr)			/* %0: cursor, tied to input addr */
	: "r" (lines), "0" (addr), "r" (line_size)
	: "ctr", "memory");
}
  47. extern void copy_4K_page(void *to, void *from);
  48. #ifdef CONFIG_PPC_64K_PAGES
  49. static inline void copy_page(void *to, void *from)
  50. {
  51. unsigned int i;
  52. for (i=0; i < (1 << (PAGE_SHIFT - 12)); i++) {
  53. copy_4K_page(to, from);
  54. to += 4096;
  55. from += 4096;
  56. }
  57. }
  58. #else /* CONFIG_PPC_64K_PAGES */
/* Without 64K pages, a Linux page is exactly one 4K hardware page. */
static inline void copy_page(void *to, void *from)
{
	copy_4K_page(to, from);
}
  63. #endif /* CONFIG_PPC_64K_PAGES */
/* Log 2 of page table size */
extern u64 ppc64_pft_size;

/* Large pages size */
#ifdef CONFIG_HUGETLB_PAGE
/* Huge page shift is a runtime variable when hugetlb is configured. */
extern unsigned int HPAGE_SHIFT;
#else
/* Without hugetlb, a "huge" page degenerates to a normal page. */
#define HPAGE_SHIFT PAGE_SHIFT
#endif
#define HPAGE_SIZE		((1UL) << HPAGE_SHIFT)
#define HPAGE_MASK		(~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
  75. #endif /* __ASSEMBLY__ */
  76. #ifdef CONFIG_HUGETLB_PAGE
  77. #define HTLB_AREA_SHIFT 40
  78. #define HTLB_AREA_SIZE (1UL << HTLB_AREA_SHIFT)
  79. #define GET_HTLB_AREA(x) ((x) >> HTLB_AREA_SHIFT)
  80. #define LOW_ESID_MASK(addr, len) \
  81. (((1U << (GET_ESID(min((addr)+(len)-1, 0x100000000UL))+1)) \
  82. - (1U << GET_ESID(min((addr), 0x100000000UL)))) & 0xffff)
  83. #define HTLB_AREA_MASK(addr, len) (((1U << (GET_HTLB_AREA(addr+len-1)+1)) \
  84. - (1U << GET_HTLB_AREA(addr))) & 0xffff)
#define ARCH_HAS_HUGEPAGE_ONLY_RANGE
#define ARCH_HAS_PREPARE_HUGEPAGE_RANGE
#define ARCH_HAS_SETCLEAR_HUGE_PTE

/* Does [addr, addr+len) intersect mm's low (below 4G) hugepage segments? */
#define touches_hugepage_low_range(mm, addr, len) \
	(((addr) < 0x100000000UL) \
	 && (LOW_ESID_MASK((addr), (len)) & (mm)->context.low_htlb_areas))
/* Does [addr, addr+len) intersect mm's high (above 4G) hugepage areas? */
#define touches_hugepage_high_range(mm, addr, len) \
	((((addr) + (len)) > 0x100000000UL) \
	  && (HTLB_AREA_MASK((addr), (len)) & (mm)->context.high_htlb_areas))

/* Range entirely below 4G and covered by the segments in segmask? */
#define __within_hugepage_low_range(addr, len, segmask) \
	( (((addr)+(len)) <= 0x100000000UL) \
	  && ((LOW_ESID_MASK((addr), (len)) | (segmask)) == (segmask)))
#define within_hugepage_low_range(addr, len) \
	__within_hugepage_low_range((addr), (len), \
				    current->mm->context.low_htlb_areas)
/* Range entirely above 4G and covered by the areas in zonemask? */
#define __within_hugepage_high_range(addr, len, zonemask) \
	( ((addr) >= 0x100000000UL) \
	  && ((HTLB_AREA_MASK((addr), (len)) | (zonemask)) == (zonemask)))
#define within_hugepage_high_range(addr, len) \
	__within_hugepage_high_range((addr), (len), \
				     current->mm->context.high_htlb_areas)

/* True if the range touches any hugepage-only region of mm. */
#define is_hugepage_only_range(mm, addr, len) \
	(touches_hugepage_high_range((mm), (addr), (len)) || \
	 touches_hugepage_low_range((mm), (addr), (len)))
#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA

/*
 * Is addr inside one of the context's hugepage regions?  Gated on the
 * CPU supporting 16M pages; picks the high-area or low-segment bitmap
 * depending on which side of 4G addr falls.
 */
#define in_hugepage_area(context, addr) \
	(cpu_has_feature(CPU_FTR_16M_PAGE) && \
	 ( ( (addr) >= 0x100000000UL) \
	   ? ((1 << GET_HTLB_AREA(addr)) & (context).high_htlb_areas) \
	   : ((1 << GET_ESID(addr)) & (context).low_htlb_areas) ) )

#else /* !CONFIG_HUGETLB_PAGE */

#define in_hugepage_area(mm, addr)	0

#endif /* !CONFIG_HUGETLB_PAGE */
#ifdef MODULE
#define __page_aligned __attribute__((__aligned__(PAGE_SIZE)))
#else
/* In the core kernel, page-aligned data is also grouped into its own
 * section so it doesn't pad out unrelated data. */
#define __page_aligned \
	__attribute__((__aligned__(PAGE_SIZE), \
		__section__(".data.page_aligned")))
#endif
/* Default VM flags for data mappings follow the task's 32/64-bit mode. */
#define VM_DATA_DEFAULT_FLAGS \
	(test_thread_flag(TIF_32BIT) ? \
	 VM_DATA_DEFAULT_FLAGS32 : VM_DATA_DEFAULT_FLAGS64)

/*
 * This is the default if a program doesn't have a PT_GNU_STACK
 * program header entry. The PPC64 ELF ABI has a non-executable
 * stack by default, so in the absence of a PT_GNU_STACK program
 * header we turn execute permission off.
 */
#define VM_STACK_DEFAULT_FLAGS32	(VM_READ | VM_WRITE | VM_EXEC | \
					 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
#define VM_STACK_DEFAULT_FLAGS64	(VM_READ | VM_WRITE | \
					 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

#define VM_STACK_DEFAULT_FLAGS \
	(test_thread_flag(TIF_32BIT) ? \
	 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
  141. #include <asm-generic/page.h>
  142. #endif /* __KERNEL__ */
  143. #endif /* _ASM_POWERPC_PAGE_64_H */