page.h

#ifndef _ASM_X86_PAGE_H
#define _ASM_X86_PAGE_H

#include <linux/const.h>

/* PAGE_SHIFT determines the page size */
#define PAGE_SHIFT 12
#define PAGE_SIZE (_AC(1,UL) << PAGE_SHIFT)
#define PAGE_MASK (~(PAGE_SIZE-1))

#define PHYSICAL_PAGE_MASK (PAGE_MASK & __PHYSICAL_MASK)
#define PTE_MASK PHYSICAL_PAGE_MASK
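
/* Large/huge pages are PMD-sized: 2MB on 64-bit or with PAE, 4MB on 32-bit non-PAE. */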
#define LARGE_PAGE_SIZE (_AC(1,UL) << PMD_SHIFT)
#define LARGE_PAGE_MASK (~(LARGE_PAGE_SIZE-1))

#define HPAGE_SHIFT PMD_SHIFT
#define HPAGE_SIZE (_AC(1,UL) << HPAGE_SHIFT)
#define HPAGE_MASK (~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)

/* to align the pointer to the (next) page boundary */
#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK)

#define __PHYSICAL_MASK ((_AC(1,UL) << __PHYSICAL_MASK_SHIFT) - 1)
#define __VIRTUAL_MASK ((_AC(1,UL) << __VIRTUAL_MASK_SHIFT) - 1)

#ifndef __ASSEMBLY__
#include <linux/types.h>
#endif

#ifdef CONFIG_X86_64
#define PAGETABLE_LEVELS 4
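
/* Kernel stacks are THREAD_SIZE bytes: two pages (8KB) on x86_64. */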
#define THREAD_ORDER 1
#define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER)
#define CURRENT_MASK (~(THREAD_SIZE-1))

#define EXCEPTION_STACK_ORDER 0
#define EXCEPTION_STKSZ (PAGE_SIZE << EXCEPTION_STACK_ORDER)

#define DEBUG_STACK_ORDER (EXCEPTION_STACK_ORDER + 1)
#define DEBUG_STKSZ (PAGE_SIZE << DEBUG_STACK_ORDER)

#define IRQSTACK_ORDER 2
#define IRQSTACKSIZE (PAGE_SIZE << IRQSTACK_ORDER)
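
/* Indices into the per-CPU interrupt stack table (IST) used by these exceptions. */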
#define STACKFAULT_STACK 1
#define DOUBLEFAULT_STACK 2
#define NMI_STACK 3
#define DEBUG_STACK 4
#define MCE_STACK 5
#define N_EXCEPTION_STACKS 5 /* hw limit: 7 */
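
/* __PAGE_OFFSET is the base of the kernel's direct mapping of all physical memory. */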
#define __PAGE_OFFSET _AC(0xffff810000000000, UL)

#define __PHYSICAL_START CONFIG_PHYSICAL_START
#define __KERNEL_ALIGN 0x200000

/*
 * Make sure the kernel is aligned to a 2MB address. Catching this at
 * compile time is better. Change your config file and rebuild the kernel
 * for a 2MB-aligned address (CONFIG_PHYSICAL_START).
 */
#if (CONFIG_PHYSICAL_START % __KERNEL_ALIGN) != 0
#error "CONFIG_PHYSICAL_START must be a multiple of 2MB"
#endif

#define __START_KERNEL (__START_KERNEL_map + __PHYSICAL_START)
#define __START_KERNEL_map _AC(0xffffffff80000000, UL)

/* See Documentation/x86_64/mm.txt for a description of the memory map. */
#define __PHYSICAL_MASK_SHIFT 46
#define __VIRTUAL_MASK_SHIFT 48

#define KERNEL_TEXT_SIZE (40*1024*1024)
#define KERNEL_TEXT_START _AC(0xffffffff80000000, UL)

#ifndef __ASSEMBLY__
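/* On x86_64, clear_page()/copy_page() are provided as optimized assembly routines. */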
void clear_page(void *page);
void copy_page(void *to, void *from);
#endif /* !__ASSEMBLY__ */

#endif /* CONFIG_X86_64 */

#ifdef CONFIG_X86_32
/*
 * This handles the memory map.
 *
 * A __PAGE_OFFSET of 0xC0000000 means that the kernel has
 * a virtual address space of one gigabyte, which limits the
 * amount of physical memory you can use to about 950MB.
 *
 * If you want more physical memory than this then see the CONFIG_HIGHMEM4G
 * and CONFIG_HIGHMEM64G options in the kernel configuration.
 */
#define __PAGE_OFFSET _AC(CONFIG_PAGE_OFFSET, UL)
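
/* PAE widens physical addresses to 36 bits (up to 64GB of RAM); virtual addresses stay 32-bit. */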
#ifdef CONFIG_X86_PAE
#define __PHYSICAL_MASK_SHIFT 36
#define __VIRTUAL_MASK_SHIFT 32
#else /* !CONFIG_X86_PAE */
#define __PHYSICAL_MASK_SHIFT 32
#define __VIRTUAL_MASK_SHIFT 32
#endif /* CONFIG_X86_PAE */

#ifdef CONFIG_HUGETLB_PAGE
#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
#endif

#ifndef __ASSEMBLY__
#ifdef CONFIG_X86_USE_3DNOW
#include <asm/mmx.h>

static inline void clear_page(void *page)
{
        mmx_clear_page(page);
}

static inline void copy_page(void *to, void *from)
{
        mmx_copy_page(to, from);
}
#else /* !CONFIG_X86_USE_3DNOW */
#include <linux/string.h>

static inline void clear_page(void *page)
{
        memset(page, 0, PAGE_SIZE);
}

static inline void copy_page(void *to, void *from)
{
        memcpy(to, from, PAGE_SIZE);
}
#endif /* CONFIG_X86_USE_3DNOW */
#endif /* !__ASSEMBLY__ */
#endif /* CONFIG_X86_32 */

#define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET)

#define VM_DATA_DEFAULT_FLAGS \
        (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
         VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

#ifndef __ASSEMBLY__
struct page;
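
/*
 * x86 has no cache aliasing, so clearing or copying a user page is just the
 * plain page operation; the user virtual address and struct page are unused.
 */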
static inline void clear_user_page(void *page, unsigned long vaddr,
                                   struct page *pg)
{
        clear_page(page);
}

static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
                                  struct page *topage)
{
        copy_page(to, from);
}
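
/* Allocate an already-zeroed highmem page for userspace at the given address in the vma. */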
#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \
        alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr)
#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE

#endif /* !__ASSEMBLY__ */

#ifdef CONFIG_X86_32
# include "page_32.h"
#else
# include "page_64.h"
#endif

#endif /* _ASM_X86_PAGE_H */