page.h

#ifndef _I386_PAGE_H
#define _I386_PAGE_H

/* PAGE_SHIFT determines the page size */
#define PAGE_SHIFT 12
#define PAGE_SIZE (1UL << PAGE_SHIFT)
#define PAGE_MASK (~(PAGE_SIZE-1))

#define LARGE_PAGE_MASK (~(LARGE_PAGE_SIZE-1))
#define LARGE_PAGE_SIZE (1UL << PMD_SHIFT)
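/*
 * Illustrative values (editor's note, not part of the original header):
 * with PAGE_SHIFT == 12, PAGE_SIZE is 4096 bytes and PAGE_MASK is
 * 0xfffff000. LARGE_PAGE_SIZE depends on PMD_SHIFT from the pgtable
 * headers: 4 MB without PAE (PMD_SHIFT == 22), 2 MB with PAE
 * (PMD_SHIFT == 21).
 */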
#ifdef __KERNEL__
#ifndef __ASSEMBLY__

#include <linux/config.h>

#ifdef CONFIG_X86_USE_3DNOW

#include <asm/mmx.h>

#define clear_page(page) mmx_clear_page((void *)(page))
#define copy_page(to,from) mmx_copy_page(to,from)

#else

/*
 * On older X86 processors it doesn't seem to be a win to use MMX here.
 * Maybe the K6-III ?
 */
#define clear_page(page) memset((void *)(page), 0, PAGE_SIZE)
#define copy_page(to,from) memcpy((void *)(to), (void *)(from), PAGE_SIZE)

#endif

#define clear_user_page(page, vaddr, pg) clear_page(page)
#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)

#define alloc_zeroed_user_highpage(vma, vaddr) alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vma, vaddr)
#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
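/*
 * Editor's note (assumption, not from the original header): i386 has no
 * virtually-aliased caches, so the user-page variants can ignore the
 * vaddr and pg arguments and simply fall back to clear_page()/copy_page().
 */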
/*
 * These are used to make use of C type-checking..
 */
extern int nx_enabled;

#ifdef CONFIG_X86_PAE
extern unsigned long long __supported_pte_mask;
typedef struct { unsigned long pte_low, pte_high; } pte_t;
typedef struct { unsigned long long pmd; } pmd_t;
typedef struct { unsigned long long pgd; } pgd_t;
typedef struct { unsigned long long pgprot; } pgprot_t;
#define pmd_val(x) ((x).pmd)
#define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
#define __pmd(x) ((pmd_t) { (x) } )
#define HPAGE_SHIFT 21
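/*
 * Editor's sketch (illustrative values, not part of the original header)
 * of how pte_val() reassembles a 64-bit PAE pte from its two halves:
 *
 *	pte_t pte = { .pte_low = 0x12345067, .pte_high = 0x1 };
 *	pte_val(pte) == 0x0000000112345067ULL;
 *
 * pte_high supplies bits 32..63, pte_low bits 0..31.
 */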
#else
typedef struct { unsigned long pte_low; } pte_t;
typedef struct { unsigned long pgd; } pgd_t;
typedef struct { unsigned long pgprot; } pgprot_t;
#define boot_pte_t pte_t /* or would you rather have a typedef */
#define pte_val(x) ((x).pte_low)
#define HPAGE_SHIFT 22
#endif

#define PTE_MASK PAGE_MASK

#ifdef CONFIG_HUGETLB_PAGE
#define HPAGE_SIZE ((1UL) << HPAGE_SHIFT)
#define HPAGE_MASK (~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
#define ARCH_HAS_HUGETLB_CLEAN_STALE_PGTABLE
#endif
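/*
 * Worked values (editor's note, not part of the original header):
 * without PAE, HPAGE_SHIFT is 22, so HPAGE_SIZE is 4 MB and
 * HUGETLB_PAGE_ORDER is 22 - 12 = 10; with PAE, HPAGE_SHIFT is 21,
 * giving 2 MB huge pages of order 9.
 */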
#define pgd_val(x) ((x).pgd)
#define pgprot_val(x) ((x).pgprot)

#define __pte(x) ((pte_t) { (x) } )
#define __pgd(x) ((pgd_t) { (x) } )
#define __pgprot(x) ((pgprot_t) { (x) } )

#endif /* !__ASSEMBLY__ */

/* to align the pointer to the (next) page boundary */
#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK)
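/*
 * Editor's example (not part of the original header): PAGE_ALIGN()
 * rounds up to the next page boundary, e.g.
 *
 *	PAGE_ALIGN(0x1234) == 0x2000
 *	PAGE_ALIGN(0x2000) == 0x2000
 */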
/*
 * This handles the memory map.. We could make this a config
 * option, but too many people screw it up, and too few need
 * it.
 *
 * A __PAGE_OFFSET of 0xC0000000 means that the kernel has
 * a virtual address space of one gigabyte, which limits the
 * amount of physical memory you can use to about 950MB.
 *
 * If you want more physical memory than this then see the CONFIG_HIGHMEM4G
 * and CONFIG_HIGHMEM64G options in the kernel configuration.
 */

#ifndef __ASSEMBLY__

/*
 * This much address space is reserved for vmalloc() and iomap()
 * as well as fixmap mappings.
 */
extern unsigned int __VMALLOC_RESERVE;

/* Pure 2^n version of get_order */
static __inline__ int get_order(unsigned long size)
{
	int order;

	size = (size-1) >> (PAGE_SHIFT-1);
	order = -1;
	do {
		size >>= 1;
		order++;
	} while (size);
	return order;
}
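/*
 * Editor's examples (not part of the original header): get_order()
 * returns the smallest order such that size fits in 2^order pages:
 *
 *	get_order(1)             == 0
 *	get_order(PAGE_SIZE)     == 0
 *	get_order(PAGE_SIZE + 1) == 1
 *	get_order(8 * PAGE_SIZE) == 3
 */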
extern int sysctl_legacy_va_layout;
extern int page_is_ram(unsigned long pagenr);

#endif /* __ASSEMBLY__ */

#ifdef __ASSEMBLY__
#define __PAGE_OFFSET (0xC0000000)
#define __PHYSICAL_START CONFIG_PHYSICAL_START
#else
#define __PAGE_OFFSET (0xC0000000UL)
#define __PHYSICAL_START ((unsigned long)CONFIG_PHYSICAL_START)
#endif
#define __KERNEL_START (__PAGE_OFFSET + __PHYSICAL_START)

#define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET)
#define VMALLOC_RESERVE ((unsigned long)__VMALLOC_RESERVE)
#define MAXMEM (-__PAGE_OFFSET-__VMALLOC_RESERVE)
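/*
 * Editor's worked example (assumes the usual __VMALLOC_RESERVE default of
 * 128 MB, which is set outside this header): MAXMEM is
 * -0xC0000000 - 0x08000000 == 0x40000000 - 0x08000000 == 0x38000000,
 * i.e. 896 MB of physical memory directly mappable into lowmem.
 */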
#define __pa(x) ((unsigned long)(x)-PAGE_OFFSET)
#define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
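/*
 * Editor's examples (not part of the original header) of the lowmem
 * linear mapping, assuming PAGE_OFFSET == 0xC0000000:
 *
 *	__pa(0xC0100000)    == 0x00100000
 *	__va(0x00100000)    == (void *)0xC0100000
 *	pfn_to_kaddr(0x100) == (void *)0xC0100000
 *
 * These hold only for directly-mapped lowmem, not for vmalloc or
 * highmem addresses.
 */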
#ifdef CONFIG_FLATMEM
#define pfn_to_page(pfn) (mem_map + (pfn))
#define page_to_pfn(page) ((unsigned long)((page) - mem_map))
#define pfn_valid(pfn) ((pfn) < max_mapnr)
#endif /* CONFIG_FLATMEM */

#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
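/*
 * Editor's sketch (not part of the original header): with CONFIG_FLATMEM,
 * virt_to_page() is plain pointer arithmetic into mem_map, so for a
 * lowmem kernel address kaddr:
 *
 *	struct page *pg = virt_to_page(kaddr);
 *	page_to_pfn(pg) == __pa(kaddr) >> PAGE_SHIFT;
 *
 * virt_addr_valid() only checks that the pfn is below max_mapnr; it does
 * not check that the page is actually RAM (see page_is_ram()).
 */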
#define VM_DATA_DEFAULT_FLAGS \
	(VM_READ | VM_WRITE | \
	((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \
	 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

#endif /* __KERNEL__ */

#endif /* _I386_PAGE_H */