/*
 * page_32.h — page-level definitions for 32-bit x86.
 */
  1. #ifndef _ASM_X86_PAGE_32_H
  2. #define _ASM_X86_PAGE_32_H
  3. /*
  4. * This handles the memory map.
  5. *
  6. * A __PAGE_OFFSET of 0xC0000000 means that the kernel has
  7. * a virtual address space of one gigabyte, which limits the
  8. * amount of physical memory you can use to about 950MB.
  9. *
  10. * If you want more physical memory than this then see the CONFIG_HIGHMEM4G
  11. * and CONFIG_HIGHMEM64G options in the kernel configuration.
  12. */
/* Start of the kernel's virtual address space (see comment above). */
#define __PAGE_OFFSET _AC(CONFIG_PAGE_OFFSET, UL)
#ifdef CONFIG_X86_PAE
/*
 * PAE (Physical Address Extension): 36-bit physical addresses and a
 * three-level page table, so every page-table entry is 64 bits wide.
 */
#define __PHYSICAL_MASK_SHIFT 36
#define __VIRTUAL_MASK_SHIFT 32
#define PAGETABLE_LEVELS 3
#ifndef __ASSEMBLY__
/* Raw 64-bit values for each page-table level and for physical addresses. */
typedef u64 pteval_t;
typedef u64 pmdval_t;
typedef u64 pudval_t;
typedef u64 pgdval_t;
typedef u64 pgprotval_t;
typedef u64 phys_addr_t;
  25. typedef struct { unsigned long pte_low, pte_high; } pte_t;
  26. static inline unsigned long long native_pte_val(pte_t pte)
  27. {
  28. return pte.pte_low | ((unsigned long long)pte.pte_high << 32);
  29. }
  30. static inline pte_t native_make_pte(unsigned long long val)
  31. {
  32. return (pte_t) { .pte_low = val, .pte_high = (val >> 32) } ;
  33. }
  34. #endif /* __ASSEMBLY__
  35. */
#else /* !CONFIG_X86_PAE */
/*
 * Without PAE: 32-bit physical addresses and the classic two-level
 * page table with 32-bit entries.
 */
#define __PHYSICAL_MASK_SHIFT 32
#define __VIRTUAL_MASK_SHIFT 32
#define PAGETABLE_LEVELS 2
#ifndef __ASSEMBLY__
  41. typedef unsigned long pteval_t;
  42. typedef unsigned long pmdval_t;
  43. typedef unsigned long pudval_t;
  44. typedef unsigned long pgdval_t;
  45. typedef unsigned long pgprotval_t;
  46. typedef unsigned long phys_addr_t;
  47. typedef struct { pteval_t pte_low; } pte_t;
  48. typedef pte_t boot_pte_t;
  49. static inline unsigned long native_pte_val(pte_t pte)
  50. {
  51. return pte.pte_low;
  52. }
  53. static inline pte_t native_make_pte(unsigned long val)
  54. {
  55. return (pte_t) { .pte_low = val };
  56. }
#endif /* __ASSEMBLY__ */
#endif /* CONFIG_X86_PAE */
#ifdef CONFIG_HUGETLB_PAGE
/* This arch supplies its own hugetlb get_unmapped_area implementation. */
#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
#endif
#ifndef __ASSEMBLY__
/*
 * Kernel virtual -> physical translation for the direct mapping:
 * the linear map sits exactly PAGE_OFFSET above physical address 0.
 */
#define __phys_addr(x) ((x)-PAGE_OFFSET)
#define __phys_reloc_hide(x) RELOC_HIDE((x), 0)
#ifdef CONFIG_FLATMEM
/* Flat memory model: every pfn below max_mapnr is backed by a struct page. */
#define pfn_valid(pfn) ((pfn) < max_mapnr)
#endif /* CONFIG_FLATMEM */
extern int nx_enabled;
/*
 * This much address space is reserved for vmalloc() and iomap()
 * as well as fixmap mappings.
 */
extern unsigned int __VMALLOC_RESERVE;
extern int sysctl_legacy_va_layout;
extern int page_is_ram(unsigned long pagenr);
#define VMALLOC_RESERVE ((unsigned long)__VMALLOC_RESERVE)
/*
 * Largest directly-mappable amount of physical memory: -__PAGE_OFFSET
 * wraps to (4 GiB - __PAGE_OFFSET) in 32-bit unsigned arithmetic, minus
 * the space reserved above for vmalloc/iomap/fixmap.
 */
#define MAXMEM (-__PAGE_OFFSET-__VMALLOC_RESERVE)
#ifdef CONFIG_X86_USE_3DNOW
#include <asm/mmx.h>
/* Zero one page via the MMX-optimized helper from <asm/mmx.h>. */
static inline void clear_page(void *page)
{
mmx_clear_page(page);
}
/* Copy one page via the MMX-optimized helper from <asm/mmx.h>. */
static inline void copy_page(void *to, void *from)
{
mmx_copy_page(to, from);
}
#else /* !CONFIG_X86_USE_3DNOW */
#include <linux/string.h>
/* Generic fallbacks: plain memset/memcpy over one full page. */
static inline void clear_page(void *page)
{
memset(page, 0, PAGE_SIZE);
}
static inline void copy_page(void *to, void *from)
{
memcpy(to, from, PAGE_SIZE);
}
#endif /* CONFIG_X86_USE_3DNOW */
#endif /* !__ASSEMBLY__ */
#endif /* _ASM_X86_PAGE_32_H */