/*
 * include/asm-ppc/page.h -- page-size and address-translation
 * definitions for 32-bit PowerPC.
 */
#ifndef _PPC_PAGE_H
#define _PPC_PAGE_H

#include <asm/asm-compat.h>

/* PAGE_SHIFT determines the page size (4 KB pages: 1 << 12). */
#define PAGE_SHIFT 12
/* ASM_CONST makes the literal usable from both C and assembly. */
#define PAGE_SIZE (ASM_CONST(1) << PAGE_SHIFT)

/*
 * Subtle: this is an int (not an unsigned long) and so it
 * gets extended to 64 bits the way we want (i.e. with 1s). -- paulus
 */
#define PAGE_MASK (~((1 << PAGE_SHIFT) - 1))

#ifdef __KERNEL__

/* This must match what is in arch/ppc/Makefile */
#define PAGE_OFFSET CONFIG_KERNEL_START
#define KERNELBASE PAGE_OFFSET
/* True when x is at or above the start of the kernel's virtual mapping. */
#define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
#ifndef __ASSEMBLY__
/*
 * The basic type of a PTE - 64 bits for those CPUs with > 32 bit
 * physical addressing. For now this is just the IBM PPC440.
 *
 * PTE_SHIFT is log2(PTEs per page); PTE_FMT is the printf format
 * matching the width of pte_basic_t.
 */
#ifdef CONFIG_PTE_64BIT
typedef unsigned long long pte_basic_t;
#define PTE_SHIFT (PAGE_SHIFT - 3) /* 512 ptes per page */
#define PTE_FMT "%16Lx"
#else
typedef unsigned long pte_basic_t;
#define PTE_SHIFT (PAGE_SHIFT - 2) /* 1024 ptes per page */
#define PTE_FMT "%.8lx"
#endif
  31. /* align addr on a size boundary - adjust address up/down if needed */
  32. #define _ALIGN_UP(addr,size) (((addr)+((size)-1))&(~((size)-1)))
  33. #define _ALIGN_DOWN(addr,size) ((addr)&(~((size)-1)))
  34. /* align addr on a size boundary - adjust address up if needed */
  35. #define _ALIGN(addr,size) _ALIGN_UP(addr,size)
  36. /* to align the pointer to the (next) page boundary */
  37. #define PAGE_ALIGN(addr) _ALIGN(addr, PAGE_SIZE)
/*
 * Page-table entry types.  With STRICT_MM_TYPECHECKS each type is a
 * one-member struct so the compiler catches accidental mixing of
 * pte/pmd/pgd/pgprot values; without it they are bare integers, which
 * generates better code.  The #undef below selects the bare-integer
 * variant; flip it to debug type confusion.
 */
#undef STRICT_MM_TYPECHECKS
#ifdef STRICT_MM_TYPECHECKS
/*
 * These are used to make use of C type-checking..
 */
typedef struct { pte_basic_t pte; } pte_t;
typedef struct { unsigned long pmd; } pmd_t;
typedef struct { unsigned long pgd; } pgd_t;
typedef struct { unsigned long pgprot; } pgprot_t;

/* Extract the raw value from a wrapped type. */
#define pte_val(x) ((x).pte)
#define pmd_val(x) ((x).pmd)
#define pgd_val(x) ((x).pgd)
#define pgprot_val(x) ((x).pgprot)

/* Wrap a raw value in the corresponding type. */
#define __pte(x) ((pte_t) { (x) } )
#define __pmd(x) ((pmd_t) { (x) } )
#define __pgd(x) ((pgd_t) { (x) } )
#define __pgprot(x) ((pgprot_t) { (x) } )
#else
/*
 * .. while these make it easier on the compiler
 */
typedef pte_basic_t pte_t;
typedef unsigned long pmd_t;
typedef unsigned long pgd_t;
typedef unsigned long pgprot_t;

/* With bare-integer types the accessors/constructors are identities. */
#define pte_val(x) (x)
#define pmd_val(x) (x)
#define pgd_val(x) (x)
#define pgprot_val(x) (x)
#define __pte(x) (x)
#define __pmd(x) (x)
#define __pgd(x) (x)
#define __pgprot(x) (x)
#endif
struct page;

/* Page clear/copy primitives; implementations live elsewhere in arch code. */
/* NOTE(review): 'order' presumably means 2^order contiguous pages -- confirm
 * against the clear_pages implementation. */
extern void clear_pages(void *page, int order);
/* Clear a single page (order 0). */
static inline void clear_page(void *page) { clear_pages(page, 0); }
extern void copy_page(void *to, void *from);
/* User-page variants also take the user virtual address and struct page,
 * presumably for cache aliasing handling -- verify at the call sites. */
extern void clear_user_page(void *page, unsigned long vaddr, struct page *pg);
extern void copy_user_page(void *to, void *from, unsigned long vaddr,
struct page *pg);
/*
 * Physical-memory layout parameters.  On most boards RAM starts at
 * physical 0 and the virt<->phys offset is simply PAGE_OFFSET; on
 * APUS (Amiga PowerUp) systems these are discovered at runtime and
 * exported as variables.
 */
#ifndef CONFIG_APUS
#define PPC_MEMSTART 0
#define PPC_PGSTART 0
#define PPC_MEMOFFSET PAGE_OFFSET
#else
extern unsigned long ppc_memstart;
extern unsigned long ppc_pgstart;
extern unsigned long ppc_memoffset;
#define PPC_MEMSTART ppc_memstart
#define PPC_PGSTART ppc_pgstart
#define PPC_MEMOFFSET ppc_memoffset
#endif
#if defined(CONFIG_APUS) && !defined(MODULE)
/* map phys->virtual and virtual->phys for RAM pages */
/*
 * Virtual -> physical translation for APUS.  The addis adds a
 * compile-time 16-bit high-half offset to the address; the address of
 * the addis instruction itself (label 1) is recorded in the
 * ".vtop_fixup" section so the immediate can be located and patched
 * once the real memory offset is known at boot.
 */
static inline unsigned long ___pa(unsigned long v)
{
unsigned long p;
asm volatile ("1: addis %0, %1, %2;"
".section \".vtop_fixup\",\"aw\";"
".align 1;"
".long 1b;"
".previous;"
: "=r" (p)
/* "K" = unsigned 16-bit constant; initial guess is -PAGE_OFFSET >> 16. */
: "b" (v), "K" (((-PAGE_OFFSET) >> 16) & 0xffff));
return p;
}
/*
 * Physical -> virtual translation for APUS; mirror image of ___pa,
 * with its fixup records collected in ".ptov_fixup".
 */
static inline void* ___va(unsigned long p)
{
unsigned long v;
asm volatile ("1: addis %0, %1, %2;"
".section \".ptov_fixup\",\"aw\";"
".align 1;"
".long 1b;"
".previous;"
: "=r" (v)
: "b" (p), "K" (((PAGE_OFFSET) >> 16) & 0xffff));
return (void*) v;
}
#else
/* Non-APUS: virt and phys differ by a constant offset. */
#define ___pa(vaddr) ((vaddr)-PPC_MEMOFFSET)
#define ___va(paddr) ((paddr)+PPC_MEMOFFSET)
#endif
/* Nonzero when the given page frame number is backed by RAM. */
extern int page_is_ram(unsigned long pfn);

/* Public virt<->phys converters; cast so any pointer/integer works. */
#define __pa(x) ___pa((unsigned long)(x))
#define __va(x) ((void *)(___va((unsigned long)(x))))

/* First valid pfn (nonzero only on APUS, via PPC_PGSTART). */
#define ARCH_PFN_OFFSET (PPC_PGSTART)
#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
#define page_to_virt(page) __va(page_to_pfn(page) << PAGE_SHIFT)

/* NOTE(review): relies on unsigned arithmetic -- pfn < PPC_PGSTART wraps
 * to a huge value and fails the comparison; confirm pfn is unsigned at
 * all call sites. */
#define pfn_valid(pfn) (((pfn) - PPC_PGSTART) < max_mapnr)
#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
/* Pure 2^n version of get_order */
/*
 * Return the allocation order for 'size' bytes: the smallest n with
 * (PAGE_SIZE << n) >= size, computed via the PowerPC cntlzw
 * (count leading zeros word) instruction on (size-1) >> PAGE_SHIFT.
 * NOTE(review): size == 0 underflows to ~0UL and yields the maximum
 * order rather than 0 -- callers presumably never pass 0.
 */
extern __inline__ int get_order(unsigned long size)
{
int lz;
size = (size-1) >> PAGE_SHIFT;
asm ("cntlzw %0,%1" : "=r" (lz) : "r" (size));
return 32 - lz;
}
#endif /* __ASSEMBLY__ */

/* Default protection bits for data/stack VMAs: fully permissive. */
#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

/* We do define AT_SYSINFO_EHDR but don't use the gate mechanism */
#define __HAVE_ARCH_GATE_AREA 1

/* Pull in the generic pfn_to_page/page_to_pfn implementations. */
#include <asm-generic/memory_model.h>
#endif /* __KERNEL__ */
#endif /* _PPC_PAGE_H */