/*
 *  include/asm-s390/page.h
 *
 *  S390 version
 *    Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 */

#ifndef _S390_PAGE_H
#define _S390_PAGE_H

#include <asm/setup.h>
#include <asm/types.h>

/* PAGE_SHIFT determines the page size */
#define PAGE_SHIFT      12
#define PAGE_SIZE       (1UL << PAGE_SHIFT)
#define PAGE_MASK       (~(PAGE_SIZE-1))
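/* With PAGE_SHIFT == 12 these expand to 4 KB pages, i.e. PAGE_SIZE == 0x1000
 * and PAGE_MASK == ~0xfffUL. */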

#ifdef __KERNEL__
#ifndef __ASSEMBLY__

#ifndef __s390x__

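/*
 * clear_page: MVCL with a zero source length pads the destination with the
 * padding byte taken from the source length register (cleared by "slr 1,1"),
 * so the whole 4 KB page is zero-filled in a single instruction.
 */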
static inline void clear_page(void *page)
{
        register_pair rp;

        rp.subreg.even = (unsigned long) page;
        rp.subreg.odd = (unsigned long) 4096;
        asm volatile ("   slr  1,1\n"
                      "   mvcl %0,0"
                      : "+&a" (rp) : : "memory", "cc", "1" );
}

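/*
 * copy_page: MVPG moves a full 4 KB page in one instruction; machines
 * without the move-page facility fall back to sixteen 256-byte MVC moves
 * (256 bytes is the maximum MVC length).
 */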
static inline void copy_page(void *to, void *from)
{
        if (MACHINE_HAS_MVPG)
                asm volatile ("   sr   0,0\n"
                              "   mvpg %0,%1"
                              : : "a" ((void *)(to)), "a" ((void *)(from))
                              : "memory", "cc", "0" );
        else
                asm volatile ("   mvc  0(256,%0),0(%1)\n"
                              "   mvc  256(256,%0),256(%1)\n"
                              "   mvc  512(256,%0),512(%1)\n"
                              "   mvc  768(256,%0),768(%1)\n"
                              "   mvc  1024(256,%0),1024(%1)\n"
                              "   mvc  1280(256,%0),1280(%1)\n"
                              "   mvc  1536(256,%0),1536(%1)\n"
                              "   mvc  1792(256,%0),1792(%1)\n"
                              "   mvc  2048(256,%0),2048(%1)\n"
                              "   mvc  2304(256,%0),2304(%1)\n"
                              "   mvc  2560(256,%0),2560(%1)\n"
                              "   mvc  2816(256,%0),2816(%1)\n"
                              "   mvc  3072(256,%0),3072(%1)\n"
                              "   mvc  3328(256,%0),3328(%1)\n"
                              "   mvc  3584(256,%0),3584(%1)\n"
                              "   mvc  3840(256,%0),3840(%1)\n"
                              : : "a" ((void *)(to)), "a" ((void *)(from))
                              : "memory" );
}

#else /* __s390x__ */

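/*
 * 64-bit variants: the same MVCL / MVPG / MVC techniques as above, with
 * clear_page loading an explicit register pair 2/3 for the destination.
 */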
static inline void clear_page(void *page)
{
        asm volatile ("   lgr  2,%0\n"
                      "   lghi 3,4096\n"
                      "   slgr 1,1\n"
                      "   mvcl 2,0"
                      : : "a" ((void *) (page))
                      : "memory", "cc", "1", "2", "3" );
}

static inline void copy_page(void *to, void *from)
{
        if (MACHINE_HAS_MVPG)
                asm volatile ("   sgr  0,0\n"
                              "   mvpg %0,%1"
                              : : "a" ((void *)(to)), "a" ((void *)(from))
                              : "memory", "cc", "0" );
        else
                asm volatile ("   mvc  0(256,%0),0(%1)\n"
                              "   mvc  256(256,%0),256(%1)\n"
                              "   mvc  512(256,%0),512(%1)\n"
                              "   mvc  768(256,%0),768(%1)\n"
                              "   mvc  1024(256,%0),1024(%1)\n"
                              "   mvc  1280(256,%0),1280(%1)\n"
                              "   mvc  1536(256,%0),1536(%1)\n"
                              "   mvc  1792(256,%0),1792(%1)\n"
                              "   mvc  2048(256,%0),2048(%1)\n"
                              "   mvc  2304(256,%0),2304(%1)\n"
                              "   mvc  2560(256,%0),2560(%1)\n"
                              "   mvc  2816(256,%0),2816(%1)\n"
                              "   mvc  3072(256,%0),3072(%1)\n"
                              "   mvc  3328(256,%0),3328(%1)\n"
                              "   mvc  3584(256,%0),3584(%1)\n"
                              "   mvc  3840(256,%0),3840(%1)\n"
                              : : "a" ((void *)(to)), "a" ((void *)(from))
                              : "memory" );
}

#endif /* __s390x__ */

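/*
 * s390 caches are physically indexed, so no virtual-aliasing work is
 * needed when touching a page on behalf of user space: the user-page
 * helpers simply alias the plain page operations above.
 */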
#define clear_user_page(page, vaddr, pg)        clear_page(page)
#define copy_user_page(to, from, vaddr, pg)     copy_page(to, from)

#define alloc_zeroed_user_highpage(vma, vaddr) \
        alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vma, vaddr)
#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE

/* Pure 2^n version of get_order */
extern __inline__ int get_order(unsigned long size)
{
        int order;

        size = (size-1) >> (PAGE_SHIFT-1);
        order = -1;
        do {
                size >>= 1;
                order++;
        } while (size);
        return order;
}
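/*
 * Example: get_order(PAGE_SIZE) == 0, get_order(PAGE_SIZE + 1) == 1,
 * get_order(4 * PAGE_SIZE) == 2, i.e. the smallest n for which
 * 2^n pages cover 'size' bytes.
 */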

/*
 * These are used to make use of C type-checking..
 */
typedef struct { unsigned long pgprot; } pgprot_t;
typedef struct { unsigned long pte; } pte_t;

#define pte_val(x)      ((x).pte)
#define pgprot_val(x)   ((x).pgprot)

#ifndef __s390x__

typedef struct { unsigned long pmd; } pmd_t;
typedef struct {
        unsigned long pgd0;
        unsigned long pgd1;
        unsigned long pgd2;
        unsigned long pgd3;
} pgd_t;

#define pmd_val(x)      ((x).pmd)
#define pgd_val(x)      ((x).pgd0)

#else /* __s390x__ */

typedef struct {
        unsigned long pmd0;
        unsigned long pmd1;
} pmd_t;
typedef struct { unsigned long pgd; } pgd_t;

#define pmd_val(x)      ((x).pmd0)
#define pmd_val1(x)     ((x).pmd1)
#define pgd_val(x)      ((x).pgd)

#endif /* __s390x__ */
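/*
 * Note the layout asymmetry: on 31 bit a pgd_t bundles four table words
 * and a pmd_t is a single word, while on 64 bit a pmd_t is a two-word
 * pair; pgd_val()/pmd_val() return the first word of each group.
 */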
#define __pte(x)        ((pte_t) { (x) } )
#define __pmd(x)        ((pmd_t) { (x) } )
#define __pgd(x)        ((pgd_t) { (x) } )
#define __pgprot(x)     ((pgprot_t) { (x) } )

/* default storage key used for all pages */
extern unsigned int default_storage_key;
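/*
 * SSKE/ISKE set and read the hardware storage key associated with the
 * real page frame containing 'addr'.
 */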
static inline void
page_set_storage_key(unsigned long addr, unsigned int skey)
{
        asm volatile ( "sske %0,%1" : : "d" (skey), "a" (addr) );
}

static inline unsigned int
page_get_storage_key(unsigned long addr)
{
        unsigned int skey;

        asm volatile ( "iske %0,%1" : "=d" (skey) : "a" (addr), "0" (0) );
        return skey;
}

#endif /* !__ASSEMBLY__ */

/* to align the pointer to the (next) page boundary */
#define PAGE_ALIGN(addr)        (((addr)+PAGE_SIZE-1)&PAGE_MASK)
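/* e.g. PAGE_ALIGN(0x1001) == 0x2000, PAGE_ALIGN(0x1000) == 0x1000 */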

#define __PAGE_OFFSET           0x0UL
#define PAGE_OFFSET             0x0UL
#define __pa(x)                 (unsigned long)(x)
#define __va(x)                 (void *)(unsigned long)(x)
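/*
 * The kernel is identity mapped (PAGE_OFFSET == 0), so __pa()/__va() are
 * plain casts, and pfn_to_page()/page_to_pfn() below index a single flat
 * mem_map array.
 */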
#define pfn_to_page(pfn)        (mem_map + (pfn))
#define page_to_pfn(page)       ((unsigned long)((page) - mem_map))
#define virt_to_page(kaddr)     pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
#define pfn_valid(pfn)          ((pfn) < max_mapnr)
#define virt_addr_valid(kaddr)  pfn_valid(__pa(kaddr) >> PAGE_SHIFT)

#define VM_DATA_DEFAULT_FLAGS   (VM_READ | VM_WRITE | VM_EXEC | \
                                 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

#endif /* __KERNEL__ */

#endif /* _S390_PAGE_H */