/*
 *  include/asm-s390/pgalloc.h
 *
 *  S390 version
 *    Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/pgalloc.h"
 *    Copyright (C) 1994  Linus Torvalds
 */

#ifndef _S390_PGALLOC_H
#define _S390_PGALLOC_H

#include <linux/threads.h>
#include <linux/gfp.h>
#include <linux/mm.h>

#define check_pgt_cache()	do {} while (0)

/*
 * Page allocation orders.
 */
#ifndef __s390x__
# define PTE_ALLOC_ORDER	0
# define PMD_ALLOC_ORDER	0
# define PGD_ALLOC_ORDER	1
#else /* __s390x__ */
# define PTE_ALLOC_ORDER	0
# define PMD_ALLOC_ORDER	2
# define PGD_ALLOC_ORDER	2
#endif /* __s390x__ */

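/*
 * An order-n allocation is 2^n contiguous pages, so with the 4KB page
 * size used on s390 these orders work out to, for example, an 8KB pgd
 * on 31 bit (order 1) and 16KB pmd and pgd tables on 64 bit (order 2).
 */
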
/*
 * Allocate and free page tables. The xxx_kernel() versions are
 * used to allocate a kernel page table - this turns on ASN bits
 * if any.
 */

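/*
 * A note on the shadow tables used below (a sketch of the intent,
 * inferred from the code rather than quoted from documentation): when
 * s390_noexec is set, each page table is allocated together with a
 * second "shadow" copy. The shadow pointer is stashed in the lru.next
 * field of the primary table's struct page and retrieved again via
 * the get_shadow_*() helpers when the table is populated or freed.
 */
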
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
        pgd_t *pgd = (pgd_t *) __get_free_pages(GFP_KERNEL, PGD_ALLOC_ORDER);
        int i;

        if (!pgd)
                return NULL;
        if (s390_noexec) {
                /* Allocate a shadow pgd and link it to the primary one. */
                pgd_t *shadow_pgd = (pgd_t *)
                        __get_free_pages(GFP_KERNEL, PGD_ALLOC_ORDER);
                struct page *page = virt_to_page(pgd);

                if (!shadow_pgd) {
                        free_pages((unsigned long) pgd, PGD_ALLOC_ORDER);
                        return NULL;
                }
                page->lru.next = (void *) shadow_pgd;
        }
        for (i = 0; i < PTRS_PER_PGD; i++)
#ifndef __s390x__
                /* 31 bit: the pmd level is folded into the pgd. */
                pmd_clear(pmd_offset(pgd + i, i*PGDIR_SIZE));
#else
                pgd_clear(pgd + i);
#endif
        return pgd;
}

static inline void pgd_free(pgd_t *pgd)
{
        pgd_t *shadow_pgd = get_shadow_pgd(pgd);

        if (shadow_pgd)
                free_pages((unsigned long) shadow_pgd, PGD_ALLOC_ORDER);
        free_pages((unsigned long) pgd, PGD_ALLOC_ORDER);
}

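/*
 * Illustrative pairing, not part of this header: the caller (e.g.
 * mm_alloc_pgd() in kernel/fork.c) typically does
 *
 *	mm->pgd = pgd_alloc(mm);
 *	if (unlikely(!mm->pgd))
 *		return -ENOMEM;
 *
 * and later releases the table, shadow included, with pgd_free().
 */
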
#ifndef __s390x__
/*
 * page middle directory allocation/free routines.
 * We use pmd cache only on s390x, so these are dummy routines. This
 * code never triggers because the pgd will always be present.
 */
#define pmd_alloc_one(mm,address)	({ BUG(); ((pmd_t *)2); })
#define pmd_free(x)			do { } while (0)
#define __pmd_free_tlb(tlb,x)		do { } while (0)
#define pgd_populate(mm, pmd, pte)	BUG()
#define pgd_populate_kernel(mm, pmd, pte)	BUG()
#else /* __s390x__ */
static inline pmd_t * pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
{
        pmd_t *pmd = (pmd_t *) __get_free_pages(GFP_KERNEL, PMD_ALLOC_ORDER);
        int i;

        if (!pmd)
                return NULL;
        if (s390_noexec) {
                pmd_t *shadow_pmd = (pmd_t *)
                        __get_free_pages(GFP_KERNEL, PMD_ALLOC_ORDER);
                struct page *page = virt_to_page(pmd);

                if (!shadow_pmd) {
                        free_pages((unsigned long) pmd, PMD_ALLOC_ORDER);
                        return NULL;
                }
                page->lru.next = (void *) shadow_pmd;
        }
        for (i = 0; i < PTRS_PER_PMD; i++)
                pmd_clear(pmd + i);
        return pmd;
}

static inline void pmd_free (pmd_t *pmd)
{
        pmd_t *shadow_pmd = get_shadow_pmd(pmd);

        if (shadow_pmd)
                free_pages((unsigned long) shadow_pmd, PMD_ALLOC_ORDER);
        free_pages((unsigned long) pmd, PMD_ALLOC_ORDER);
}

#define __pmd_free_tlb(tlb,pmd)			\
	do {					\
		tlb_flush_mmu(tlb, 0, 0);	\
		pmd_free(pmd);			\
	} while (0)

static inline void
pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
{
        pgd_val(*pgd) = _PGD_ENTRY | __pa(pmd);
}

static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
{
        pgd_t *shadow_pgd = get_shadow_pgd(pgd);
        pmd_t *shadow_pmd = get_shadow_pmd(pmd);

        if (shadow_pgd && shadow_pmd)
                pgd_populate_kernel(mm, shadow_pgd, shadow_pmd);
        pgd_populate_kernel(mm, pgd, pmd);
}

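/*
 * A note on the entry format, read from the code above rather than
 * from the architecture documents: populating an entry stores the
 * physical address of the next-lower table, __pa(pmd), combined with
 * the _PGD_ENTRY type bits; pgd_populate() additionally mirrors the
 * link into the shadow tables when both shadows exist.
 */
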
#endif /* __s390x__ */

static inline void
pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
{
#ifndef __s390x__
        /*
         * 31 bit: a 4KB page holds 1024 4-byte ptes, while one
         * segment table entry maps only 256 of them (1MB), so one
         * pte page backs four consecutive pmd entries.
         */
        pmd_val(pmd[0]) = _PAGE_TABLE + __pa(pte);
        pmd_val(pmd[1]) = _PAGE_TABLE + __pa(pte+256);
        pmd_val(pmd[2]) = _PAGE_TABLE + __pa(pte+512);
        pmd_val(pmd[3]) = _PAGE_TABLE + __pa(pte+768);
#else /* __s390x__ */
        /* 64 bit: one pte page backs the two halves of a pmd entry. */
        pmd_val(*pmd) = _PMD_ENTRY + __pa(pte);
        pmd_val1(*pmd) = _PMD_ENTRY + __pa(pte+256);
#endif /* __s390x__ */
}

static inline void
pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *page)
{
        pte_t *pte = (pte_t *)page_to_phys(page);
        pmd_t *shadow_pmd = get_shadow_pmd(pmd);
        pte_t *shadow_pte = get_shadow_pte(pte);

        pmd_populate_kernel(mm, pmd, pte);
        if (shadow_pmd && shadow_pte)
                pmd_populate_kernel(mm, shadow_pmd, shadow_pte);
}

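/*
 * Illustrative only, a rough sketch of how the generic mm code
 * (__pte_alloc() in mm/memory.c, details elided) drives these
 * helpers when a pte table is missing:
 *
 *	struct page *new = pte_alloc_one(mm, address);
 *	if (!new)
 *		return -ENOMEM;
 *	spin_lock(&mm->page_table_lock);
 *	if (pmd_present(*pmd))
 *		pte_free(new);		(raced with another thread)
 *	else
 *		pmd_populate(mm, pmd, new);
 *	spin_unlock(&mm->page_table_lock);
 */
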
/*
 * page table entry allocation/free routines.
 */
static inline pte_t *
pte_alloc_one_kernel(struct mm_struct *mm, unsigned long vmaddr)
{
        pte_t *pte = (pte_t *) __get_free_page(GFP_KERNEL|__GFP_REPEAT);
        int i;

        if (!pte)
                return NULL;
        if (s390_noexec) {
                pte_t *shadow_pte = (pte_t *)
                        __get_free_page(GFP_KERNEL|__GFP_REPEAT);
                struct page *page = virt_to_page(pte);

                if (!shadow_pte) {
                        free_page((unsigned long) pte);
                        return NULL;
                }
                page->lru.next = (void *) shadow_pte;
        }
        for (i = 0; i < PTRS_PER_PTE; i++) {
                pte_clear(mm, vmaddr, pte + i);
                vmaddr += PAGE_SIZE;
        }
        return pte;
}

static inline struct page *
pte_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
{
        pte_t *pte = pte_alloc_one_kernel(mm, vmaddr);

        if (pte)
                return virt_to_page(pte);
        return NULL;
}

static inline void pte_free_kernel(pte_t *pte)
{
        pte_t *shadow_pte = get_shadow_pte(pte);

        if (shadow_pte)
                free_page((unsigned long) shadow_pte);
        free_page((unsigned long) pte);
}

static inline void pte_free(struct page *pte)
{
        struct page *shadow_page = get_shadow_page(pte);

        if (shadow_page)
                __free_page(shadow_page);
        __free_page(pte);
}

#define __pte_free_tlb(tlb, pte)					\
({									\
	struct mmu_gather *__tlb = (tlb);				\
	struct page *__pte = (pte);					\
	struct page *shadow_page = get_shadow_page(__pte);		\
	if (shadow_page)						\
		tlb_remove_page(__tlb, shadow_page);			\
	tlb_remove_page(__tlb, __pte);					\
})

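/*
 * Usage note (an assumption about the generic tlb layer, not stated
 * in this header): __pte_free_tlb() is invoked from the munmap/exit
 * teardown path; routing the pages through tlb_remove_page() defers
 * the actual free until after the TLB flush, so a table page is not
 * reused while stale hardware TLB entries may still point at it. The
 * shadow page, when present, is queued the same way.
 */
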
#endif /* _S390_PGALLOC_H */