
/*
 *  include/asm-s390/pgalloc.h
 *
 *  S390 version
 *    Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/pgalloc.h"
 *    Copyright (C) 1994  Linus Torvalds
 */

#ifndef _S390_PGALLOC_H
#define _S390_PGALLOC_H

#include <linux/threads.h>
#include <linux/gfp.h>
#include <linux/mm.h>
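
/* s390 keeps no page table cache to trim, so this is a no-op. */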
#define check_pgt_cache()	do {} while (0)
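
/*
 * Allocators for the region/segment ("crst") tables and for page
 * tables, implemented in the s390 mm code. The int argument is the
 * s390_noexec flag; judging by the callers below, a nonzero value also
 * sets up the shadow tables used by the execute-protection emulation.
 */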
unsigned long *crst_table_alloc(struct mm_struct *, int);
void crst_table_free(unsigned long *);

unsigned long *page_table_alloc(int);
void page_table_free(unsigned long *);
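
/*
 * Fill a table with 'val': store 'val' into the first entry, propagate
 * it across the first 256 bytes with mvc, then copy each 256-byte
 * block over the next one until the table is full. 'n' must be a
 * multiple of 256.
 */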
static inline void clear_table(unsigned long *s, unsigned long val, size_t n)
{
	*s = val;
	n = (n / 256) - 1;
	asm volatile(
#ifdef CONFIG_64BIT
		"	mvc	8(248,%0),0(%0)\n"
#else
		"	mvc	4(252,%0),0(%0)\n"
#endif
		"0:	mvc	256(256,%0),0(%0)\n"
		"	la	%0,256(%0)\n"
		"	brct	%1,0b\n"
		: "+a" (s), "+d" (n));
}
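
/*
 * Initialize all 2048 entries of a crst table with the given empty
 * entry value; if a shadow table is attached to it, initialize that
 * one as well.
 */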
static inline void crst_table_init(unsigned long *crst, unsigned long entry)
{
	clear_table(crst, entry, sizeof(unsigned long)*2048);
	crst = get_shadow_table(crst);
	if (crst)
		clear_table(crst, entry, sizeof(unsigned long)*2048);
}
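
/*
 * On 31-bit s390 the page table has only two levels: pgd entries are
 * segment table entries pointing directly at page tables. The pud and
 * pmd levels are folded, so their alloc/populate operations must never
 * be reached.
 */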
#ifndef __s390x__

static inline unsigned long pgd_entry_type(struct mm_struct *mm)
{
	return _SEGMENT_ENTRY_EMPTY;
}

#define pud_alloc_one(mm,address)		({ BUG(); ((pud_t *)2); })
#define pud_free(x)				do { } while (0)

#define pmd_alloc_one(mm,address)		({ BUG(); ((pmd_t *)2); })
#define pmd_free(x)				do { } while (0)

#define pgd_populate(mm, pgd, pud)		BUG()
#define pgd_populate_kernel(mm, pgd, pud)	BUG()

#define pud_populate(mm, pud, pmd)		BUG()
#define pud_populate_kernel(mm, pud, pmd)	BUG()

#else /* __s390x__ */

static inline unsigned long pgd_entry_type(struct mm_struct *mm)
{
	return _REGION3_ENTRY_EMPTY;
}
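
/*
 * The pud level appears to be folded into the pgd here (a separate pud
 * is never allocated); pud_populate below installs a segment table
 * into the top-level region-third table.
 */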
#define pud_alloc_one(mm,address)		({ BUG(); ((pud_t *)2); })
#define pud_free(x)				do { } while (0)
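
/*
 * Allocate a 2048-entry segment table for a pmd and initialize its
 * entries (and those of its shadow, if any) to empty.
 */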
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
{
	unsigned long *crst = crst_table_alloc(mm, s390_noexec);
	if (crst)
		crst_table_init(crst, _SEGMENT_ENTRY_EMPTY);
	return (pmd_t *) crst;
}
#define pmd_free(pmd) crst_table_free((unsigned long *) pmd)

#define pgd_populate(mm, pgd, pud)		BUG()
#define pgd_populate_kernel(mm, pgd, pud)	BUG()
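
/*
 * Install a segment table into a region-third-table entry. The
 * non-kernel variant also keeps the shadow tables (execute-protection
 * emulation) in sync when both shadows exist.
 */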
static inline void pud_populate_kernel(struct mm_struct *mm,
				       pud_t *pud, pmd_t *pmd)
{
	pud_val(*pud) = _REGION3_ENTRY | __pa(pmd);
}

static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
	pud_t *shadow_pud = get_shadow_table(pud);
	pmd_t *shadow_pmd = get_shadow_table(pmd);

	if (shadow_pud && shadow_pmd)
		pud_populate_kernel(mm, shadow_pud, shadow_pmd);
	pud_populate_kernel(mm, pud, pmd);
}

#endif /* __s390x__ */
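
/*
 * Allocate the top-level table for a new mm and initialize its entries
 * to the empty type for this build: segment entries on 31-bit,
 * region-third entries on 64-bit.
 */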
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	unsigned long *crst = crst_table_alloc(mm, s390_noexec);
	if (crst)
		crst_table_init(crst, pgd_entry_type(mm));
	return (pgd_t *) crst;
}
#define pgd_free(pgd) crst_table_free((unsigned long *) pgd)
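
/*
 * One page-table page is covered by several consecutive segment
 * entries of 256 ptes each: four entries on 31-bit, and the two
 * halves of a pmd_t on 64-bit.
 */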
static inline void
pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
{
#ifndef __s390x__
	pmd_val(pmd[0]) = _SEGMENT_ENTRY + __pa(pte);
	pmd_val(pmd[1]) = _SEGMENT_ENTRY + __pa(pte+256);
	pmd_val(pmd[2]) = _SEGMENT_ENTRY + __pa(pte+512);
	pmd_val(pmd[3]) = _SEGMENT_ENTRY + __pa(pte+768);
#else /* __s390x__ */
	pmd_val(*pmd) = _SEGMENT_ENTRY + __pa(pte);
	pmd_val1(*pmd) = _SEGMENT_ENTRY + __pa(pte+256);
#endif /* __s390x__ */
}
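
/*
 * Populate a pmd with the pte page behind 'page'. Page tables live in
 * identity-mapped kernel memory, so the physical address from
 * page_to_phys() can be used as a pointer directly. If shadow tables
 * exist (noexec), the shadow pmd gets the shadow pte page as well.
 */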
static inline void
pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *page)
{
	pte_t *pte = (pte_t *)page_to_phys(page);
	pmd_t *shadow_pmd = get_shadow_table(pmd);
	pte_t *shadow_pte = get_shadow_pte(pte);

	pmd_populate_kernel(mm, pmd, pte);
	if (shadow_pmd && shadow_pte)
		pmd_populate_kernel(mm, shadow_pmd, shadow_pte);
}

/*
 * page table entry allocation/free routines.
 */
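/*
 * Note the asymmetry: pte_alloc_one_kernel returns a pte_t * while
 * pte_alloc_one returns the struct page, so pte_free takes a struct
 * page and pte_free_kernel a pte_t *.
 */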
#define pte_alloc_one_kernel(mm, vmaddr) \
	((pte_t *) page_table_alloc(s390_noexec))
#define pte_alloc_one(mm, vmaddr) \
	virt_to_page(page_table_alloc(s390_noexec))

#define pte_free_kernel(pte) \
	page_table_free((unsigned long *) pte)
#define pte_free(pte) \
	page_table_free((unsigned long *) page_to_phys((struct page *) pte))

#endif /* _S390_PGALLOC_H */