/*
 *  include/asm-s390/pgalloc.h
 *
 *  S390 version
 *    Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/pgalloc.h"
 *    Copyright (C) 1994  Linus Torvalds
 */

#ifndef _S390_PGALLOC_H
#define _S390_PGALLOC_H

#include <linux/threads.h>
#include <linux/gfp.h>
#include <linux/mm.h>

#define check_pgt_cache()	do {} while (0)

unsigned long *crst_table_alloc(struct mm_struct *, int);
void crst_table_free(struct mm_struct *, unsigned long *);

unsigned long *page_table_alloc(struct mm_struct *);
void page_table_free(struct mm_struct *, unsigned long *);
void disable_noexec(struct mm_struct *, struct task_struct *);
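
/*
 * Fill a page table of n bytes with an initial entry value: store the first
 * entry, propagate it across the first 256-byte block with mvc, then
 * replicate that block over the remaining (n / 256) - 1 blocks.
 */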
static inline void clear_table(unsigned long *s, unsigned long val, size_t n)
{
	*s = val;
	n = (n / 256) - 1;
	asm volatile(
#ifdef CONFIG_64BIT
		"	mvc	8(248,%0),0(%0)\n"
#else
		"	mvc	4(252,%0),0(%0)\n"
#endif
		"0:	mvc	256(256,%0),0(%0)\n"
		"	la	%0,256(%0)\n"
		"	brct	%1,0b\n"
		: "+a" (s), "+d" (n));
}
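
/*
 * Initialize a region/segment (crst) table of 2048 entries with the given
 * empty entry type; if a shadow table exists, initialize it the same way.
 */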
static inline void crst_table_init(unsigned long *crst, unsigned long entry)
{
	clear_table(crst, entry, sizeof(unsigned long)*2048);
	crst = get_shadow_table(crst);
	if (crst)
		clear_table(crst, entry, sizeof(unsigned long)*2048);
}
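
/*
 * 31 bit uses a two-level layout: the pgd is a segment table and the
 * pud/pmd levels are folded away, so their allocate/populate hooks must
 * never be reached.
 */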
#ifndef __s390x__

static inline unsigned long pgd_entry_type(struct mm_struct *mm)
{
	return _SEGMENT_ENTRY_EMPTY;
}

#define pud_alloc_one(mm,address)		({ BUG(); ((pud_t *)2); })
#define pud_free(mm, x)				do { } while (0)

#define pmd_alloc_one(mm,address)		({ BUG(); ((pmd_t *)2); })
#define pmd_free(mm, x)				do { } while (0)

#define pgd_populate(mm, pgd, pud)		BUG()
#define pgd_populate_kernel(mm, pgd, pud)	BUG()

#define pud_populate(mm, pud, pmd)		BUG()
#define pud_populate_kernel(mm, pud, pmd)	BUG()

#else /* __s390x__ */

static inline unsigned long pgd_entry_type(struct mm_struct *mm)
{
	return _REGION2_ENTRY_EMPTY;
}
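
/*
 * 64 bit: the pud and pmd levels are backed by region-third and segment
 * tables; both come from crst_table_alloc, passing mm->context.noexec so
 * that a shadow table is set up as well when noexec is in use.
 */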
static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address)
{
	unsigned long *table = crst_table_alloc(mm, mm->context.noexec);
	if (table)
		crst_table_init(table, _REGION3_ENTRY_EMPTY);
	return (pud_t *) table;
}
#define pud_free(mm, pud) crst_table_free(mm, (unsigned long *) pud)

static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
{
	unsigned long *table = crst_table_alloc(mm, mm->context.noexec);
	if (table)
		crst_table_init(table, _SEGMENT_ENTRY_EMPTY);
	return (pmd_t *) table;
}
#define pmd_free(mm, pmd) crst_table_free(mm, (unsigned long *) pmd)
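
/*
 * The populate_kernel helpers write the table entry directly; the plain
 * populate helpers additionally keep the shadow tables in sync.
 */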
static inline void pgd_populate_kernel(struct mm_struct *mm,
				       pgd_t *pgd, pud_t *pud)
{
	pgd_val(*pgd) = _REGION2_ENTRY | __pa(pud);
}

static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
{
	pgd_t *shadow_pgd = get_shadow_table(pgd);
	pud_t *shadow_pud = get_shadow_table(pud);

	if (shadow_pgd && shadow_pud)
		pgd_populate_kernel(mm, shadow_pgd, shadow_pud);
	pgd_populate_kernel(mm, pgd, pud);
}

static inline void pud_populate_kernel(struct mm_struct *mm,
				       pud_t *pud, pmd_t *pmd)
{
	pud_val(*pud) = _REGION3_ENTRY | __pa(pmd);
}

static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
	pud_populate_kernel(mm, pud, pmd);
	if (mm->context.noexec) {
		pud = get_shadow_table(pud);
		pmd = get_shadow_table(pmd);
		pud_populate_kernel(mm, pud, pmd);
	}
}

#endif /* __s390x__ */
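
/*
 * Allocate and initialize the top-level table for a new mm: a region-second
 * table on 64 bit, a segment table on 31 bit (see pgd_entry_type above).
 * The crst and page table lists in the mm context are set up here as well.
 */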
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	unsigned long *crst;

	INIT_LIST_HEAD(&mm->context.crst_list);
	INIT_LIST_HEAD(&mm->context.pgtable_list);
	crst = crst_table_alloc(mm, s390_noexec);
	if (crst)
		crst_table_init(crst, pgd_entry_type(mm));
	return (pgd_t *) crst;
}
#define pgd_free(mm, pgd) crst_table_free(mm, (unsigned long *) pgd)
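
/*
 * Hook a page table into a segment entry; pmd_populate also sets the shadow
 * segment entry to the fragment PTRS_PER_PTE entries past the primary page
 * table when noexec is active.
 */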
static inline void pmd_populate_kernel(struct mm_struct *mm,
				       pmd_t *pmd, pte_t *pte)
{
	pmd_val(*pmd) = _SEGMENT_ENTRY + __pa(pte);
}

static inline void pmd_populate(struct mm_struct *mm,
				pmd_t *pmd, pgtable_t pte)
{
	pmd_populate_kernel(mm, pmd, pte);
	if (mm->context.noexec) {
		pmd = get_shadow_table(pmd);
		pmd_populate_kernel(mm, pmd, pte + PTRS_PER_PTE);
	}
}

#define pmd_pgtable(pmd) \
	(pgtable_t)(pmd_val(pmd) & -sizeof(pte_t)*PTRS_PER_PTE)

/*
 * page table entry allocation/free routines.
 */
#define pte_alloc_one_kernel(mm, vmaddr) ((pte_t *) page_table_alloc(mm))
#define pte_alloc_one(mm, vmaddr) ((pte_t *) page_table_alloc(mm))

#define pte_free_kernel(mm, pte) page_table_free(mm, (unsigned long *) pte)
#define pte_free(mm, pte) page_table_free(mm, (unsigned long *) pte)

#endif /* _S390_PGALLOC_H */