/*
 *  include/asm-s390/pgalloc.h
 *
 *  S390 version
 *    Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/pgalloc.h"
 *    Copyright (C) 1994  Linus Torvalds
 */

#ifndef _S390_PGALLOC_H
#define _S390_PGALLOC_H

#include <linux/threads.h>
#include <linux/gfp.h>
#include <linux/mm.h>

#define check_pgt_cache()	do {} while (0)
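
/*
 * crst_table_alloc()/crst_table_free() manage the combined region and
 * segment (crst) tables, page_table_alloc()/page_table_free() manage the
 * pte tables, and disable_noexec() switches an mm away from no-exec
 * emulation.
 */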
unsigned long *crst_table_alloc(struct mm_struct *, int);
void crst_table_free(struct mm_struct *, unsigned long *);

unsigned long *page_table_alloc(struct mm_struct *);
void page_table_free(struct mm_struct *, unsigned long *);
void disable_noexec(struct mm_struct *, struct task_struct *);
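
/*
 * clear_table() fills a table of n bytes with the entry value val.
 * The first entry is stored directly; the overlapping MVC then
 * replicates it across the first 256 bytes, and the MVC/BRCT loop
 * copies that block over the remaining (n / 256) - 1 blocks.
 */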
static inline void clear_table(unsigned long *s, unsigned long val, size_t n)
{
	typedef struct { char _[n]; } addrtype;

	*s = val;
	n = (n / 256) - 1;
	asm volatile(
#ifdef CONFIG_64BIT
		"	mvc	8(248,%0),0(%0)\n"
#else
		"	mvc	4(252,%0),0(%0)\n"
#endif
		"0:	mvc	256(256,%0),0(%0)\n"
		"	la	%0,256(%0)\n"
		"	brct	%1,0b\n"
		: "+a" (s), "+d" (n), "=m" (*(addrtype *) s)
		: "m" (*(addrtype *) s));
}
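
/*
 * Initialize a complete 2048-entry region or segment table with the
 * given empty entry value, and do the same for its shadow table if
 * one exists.
 */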
static inline void crst_table_init(unsigned long *crst, unsigned long entry)
{
	clear_table(crst, entry, sizeof(unsigned long)*2048);
	crst = get_shadow_table(crst);
	if (crst)
		clear_table(crst, entry, sizeof(unsigned long)*2048);
}
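
/*
 * On 31-bit kernels the pud and pmd levels are folded into the pgd,
 * so the corresponding allocation and populate helpers must never be
 * reached.
 */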
#ifndef __s390x__

static inline unsigned long pgd_entry_type(struct mm_struct *mm)
{
	return _SEGMENT_ENTRY_EMPTY;
}

#define pud_alloc_one(mm,address)		({ BUG(); ((pud_t *)2); })
#define pud_free(mm, x)				do { } while (0)

#define pmd_alloc_one(mm,address)		({ BUG(); ((pmd_t *)2); })
#define pmd_free(mm, x)				do { } while (0)

#define pgd_populate(mm, pgd, pud)		BUG()
#define pgd_populate_kernel(mm, pgd, pud)	BUG()

#define pud_populate(mm, pud, pmd)		BUG()
#define pud_populate_kernel(mm, pud, pmd)	BUG()

#else /* __s390x__ */
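
/*
 * The type of the top-level table depends on the address-space limit:
 * a segment table up to 2 GB, a region-third table up to 4 TB, and a
 * region-second table beyond that.
 */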
static inline unsigned long pgd_entry_type(struct mm_struct *mm)
{
	if (mm->context.asce_limit <= (1UL << 31))
		return _SEGMENT_ENTRY_EMPTY;
	if (mm->context.asce_limit <= (1UL << 42))
		return _REGION3_ENTRY_EMPTY;
	return _REGION2_ENTRY_EMPTY;
}

int crst_table_upgrade(struct mm_struct *, unsigned long limit);
void crst_table_downgrade(struct mm_struct *, unsigned long limit);
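
/*
 * pud_alloc_one() and pmd_alloc_one() allocate a region-third table and
 * a segment table respectively, initialized with empty entries; a shadow
 * table is set up as well when no-exec emulation is active.
 */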
static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address)
{
	unsigned long *table = crst_table_alloc(mm, mm->context.noexec);
	if (table)
		crst_table_init(table, _REGION3_ENTRY_EMPTY);
	return (pud_t *) table;
}
#define pud_free(mm, pud) crst_table_free(mm, (unsigned long *) pud)

static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
{
	unsigned long *table = crst_table_alloc(mm, mm->context.noexec);
	if (table)
		crst_table_init(table, _SEGMENT_ENTRY_EMPTY);
	return (pmd_t *) table;
}
#define pmd_free(mm, pmd) crst_table_free(mm, (unsigned long *) pmd)
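
/*
 * The *_populate_kernel() helpers set the real table entry; the
 * *_populate() variants additionally mirror the link into the shadow
 * tables when no-exec emulation is in use.
 */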
static inline void pgd_populate_kernel(struct mm_struct *mm,
				       pgd_t *pgd, pud_t *pud)
{
	pgd_val(*pgd) = _REGION2_ENTRY | __pa(pud);
}

static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
{
	pgd_populate_kernel(mm, pgd, pud);
	if (mm->context.noexec) {
		pgd = get_shadow_table(pgd);
		pud = get_shadow_table(pud);
		pgd_populate_kernel(mm, pgd, pud);
	}
}

static inline void pud_populate_kernel(struct mm_struct *mm,
				       pud_t *pud, pmd_t *pmd)
{
	pud_val(*pud) = _REGION3_ENTRY | __pa(pmd);
}

static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
	pud_populate_kernel(mm, pud, pmd);
	if (mm->context.noexec) {
		pud = get_shadow_table(pud);
		pmd = get_shadow_table(pmd);
		pud_populate_kernel(mm, pud, pmd);
	}
}

#endif /* __s390x__ */
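
/*
 * pgd_alloc() sets up the lists used to track an mm's crst and page
 * tables and allocates the top-level crst table.
 */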
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	INIT_LIST_HEAD(&mm->context.crst_list);
	INIT_LIST_HEAD(&mm->context.pgtable_list);
	return (pgd_t *) crst_table_alloc(mm, s390_noexec);
}
#define pgd_free(mm, pgd) crst_table_free(mm, (unsigned long *) pgd)
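
/*
 * pmd_populate() also updates the shadow segment table; the shadow pte
 * table lies PTRS_PER_PTE entries after the primary one.
 */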
static inline void pmd_populate_kernel(struct mm_struct *mm,
				       pmd_t *pmd, pte_t *pte)
{
	pmd_val(*pmd) = _SEGMENT_ENTRY + __pa(pte);
}

static inline void pmd_populate(struct mm_struct *mm,
				pmd_t *pmd, pgtable_t pte)
{
	pmd_populate_kernel(mm, pmd, pte);
	if (mm->context.noexec) {
		pmd = get_shadow_table(pmd);
		pmd_populate_kernel(mm, pmd, pte + PTRS_PER_PTE);
	}
}
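
/*
 * pmd_pgtable() recovers the pte table origin from a pmd entry by
 * masking off the low status bits.
 */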
#define pmd_pgtable(pmd) \
	(pgtable_t)(pmd_val(pmd) & -sizeof(pte_t)*PTRS_PER_PTE)

/*
 * page table entry allocation/free routines.
 */
#define pte_alloc_one_kernel(mm, vmaddr) ((pte_t *) page_table_alloc(mm))
#define pte_alloc_one(mm, vmaddr) ((pte_t *) page_table_alloc(mm))
#define pte_free_kernel(mm, pte) page_table_free(mm, (unsigned long *) pte)
#define pte_free(mm, pte) page_table_free(mm, (unsigned long *) pte)

#endif /* _S390_PGALLOC_H */