pgalloc.h 4.7 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174
  1. /*
  2. * include/asm-s390/pgalloc.h
  3. *
  4. * S390 version
  5. * Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
  6. * Author(s): Hartmut Penner (hp@de.ibm.com)
  7. * Martin Schwidefsky (schwidefsky@de.ibm.com)
  8. *
  9. * Derived from "include/asm-i386/pgalloc.h"
  10. * Copyright (C) 1994 Linus Torvalds
  11. */
  12. #ifndef _S390_PGALLOC_H
  13. #define _S390_PGALLOC_H
  14. #include <linux/threads.h>
  15. #include <linux/gfp.h>
  16. #include <linux/mm.h>
  17. #define check_pgt_cache() do {} while (0)
  18. unsigned long *crst_table_alloc(struct mm_struct *, int);
  19. void crst_table_free(struct mm_struct *, unsigned long *);
  20. unsigned long *page_table_alloc(struct mm_struct *);
  21. void page_table_free(struct mm_struct *, unsigned long *);
  22. void disable_noexec(struct mm_struct *, struct task_struct *);
/*
 * Fill a page table with a repeated entry value using overlapping MVCs.
 *
 * @s:   start of the table
 * @val: entry value to replicate across the whole table
 * @n:   table size in bytes (assumed to be a multiple of 256)
 *
 * The first entry is stored by hand; the first MVC then propagates it
 * through the rest of the initial 256-byte line (destination overlaps
 * source by one entry width: 8 bytes on 64-bit, 4 bytes on 31-bit).
 * The loop at label 0 copies that seeded line forward 256 bytes at a
 * time, BRCT counting down the remaining lines in %1.
 */
static inline void clear_table(unsigned long *s, unsigned long val, size_t n)
{
	*s = val;
	/* number of 256-byte lines still to fill after the seeded one */
	n = (n / 256) - 1;
	asm volatile(
#ifdef CONFIG_64BIT
		" mvc 8(248,%0),0(%0)\n"
#else
		" mvc 4(252,%0),0(%0)\n"
#endif
		"0: mvc 256(256,%0),0(%0)\n"
		" la %0,256(%0)\n"
		" brct %1,0b\n"
		: "+a" (s), "+d" (n));
}
  38. static inline void crst_table_init(unsigned long *crst, unsigned long entry)
  39. {
  40. clear_table(crst, entry, sizeof(unsigned long)*2048);
  41. crst = get_shadow_table(crst);
  42. if (crst)
  43. clear_table(crst, entry, sizeof(unsigned long)*2048);
  44. }
  45. #ifndef __s390x__
/*
 * 31-bit: the pgd is the segment table, so an empty pgd slot is an
 * empty segment-table entry.  The mm argument is unused here.
 */
static inline unsigned long pgd_entry_type(struct mm_struct *mm)
{
	return _SEGMENT_ENTRY_EMPTY;
}
/*
 * 31-bit has a two-level page table: there are no separate pud/pmd
 * levels, so allocating or populating them is a bug.  The (pud_t *)2 /
 * (pmd_t *)2 results are dummy values that are never legitimately used.
 */
#define pud_alloc_one(mm,address) ({ BUG(); ((pud_t *)2); })
#define pud_free(mm, x) do { } while (0)
#define pmd_alloc_one(mm,address) ({ BUG(); ((pmd_t *)2); })
#define pmd_free(mm, x) do { } while (0)
#define pgd_populate(mm, pgd, pud) BUG()
#define pgd_populate_kernel(mm, pgd, pud) BUG()
#define pud_populate(mm, pud, pmd) BUG()
#define pud_populate_kernel(mm, pud, pmd) BUG()
  58. #else /* __s390x__ */
  59. static inline unsigned long pgd_entry_type(struct mm_struct *mm)
  60. {
  61. if (mm->context.asce_limit <= (1UL << 31))
  62. return _SEGMENT_ENTRY_EMPTY;
  63. if (mm->context.asce_limit <= (1UL << 42))
  64. return _REGION3_ENTRY_EMPTY;
  65. return _REGION2_ENTRY_EMPTY;
  66. }
  67. int crst_table_upgrade(struct mm_struct *, unsigned long limit);
  68. void crst_table_downgrade(struct mm_struct *, unsigned long limit);
  69. static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address)
  70. {
  71. unsigned long *table = crst_table_alloc(mm, mm->context.noexec);
  72. if (table)
  73. crst_table_init(table, _REGION3_ENTRY_EMPTY);
  74. return (pud_t *) table;
  75. }
/* Release a pud table (and its shadow, if any) to the crst allocator. */
#define pud_free(mm, pud) crst_table_free(mm, (unsigned long *) pud)
  77. static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
  78. {
  79. unsigned long *table = crst_table_alloc(mm, mm->context.noexec);
  80. if (table)
  81. crst_table_init(table, _SEGMENT_ENTRY_EMPTY);
  82. return (pmd_t *) table;
  83. }
/* Release a pmd table (and its shadow, if any) to the crst allocator. */
#define pmd_free(mm, pmd) crst_table_free(mm, (unsigned long *) pmd)
/*
 * Set a pgd (region-second) entry to point at the given pud
 * (region-third) table, using its physical address.
 */
static inline void pgd_populate_kernel(struct mm_struct *mm,
				pgd_t *pgd, pud_t *pud)
{
	pgd_val(*pgd) = _REGION2_ENTRY | __pa(pud);
}
  90. static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
  91. {
  92. pgd_populate_kernel(mm, pgd, pud);
  93. if (mm->context.noexec) {
  94. pgd = get_shadow_table(pgd);
  95. pud = get_shadow_table(pud);
  96. pgd_populate_kernel(mm, pgd, pud);
  97. }
  98. }
/*
 * Set a pud (region-third) entry to point at the given pmd (segment)
 * table, using its physical address.
 */
static inline void pud_populate_kernel(struct mm_struct *mm,
				pud_t *pud, pmd_t *pmd)
{
	pud_val(*pud) = _REGION3_ENTRY | __pa(pmd);
}
  104. static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
  105. {
  106. pud_populate_kernel(mm, pud, pmd);
  107. if (mm->context.noexec) {
  108. pud = get_shadow_table(pud);
  109. pmd = get_shadow_table(pmd);
  110. pud_populate_kernel(mm, pud, pmd);
  111. }
  112. }
  113. #endif /* __s390x__ */
  114. static inline pgd_t *pgd_alloc(struct mm_struct *mm)
  115. {
  116. INIT_LIST_HEAD(&mm->context.crst_list);
  117. INIT_LIST_HEAD(&mm->context.pgtable_list);
  118. return (pgd_t *) crst_table_alloc(mm, s390_noexec);
  119. }
/* Release a pgd table (and its shadow, if any) to the crst allocator. */
#define pgd_free(mm, pgd) crst_table_free(mm, (unsigned long *) pgd)
/*
 * Set a pmd (segment) entry to point at the given pte (page) table,
 * using its physical address.
 */
static inline void pmd_populate_kernel(struct mm_struct *mm,
				pmd_t *pmd, pte_t *pte)
{
	pmd_val(*pmd) = _SEGMENT_ENTRY + __pa(pte);
}
  126. static inline void pmd_populate(struct mm_struct *mm,
  127. pmd_t *pmd, pgtable_t pte)
  128. {
  129. pmd_populate_kernel(mm, pmd, pte);
  130. if (mm->context.noexec) {
  131. pmd = get_shadow_table(pmd);
  132. pmd_populate_kernel(mm, pmd, pte + PTRS_PER_PTE);
  133. }
  134. }
/*
 * Recover the pte-table pointer from a pmd entry by masking off the
 * low-order bits below the pte-table size (the mask is the negated
 * table size, which is a power of two).
 */
#define pmd_pgtable(pmd) \
	(pgtable_t)(pmd_val(pmd) & -sizeof(pte_t)*PTRS_PER_PTE)
/*
 * page table entry allocation/free routines.
 * All four forward to the common page_table_alloc/page_table_free
 * helpers; the vmaddr argument is unused.
 */
#define pte_alloc_one_kernel(mm, vmaddr) ((pte_t *) page_table_alloc(mm))
#define pte_alloc_one(mm, vmaddr) ((pte_t *) page_table_alloc(mm))
#define pte_free_kernel(mm, pte) page_table_free(mm, (unsigned long *) pte)
#define pte_free(mm, pte) page_table_free(mm, (unsigned long *) pte)
  144. #endif /* _S390_PGALLOC_H */