/*
 *  include/asm-s390/pgalloc.h
 *
 *  S390 version
 *    Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/pgalloc.h"
 *    Copyright (C) 1994  Linus Torvalds
 */

#ifndef _S390_PGALLOC_H
#define _S390_PGALLOC_H

#include <linux/config.h>
#include <linux/threads.h>
#include <linux/gfp.h>
#include <linux/mm.h>

#define check_pgt_cache()	do { } while (0)

extern void diag10(unsigned long addr);

/*
 * Allocate and free page tables. The xxx_kernel() versions are
 * used to allocate a kernel page table - this turns on ASN bits
 * if any.
 */
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd;
	int i;

#ifndef __s390x__
	/* 31 bit: the pgd is an order-1 (8KB) allocation. */
	pgd = (pgd_t *) __get_free_pages(GFP_KERNEL, 1);
	if (pgd != NULL)
		for (i = 0; i < USER_PTRS_PER_PGD; i++)
			pmd_clear(pmd_offset(pgd + i, i*PGDIR_SIZE));
#else /* __s390x__ */
	/* 64 bit: the pgd is an order-2 (16KB) allocation. */
	pgd = (pgd_t *) __get_free_pages(GFP_KERNEL, 2);
	if (pgd != NULL)
		for (i = 0; i < PTRS_PER_PGD; i++)
			pgd_clear(pgd + i);
#endif /* __s390x__ */
	return pgd;
}

static inline void pgd_free(pgd_t *pgd)
{
#ifndef __s390x__
	free_pages((unsigned long) pgd, 1);
#else /* __s390x__ */
	free_pages((unsigned long) pgd, 2);
#endif /* __s390x__ */
}

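/*
 * Usage sketch (illustrative only, not part of this header): the generic
 * mm code pairs these helpers roughly as follows when an address space
 * is created and torn down; error handling is elided.
 *
 *	pgd_t *pgd = pgd_alloc(mm);
 *	if (pgd == NULL)
 *		return -ENOMEM;
 *	mm->pgd = pgd;
 *	...
 *	pgd_free(mm->pgd);
 */
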
#ifndef __s390x__
/*
 * page middle directory allocation/free routines.
 * We use pmd cache only on s390x, so these are dummy routines. This
 * code never triggers because the pgd will always be present.
 */
#define pmd_alloc_one(mm,address)	({ BUG(); ((pmd_t *)2); })
#define pmd_free(x)			do { } while (0)
#define __pmd_free_tlb(tlb,x)		do { } while (0)
#define pgd_populate(mm, pmd, pte)	BUG()
#else /* __s390x__ */
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
{
	pmd_t *pmd;
	int i;

	pmd = (pmd_t *) __get_free_pages(GFP_KERNEL, 2);
	if (pmd != NULL) {
		for (i = 0; i < PTRS_PER_PMD; i++)
			pmd_clear(pmd + i);
	}
	return pmd;
}

static inline void pmd_free(pmd_t *pmd)
{
	free_pages((unsigned long) pmd, 2);
}

#define __pmd_free_tlb(tlb,pmd)			\
	do {					\
		tlb_flush_mmu(tlb, 0, 0);	\
		pmd_free(pmd);			\
	} while (0)

static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
{
	pgd_val(*pgd) = _PGD_ENTRY | __pa(pmd);
}
#endif /* __s390x__ */

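/*
 * Note on the entry encoding used by pgd_populate() above and the
 * pmd_populate helpers below (the bit definitions live in pgtable.h):
 * each entry combines the physical origin of the next-level table,
 * __pa(...), with type bits such as _PGD_ENTRY, _PMD_ENTRY or
 * _PAGE_TABLE.
 */
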
static inline void
pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
{
#ifndef __s390x__
	pmd_val(pmd[0]) = _PAGE_TABLE + __pa(pte);
	pmd_val(pmd[1]) = _PAGE_TABLE + __pa(pte+256);
	pmd_val(pmd[2]) = _PAGE_TABLE + __pa(pte+512);
	pmd_val(pmd[3]) = _PAGE_TABLE + __pa(pte+768);
#else /* __s390x__ */
	pmd_val(*pmd) = _PMD_ENTRY + __pa(pte);
	pmd_val1(*pmd) = _PMD_ENTRY + __pa(pte+256);
#endif /* __s390x__ */
}

static inline void
pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *page)
{
	/*
	 * (page - mem_map) << PAGE_SHIFT is the physical address of
	 * the pte page; PAGE_OFFSET is 0 on s390, so it can be handed
	 * to pmd_populate_kernel() as if it were a virtual pointer.
	 */
	pmd_populate_kernel(mm, pmd, (pte_t *)((page-mem_map) << PAGE_SHIFT));
}

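/*
 * Layout sketch for pmd_populate_kernel() above, derived from the
 * offsets used there: on 31 bit one 4KB pte page holds 1024 entries
 * and is hooked into four consecutive segment table slots, each
 * pointing at a 256-entry (1KB) quarter of the page:
 *
 *	pmd[0] -> pte +   0
 *	pmd[1] -> pte + 256
 *	pmd[2] -> pte + 512
 *	pmd[3] -> pte + 768
 *
 * The 64 bit variant splits its pte page in the same way across the
 * two halves (pmd_val/pmd_val1) of a pmd entry.
 */
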
/*
 * page table entry allocation/free routines.
 */
static inline pte_t *
pte_alloc_one_kernel(struct mm_struct *mm, unsigned long vmaddr)
{
	pte_t *pte;
	int i;

	pte = (pte_t *) __get_free_page(GFP_KERNEL|__GFP_REPEAT);
	if (pte != NULL) {
		for (i = 0; i < PTRS_PER_PTE; i++) {
			pte_clear(mm, vmaddr, pte+i);
			vmaddr += PAGE_SIZE;
		}
	}
	return pte;
}

static inline struct page *
pte_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
{
	pte_t *pte = pte_alloc_one_kernel(mm, vmaddr);

	if (pte)
		return virt_to_page(pte);
	return NULL;
}

static inline void pte_free_kernel(pte_t *pte)
{
	free_page((unsigned long) pte);
}

static inline void pte_free(struct page *pte)
{
	__free_page(pte);
}

#define __pte_free_tlb(tlb,pte)	tlb_remove_page(tlb,pte)

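/*
 * Usage sketch (illustrative only): the generic mm code combines the
 * pte helpers roughly like this when instantiating a user page table;
 * locking and error handling are elided.
 *
 *	struct page *page = pte_alloc_one(mm, address);
 *	if (page == NULL)
 *		return -ENOMEM;
 *	pmd_populate(mm, pmd, page);
 *	...
 *	pte_free(page);
 */
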
#endif /* _S390_PGALLOC_H */