pgalloc.h

/*
 *  include/asm-s390/pgalloc.h
 *
 *  S390 version
 *    Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/pgalloc.h"
 *    Copyright (C) 1994  Linus Torvalds
 */

#ifndef _S390_PGALLOC_H
#define _S390_PGALLOC_H

#include <linux/threads.h>
#include <linux/gfp.h>
#include <linux/mm.h>

#define check_pgt_cache()        do {} while (0)

extern void diag10(unsigned long addr);
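
/*
 * Note: diag10() issues the DIAGNOSE 0x10 ("release pages") call, which
 * tells the hypervisor (e.g. z/VM) that the contents of a page are no
 * longer needed.  Only the declaration lives here; the implementation
 * and its callers are elsewhere in arch/s390.
 */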

/*
 * Allocate and free page tables. The xxx_kernel() versions are
 * used to allocate a kernel page table - this turns on ASN bits
 * if any.
 */
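
/*
 * Note the allocation orders below: the pgd occupies two pages (order 1)
 * on 31-bit and four pages (order 2) on 64-bit.  A fresh table is cleared
 * entry by entry with pmd_clear()/pgd_clear() rather than memset(),
 * presumably because an empty table entry on s390 carries the hardware
 * invalid bit and is not simply all zeroes.
 */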

static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
        pgd_t *pgd;
        int i;

#ifndef __s390x__
        pgd = (pgd_t *) __get_free_pages(GFP_KERNEL,1);
        if (pgd != NULL)
                for (i = 0; i < USER_PTRS_PER_PGD; i++)
                        pmd_clear(pmd_offset(pgd + i, i*PGDIR_SIZE));
#else /* __s390x__ */
        pgd = (pgd_t *) __get_free_pages(GFP_KERNEL,2);
        if (pgd != NULL)
                for (i = 0; i < PTRS_PER_PGD; i++)
                        pgd_clear(pgd + i);
#endif /* __s390x__ */
        return pgd;
}

static inline void pgd_free(pgd_t *pgd)
{
#ifndef __s390x__
        free_pages((unsigned long) pgd, 1);
#else /* __s390x__ */
        free_pages((unsigned long) pgd, 2);
#endif /* __s390x__ */
}

#ifndef __s390x__

/*
 * page middle directory allocation/free routines.
 * We use pmd cache only on s390x, so these are dummy routines. This
 * code never triggers because the pgd will always be present.
 */
#define pmd_alloc_one(mm,address)       ({ BUG(); ((pmd_t *)2); })
#define pmd_free(x)                     do { } while (0)
#define __pmd_free_tlb(tlb,x)           do { } while (0)
#define pgd_populate(mm, pmd, pte)      BUG()
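
/*
 * The ((pmd_t *)2) above is only a dummy non-NULL value so that the
 * statement expression still yields a pmd_t * and the caller does not
 * mistake the BUG() path for an out-of-memory condition; it is never
 * actually dereferenced.
 */
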
#else /* __s390x__ */

static inline pmd_t * pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
{
        pmd_t *pmd;
        int i;

        pmd = (pmd_t *) __get_free_pages(GFP_KERNEL, 2);
        if (pmd != NULL) {
                for (i=0; i < PTRS_PER_PMD; i++)
                        pmd_clear(pmd+i);
        }
        return pmd;
}

static inline void pmd_free (pmd_t *pmd)
{
        free_pages((unsigned long) pmd, 2);
}

#define __pmd_free_tlb(tlb,pmd)                 \
        do {                                    \
                tlb_flush_mmu(tlb, 0, 0);       \
                pmd_free(pmd);                  \
        } while (0)

static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
{
        pgd_val(*pgd) = _PGD_ENTRY | __pa(pmd);
}

#endif /* __s390x__ */
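
/*
 * pmd_populate_kernel() below wires a freshly allocated pte page into the
 * segment (pmd) level.  On 31-bit a hardware segment covers 1MB, i.e. 256
 * four-byte ptes, so one 4KB pte page (1024 entries) is spread across four
 * consecutive pmd entries.  On 64-bit each pmd_t apparently holds two
 * hardware segment-table entries (hence pmd_val and pmd_val1), each
 * pointing at one 256-entry half of the pte page.
 */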
static inline void
pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
{
#ifndef __s390x__
        pmd_val(pmd[0]) = _PAGE_TABLE + __pa(pte);
        pmd_val(pmd[1]) = _PAGE_TABLE + __pa(pte+256);
        pmd_val(pmd[2]) = _PAGE_TABLE + __pa(pte+512);
        pmd_val(pmd[3]) = _PAGE_TABLE + __pa(pte+768);
#else /* __s390x__ */
        pmd_val(*pmd) = _PMD_ENTRY + __pa(pte);
        pmd_val1(*pmd) = _PMD_ENTRY + __pa(pte+256);
#endif /* __s390x__ */
}

static inline void
pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *page)
{
        pmd_populate_kernel(mm, pmd, (pte_t *)((page-mem_map) << PAGE_SHIFT));
}
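
/*
 * (page - mem_map) << PAGE_SHIFT above converts the struct page into the
 * physical address of the pte page.  Passing that through
 * pmd_populate_kernel(), which applies __pa() again, works because the
 * s390 kernel runs with virtual == real addresses, so the extra __pa()
 * is effectively an identity conversion here.
 */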

/*
 * page table entry allocation/free routines.
 */
static inline pte_t *
pte_alloc_one_kernel(struct mm_struct *mm, unsigned long vmaddr)
{
        pte_t *pte;
        int i;

        pte = (pte_t *) __get_free_page(GFP_KERNEL|__GFP_REPEAT);
        if (pte != NULL) {
                for (i=0; i < PTRS_PER_PTE; i++) {
                        pte_clear(mm, vmaddr, pte+i);
                        vmaddr += PAGE_SIZE;
                }
        }
        return pte;
}
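
/*
 * The new pte page is invalidated with pte_clear() entry by entry rather
 * than zeroed wholesale: an empty pte on s390 is not all zeroes but has
 * the hardware invalid bit set.
 */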

static inline struct page *
pte_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
{
        pte_t *pte = pte_alloc_one_kernel(mm, vmaddr);
        if (pte)
                return virt_to_page(pte);
        return NULL;
}

static inline void pte_free_kernel(pte_t *pte)
{
        free_page((unsigned long) pte);
}

static inline void pte_free(struct page *pte)
{
        __free_page(pte);
}

#define __pte_free_tlb(tlb,pte) tlb_remove_page(tlb,pte)
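
/*
 * On the mmu_gather path a pte page is handed to tlb_remove_page(), so it
 * is only released back to the allocator once the pending TLB flush has
 * been carried out.
 */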

#endif /* _S390_PGALLOC_H */