/* arch/powerpc/include/asm/pgalloc-64.h — 64-bit powerpc page-table allocation */
  1. #ifndef _ASM_POWERPC_PGALLOC_64_H
  2. #define _ASM_POWERPC_PGALLOC_64_H
  3. /*
  4. * This program is free software; you can redistribute it and/or
  5. * modify it under the terms of the GNU General Public License
  6. * as published by the Free Software Foundation; either version
  7. * 2 of the License, or (at your option) any later version.
  8. */
  9. #include <linux/mm.h>
  10. #include <linux/slab.h>
  11. #include <linux/cpumask.h>
  12. #include <linux/percpu.h>
#ifndef CONFIG_PPC_SUBPAGE_PROT
/*
 * Without subpage protection support there is no per-pgd state to tear
 * down, so the subpage_prot_free() call in pgd_free() becomes a no-op.
 */
static inline void subpage_prot_free(pgd_t *pgd) {}
#endif
/*
 * Slab caches for the page-table levels, indexed by the *_CACHE_NUM
 * values below.  Note that PUD and PMD share a single cache (same
 * index), and that bottom-level pte pages are not slab-backed at all:
 * PTE_NONCACHE_NUM marks them in the packed pgtable_free_t word.
 */
extern struct kmem_cache *pgtable_cache[];

#define PGD_CACHE_NUM		0
#define PUD_CACHE_NUM		1
#define PMD_CACHE_NUM		1
#define HUGEPTE_CACHE_NUM	2
#define PTE_NONCACHE_NUM	7	/* from GFP rather than kmem_cache */
  22. static inline pgd_t *pgd_alloc(struct mm_struct *mm)
  23. {
  24. return kmem_cache_alloc(pgtable_cache[PGD_CACHE_NUM], GFP_KERNEL);
  25. }
/*
 * Release a pgd: drop any subpage-protection state hanging off it
 * first, then return the object to the pgd slab cache.
 */
static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	subpage_prot_free(pgd);
	kmem_cache_free(pgtable_cache[PGD_CACHE_NUM], pgd);
}
#ifndef CONFIG_PPC_64K_PAGES
/* 4K-page configuration: pud and pmd levels are real slab objects. */
#define pgd_populate(MM, PGD, PUD)	pgd_set(PGD, PUD)

static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	return kmem_cache_alloc(pgtable_cache[PUD_CACHE_NUM],
				GFP_KERNEL|__GFP_REPEAT);
}

static inline void pud_free(struct mm_struct *mm, pud_t *pud)
{
	kmem_cache_free(pgtable_cache[PUD_CACHE_NUM], pud);
}

static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
	pud_set(pud, (unsigned long)pmd);
}

/* A pte page is a plain struct page; link it in via its kernel address. */
#define pmd_populate(mm, pmd, pte_page) \
	pmd_populate_kernel(mm, pmd, page_address(pte_page))
#define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, (unsigned long)(pte))
#define pmd_pgtable(pmd) pmd_page(pmd)
#else /* CONFIG_PPC_64K_PAGES */
/* 64K-page configuration: pud_populate is a macro, pmd_populate_kernel inline. */
#define pud_populate(mm, pud, pmd)	pud_set(pud, (unsigned long)pmd)

static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
				       pte_t *pte)
{
	pmd_set(pmd, (unsigned long)pte);
}

#define pmd_populate(mm, pmd, pte_page) \
	pmd_populate_kernel(mm, pmd, page_address(pte_page))
#define pmd_pgtable(pmd) pmd_page(pmd)

#endif /* CONFIG_PPC_64K_PAGES */
  61. static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
  62. {
  63. return kmem_cache_alloc(pgtable_cache[PMD_CACHE_NUM],
  64. GFP_KERNEL|__GFP_REPEAT);
  65. }
/* Return a pmd table to the (shared pud/pmd) slab cache. */
static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
	kmem_cache_free(pgtable_cache[PMD_CACHE_NUM], pmd);
}
  70. static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
  71. unsigned long address)
  72. {
  73. return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO);
  74. }
  75. static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
  76. unsigned long address)
  77. {
  78. struct page *page;
  79. pte_t *pte;
  80. pte = pte_alloc_one_kernel(mm, address);
  81. if (!pte)
  82. return NULL;
  83. page = virt_to_page(pte);
  84. pgtable_page_ctor(page);
  85. return page;
  86. }
/* Free a kernel pte table obtained from pte_alloc_one_kernel(). */
static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
	free_page((unsigned long)pte);
}

/* Free a user pte page: undo the ctor accounting, then release it. */
static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
{
	pgtable_page_dtor(ptepage);
	__free_page(ptepage);
}
/*
 * Deferred page-table freeing: a table pointer and its *_CACHE_NUM
 * index are packed into a single word.  The index lives in the low
 * 3 bits, which the table's alignment keeps clear (callers mask with
 * the corresponding *_TABLE_SIZE-1).
 */
#define PGF_CACHENUM_MASK	0x7

typedef struct pgtable_free {
	unsigned long val;
} pgtable_free_t;
  100. static inline pgtable_free_t pgtable_free_cache(void *p, int cachenum,
  101. unsigned long mask)
  102. {
  103. BUG_ON(cachenum > PGF_CACHENUM_MASK);
  104. return (pgtable_free_t){.val = ((unsigned long) p & ~mask) | cachenum};
  105. }
  106. static inline void pgtable_free(pgtable_free_t pgf)
  107. {
  108. void *p = (void *)(pgf.val & ~PGF_CACHENUM_MASK);
  109. int cachenum = pgf.val & PGF_CACHENUM_MASK;
  110. if (cachenum == PTE_NONCACHE_NUM)
  111. free_page((unsigned long)p);
  112. else
  113. kmem_cache_free(pgtable_cache[cachenum], p);
  114. }
/* Queue a packed table pointer for freeing once the TLB is flushed. */
extern void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf);

#define __pte_free_tlb(tlb,ptepage)	\
do { \
	pgtable_page_dtor(ptepage); \
	pgtable_free_tlb(tlb, pgtable_free_cache(page_address(ptepage), \
		PTE_NONCACHE_NUM, PTE_TABLE_SIZE-1)); \
} while (0)
#define __pmd_free_tlb(tlb, pmd)	\
	pgtable_free_tlb(tlb, pgtable_free_cache(pmd, \
		PMD_CACHE_NUM, PMD_TABLE_SIZE-1))
#ifndef CONFIG_PPC_64K_PAGES
#define __pud_free_tlb(tlb, pud)	\
	pgtable_free_tlb(tlb, pgtable_free_cache(pud, \
		PUD_CACHE_NUM, PUD_TABLE_SIZE-1))
#endif /* CONFIG_PPC_64K_PAGES */

/* No periodic page-table cache trimming needed on this arch. */
#define check_pgt_cache()	do { } while (0)

#endif /* _ASM_POWERPC_PGALLOC_64_H */