/* pgalloc-64.h (3.9 KB) — scraped listing header; garbled line-number run removed */
  1. #ifndef _ASM_POWERPC_PGALLOC_64_H
  2. #define _ASM_POWERPC_PGALLOC_64_H
  3. /*
  4. * This program is free software; you can redistribute it and/or
  5. * modify it under the terms of the GNU General Public License
  6. * as published by the Free Software Foundation; either version
  7. * 2 of the License, or (at your option) any later version.
  8. */
  9. #include <linux/mm.h>
  10. #include <linux/slab.h>
  11. #include <linux/cpumask.h>
  12. #include <linux/percpu.h>
#ifndef CONFIG_PPC_SUBPAGE_PROT
/*
 * Stub used when subpage protection is compiled out: pgd_free() always
 * calls subpage_prot_free(), so provide a no-op here.
 */
static inline void subpage_prot_free(pgd_t *pgd) {}
#endif
/* Slab caches for the page-table levels, defined elsewhere in arch code. */
extern struct kmem_cache *pgtable_cache[];

/*
 * Indices into pgtable_cache[].  PUD and PMD share index 1 (presumably
 * the two table sizes are equal on this configuration — confirm against
 * the cache-initialization code).  PTE_NONCACHE_NUM is a sentinel value,
 * not a cache index: see pgtable_free(), which routes it to free_page().
 */
#define PGD_CACHE_NUM		0
#define PUD_CACHE_NUM		1
#define PMD_CACHE_NUM		1
#define HUGEPTE_CACHE_NUM	2
#define PTE_NONCACHE_NUM	3	/* from GFP rather than kmem_cache */
  22. static inline pgd_t *pgd_alloc(struct mm_struct *mm)
  23. {
  24. return kmem_cache_alloc(pgtable_cache[PGD_CACHE_NUM], GFP_KERNEL);
  25. }
/*
 * Free a page directory.  Any subpage-protection state associated with
 * this pgd is torn down first (a no-op unless CONFIG_PPC_SUBPAGE_PROT),
 * then the pgd itself is returned to its slab cache.
 */
static inline void pgd_free(pgd_t *pgd)
{
	subpage_prot_free(pgd);
	kmem_cache_free(pgtable_cache[PGD_CACHE_NUM], pgd);
}
#ifndef CONFIG_PPC_64K_PAGES
/*
 * 4K-page configuration: PUD tables exist as separately allocated
 * objects, and the helpers below manage them.
 */
#define pgd_populate(MM, PGD, PUD)	pgd_set(PGD, PUD)

/* Allocate a pud table from the shared pud/pmd slab cache. */
static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	return kmem_cache_alloc(pgtable_cache[PUD_CACHE_NUM],
				GFP_KERNEL|__GFP_REPEAT);
}

static inline void pud_free(pud_t *pud)
{
	kmem_cache_free(pgtable_cache[PUD_CACHE_NUM], pud);
}

/* Install a pmd table into a pud entry. */
static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
	pud_set(pud, (unsigned long)pmd);
}

/* User pte pages are installed via the kernel address of the page. */
#define pmd_populate(mm, pmd, pte_page) \
	pmd_populate_kernel(mm, pmd, page_address(pte_page))
#define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, (unsigned long)(pte))

#else /* CONFIG_PPC_64K_PAGES */

/* 64K-page configuration: no separate pud helpers are provided here. */
#define pud_populate(mm, pud, pmd)	pud_set(pud, (unsigned long)pmd)

/* Install a pte table into a pmd entry. */
static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
				       pte_t *pte)
{
	pmd_set(pmd, (unsigned long)pte);
}

#define pmd_populate(mm, pmd, pte_page) \
	pmd_populate_kernel(mm, pmd, page_address(pte_page))

#endif /* CONFIG_PPC_64K_PAGES */
  59. static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
  60. {
  61. return kmem_cache_alloc(pgtable_cache[PMD_CACHE_NUM],
  62. GFP_KERNEL|__GFP_REPEAT);
  63. }
/* Return a pmd table to the shared pud/pmd slab cache. */
static inline void pmd_free(pmd_t *pmd)
{
	kmem_cache_free(pgtable_cache[PMD_CACHE_NUM], pmd);
}
  68. static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
  69. unsigned long address)
  70. {
  71. return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO);
  72. }
  73. static inline struct page *pte_alloc_one(struct mm_struct *mm,
  74. unsigned long address)
  75. {
  76. pte_t *pte = pte_alloc_one_kernel(mm, address);
  77. return pte ? virt_to_page(pte) : NULL;
  78. }
/* Free a kernel pte table back to the page allocator. */
static inline void pte_free_kernel(pte_t *pte)
{
	free_page((unsigned long)pte);
}
/* Free a user pte table (held as a struct page) back to the page allocator. */
static inline void pte_free(struct page *ptepage)
{
	__free_page(ptepage);
}
/*
 * For deferred (TLB-batched) freeing, the table pointer and the cache
 * index it came from are packed into one word: the low two bits carry
 * the cache number, the rest the pointer (its low bits are masked off
 * when packing — see pgtable_free_cache() — so tables are presumably
 * aligned well past PGF_CACHENUM_MASK; confirm against the table sizes).
 */
#define PGF_CACHENUM_MASK	0x3

typedef struct pgtable_free {
	unsigned long val;
} pgtable_free_t;
  91. static inline pgtable_free_t pgtable_free_cache(void *p, int cachenum,
  92. unsigned long mask)
  93. {
  94. BUG_ON(cachenum > PGF_CACHENUM_MASK);
  95. return (pgtable_free_t){.val = ((unsigned long) p & ~mask) | cachenum};
  96. }
  97. static inline void pgtable_free(pgtable_free_t pgf)
  98. {
  99. void *p = (void *)(pgf.val & ~PGF_CACHENUM_MASK);
  100. int cachenum = pgf.val & PGF_CACHENUM_MASK;
  101. if (cachenum == PTE_NONCACHE_NUM)
  102. free_page((unsigned long)p);
  103. else
  104. kmem_cache_free(pgtable_cache[cachenum], p);
  105. }
/* Queue a packed table for freeing after the TLB flush; defined in arch code. */
extern void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf);

/*
 * TLB-batched freeing of each table level: pack the pointer with its
 * cache index and hand it to pgtable_free_tlb().  The mask passed is
 * the table size minus one at each level.
 */
#define __pte_free_tlb(tlb, ptepage)	\
	pgtable_free_tlb(tlb, pgtable_free_cache(page_address(ptepage), \
		PTE_NONCACHE_NUM, PTE_TABLE_SIZE-1))
#define __pmd_free_tlb(tlb, pmd)	\
	pgtable_free_tlb(tlb, pgtable_free_cache(pmd, \
		PMD_CACHE_NUM, PMD_TABLE_SIZE-1))
#ifndef CONFIG_PPC_64K_PAGES
/* PUD tables only exist in the 4K-page configuration (see above). */
#define __pud_free_tlb(tlb, pud)	\
	pgtable_free_tlb(tlb, pgtable_free_cache(pud, \
		PUD_CACHE_NUM, PUD_TABLE_SIZE-1))
#endif /* CONFIG_PPC_64K_PAGES */

/* No periodic page-table cache trimming is needed; generic mm requires the hook. */
#define check_pgt_cache()	do { } while (0)

#endif /* _ASM_POWERPC_PGALLOC_64_H */