pgalloc.h

#ifndef _ASM_X86_PGALLOC_H
#define _ASM_X86_PGALLOC_H

#include <linux/threads.h>
#include <linux/mm.h>           /* for struct page */
#include <linux/pagemap.h>

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define paravirt_alloc_pte(mm, pfn) do { } while (0)
#define paravirt_alloc_pmd(mm, pfn) do { } while (0)
#define paravirt_alloc_pmd_clone(pfn, clonepfn, start, count) do { } while (0)
#define paravirt_alloc_pud(mm, pfn) do { } while (0)
#define paravirt_release_pte(pfn) do { } while (0)
#define paravirt_release_pmd(pfn) do { } while (0)
#define paravirt_release_pud(pfn) do { } while (0)
#endif
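
/*
 * Note on the hooks above: with CONFIG_PARAVIRT they are provided by
 * <asm/paravirt.h> so a hypervisor back end (e.g. Xen) can track which
 * pfns hold page-table pages as they are allocated and released;
 * without paravirt support they compile away to the no-ops defined here.
 */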

/*
 * Allocate and free page tables.
 */
extern pgd_t *pgd_alloc(struct mm_struct *);
extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);

extern pte_t *pte_alloc_one_kernel(struct mm_struct *, unsigned long);
extern pgtable_t pte_alloc_one(struct mm_struct *, unsigned long);

/*
 * Should really implement gc for free page table pages. This could be
 * done with a reference count in struct page.
 */
static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
        BUG_ON((unsigned long)pte & (PAGE_SIZE-1));
        free_page((unsigned long)pte);
}

static inline void pte_free(struct mm_struct *mm, struct page *pte)
{
        __free_page(pte);
}

extern void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte);

static inline void pmd_populate_kernel(struct mm_struct *mm,
                                       pmd_t *pmd, pte_t *pte)
{
        paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
        set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
}

static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
                                struct page *pte)
{
        unsigned long pfn = page_to_pfn(pte);

        paravirt_alloc_pte(mm, pfn);
        set_pmd(pmd, __pmd(((pteval_t)pfn << PAGE_SHIFT) | _PAGE_TABLE));
}

#define pmd_pgtable(pmd) pmd_page(pmd)
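
/*
 * Illustrative sketch, not part of the original header: the generic mm
 * code pairs pte_alloc_one() with pmd_populate() roughly as below when a
 * mapping needs a pte page that does not exist yet.  The helper name and
 * the simplified locking/race handling are assumptions for illustration
 * only; see __pte_alloc() in mm/memory.c for the real thing.
 */
static inline int example_alloc_pte_page(struct mm_struct *mm, pmd_t *pmd,
                                         unsigned long address)
{
        pgtable_t new = pte_alloc_one(mm, address);     /* one zeroed pte page */

        if (!new)
                return -ENOMEM;

        spin_lock(&mm->page_table_lock);
        if (pmd_present(*pmd))                  /* someone else populated it */
                pte_free(mm, new);              /* lost the race, give it back */
        else
                pmd_populate(mm, pmd, new);     /* wire the page into the pmd */
        spin_unlock(&mm->page_table_lock);

        return 0;
}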

#if PAGETABLE_LEVELS > 2
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
        return (pmd_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
}

static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
        BUG_ON((unsigned long)pmd & (PAGE_SIZE-1));
        free_page((unsigned long)pmd);
}

extern void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd);

#ifdef CONFIG_X86_PAE
extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
#else   /* !CONFIG_X86_PAE */
static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
        paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
        set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));
}
#endif  /* CONFIG_X86_PAE */
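
/*
 * Why PAE gets an out-of-line pud_populate(): in PAE mode the top-level
 * PDPT entries may be cached by the processor when %cr3 is loaded, so
 * updating them cannot be a plain store; the external implementation
 * also takes care of the flushing needed for the CPU to notice.
 */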

#if PAGETABLE_LEVELS > 3
static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
{
        paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);
        set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)));
}

static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
{
        return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
}

static inline void pud_free(struct mm_struct *mm, pud_t *pud)
{
        BUG_ON((unsigned long)pud & (PAGE_SIZE-1));
        free_page((unsigned long)pud);
}

extern void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud);
#endif  /* PAGETABLE_LEVELS > 3 */
#endif  /* PAGETABLE_LEVELS > 2 */

#endif  /* _ASM_X86_PGALLOC_H */
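
/*
 * Illustrative sketch, not part of the original header: on 4-level
 * builds (PAGETABLE_LEVELS > 3) the generic mm code pairs pud_alloc_one()
 * with pgd_populate() just like the pte example above, one level higher.
 * The helper name and the simplified locking are assumptions for
 * illustration only; see __pud_alloc() in mm/memory.c for the real thing.
 */
static inline int example_alloc_pud_page(struct mm_struct *mm, pgd_t *pgd,
                                         unsigned long address)
{
        pud_t *new = pud_alloc_one(mm, address);        /* one zeroed pud page */

        if (!new)
                return -ENOMEM;

        spin_lock(&mm->page_table_lock);
        if (pgd_present(*pgd))                  /* lost the race, discard ours */
                pud_free(mm, new);
        else
                pgd_populate(mm, pgd, new);     /* wire the pud page into the pgd */
        spin_unlock(&mm->page_table_lock);

        return 0;
}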