/* include/asm-x86/pgalloc.h — x86 page-table page allocation helpers */
  1. #ifndef _ASM_X86_PGALLOC_H
  2. #define _ASM_X86_PGALLOC_H
  3. #include <linux/threads.h>
  4. #include <linux/mm.h> /* for struct page */
  5. #include <linux/pagemap.h>
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
/*
 * Without CONFIG_PARAVIRT the paravirt page-table hooks compile away to
 * nothing.  The "do { } while (0)" form keeps each stub safe to use as a
 * single statement in any context (e.g. an unbraced if/else body).
 */
#define paravirt_alloc_pt(mm, pfn) do { } while (0)
#define paravirt_alloc_pd(mm, pfn) do { } while (0)
#define paravirt_alloc_pd_clone(pfn, clonepfn, start, count) do { } while (0)
#define paravirt_release_pt(pfn) do { } while (0)
#define paravirt_release_pd(pfn) do { } while (0)
#endif
  15. /*
  16. * Allocate and free page tables.
  17. */
  18. extern pgd_t *pgd_alloc(struct mm_struct *);
  19. extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
  20. extern pte_t *pte_alloc_one_kernel(struct mm_struct *, unsigned long);
  21. extern pgtable_t pte_alloc_one(struct mm_struct *, unsigned long);
  22. /* Should really implement gc for free page table pages. This could be
  23. done with a reference count in struct page. */
  24. static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
  25. {
  26. BUG_ON((unsigned long)pte & (PAGE_SIZE-1));
  27. free_page((unsigned long)pte);
  28. }
/*
 * Free a user PTE page given as its struct page.  The mm argument is
 * unused here; it is part of the generic pgalloc API.
 */
static inline void pte_free(struct mm_struct *mm, struct page *pte)
{
	__free_page(pte);
}
  33. extern void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte);
  34. static inline void pmd_populate_kernel(struct mm_struct *mm,
  35. pmd_t *pmd, pte_t *pte)
  36. {
  37. paravirt_alloc_pt(mm, __pa(pte) >> PAGE_SHIFT);
  38. set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
  39. }
  40. static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
  41. struct page *pte)
  42. {
  43. unsigned long pfn = page_to_pfn(pte);
  44. paravirt_alloc_pt(mm, pfn);
  45. set_pmd(pmd, __pmd(((pteval_t)pfn << PAGE_SHIFT) | _PAGE_TABLE));
  46. }
  47. #define pmd_pgtable(pmd) pmd_page(pmd)
  48. #if PAGETABLE_LEVELS > 2
  49. static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
  50. {
  51. return (pmd_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
  52. }
  53. static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
  54. {
  55. BUG_ON((unsigned long)pmd & (PAGE_SIZE-1));
  56. free_page((unsigned long)pmd);
  57. }
  58. extern void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd);
  59. #ifdef CONFIG_X86_PAE
  60. extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
  61. #else /* !CONFIG_X86_PAE */
  62. static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
  63. {
  64. paravirt_alloc_pd(mm, __pa(pmd) >> PAGE_SHIFT);
  65. set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));
  66. }
  67. #endif /* CONFIG_X86_PAE */
  68. #if PAGETABLE_LEVELS > 3
/*
 * Point a PGD entry at a PUD page.  Unlike the pmd/pud populate helpers
 * in this file, this path invokes no paravirt hook.
 */
static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
{
	set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)));
}
  73. static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
  74. {
  75. return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
  76. }
  77. static inline void pud_free(struct mm_struct *mm, pud_t *pud)
  78. {
  79. BUG_ON((unsigned long)pud & (PAGE_SIZE-1));
  80. free_page((unsigned long)pud);
  81. }
  82. extern void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud);
  83. #endif /* PAGETABLE_LEVELS > 3 */
  84. #endif /* PAGETABLE_LEVELS > 2 */
  85. #endif /* _ASM_X86_PGALLOC_H */