/* pgalloc.h (3.3 KB) — x86 page-table allocation helpers */
#ifndef _ASM_X86_PGALLOC_H
#define _ASM_X86_PGALLOC_H

#include <linux/threads.h>
#include <linux/mm.h>		/* for struct page */
#include <linux/pagemap.h>

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
/*
 * Without CONFIG_PARAVIRT the hypervisor notification hooks collapse to
 * empty inline stubs, so native builds pay nothing at the call sites
 * below that announce page-table-page allocation and release.
 */
static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn) {}
static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn) {}
static inline void paravirt_alloc_pmd_clone(unsigned long pfn, unsigned long clonepfn,
					    unsigned long start, unsigned long count) {}
static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned long pfn) {}
static inline void paravirt_release_pte(unsigned long pfn) {}
static inline void paravirt_release_pmd(unsigned long pfn) {}
static inline void paravirt_release_pud(unsigned long pfn) {}
#endif
/*
 * Allocate and free page tables.
 */
extern pgd_t *pgd_alloc(struct mm_struct *);
extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);

/* PTE-page allocators, implemented out of line; the unsigned long is
 * presumably the virtual address being populated — confirm at callers. */
extern pte_t *pte_alloc_one_kernel(struct mm_struct *, unsigned long);
extern pgtable_t pte_alloc_one(struct mm_struct *, unsigned long);
/* Should really implement gc for free page table pages. This could be
   done with a reference count in struct page. */

/*
 * Free a kernel PTE page previously returned by pte_alloc_one_kernel().
 * The BUG_ON catches a pointer that is not page-aligned — such a value
 * cannot be a page-table page.
 */
static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
	BUG_ON((unsigned long)pte & (PAGE_SIZE-1));
	free_page((unsigned long)pte);
}
/* Free a user PTE page (a struct page obtained via pte_alloc_one()). */
static inline void pte_free(struct mm_struct *mm, struct page *pte)
{
	__free_page(pte);
}

/* TLB-batched free of a PTE page; defined out of line. */
extern void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte);
  37. static inline void pmd_populate_kernel(struct mm_struct *mm,
  38. pmd_t *pmd, pte_t *pte)
  39. {
  40. paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
  41. set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
  42. }
/*
 * Install a user PTE page (given as a struct page) into a pmd entry.
 * The pfn is widened through pteval_t before shifting so the physical
 * address is not truncated when pteval_t is wider than unsigned long
 * (the PAE case).
 */
static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
				struct page *pte)
{
	unsigned long pfn = page_to_pfn(pte);

	paravirt_alloc_pte(mm, pfn);
	set_pmd(pmd, __pmd(((pteval_t)pfn << PAGE_SHIFT) | _PAGE_TABLE));
}

/* The pgtable_t for a pmd is simply the struct page backing its PTE page. */
#define pmd_pgtable(pmd) pmd_page(pmd)
  51. #if PAGETABLE_LEVELS > 2
  52. static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
  53. {
  54. return (pmd_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
  55. }
/*
 * Free a PMD page; the BUG_ON catches a pointer that is not
 * page-aligned and therefore cannot be a page-table page.
 */
static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
	BUG_ON((unsigned long)pmd & (PAGE_SIZE-1));
	free_page((unsigned long)pmd);
}

/* TLB-batched free of a PMD page; defined out of line. */
extern void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd);
#ifdef CONFIG_X86_PAE
/* The PAE version lives out of line — presumably because installing a
 * pmd under PAE needs more than the simple store below; confirm against
 * the out-of-line implementation. */
extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
#else	/* !CONFIG_X86_PAE */
/*
 * Non-PAE: install a pmd page into a pud entry — notify the paravirt
 * layer, then write the entry with the pmd's physical address plus the
 * _PAGE_TABLE permission bits.
 */
static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
	paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
	set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));
}
#endif	/* CONFIG_X86_PAE */
  71. #if PAGETABLE_LEVELS > 3
  72. static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
  73. {
  74. paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);
  75. set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)));
  76. }
  77. static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
  78. {
  79. return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
  80. }
/*
 * Free a PUD page; the BUG_ON catches a pointer that is not
 * page-aligned and therefore cannot be a page-table page.
 */
static inline void pud_free(struct mm_struct *mm, pud_t *pud)
{
	BUG_ON((unsigned long)pud & (PAGE_SIZE-1));
	free_page((unsigned long)pud);
}

/* TLB-batched free of a PUD page; defined out of line. */
extern void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud);

#endif	/* PAGETABLE_LEVELS > 3 */
#endif	/* PAGETABLE_LEVELS > 2 */

#endif /* _ASM_X86_PGALLOC_H */