pgalloc_32.h

#ifndef _I386_PGALLOC_H
#define _I386_PGALLOC_H

#include <linux/threads.h>
#include <linux/mm.h>		/* for struct page */
#include <linux/pagemap.h>
#include <asm/tlb.h>
#include <asm-generic/tlb.h>

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define paravirt_alloc_pt(mm, pfn) do { } while (0)
#define paravirt_alloc_pd(mm, pfn) do { } while (0)
#define paravirt_alloc_pd_clone(pfn, clonepfn, start, count) do { } while (0)
#define paravirt_release_pt(pfn) do { } while (0)
#define paravirt_release_pd(pfn) do { } while (0)
#endif
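
/*
 * The paravirt_*() hooks let a hypervisor track the lifetime of
 * page-table pages; on a non-paravirtualized build they compile
 * away to nothing, as the stubs above show.
 */

/*
 * Point a pmd entry at a kernel pte page: notify any hypervisor of
 * the new page table, then install its physical address along with
 * the standard _PAGE_TABLE flags.
 */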
static inline void pmd_populate_kernel(struct mm_struct *mm,
				       pmd_t *pmd, pte_t *pte)
{
	paravirt_alloc_pt(mm, __pa(pte) >> PAGE_SHIFT);
	set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
}
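
/*
 * User-space variant: takes a struct page rather than a kernel
 * virtual address, because user pte pages may sit in highmem with
 * no permanent kernel mapping, so __pa() cannot be used; the pfn
 * is derived from the struct page instead.
 */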
static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *pte)
{
	unsigned long pfn = page_to_pfn(pte);

	paravirt_alloc_pt(mm, pfn);
	set_pmd(pmd, __pmd(((pteval_t)pfn << PAGE_SHIFT) | _PAGE_TABLE));
}

#define pmd_pgtable(pmd) pmd_page(pmd)

/*
 * Allocate and free page tables.
 */
extern pgd_t *pgd_alloc(struct mm_struct *);
extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);

extern pte_t *pte_alloc_one_kernel(struct mm_struct *, unsigned long);
extern pgtable_t pte_alloc_one(struct mm_struct *, unsigned long);
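
/*
 * Freeing mirrors allocation: a kernel pte page is a plain page,
 * while a user pte page (pgtable_t) carries per-page state set up
 * by pgtable_page_ctor() that pgtable_page_dtor() must tear down
 * before the page goes back to the allocator.
 */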
static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
	free_page((unsigned long)pte);
}

static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
{
	pgtable_page_dtor(pte);
	__free_page(pte);
}
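
/*
 * TLB variant: freeing through the mmu_gather batches the page so
 * it is not actually released until the relevant TLB entries have
 * been flushed, preventing a stale walk of the freed page table.
 */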
extern void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte);

#ifdef CONFIG_X86_PAE
/*
 * In the PAE case we free the pmds as part of the pgd.
 */
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	return (pmd_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
}
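
/*
 * A pmd page is a single, page-aligned page; the BUG_ON below
 * catches a corrupted or mis-cast pointer before it is handed
 * back to the page allocator.
 */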
static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
	BUG_ON((unsigned long)pmd & (PAGE_SIZE-1));
	free_page((unsigned long)pmd);
}

extern void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd);

static inline void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
{
	paravirt_alloc_pd(mm, __pa(pmd) >> PAGE_SHIFT);

	/* Note: almost everything apart from _PAGE_PRESENT is
	   reserved at the pmd (PDPT) level. */
	set_pud(pudp, __pud(__pa(pmd) | _PAGE_PRESENT));

	/*
	 * According to Intel App note "TLBs, Paging-Structure Caches,
	 * and Their Invalidation", April 2007, document 317080-001,
	 * section 8.1: in PAE mode we explicitly have to flush the
	 * TLB via cr3 if the top-level pgd is changed...
	 */
	if (mm == current->active_mm)
		write_cr3(read_cr3());
}
#endif	/* CONFIG_X86_PAE */

#endif /* _I386_PGALLOC_H */
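
/*
 * Usage sketch (illustrative only, not part of the original header):
 * the generic mm layer pairs these helpers roughly as below when
 * faulting in a user page table; compare __pte_alloc() in
 * mm/memory.c. The exact locking and race handling shown here are
 * assumptions about the caller, not something this header defines.
 *
 *	pgtable_t new = pte_alloc_one(mm, address);
 *	if (!new)
 *		return -ENOMEM;
 *
 *	spin_lock(&mm->page_table_lock);
 *	if (!pmd_present(*pmd)) {
 *		pmd_populate(mm, pmd, new);	(entry still empty: install)
 *		new = NULL;
 *	}
 *	spin_unlock(&mm->page_table_lock);
 *
 *	if (new)
 *		pte_free(mm, new);		(lost the race: discard ours)
 */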