#ifndef _X86_64_PGALLOC_H
#define _X86_64_PGALLOC_H

#include <asm/pda.h>
#include <linux/threads.h>
#include <linux/mm.h>
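
/*
 * The populate helpers hook a newly allocated lower-level table into
 * its parent entry: the slot is set to the table's physical address
 * plus the _PAGE_TABLE permission bits.
 */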
#define pmd_populate_kernel(mm, pmd, pte) \
		set_pmd(pmd, __pmd(_PAGE_TABLE | __pa(pte)))
#define pud_populate(mm, pud, pmd) \
		set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)))
#define pgd_populate(mm, pgd, pud) \
		set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)))

static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *pte)
{
	set_pmd(pmd, __pmd(_PAGE_TABLE | (page_to_pfn(pte) << PAGE_SHIFT)));
}
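
/* Table pages are whole pages; a misaligned pointer means corruption. */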
static inline void pmd_free(pmd_t *pmd)
{
	BUG_ON((unsigned long)pmd & (PAGE_SIZE-1));
	free_page((unsigned long)pmd);
}
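
/*
 * pmd and pud pages come zeroed from the page allocator; __GFP_REPEAT
 * asks it to retry harder before failing, since callers treat NULL as
 * out of memory.
 */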
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	return (pmd_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
}

static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
}

static inline void pud_free(pud_t *pud)
{
	BUG_ON((unsigned long)pud & (PAGE_SIZE-1));
	free_page((unsigned long)pud);
}
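
/*
 * All pgd pages are kept on a doubly linked list, threaded through
 * their struct page: ->index points to the next page, ->private holds
 * the address of the previous element's next pointer (or of pgd_list
 * itself for the head), so deletion needs no list walk.  pgd_lock
 * serializes all updates.  The list lets walkers visit every page
 * table when kernel mappings have to be changed globally.
 */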
static inline void pgd_list_add(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);

	spin_lock(&pgd_lock);
	page->index = (pgoff_t)pgd_list;
	if (pgd_list)
		pgd_list->private = (unsigned long)&page->index;
	pgd_list = page;
	page->private = (unsigned long)&pgd_list;
	spin_unlock(&pgd_lock);
}

static inline void pgd_list_del(pgd_t *pgd)
{
	struct page *next, **pprev, *page = virt_to_page(pgd);

	spin_lock(&pgd_lock);
	next = (struct page *)page->index;
	pprev = (struct page **)page->private;
	*pprev = next;
	if (next)
		next->private = (unsigned long)pprev;
	spin_unlock(&pgd_lock);
}
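
/*
 * A pgd page is split at pgd_index(__PAGE_OFFSET): entries below the
 * boundary map user space and start out clear, entries above it are
 * copied from init_level4_pgt so the kernel half is shared by every
 * process.
 */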
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	unsigned boundary;
	pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);

	if (!pgd)
		return NULL;
	pgd_list_add(pgd);
	/*
	 * Copy kernel pointers in from init.
	 * Could keep a freelist or slab cache of those because the kernel
	 * part never changes.
	 */
	boundary = pgd_index(__PAGE_OFFSET);
	memset(pgd, 0, boundary * sizeof(pgd_t));
	memcpy(pgd + boundary,
	       init_level4_pgt + boundary,
	       (PTRS_PER_PGD - boundary) * sizeof(pgd_t));
	return pgd;
}
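
/* Unlink from pgd_list before the page can be reused. */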
static inline void pgd_free(pgd_t *pgd)
{
	BUG_ON((unsigned long)pgd & (PAGE_SIZE-1));
	pgd_list_del(pgd);
	free_page((unsigned long)pgd);
}
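
/*
 * PTE pages for kernel mappings are returned as virtual addresses;
 * user PTE pages are handled as struct page, matching pmd_populate()
 * versus pmd_populate_kernel() above.
 */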
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
	return (pte_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
}

static inline struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	void *p = (void *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);

	if (!p)
		return NULL;
	return virt_to_page(p);
}

/* Should really implement gc for free page table pages. This could be
   done with a reference count in struct page. */

static inline void pte_free_kernel(pte_t *pte)
{
	BUG_ON((unsigned long)pte & (PAGE_SIZE-1));
	free_page((unsigned long)pte);
}

static inline void pte_free(struct page *pte)
{
	__free_page(pte);
}
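
/*
 * Frees routed through the mmu_gather path: tlb_remove_page() defers
 * the actual free until after the TLB flush, so other CPUs cannot
 * still reach the page through stale TLB entries.
 */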
#define __pte_free_tlb(tlb,pte) tlb_remove_page((tlb),(pte))
#define __pmd_free_tlb(tlb,x)   tlb_remove_page((tlb),virt_to_page(x))
#define __pud_free_tlb(tlb,x)   tlb_remove_page((tlb),virt_to_page(x))

#endif /* _X86_64_PGALLOC_H */