  1. #ifndef _ASM_IA64_PGALLOC_H
  2. #define _ASM_IA64_PGALLOC_H
  3. /*
  4. * This file contains the functions and defines necessary to allocate
  5. * page tables.
  6. *
  7. * This hopefully works with any (fixed) ia-64 page-size, as defined
  8. * in <asm/page.h> (currently 8192).
  9. *
  10. * Copyright (C) 1998-2001 Hewlett-Packard Co
  11. * David Mosberger-Tang <davidm@hpl.hp.com>
  12. * Copyright (C) 2000, Goutham Rao <goutham.rao@intel.com>
  13. */
  14. #include <linux/compiler.h>
  15. #include <linux/mm.h>
  16. #include <linux/page-flags.h>
  17. #include <linux/threads.h>
  18. #include <asm/mmu_context.h>
  19. DECLARE_PER_CPU(unsigned long *, __pgtable_quicklist);
  20. #define pgtable_quicklist __ia64_per_cpu_var(__pgtable_quicklist)
  21. DECLARE_PER_CPU(long, __pgtable_quicklist_size);
  22. #define pgtable_quicklist_size __ia64_per_cpu_var(__pgtable_quicklist_size)
  23. static inline long pgtable_quicklist_total_size(void)
  24. {
  25. long ql_size = 0;
  26. int cpuid;
  27. for_each_online_cpu(cpuid) {
  28. ql_size += per_cpu(__pgtable_quicklist_size, cpuid);
  29. }
  30. return ql_size;
  31. }
/*
 * Pop one zeroed page from this CPU's page-table quicklist, falling
 * back to the page allocator when the list is empty.  Returns NULL
 * only if __get_free_page() fails.
 */
static inline void *pgtable_quicklist_alloc(void)
{
	unsigned long *ret = NULL;

	/* the quicklist is a per-CPU variable; pin us to this CPU */
	preempt_disable();

	ret = pgtable_quicklist;
	if (likely(ret != NULL)) {
		/* the first word of each cached page links to the next page */
		pgtable_quicklist = (unsigned long *)(*ret);
		/* clear the link word so the returned page is fully zeroed */
		ret[0] = 0;
		--pgtable_quicklist_size;
		preempt_enable();
	} else {
		/* re-enable preemption before a possibly-sleeping allocation */
		preempt_enable();
		ret = (unsigned long *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
	}

	return ret;
}
/*
 * Return a page-table page to this CPU's quicklist.  On NUMA, pages
 * that belong to a remote node go straight back to the page allocator
 * so the quicklist only ever caches node-local pages.
 */
static inline void pgtable_quicklist_free(void *pgtable_entry)
{
#ifdef CONFIG_NUMA
	unsigned long nid = page_to_nid(virt_to_page(pgtable_entry));

	if (unlikely(nid != numa_node_id())) {
		free_page((unsigned long)pgtable_entry);
		return;
	}
#endif

	/* the quicklist is a per-CPU variable; pin us to this CPU */
	preempt_disable();
	/* chain the page onto the list head via its first word */
	*(unsigned long *)pgtable_entry = (unsigned long)pgtable_quicklist;
	pgtable_quicklist = (unsigned long *)pgtable_entry;
	++pgtable_quicklist_size;
	preempt_enable();
}
  63. static inline pgd_t *pgd_alloc(struct mm_struct *mm)
  64. {
  65. return pgtable_quicklist_alloc();
  66. }
  67. static inline void pgd_free(pgd_t * pgd)
  68. {
  69. pgtable_quicklist_free(pgd);
  70. }
  71. #ifdef CONFIG_PGTABLE_4
/*
 * Install the physical address of a pud page into a pgd entry
 * (4-level page tables only; @mm is unused here).
 */
static inline void
pgd_populate(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
{
	pgd_val(*pgd_entry) = __pa(pud);
}
  77. static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
  78. {
  79. return pgtable_quicklist_alloc();
  80. }
  81. static inline void pud_free(pud_t * pud)
  82. {
  83. pgtable_quicklist_free(pud);
  84. }
  85. #define __pud_free_tlb(tlb, pud) pud_free(pud)
  86. #endif /* CONFIG_PGTABLE_4 */
/*
 * Install the physical address of a pmd page into a pud entry
 * (@mm is unused here).
 */
static inline void
pud_populate(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
{
	pud_val(*pud_entry) = __pa(pmd);
}
  92. static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
  93. {
  94. return pgtable_quicklist_alloc();
  95. }
  96. static inline void pmd_free(pmd_t * pmd)
  97. {
  98. pgtable_quicklist_free(pmd);
  99. }
  100. #define __pmd_free_tlb(tlb, pmd) pmd_free(pmd)
/*
 * Point a pmd entry at a user pte page, which is handed in as a
 * struct page (@mm is unused here).
 */
static inline void
pmd_populate(struct mm_struct *mm, pmd_t * pmd_entry, struct page *pte)
{
	pmd_val(*pmd_entry) = page_to_phys(pte);
}
/*
 * Point a pmd entry at a kernel pte page, which is handed in by its
 * kernel virtual address (@mm is unused here).
 */
static inline void
pmd_populate_kernel(struct mm_struct *mm, pmd_t * pmd_entry, pte_t * pte)
{
	pmd_val(*pmd_entry) = __pa(pte);
}
  111. static inline struct page *pte_alloc_one(struct mm_struct *mm,
  112. unsigned long addr)
  113. {
  114. return virt_to_page(pgtable_quicklist_alloc());
  115. }
  116. static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
  117. unsigned long addr)
  118. {
  119. return pgtable_quicklist_alloc();
  120. }
/*
 * Free a user pte page (given as a struct page); convert it back to
 * its kernel virtual address for the quicklist.
 */
static inline void pte_free(struct page *pte)
{
	pgtable_quicklist_free(page_address(pte));
}
  125. static inline void pte_free_kernel(pte_t * pte)
  126. {
  127. pgtable_quicklist_free(pte);
  128. }
  129. #define __pte_free_tlb(tlb, pte) pte_free(pte)
  130. extern void check_pgt_cache(void);
  131. #endif /* _ASM_IA64_PGALLOC_H */