/* pgalloc.h (web-viewer page header and line-number gutter removed) */
#ifndef _ASM_IA64_PGALLOC_H
#define _ASM_IA64_PGALLOC_H

/*
 * This file contains the functions and defines necessary to allocate
 * page tables.
 *
 * This hopefully works with any (fixed) ia-64 page-size, as defined
 * in <asm/page.h> (currently 8192).
 *
 * Copyright (C) 1998-2001 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 2000, Goutham Rao <goutham.rao@intel.com>
 */

#include <linux/config.h>
#include <linux/compiler.h>
#include <linux/mm.h>
#include <linux/page-flags.h>
#include <linux/threads.h>

#include <asm/mmu_context.h>

/*
 * Per-CPU freelist ("quicklist") of page-table pages.  The list is
 * threaded through the first word of each free page (see the alloc/free
 * helpers below), and a per-CPU counter tracks its length.  The two
 * macros resolve to the current CPU's copies; users must keep
 * preemption disabled while manipulating them.
 */
DECLARE_PER_CPU(unsigned long *, __pgtable_quicklist);
#define pgtable_quicklist __ia64_per_cpu_var(__pgtable_quicklist)
DECLARE_PER_CPU(long, __pgtable_quicklist_size);
#define pgtable_quicklist_size __ia64_per_cpu_var(__pgtable_quicklist_size)
  24. static inline long pgtable_quicklist_total_size(void)
  25. {
  26. long ql_size = 0;
  27. int cpuid;
  28. for_each_online_cpu(cpuid) {
  29. ql_size += per_cpu(__pgtable_quicklist_size, cpuid);
  30. }
  31. return ql_size;
  32. }
/*
 * Allocate a page for use as a page-table page.
 *
 * Fast path: pop the head of this CPU's quicklist with preemption
 * disabled so the per-CPU list cannot be switched under us.  Slow path
 * (empty list): re-enable preemption first, because GFP_KERNEL
 * allocations may sleep.  Returns NULL only if __get_free_page() fails.
 *
 * NOTE(review): only the link word (ret[0]) is cleared on the fast
 * path; pages on the quicklist are presumably already clear apart from
 * that word — confirm against pgtable_quicklist_free()'s callers.
 */
static inline void *pgtable_quicklist_alloc(void)
{
	unsigned long *ret = NULL;

	preempt_disable();

	ret = pgtable_quicklist;
	if (likely(ret != NULL)) {
		/* Unlink the head; the next-pointer lives in the first word. */
		pgtable_quicklist = (unsigned long *)(*ret);
		/* Erase the link word before handing the page out. */
		ret[0] = 0;
		--pgtable_quicklist_size;
		preempt_enable();
	} else {
		preempt_enable();
		ret = (unsigned long *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
	}

	return ret;
}
/*
 * Return a page-table page to this CPU's quicklist.
 *
 * On NUMA, a page that belongs to a remote node is handed straight back
 * to the page allocator instead, so the quicklist only accumulates
 * node-local pages.  Preemption is disabled around the list push so the
 * per-CPU head and counter are updated on the same CPU.
 */
static inline void pgtable_quicklist_free(void *pgtable_entry)
{
#ifdef CONFIG_NUMA
	unsigned long nid = page_to_nid(virt_to_page(pgtable_entry));

	if (unlikely(nid != numa_node_id())) {
		free_page((unsigned long)pgtable_entry);
		return;
	}
#endif

	preempt_disable();
	/* Push onto the head; stash the old head in the page's first word. */
	*(unsigned long *)pgtable_entry = (unsigned long)pgtable_quicklist;
	pgtable_quicklist = (unsigned long *)pgtable_entry;
	++pgtable_quicklist_size;
	preempt_enable();
}
  64. static inline pgd_t *pgd_alloc(struct mm_struct *mm)
  65. {
  66. return pgtable_quicklist_alloc();
  67. }
  68. static inline void pgd_free(pgd_t * pgd)
  69. {
  70. pgtable_quicklist_free(pgd);
  71. }
#ifdef CONFIG_PGTABLE_4
/* Install @pud as the table referenced by @pgd_entry (4-level paging only). */
static inline void
pgd_populate(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
{
	pgd_val(*pgd_entry) = __pa(pud);
}

/* Allocate a pud page; @mm and @addr are accepted but unused here. */
static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	return pgtable_quicklist_alloc();
}

/* Return a pud page to the per-CPU quicklist. */
static inline void pud_free(pud_t * pud)
{
	pgtable_quicklist_free(pud);
}

/* No extra TLB-batching work for puds; just recycle the page. */
#define __pud_free_tlb(tlb, pud)	pud_free(pud)
#endif /* CONFIG_PGTABLE_4 */
/* Install @pmd as the table referenced by @pud_entry. */
static inline void
pud_populate(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
{
	pud_val(*pud_entry) = __pa(pmd);
}
  93. static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
  94. {
  95. return pgtable_quicklist_alloc();
  96. }
  97. static inline void pmd_free(pmd_t * pmd)
  98. {
  99. pgtable_quicklist_free(pmd);
  100. }
  101. #define __pmd_free_tlb(tlb, pmd) pmd_free(pmd)
/*
 * Point @pmd_entry at the user pte page @pte.  The user pte level is
 * passed as a struct page, hence page_to_phys() rather than __pa().
 */
static inline void
pmd_populate(struct mm_struct *mm, pmd_t * pmd_entry, struct page *pte)
{
	pmd_val(*pmd_entry) = page_to_phys(pte);
}
/* Point @pmd_entry at the kernel pte table @pte (given by virtual address). */
static inline void
pmd_populate_kernel(struct mm_struct *mm, pmd_t * pmd_entry, pte_t * pte)
{
	pmd_val(*pmd_entry) = __pa(pte);
}
  112. static inline struct page *pte_alloc_one(struct mm_struct *mm,
  113. unsigned long addr)
  114. {
  115. return virt_to_page(pgtable_quicklist_alloc());
  116. }
  117. static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
  118. unsigned long addr)
  119. {
  120. return pgtable_quicklist_alloc();
  121. }
  122. static inline void pte_free(struct page *pte)
  123. {
  124. pgtable_quicklist_free(page_address(pte));
  125. }
  126. static inline void pte_free_kernel(pte_t * pte)
  127. {
  128. pgtable_quicklist_free(pte);
  129. }
/* No extra TLB-batching work for ptes; just recycle the page. */
#define __pte_free_tlb(tlb, pte)	pte_free(pte)

/* Presumably trims oversized quicklists — see the arch implementation. */
extern void check_pgt_cache(void);

#endif /* _ASM_IA64_PGALLOC_H */