/* $Id: pgalloc.h,v 1.30 2001/12/21 04:56:17 davem Exp $ */
#ifndef _SPARC64_PGALLOC_H
#define _SPARC64_PGALLOC_H

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>

#include <asm/spitfire.h>
#include <asm/cpudata.h>
#include <asm/cacheflush.h>
#include <asm/page.h>

/* Page table allocation/freeing. */
#ifdef CONFIG_SMP
/* Sliiiicck */
#define pgt_quicklists		local_cpu_data()
#else
extern struct pgtable_cache_struct {
	unsigned long *pgd_cache;
	unsigned long *pte_cache;
	unsigned int pgcache_size;
} pgt_quicklists;
#endif
#define pgd_quicklist		(pgt_quicklists.pgd_cache)
#define pte_quicklist		(pgt_quicklists.pte_cache)
#define pgtable_cache_size	(pgt_quicklists.pgcache_size)
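
/*
 * The quicklists are simple LIFO free lists of page-table pages.
 * A cached page's first word holds the address of the next page on
 * the list, so the list needs no bookkeeping storage of its own.
 * On SMP the lists live in the per-cpu data (hence the
 * preempt_disable()/preempt_enable() pairs around every list
 * manipulation); on UP a single global instance is used.
 */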
static inline void free_pgd_fast(pgd_t *pgd)
{
	preempt_disable();
	*(unsigned long *)pgd = (unsigned long) pgd_quicklist;
	pgd_quicklist = (unsigned long *) pgd;
	pgtable_cache_size++;
	preempt_enable();
}
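
/*
 * Pop a cached pgd page off the quicklist if one is available,
 * otherwise fall back to the page allocator.  Preemption is
 * re-enabled before calling __get_free_page(), since a GFP_KERNEL
 * allocation may sleep.
 */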
static inline pgd_t *get_pgd_fast(void)
{
	unsigned long *ret;

	preempt_disable();
	if ((ret = pgd_quicklist) != NULL) {
		pgd_quicklist = (unsigned long *)(*ret);
		ret[0] = 0;
		pgtable_cache_size--;
		preempt_enable();
	} else {
		preempt_enable();
		ret = (unsigned long *) __get_free_page(GFP_KERNEL|__GFP_REPEAT);
		if (ret)
			memset(ret, 0, PAGE_SIZE);
	}
	return (pgd_t *)ret;
}

static inline void free_pgd_slow(pgd_t *pgd)
{
	free_page((unsigned long)pgd);
}

#define pud_populate(MM, PUD, PMD)	pud_set(PUD, PMD)
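
/*
 * pmd and pte pages are both single pages and share the same
 * quicklist (pte_quicklist); only the pgd cache is kept separate.
 */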
static inline pmd_t *pmd_alloc_one_fast(void)
{
	unsigned long *ret;

	preempt_disable();
	ret = (unsigned long *) pte_quicklist;
	if (likely(ret)) {
		pte_quicklist = (unsigned long *)(*ret);
		ret[0] = 0;
		pgtable_cache_size--;
	}
	preempt_enable();
	return (pmd_t *) ret;
}
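
/*
 * Try the quicklist first; on a miss take a fresh zeroed page from
 * the buddy allocator.  __GFP_REPEAT asks the allocator to retry
 * harder rather than fail at the first sign of pressure.
 */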
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
{
	pmd_t *pmd;

	pmd = pmd_alloc_one_fast();
	if (unlikely(!pmd)) {
		pmd = (pmd_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
		if (pmd)
			memset(pmd, 0, PAGE_SIZE);
	}
	return pmd;
}

static inline void free_pmd_fast(pmd_t *pmd)
{
	preempt_disable();
	*(unsigned long *)pmd = (unsigned long) pte_quicklist;
	pte_quicklist = (unsigned long *) pmd;
	pgtable_cache_size++;
	preempt_enable();
}

static inline void free_pmd_slow(pmd_t *pmd)
{
	free_page((unsigned long)pmd);
}
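
/*
 * pmd_populate() receives the pte table as a struct page (as returned
 * by pte_alloc_one() below) and converts it to a kernel virtual
 * address before installing it; pmd_populate_kernel() is handed a
 * pte_t * directly.
 */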
#define pmd_populate_kernel(MM, PMD, PTE)	pmd_set(PMD, PTE)
#define pmd_populate(MM, PMD, PTE_PAGE)		\
	pmd_populate_kernel(MM, PMD, page_address(PTE_PAGE))

static inline pte_t *pte_alloc_one_fast(void)
{
	unsigned long *ret;

	preempt_disable();
	ret = (unsigned long *) pte_quicklist;
	if (likely(ret)) {
		pte_quicklist = (unsigned long *)(*ret);
		ret[0] = 0;
		pgtable_cache_size--;
	}
	preempt_enable();
	return (pte_t *) ret;
}
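
/*
 * Two pte allocation entry points: pte_alloc_one_kernel() returns a
 * kernel virtual pointer for kernel page tables, while pte_alloc_one()
 * returns the struct page for user page tables.  Both draw from the
 * same quicklist.
 */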
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
	pte_t *ptep = pte_alloc_one_fast();

	if (likely(ptep))
		return ptep;

	return (pte_t *) get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
}

static inline struct page *pte_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	pte_t *pte = pte_alloc_one_fast();

	if (likely(pte))
		return virt_to_page(pte);

	return alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
}

static inline void free_pte_fast(pte_t *pte)
{
	preempt_disable();
	*(unsigned long *)pte = (unsigned long) pte_quicklist;
	pte_quicklist = (unsigned long *) pte;
	pgtable_cache_size++;
	preempt_enable();
}

static inline void free_pte_slow(pte_t *pte)
{
	free_page((unsigned long) pte);
}

static inline void pte_free_kernel(pte_t *pte)
{
	free_pte_fast(pte);
}

static inline void pte_free(struct page *ptepage)
{
	free_pte_fast(page_address(ptepage));
}
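
/*
 * The public free hooks below always push pages back onto the
 * quicklists; nothing in this header calls the *_slow() variants,
 * which return pages to the page allocator and exist for the code
 * that trims an oversized pgtable cache.
 */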
#define pmd_free(pmd)		free_pmd_fast(pmd)
#define pgd_free(pgd)		free_pgd_fast(pgd)
#define pgd_alloc(mm)		get_pgd_fast()
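
/*
 * Rough sketch of how the generic mm layer drives these hooks, for
 * orientation only (not verbatim kernel code; calling contexts are
 * simplified):
 *
 *	mm->pgd = pgd_alloc(mm);		at fork/exec time
 *	ptepage = pte_alloc_one(mm, address);	at fault time
 *	pmd_populate(mm, pmd, ptepage);		hook the pte page in
 *	...
 *	pte_free(ptepage);			when tearing down tables
 *	pgd_free(mm->pgd);			at mm destruction
 */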
#endif /* _SPARC64_PGALLOC_H */