/* $Id: pgalloc.h,v 1.30 2001/12/21 04:56:17 davem Exp $ */
#ifndef _SPARC64_PGALLOC_H
#define _SPARC64_PGALLOC_H

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>

#include <asm/spitfire.h>
#include <asm/cpudata.h>
#include <asm/cacheflush.h>
#include <asm/page.h>

/* Page table allocation/freeing. */
#ifdef CONFIG_SMP
/* Sliiiicck */
/* On SMP the quicklists live in the per-cpu data area, so no locking
 * beyond preempt_disable() is needed to touch them.
 */
#define pgt_quicklists local_cpu_data()
#else
/* On UP a single global cache structure is shared. */
extern struct pgtable_cache_struct {
	unsigned long *pgd_cache;	/* freelist head of cached pgd pages */
	unsigned long *pte_cache[2];	/* pte/pmd freelists, one per D-cache color */
	unsigned int pgcache_size;	/* total pages held across all freelists */
} pgt_quicklists;
#endif
/* Accessors for the quicklist members; there is no pmd quicklist of
 * its own — pmd pages share pte_cache (see free_pmd_fast below).
 */
#define pgd_quicklist		(pgt_quicklists.pgd_cache)
#define pmd_quicklist		((unsigned long *)0)
#define pte_quicklist		(pgt_quicklists.pte_cache)
#define pgtable_cache_size	(pgt_quicklists.pgcache_size)
  27. static __inline__ void free_pgd_fast(pgd_t *pgd)
  28. {
  29. preempt_disable();
  30. *(unsigned long *)pgd = (unsigned long) pgd_quicklist;
  31. pgd_quicklist = (unsigned long *) pgd;
  32. pgtable_cache_size++;
  33. preempt_enable();
  34. }
  35. static __inline__ pgd_t *get_pgd_fast(void)
  36. {
  37. unsigned long *ret;
  38. preempt_disable();
  39. if((ret = pgd_quicklist) != NULL) {
  40. pgd_quicklist = (unsigned long *)(*ret);
  41. ret[0] = 0;
  42. pgtable_cache_size--;
  43. preempt_enable();
  44. } else {
  45. preempt_enable();
  46. ret = (unsigned long *) __get_free_page(GFP_KERNEL|__GFP_REPEAT);
  47. if(ret)
  48. memset(ret, 0, PAGE_SIZE);
  49. }
  50. return (pgd_t *)ret;
  51. }
  52. static __inline__ void free_pgd_slow(pgd_t *pgd)
  53. {
  54. free_page((unsigned long)pgd);
  55. }
#ifdef DCACHE_ALIASING_POSSIBLE
/* Two "colors" are tracked so a cached page can be handed out whose
 * alias color matches: VPTE_COLOR keys off the virtual address to be
 * mapped, DCACHE_COLOR off the page's own address.
 */
#define VPTE_COLOR(address)		(((address) >> (PAGE_SHIFT + 10)) & 1UL)
#define DCACHE_COLOR(address)		(((address) >> PAGE_SHIFT) & 1UL)
#else
/* No D-cache aliasing possible: a single color suffices. */
#define VPTE_COLOR(address)		0
#define DCACHE_COLOR(address)		0
#endif

#define pud_populate(MM, PUD, PMD)	pud_set(PUD, PMD)
  64. static __inline__ pmd_t *pmd_alloc_one_fast(struct mm_struct *mm, unsigned long address)
  65. {
  66. unsigned long *ret;
  67. int color = 0;
  68. preempt_disable();
  69. if (pte_quicklist[color] == NULL)
  70. color = 1;
  71. if((ret = (unsigned long *)pte_quicklist[color]) != NULL) {
  72. pte_quicklist[color] = (unsigned long *)(*ret);
  73. ret[0] = 0;
  74. pgtable_cache_size--;
  75. }
  76. preempt_enable();
  77. return (pmd_t *)ret;
  78. }
  79. static __inline__ pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
  80. {
  81. pmd_t *pmd;
  82. pmd = pmd_alloc_one_fast(mm, address);
  83. if (!pmd) {
  84. pmd = (pmd_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
  85. if (pmd)
  86. memset(pmd, 0, PAGE_SIZE);
  87. }
  88. return pmd;
  89. }
  90. static __inline__ void free_pmd_fast(pmd_t *pmd)
  91. {
  92. unsigned long color = DCACHE_COLOR((unsigned long)pmd);
  93. preempt_disable();
  94. *(unsigned long *)pmd = (unsigned long) pte_quicklist[color];
  95. pte_quicklist[color] = (unsigned long *) pmd;
  96. pgtable_cache_size++;
  97. preempt_enable();
  98. }
  99. static __inline__ void free_pmd_slow(pmd_t *pmd)
  100. {
  101. free_page((unsigned long)pmd);
  102. }
/* Install a pte page into a pmd entry; the user-space variant takes a
 * struct page and converts it to its kernel virtual address first.
 */
#define pmd_populate_kernel(MM, PMD, PTE)	pmd_set(PMD, PTE)
#define pmd_populate(MM,PMD,PTE_PAGE)		\
	pmd_populate_kernel(MM,PMD,page_address(PTE_PAGE))

/* Out-of-line pte allocator, defined elsewhere in the arch code. */
extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address);
  107. static inline struct page *
  108. pte_alloc_one(struct mm_struct *mm, unsigned long addr)
  109. {
  110. pte_t *pte = pte_alloc_one_kernel(mm, addr);
  111. if (pte)
  112. return virt_to_page(pte);
  113. return NULL;
  114. }
  115. static __inline__ pte_t *pte_alloc_one_fast(struct mm_struct *mm, unsigned long address)
  116. {
  117. unsigned long color = VPTE_COLOR(address);
  118. unsigned long *ret;
  119. preempt_disable();
  120. if((ret = (unsigned long *)pte_quicklist[color]) != NULL) {
  121. pte_quicklist[color] = (unsigned long *)(*ret);
  122. ret[0] = 0;
  123. pgtable_cache_size--;
  124. }
  125. preempt_enable();
  126. return (pte_t *)ret;
  127. }
  128. static __inline__ void free_pte_fast(pte_t *pte)
  129. {
  130. unsigned long color = DCACHE_COLOR((unsigned long)pte);
  131. preempt_disable();
  132. *(unsigned long *)pte = (unsigned long) pte_quicklist[color];
  133. pte_quicklist[color] = (unsigned long *) pte;
  134. pgtable_cache_size++;
  135. preempt_enable();
  136. }
  137. static __inline__ void free_pte_slow(pte_t *pte)
  138. {
  139. free_page((unsigned long)pte);
  140. }
  141. static inline void pte_free_kernel(pte_t *pte)
  142. {
  143. free_pte_fast(pte);
  144. }
  145. static inline void pte_free(struct page *ptepage)
  146. {
  147. free_pte_fast(page_address(ptepage));
  148. }
/* Default free/alloc entry points all go through the quicklists;
 * pages are only handed back to the page allocator by the *_slow()
 * variants above.
 */
#define pmd_free(pmd)		free_pmd_fast(pmd)
#define pgd_free(pgd)		free_pgd_fast(pgd)
#define pgd_alloc(mm)		get_pgd_fast()

#endif /* _SPARC64_PGALLOC_H */