pgalloc.h
/* $Id: pgalloc.h,v 1.30 2001/12/21 04:56:17 davem Exp $ */
#ifndef _SPARC64_PGALLOC_H
#define _SPARC64_PGALLOC_H

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>

#include <asm/spitfire.h>
#include <asm/cpudata.h>
#include <asm/cacheflush.h>
#include <asm/page.h>

/* Page table allocation/freeing. */
#ifdef CONFIG_SMP
/* Sliiiicck */
#define pgt_quicklists local_cpu_data()
#else
extern struct pgtable_cache_struct {
        unsigned long *pgd_cache;
        unsigned long *pte_cache[2];
        unsigned int pgcache_size;
} pgt_quicklists;
#endif
#define pgd_quicklist (pgt_quicklists.pgd_cache)
#define pmd_quicklist ((unsigned long *)0)
#define pte_quicklist (pgt_quicklists.pte_cache)
#define pgtable_cache_size (pgt_quicklists.pgcache_size)
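
/*
 * Freed page-table pages are cached on simple singly-linked quicklists
 * instead of being returned to the page allocator right away.  The list
 * link is stored in the first word of each free page, and
 * pgtable_cache_size counts how many pages are currently cached.  On
 * SMP the lists live in the per-cpu data (local_cpu_data()), so a
 * preempt_disable()/preempt_enable() pair is enough to use them safely.
 */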

static __inline__ void free_pgd_fast(pgd_t *pgd)
{
        preempt_disable();
        *(unsigned long *)pgd = (unsigned long) pgd_quicklist;
        pgd_quicklist = (unsigned long *) pgd;
        pgtable_cache_size++;
        preempt_enable();
}

static __inline__ pgd_t *get_pgd_fast(void)
{
        unsigned long *ret;

        preempt_disable();
        if((ret = pgd_quicklist) != NULL) {
                pgd_quicklist = (unsigned long *)(*ret);
                ret[0] = 0;
                pgtable_cache_size--;
                preempt_enable();
        } else {
                preempt_enable();
                ret = (unsigned long *) __get_free_page(GFP_KERNEL|__GFP_REPEAT);
                if(ret)
                        memset(ret, 0, PAGE_SIZE);
        }
        return (pgd_t *)ret;
}

static __inline__ void free_pgd_slow(pgd_t *pgd)
{
        free_page((unsigned long)pgd);
}

/* XXX This crap can die, no longer using virtual page tables... */
#ifdef DCACHE_ALIASING_POSSIBLE
#define VPTE_COLOR(address)   (((address) >> (PAGE_SHIFT + 10)) & 1UL)
#define DCACHE_COLOR(address) (((address) >> PAGE_SHIFT) & 1UL)
#else
#define VPTE_COLOR(address)   0
#define DCACHE_COLOR(address) 0
#endif
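
/*
 * With a virtually-indexed, aliasing D-cache there are two page colors,
 * selected by a single address bit.  DCACHE_COLOR() takes the color from
 * the page's kernel address, while VPTE_COLOR() derives it from the user
 * virtual address a PTE page will cover (a leftover of the old virtual
 * page table layout, as the XXX comment above notes).  The two pte_cache
 * quicklists are indexed by this color so a cached page is handed out
 * again at the same color it was freed with.
 */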

#define pud_populate(MM, PUD, PMD) pud_set(PUD, PMD)
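
/*
 * PMD pages are one page each, just like PTE pages, so they share the
 * colored pte_cache quicklists: pmd_alloc_one_fast() pops from whichever
 * colored list is non-empty, and free_pmd_fast() pushes back onto the
 * list matching the page's D-cache color.
 */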
static __inline__ pmd_t *pmd_alloc_one_fast(struct mm_struct *mm, unsigned long address)
{
        unsigned long *ret;
        int color = 0;

        preempt_disable();
        if (pte_quicklist[color] == NULL)
                color = 1;

        if((ret = (unsigned long *)pte_quicklist[color]) != NULL) {
                pte_quicklist[color] = (unsigned long *)(*ret);
                ret[0] = 0;
                pgtable_cache_size--;
        }
        preempt_enable();

        return (pmd_t *)ret;
}

static __inline__ pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
{
        pmd_t *pmd;

        pmd = pmd_alloc_one_fast(mm, address);
        if (!pmd) {
                pmd = (pmd_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
                if (pmd)
                        memset(pmd, 0, PAGE_SIZE);
        }
        return pmd;
}

static __inline__ void free_pmd_fast(pmd_t *pmd)
{
        unsigned long color = DCACHE_COLOR((unsigned long)pmd);

        preempt_disable();
        *(unsigned long *)pmd = (unsigned long) pte_quicklist[color];
        pte_quicklist[color] = (unsigned long *) pmd;
        pgtable_cache_size++;
        preempt_enable();
}

static __inline__ void free_pmd_slow(pmd_t *pmd)
{
        free_page((unsigned long)pmd);
}

#define pmd_populate_kernel(MM, PMD, PTE) pmd_set(PMD, PTE)
#define pmd_populate(MM,PMD,PTE_PAGE) \
        pmd_populate_kernel(MM,PMD,page_address(PTE_PAGE))
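
/*
 * pmd_populate() installs a PTE page into a pmd entry; the struct page
 * variant converts to a kernel virtual address with page_address()
 * before handing off to pmd_populate_kernel()/pmd_set().
 */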

extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address);

static inline struct page *
pte_alloc_one(struct mm_struct *mm, unsigned long addr)
{
        pte_t *pte = pte_alloc_one_kernel(mm, addr);

        if (pte)
                return virt_to_page(pte);

        return NULL;
}

static __inline__ pte_t *pte_alloc_one_fast(struct mm_struct *mm, unsigned long address)
{
        unsigned long color = VPTE_COLOR(address);
        unsigned long *ret;

        preempt_disable();
        if((ret = (unsigned long *)pte_quicklist[color]) != NULL) {
                pte_quicklist[color] = (unsigned long *)(*ret);
                ret[0] = 0;
                pgtable_cache_size--;
        }
        preempt_enable();
        return (pte_t *)ret;
}
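
/*
 * Note that pte_alloc_one_fast() is only the quicklist fast path: it
 * returns NULL when the colored list is empty and does not fall back to
 * the page allocator.  Callers that must get a fresh page go through the
 * out-of-line pte_alloc_one_kernel() declared above.
 */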

static __inline__ void free_pte_fast(pte_t *pte)
{
        unsigned long color = DCACHE_COLOR((unsigned long)pte);

        preempt_disable();
        *(unsigned long *)pte = (unsigned long) pte_quicklist[color];
        pte_quicklist[color] = (unsigned long *) pte;
        pgtable_cache_size++;
        preempt_enable();
}

static __inline__ void free_pte_slow(pte_t *pte)
{
        free_page((unsigned long)pte);
}

static inline void pte_free_kernel(pte_t *pte)
{
        free_pte_fast(pte);
}

static inline void pte_free(struct page *ptepage)
{
        free_pte_fast(page_address(ptepage));
}
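
/*
 * The generic pgalloc entry points map onto the quicklist helpers:
 * pgd_alloc()/pgd_free() use the pgd_cache list, while pmd_free() and
 * the pte_free()/pte_free_kernel() helpers above push pages back onto
 * the colored pte_cache lists.
 */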
#define pmd_free(pmd)  free_pmd_fast(pmd)
#define pgd_free(pgd)  free_pgd_fast(pgd)
#define pgd_alloc(mm)  get_pgd_fast()

#endif /* _SPARC64_PGALLOC_H */