/* $Id: pgalloc.h,v 1.30 2001/12/21 04:56:17 davem Exp $ */
#ifndef _SPARC64_PGALLOC_H
#define _SPARC64_PGALLOC_H

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>

#include <asm/spitfire.h>
#include <asm/cpudata.h>
#include <asm/cacheflush.h>

/* Page table allocation/freeing. */
#ifdef CONFIG_SMP
/* Sliiiicck */
#define pgt_quicklists	local_cpu_data()
#else
extern struct pgtable_cache_struct {
        unsigned long *pgd_cache;
        unsigned long *pte_cache[2];
        unsigned int pgcache_size;
} pgt_quicklists;
#endif
#define pgd_quicklist		(pgt_quicklists.pgd_cache)
#define pmd_quicklist		((unsigned long *)0)
#define pte_quicklist		(pgt_quicklists.pte_cache)
#define pgtable_cache_size	(pgt_quicklists.pgcache_size)
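
/*
 * The quicklists are simple freelists of page-table pages: the first
 * word of each free page points at the next free page, and
 * pgtable_cache_size counts how many pages are cached.  Preemption is
 * disabled around the list operations because on SMP the quicklists
 * live in per-cpu data.
 */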
static __inline__ void free_pgd_fast(pgd_t *pgd)
{
        preempt_disable();
        *(unsigned long *)pgd = (unsigned long) pgd_quicklist;
        pgd_quicklist = (unsigned long *) pgd;
        pgtable_cache_size++;
        preempt_enable();
}

static __inline__ pgd_t *get_pgd_fast(void)
{
        unsigned long *ret;

        preempt_disable();
        if ((ret = pgd_quicklist) != NULL) {
                pgd_quicklist = (unsigned long *)(*ret);
                ret[0] = 0;
                pgtable_cache_size--;
                preempt_enable();
        } else {
                preempt_enable();
                ret = (unsigned long *) __get_free_page(GFP_KERNEL|__GFP_REPEAT);
                if (ret)
                        memset(ret, 0, PAGE_SIZE);
        }
        return (pgd_t *)ret;
}

static __inline__ void free_pgd_slow(pgd_t *pgd)
{
        free_page((unsigned long)pgd);
}
#ifdef DCACHE_ALIASING_POSSIBLE
#define VPTE_COLOR(address)		(((address) >> (PAGE_SHIFT + 10)) & 1UL)
#define DCACHE_COLOR(address)		(((address) >> PAGE_SHIFT) & 1UL)
#else
#define VPTE_COLOR(address)		0
#define DCACHE_COLOR(address)		0
#endif

#define pud_populate(MM, PUD, PMD)	pud_set(PUD, PMD)
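
/*
 * pmd pages are carved from the same quicklists as pte pages.  Color 0
 * is tried first, falling back to color 1 when that list is empty; a
 * pmd page is not mapped through the virtual page table, so either
 * color will do.
 */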
static __inline__ pmd_t *pmd_alloc_one_fast(struct mm_struct *mm, unsigned long address)
{
        unsigned long *ret;
        int color = 0;

        preempt_disable();
        if (pte_quicklist[color] == NULL)
                color = 1;

        if ((ret = (unsigned long *)pte_quicklist[color]) != NULL) {
                pte_quicklist[color] = (unsigned long *)(*ret);
                ret[0] = 0;
                pgtable_cache_size--;
        }
        preempt_enable();

        return (pmd_t *)ret;
}

static __inline__ pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
{
        pmd_t *pmd;

        pmd = pmd_alloc_one_fast(mm, address);
        if (!pmd) {
                pmd = (pmd_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
                if (pmd)
                        memset(pmd, 0, PAGE_SIZE);
        }
        return pmd;
}

static __inline__ void free_pmd_fast(pmd_t *pmd)
{
        unsigned long color = DCACHE_COLOR((unsigned long)pmd);

        preempt_disable();
        *(unsigned long *)pmd = (unsigned long) pte_quicklist[color];
        pte_quicklist[color] = (unsigned long *) pmd;
        pgtable_cache_size++;
        preempt_enable();
}

static __inline__ void free_pmd_slow(pmd_t *pmd)
{
        free_page((unsigned long)pmd);
}

#define pmd_populate_kernel(MM, PMD, PTE)	pmd_set(PMD, PTE)
#define pmd_populate(MM,PMD,PTE_PAGE)		\
	pmd_populate_kernel(MM,PMD,page_address(PTE_PAGE))
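
/*
 * The pte slow path lives out of line in arch code; pte_alloc_one()
 * simply wraps it to hand back the struct page rather than the kernel
 * virtual address.
 */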
extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address);

static inline struct page *
pte_alloc_one(struct mm_struct *mm, unsigned long addr)
{
        pte_t *pte = pte_alloc_one_kernel(mm, addr);

        if (pte)
                return virt_to_page(pte);
        return NULL;
}
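
/*
 * The pte fast path picks the quicklist whose color matches
 * VPTE_COLOR() of the faulting address, so the pte page lands at the
 * right D-cache color for its slot in the virtual page table.
 */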
static __inline__ pte_t *pte_alloc_one_fast(struct mm_struct *mm, unsigned long address)
{
        unsigned long color = VPTE_COLOR(address);
        unsigned long *ret;

        preempt_disable();
        if ((ret = (unsigned long *)pte_quicklist[color]) != NULL) {
                pte_quicklist[color] = (unsigned long *)(*ret);
                ret[0] = 0;
                pgtable_cache_size--;
        }
        preempt_enable();
        return (pte_t *)ret;
}

static __inline__ void free_pte_fast(pte_t *pte)
{
        unsigned long color = DCACHE_COLOR((unsigned long)pte);

        preempt_disable();
        *(unsigned long *)pte = (unsigned long) pte_quicklist[color];
        pte_quicklist[color] = (unsigned long *) pte;
        pgtable_cache_size++;
        preempt_enable();
}

static __inline__ void free_pte_slow(pte_t *pte)
{
        free_page((unsigned long)pte);
}

static inline void pte_free_kernel(pte_t *pte)
{
        free_pte_fast(pte);
}

static inline void pte_free(struct page *ptepage)
{
        free_pte_fast(page_address(ptepage));
}
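
/*
 * Freeing always goes through the quicklist fast paths; the *_slow
 * variants above return pages to the page allocator when the
 * quicklists are pruned.
 */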
#define pmd_free(pmd)		free_pmd_fast(pmd)
#define pgd_free(pgd)		free_pgd_fast(pgd)
#define pgd_alloc(mm)		get_pgd_fast()

#endif /* _SPARC64_PGALLOC_H */