tsb.c

/* arch/sparc64/mm/tsb.c
 *
 * Copyright (C) 2006 David S. Miller <davem@davemloft.net>
 */

#include <linux/kernel.h>
#include <asm/system.h>
#include <asm/page.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>

/* We use an 8K TSB for the whole kernel; this lets us handle
 * about 4MB of module and vmalloc mappings without incurring
 * many hash conflicts.
 */
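/* (Assuming the usual two-word, 16-byte struct tsb, 8192 bytes works
 * out to 512 entries, one per 8K page: 512 * 8K = 4MB of coverage
 * before buckets start to be shared.)
 */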
#define KERNEL_TSB_SIZE_BYTES	8192
#define KERNEL_TSB_NENTRIES \
	(KERNEL_TSB_SIZE_BYTES / sizeof(struct tsb))

extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];

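/* Hash a virtual address into a TSB index: drop the page offset and
 * mask with the number of entries, which is always a power of two.
 */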
static inline unsigned long tsb_hash(unsigned long vaddr, unsigned long nentries)
{
	vaddr >>= PAGE_SHIFT;
	return vaddr & (nentries - 1);
}

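/* A TSB tag packs the hardware context number into the upper bits
 * (context << 48) and the virtual page number (vaddr >> 22) into the
 * low bits.  A context of ~0UL acts as a wildcard that matches any
 * entry.
 */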
static inline int tag_compare(struct tsb *entry, unsigned long vaddr, unsigned long context)
{
	if (context == ~0UL)
		return 1;

	return (entry->tag == ((vaddr >> 22) | (context << 48)));
}

/* TSB flushes need only occur on the processor initiating the address
 * space modification, not on each cpu the address space has run on.
 * Only the TLB flush needs that treatment.
 */
void flush_tsb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long v;

	for (v = start; v < end; v += PAGE_SIZE) {
		unsigned long hash = tsb_hash(v, KERNEL_TSB_NENTRIES);
		struct tsb *ent = &swapper_tsb[hash];

		if (tag_compare(ent, v, 0)) {
			ent->tag = 0UL;
			membar_storeload_storestore();
		}
	}
}

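/* Flush the batched user virtual addresses out of this mm's TSB.  If
 * the mm has no valid hardware context yet, ~0UL is passed so that
 * tag_compare() matches unconditionally.  Bit 0 of each batched
 * address carries a flag from the TLB batching code, so it is masked
 * off before hashing.
 */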
void flush_tsb_user(struct mmu_gather *mp)
{
	struct mm_struct *mm = mp->mm;
	struct tsb *tsb = mm->context.tsb;
	unsigned long ctx = ~0UL;
	unsigned long nentries = mm->context.tsb_nentries;
	int i;

	if (CTX_VALID(mm->context))
		ctx = CTX_HWBITS(mm->context);

	for (i = 0; i < mp->tlb_nr; i++) {
		unsigned long v = mp->vaddrs[i];
		struct tsb *ent;

		v &= ~0x1UL;

		ent = &tsb[tsb_hash(v, nentries)];
		if (tag_compare(ent, v, ctx)) {
			ent->tag = 0UL;
			membar_storeload_storestore();
		}
	}
}

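/* Work out everything needed to map the TSB itself: the value loaded
 * into the TSB register (virtual base plus, in its low bits, a size
 * field and the TSB's offset within the mapping page), the virtual
 * address it is mapped at (TSBMAP_BASE), and a locked, privileged,
 * writable TTE covering it with the smallest page size that maps the
 * whole table in one TLB entry.
 */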
static void setup_tsb_params(struct mm_struct *mm, unsigned long tsb_bytes)
{
	unsigned long tsb_reg, base, tsb_paddr;
	unsigned long page_sz, tte;

	mm->context.tsb_nentries = tsb_bytes / sizeof(struct tsb);

	base = TSBMAP_BASE;
	tte = (_PAGE_VALID | _PAGE_L | _PAGE_CP |
	       _PAGE_CV | _PAGE_P | _PAGE_W);
	tsb_paddr = __pa(mm->context.tsb);

	/* Use the smallest page size that can map the whole TSB
	 * in one TLB entry.
	 */
	switch (tsb_bytes) {
	case 8192 << 0:
		tsb_reg = 0x0UL;
#ifdef DCACHE_ALIASING_POSSIBLE
		base += (tsb_paddr & 8192);
#endif
		tte |= _PAGE_SZ8K;
		page_sz = 8192;
		break;

	case 8192 << 1:
		tsb_reg = 0x1UL;
		tte |= _PAGE_SZ64K;
		page_sz = 64 * 1024;
		break;

	case 8192 << 2:
		tsb_reg = 0x2UL;
		tte |= _PAGE_SZ64K;
		page_sz = 64 * 1024;
		break;

	case 8192 << 3:
		tsb_reg = 0x3UL;
		tte |= _PAGE_SZ64K;
		page_sz = 64 * 1024;
		break;

	case 8192 << 4:
		tsb_reg = 0x4UL;
		tte |= _PAGE_SZ512K;
		page_sz = 512 * 1024;
		break;

	case 8192 << 5:
		tsb_reg = 0x5UL;
		tte |= _PAGE_SZ512K;
		page_sz = 512 * 1024;
		break;

	case 8192 << 6:
		tsb_reg = 0x6UL;
		tte |= _PAGE_SZ512K;
		page_sz = 512 * 1024;
		break;

	case 8192 << 7:
		tsb_reg = 0x7UL;
		tte |= _PAGE_SZ4MB;
		page_sz = 4 * 1024 * 1024;
		break;
	}

	tsb_reg |= base;
	tsb_reg |= (tsb_paddr & (page_sz - 1UL));
	tte |= (tsb_paddr & ~(page_sz - 1UL));

	mm->context.tsb_reg_val = tsb_reg;
	mm->context.tsb_map_vaddr = base;
	mm->context.tsb_map_pte = tte;
}

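/* A new address space starts out with no hardware context number
 * assigned and a minimal, zero-filled PAGE_SIZE TSB; the context
 * number itself is allocated lazily elsewhere.
 */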
int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	unsigned long page = get_zeroed_page(GFP_KERNEL);

	mm->context.sparc64_ctx_val = 0UL;
	if (unlikely(!page))
		return -ENOMEM;

	mm->context.tsb = (struct tsb *) page;
	setup_tsb_params(mm, PAGE_SIZE);

	return 0;
}

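/* The reverse of init_new_context(): free the TSB page and, if a
 * hardware context number was ever assigned, return it to the
 * allocation bitmap under ctx_alloc_lock.
 */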
void destroy_context(struct mm_struct *mm)
{
	free_page((unsigned long) mm->context.tsb);

	/* We can remove these later, but for now it's useful
	 * to catch any bogus post-destroy_context() references
	 * to the TSB.
	 */
	mm->context.tsb = NULL;
	mm->context.tsb_reg_val = 0UL;

	spin_lock(&ctx_alloc_lock);

	if (CTX_VALID(mm->context)) {
		unsigned long nr = CTX_NRBITS(mm->context);

		mmu_context_bmap[nr >> 6] &= ~(1UL << (nr & 63));
	}

	spin_unlock(&ctx_alloc_lock);
}