/* arch/sparc64/mm/tsb.c
 *
 * Copyright (C) 2006 David S. Miller <davem@davemloft.net>
 */
#include <linux/kernel.h>
#include <asm/system.h>
#include <asm/page.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>

#define TSB_ENTRY_ALIGNMENT 16

/* One TSB entry: the tag identifies which translation the entry
 * caches, and pte holds the translation itself.
 */
struct tsb {
        unsigned long tag;
        unsigned long pte;
} __attribute__((aligned(TSB_ENTRY_ALIGNMENT)));

/* We use an 8K TSB for the whole kernel.  This lets us handle about
 * 4MB of modules and vmalloc mappings without incurring many hash
 * conflicts.
 */
#define KERNEL_TSB_SIZE_BYTES 8192
#define KERNEL_TSB_NENTRIES \
        (KERNEL_TSB_SIZE_BYTES / sizeof(struct tsb))
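
/* Sizing arithmetic behind the comment above (this assumes sparc64's
 * 8KB base pages, i.e. PAGE_SHIFT == 13, which this file does not
 * define itself): 8192 bytes / 16 bytes per entry = 512 entries, and
 * 512 entries * 8KB per page = 4MB of address space before distinct
 * pages must start sharing slots.
 */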

extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];

/* Direct-mapped hash: index by virtual page number, masked to the
 * table size.
 */
static inline unsigned long tsb_hash(unsigned long vaddr)
{
        vaddr >>= PAGE_SHIFT;
        return vaddr & (KERNEL_TSB_NENTRIES - 1);
}

static inline int tag_compare(struct tsb *entry, unsigned long vaddr, unsigned long context)
{
        if (context == ~0UL)
                return 1;
        return (entry->tag == ((vaddr >> 22) | (context << 48)));
}
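
/* Tag encoding, as used above: the upper virtual address bits
 * (vaddr >> 22) are combined with the hardware context number shifted
 * into the high bits (context << 48), so a single comparison checks
 * both the page and the address space.  A context of ~0UL is treated
 * as a wildcard that matches any entry.  (Inferred from the code
 * above, not from processor documentation.)
 */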

/* TSB flushes need only occur on the processor initiating the address
 * space modification, not on each cpu the address space has run on.
 * Only the TLB flush needs that treatment: the TSB is ordinary memory
 * that every CPU sees, so clearing an entry once suffices, while each
 * CPU's TLB is private hardware state.
 */
void flush_tsb_kernel_range(unsigned long start, unsigned long end)
{
        unsigned long v;

        for (v = start; v < end; v += PAGE_SIZE) {
                struct tsb *ent = &swapper_tsb[tsb_hash(v)];

                /* Kernel entries are tagged with context 0. */
                if (tag_compare(ent, v, 0)) {
                        ent->tag = 0UL;
                        /* Order the tag clear ahead of later loads and stores. */
                        membar_storeload_storestore();
                }
        }
}

void flush_tsb_user(struct mmu_gather *mp)
{
        struct mm_struct *mm = mp->mm;
        struct tsb *tsb = (struct tsb *) mm->context.sparc64_tsb;
        unsigned long ctx = ~0UL;
        int i;

        /* Without a valid hardware context, fall back to the wildcard
         * so any entry occupying the slot gets cleared.
         */
        if (CTX_VALID(mm->context))
                ctx = CTX_HWBITS(mm->context);

        for (i = 0; i < mp->tlb_nr; i++) {
                unsigned long v = mp->vaddrs[i];
                struct tsb *ent;

                /* The batch may set the low address bit as a flag;
                 * strip it before hashing.
                 */
                v &= ~0x1UL;

                ent = &tsb[tsb_hash(v)];
                if (tag_compare(ent, v, ctx)) {
                        ent->tag = 0UL;
                        membar_storeload_storestore();
                }
        }
}

int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
        unsigned long page = get_zeroed_page(GFP_KERNEL);

        mm->context.sparc64_ctx_val = 0UL;
        if (unlikely(!page))
                return -ENOMEM;

        /* Each address space gets one zeroed page as its TSB. */
        mm->context.sparc64_tsb = (unsigned long *) page;

        return 0;
}

void destroy_context(struct mm_struct *mm)
{
        free_page((unsigned long) mm->context.sparc64_tsb);

        spin_lock(&ctx_alloc_lock);

        /* Return this mm's context number to the allocation bitmap. */
        if (CTX_VALID(mm->context)) {
                unsigned long nr = CTX_NRBITS(mm->context);
                mmu_context_bmap[nr >> 6] &= ~(1UL << (nr & 63));
        }

        spin_unlock(&ctx_alloc_lock);
}
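
/* The sketch below is NOT part of arch/sparc64/mm/tsb.c.  It is a
 * minimal, stand-alone user-space illustration of the pattern the
 * file implements: a direct-mapped table indexed by a hash of the
 * virtual page number, with a tag compare to detect hash conflicts.
 * The demo_* names, the 8KB page size, and the driver in main() are
 * assumptions made for the example only.
 */
#include <stdio.h>

#define DEMO_PAGE_SHIFT 13      /* assume 8KB pages, as on sparc64 */
#define DEMO_NENTRIES   512     /* 8192-byte table / 16-byte entries */

struct demo_entry {
        unsigned long tag;      /* which virtual page owns this slot */
        unsigned long pte;      /* the cached translation */
};

static struct demo_entry demo_tsb[DEMO_NENTRIES];

static unsigned long demo_hash(unsigned long vaddr)
{
        return (vaddr >> DEMO_PAGE_SHIFT) & (DEMO_NENTRIES - 1);
}

/* Install a translation; a later page that hashes to the same slot
 * simply overwrites it, which is why conflicts only cost a refill.
 */
static void demo_insert(unsigned long vaddr, unsigned long pte)
{
        struct demo_entry *ent = &demo_tsb[demo_hash(vaddr)];

        ent->tag = vaddr >> DEMO_PAGE_SHIFT;
        ent->pte = pte;
}

/* Look up a translation; a tag mismatch means the slot belongs to a
 * different page and the caller must fall back to the page tables.
 */
static int demo_lookup(unsigned long vaddr, unsigned long *pte)
{
        struct demo_entry *ent = &demo_tsb[demo_hash(vaddr)];

        if (ent->tag != (vaddr >> DEMO_PAGE_SHIFT))
                return 0;
        *pte = ent->pte;
        return 1;
}

int main(void)
{
        /* Two addresses exactly DEMO_NENTRIES pages apart hash to the
         * same slot but carry different tags.
         */
        unsigned long a = 0x400000UL;
        unsigned long b = a + ((unsigned long)DEMO_NENTRIES << DEMO_PAGE_SHIFT);
        unsigned long pte;

        demo_insert(a, 0x1234UL);
        printf("a: %s\n", demo_lookup(a, &pte) ? "hit" : "miss");
        printf("b: %s (conflict, different tag)\n",
               demo_lookup(b, &pte) ? "hit" : "miss");
        return 0;
}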