/*
 * TLB flushing operations for SH with an MMU.
 *
 * Copyright (C) 1999 Niibe Yutaka
 * Copyright (C) 2003 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
  11. #include <linux/mm.h>
  12. #include <asm/mmu_context.h>
  13. #include <asm/tlbflush.h>
  14. void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
  15. {
  16. if (vma->vm_mm && vma->vm_mm->context.id != NO_CONTEXT) {
  17. unsigned long flags;
  18. unsigned long asid;
  19. unsigned long saved_asid = MMU_NO_ASID;
  20. asid = vma->vm_mm->context.id & MMU_CONTEXT_ASID_MASK;
  21. page &= PAGE_MASK;
  22. local_irq_save(flags);
  23. if (vma->vm_mm != current->mm) {
  24. saved_asid = get_asid();
  25. set_asid(asid);
  26. }
  27. __flush_tlb_page(asid, page);
  28. if (saved_asid != MMU_NO_ASID)
  29. set_asid(saved_asid);
  30. local_irq_restore(flags);
  31. }
  32. }
  33. void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
  34. unsigned long end)
  35. {
  36. struct mm_struct *mm = vma->vm_mm;
  37. if (mm->context.id != NO_CONTEXT) {
  38. unsigned long flags;
  39. int size;
  40. local_irq_save(flags);
  41. size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
  42. if (size > (MMU_NTLB_ENTRIES/4)) { /* Too many TLB to flush */
  43. mm->context.id = NO_CONTEXT;
  44. if (mm == current->mm)
  45. activate_context(mm);
  46. } else {
  47. unsigned long asid;
  48. unsigned long saved_asid = MMU_NO_ASID;
  49. asid = mm->context.id & MMU_CONTEXT_ASID_MASK;
  50. start &= PAGE_MASK;
  51. end += (PAGE_SIZE - 1);
  52. end &= PAGE_MASK;
  53. if (mm != current->mm) {
  54. saved_asid = get_asid();
  55. set_asid(asid);
  56. }
  57. while (start < end) {
  58. __flush_tlb_page(asid, start);
  59. start += PAGE_SIZE;
  60. }
  61. if (saved_asid != MMU_NO_ASID)
  62. set_asid(saved_asid);
  63. }
  64. local_irq_restore(flags);
  65. }
  66. }
  67. void flush_tlb_kernel_range(unsigned long start, unsigned long end)
  68. {
  69. unsigned long flags;
  70. int size;
  71. local_irq_save(flags);
  72. size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
  73. if (size > (MMU_NTLB_ENTRIES/4)) { /* Too many TLB to flush */
  74. flush_tlb_all();
  75. } else {
  76. unsigned long asid;
  77. unsigned long saved_asid = get_asid();
  78. asid = init_mm.context.id & MMU_CONTEXT_ASID_MASK;
  79. start &= PAGE_MASK;
  80. end += (PAGE_SIZE - 1);
  81. end &= PAGE_MASK;
  82. set_asid(asid);
  83. while (start < end) {
  84. __flush_tlb_page(asid, start);
  85. start += PAGE_SIZE;
  86. }
  87. set_asid(saved_asid);
  88. }
  89. local_irq_restore(flags);
  90. }
  91. void flush_tlb_mm(struct mm_struct *mm)
  92. {
  93. /* Invalidate all TLB of this process. */
  94. /* Instead of invalidating each TLB, we get new MMU context. */
  95. if (mm->context.id != NO_CONTEXT) {
  96. unsigned long flags;
  97. local_irq_save(flags);
  98. mm->context.id = NO_CONTEXT;
  99. if (mm == current->mm)
  100. activate_context(mm);
  101. local_irq_restore(flags);
  102. }
  103. }
  104. void flush_tlb_all(void)
  105. {
  106. unsigned long flags, status;
  107. /*
  108. * Flush all the TLB.
  109. *
  110. * Write to the MMU control register's bit:
  111. * TF-bit for SH-3, TI-bit for SH-4.
  112. * It's same position, bit #2.
  113. */
  114. local_irq_save(flags);
  115. status = ctrl_inl(MMUCR);
  116. status |= 0x04;
  117. ctrl_outl(status, MMUCR);
  118. ctrl_barrier();
  119. local_irq_restore(flags);
  120. }