tlb-flush.c

/*
 * TLB flushing operations for SH with an MMU.
 *
 * Copyright (C) 1999 Niibe Yutaka
 * Copyright (C) 2003 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/mm.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
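
/*
 * flush_tlb_page() invalidates the single TLB entry covering @page in
 * @vma's address space.  If that mm is not the one currently running,
 * we temporarily switch to its ASID so the flush hits the right
 * entries, then restore the previous ASID before returning.
 */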
void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	if (vma->vm_mm && vma->vm_mm->context != NO_CONTEXT) {
		unsigned long flags;
		unsigned long asid;
		unsigned long saved_asid = MMU_NO_ASID;

		asid = vma->vm_mm->context & MMU_CONTEXT_ASID_MASK;
		page &= PAGE_MASK;

		local_irq_save(flags);
		if (vma->vm_mm != current->mm) {
			saved_asid = get_asid();
			set_asid(asid);
		}
		__flush_tlb_page(asid, page);
		if (saved_asid != MMU_NO_ASID)
			set_asid(saved_asid);
		local_irq_restore(flags);
	}
}
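
/*
 * flush_tlb_range() invalidates the user TLB entries for @start..@end in
 * @vma's address space, walking the range page by page under the mm's
 * ASID.  When the range covers too many entries it is cheaper to drop
 * the whole context instead (see the comment below).
 */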
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	if (mm->context != NO_CONTEXT) {
		unsigned long flags;
		int size;

		local_irq_save(flags);
		size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
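		/*
		 * Flushing more than a quarter of the TLB one entry at a
		 * time costs more than dropping the context outright, so
		 * the mm simply gets a new MMU context (and thus a new
		 * ASID) instead.
		 */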
		if (size > (MMU_NTLB_ENTRIES/4)) { /* Too many TLB entries to flush */
			mm->context = NO_CONTEXT;
			if (mm == current->mm)
				activate_context(mm);
		} else {
			unsigned long asid = mm->context & MMU_CONTEXT_ASID_MASK;
			unsigned long saved_asid = MMU_NO_ASID;

			start &= PAGE_MASK;
			end += (PAGE_SIZE - 1);
			end &= PAGE_MASK;

			if (mm != current->mm) {
				saved_asid = get_asid();
				set_asid(asid);
			}
			while (start < end) {
				__flush_tlb_page(asid, start);
				start += PAGE_SIZE;
			}
			if (saved_asid != MMU_NO_ASID)
				set_asid(saved_asid);
		}
		local_irq_restore(flags);
	}
}
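
/*
 * flush_tlb_kernel_range() does the same page-by-page walk for kernel
 * mappings, borrowing init_mm's ASID for the duration of the flush and
 * falling back to flush_tlb_all() when the range is too large.
 */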
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long flags;
	int size;

	local_irq_save(flags);
	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	if (size > (MMU_NTLB_ENTRIES/4)) { /* Too many TLB entries to flush */
		flush_tlb_all();
	} else {
		unsigned long asid = init_mm.context & MMU_CONTEXT_ASID_MASK;
		unsigned long saved_asid = get_asid();

		start &= PAGE_MASK;
		end += (PAGE_SIZE - 1);
		end &= PAGE_MASK;

		set_asid(asid);
		while (start < end) {
			__flush_tlb_page(asid, start);
			start += PAGE_SIZE;
		}
		set_asid(saved_asid);
	}
	local_irq_restore(flags);
}
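
/*
 * flush_tlb_mm() invalidates every TLB entry belonging to @mm.  The
 * entries are not touched individually; dropping the context means the
 * mm gets a new ASID, so any stale entries tagged with the old ASID can
 * no longer match.
 */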
void flush_tlb_mm(struct mm_struct *mm)
{
	/*
	 * Invalidate all the TLB entries of this process.  Instead of
	 * invalidating each entry, we get a new MMU context.
	 */
	if (mm->context != NO_CONTEXT) {
		unsigned long flags;

		local_irq_save(flags);
		mm->context = NO_CONTEXT;
		if (mm == current->mm)
			activate_context(mm);
		local_irq_restore(flags);
	}
}
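
/*
 * flush_tlb_all() invalidates every TLB entry regardless of ASID; it is
 * also the fallback used by flush_tlb_kernel_range() above when a range
 * is too large to flush page by page.
 */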
void flush_tlb_all(void)
{
	unsigned long flags, status;

	/*
	 * Flush all the TLB.
	 *
	 * Write to the MMU control register's bit:
	 *	TF-bit for SH-3, TI-bit for SH-4.
	 *	It's the same position, bit #2.
	 */
	local_irq_save(flags);
	status = ctrl_inl(MMUCR);
	status |= 0x04;
	ctrl_outl(status, MMUCR);
	ctrl_barrier();
	local_irq_restore(flags);
}