/* arch/sh/mm/tlb-flush.c */
/*
 * TLB flushing operations for SH with an MMU.
 *
 * Copyright (C) 1999 Niibe Yutaka
 * Copyright (C) 2003 - 2006 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/mm.h>
#include <linux/io.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
  16. void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
  17. {
  18. if (vma->vm_mm && vma->vm_mm->context.id != NO_CONTEXT) {
  19. unsigned long flags;
  20. unsigned long asid;
  21. unsigned long saved_asid = MMU_NO_ASID;
  22. asid = vma->vm_mm->context.id & MMU_CONTEXT_ASID_MASK;
  23. page &= PAGE_MASK;
  24. local_irq_save(flags);
  25. if (vma->vm_mm != current->mm) {
  26. saved_asid = get_asid();
  27. set_asid(asid);
  28. }
  29. __flush_tlb_page(asid, page);
  30. if (saved_asid != MMU_NO_ASID)
  31. set_asid(saved_asid);
  32. local_irq_restore(flags);
  33. }
  34. }
  35. void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
  36. unsigned long end)
  37. {
  38. struct mm_struct *mm = vma->vm_mm;
  39. if (mm->context.id != NO_CONTEXT) {
  40. unsigned long flags;
  41. int size;
  42. local_irq_save(flags);
  43. size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
  44. if (size > (MMU_NTLB_ENTRIES/4)) { /* Too many TLB to flush */
  45. mm->context.id = NO_CONTEXT;
  46. if (mm == current->mm)
  47. activate_context(mm);
  48. } else {
  49. unsigned long asid;
  50. unsigned long saved_asid = MMU_NO_ASID;
  51. asid = mm->context.id & MMU_CONTEXT_ASID_MASK;
  52. start &= PAGE_MASK;
  53. end += (PAGE_SIZE - 1);
  54. end &= PAGE_MASK;
  55. if (mm != current->mm) {
  56. saved_asid = get_asid();
  57. set_asid(asid);
  58. }
  59. while (start < end) {
  60. __flush_tlb_page(asid, start);
  61. start += PAGE_SIZE;
  62. }
  63. if (saved_asid != MMU_NO_ASID)
  64. set_asid(saved_asid);
  65. }
  66. local_irq_restore(flags);
  67. }
  68. }
  69. void flush_tlb_kernel_range(unsigned long start, unsigned long end)
  70. {
  71. unsigned long flags;
  72. int size;
  73. local_irq_save(flags);
  74. size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
  75. if (size > (MMU_NTLB_ENTRIES/4)) { /* Too many TLB to flush */
  76. flush_tlb_all();
  77. } else {
  78. unsigned long asid;
  79. unsigned long saved_asid = get_asid();
  80. asid = init_mm.context.id & MMU_CONTEXT_ASID_MASK;
  81. start &= PAGE_MASK;
  82. end += (PAGE_SIZE - 1);
  83. end &= PAGE_MASK;
  84. set_asid(asid);
  85. while (start < end) {
  86. __flush_tlb_page(asid, start);
  87. start += PAGE_SIZE;
  88. }
  89. set_asid(saved_asid);
  90. }
  91. local_irq_restore(flags);
  92. }
  93. void flush_tlb_mm(struct mm_struct *mm)
  94. {
  95. /* Invalidate all TLB of this process. */
  96. /* Instead of invalidating each TLB, we get new MMU context. */
  97. if (mm->context.id != NO_CONTEXT) {
  98. unsigned long flags;
  99. local_irq_save(flags);
  100. mm->context.id = NO_CONTEXT;
  101. if (mm == current->mm)
  102. activate_context(mm);
  103. local_irq_restore(flags);
  104. }
  105. }
  106. void flush_tlb_all(void)
  107. {
  108. unsigned long flags, status;
  109. /*
  110. * Flush all the TLB.
  111. *
  112. * Write to the MMU control register's bit:
  113. * TF-bit for SH-3, TI-bit for SH-4.
  114. * It's same position, bit #2.
  115. */
  116. local_irq_save(flags);
  117. status = ctrl_inl(MMUCR);
  118. status |= 0x04;
  119. ctrl_outl(status, MMUCR);
  120. ctrl_barrier();
  121. local_irq_restore(flags);
  122. }
  123. void update_mmu_cache(struct vm_area_struct *vma,
  124. unsigned long address, pte_t pte)
  125. {
  126. unsigned long flags;
  127. unsigned long pteval;
  128. unsigned long vpn;
  129. struct page *page;
  130. unsigned long pfn = pte_pfn(pte);
  131. struct address_space *mapping;
  132. if (!pfn_valid(pfn))
  133. return;
  134. page = pfn_to_page(pfn);
  135. mapping = page_mapping(page);
  136. if (mapping) {
  137. unsigned long phys = pte_val(pte) & PTE_PHYS_MASK;
  138. int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags);
  139. if (dirty)
  140. __flush_wback_region((void *)P1SEGADDR(phys),
  141. PAGE_SIZE);
  142. }
  143. local_irq_save(flags);
  144. /* Set PTEH register */
  145. vpn = (address & MMU_VPN_MASK) | get_asid();
  146. ctrl_outl(vpn, MMU_PTEH);
  147. pteval = pte_val(pte);
  148. #ifdef CONFIG_CPU_HAS_PTEA
  149. /* Set PTEA register */
  150. /* TODO: make this look less hacky */
  151. ctrl_outl(((pteval >> 28) & 0xe) | (pteval & 0x1), MMU_PTEA);
  152. #endif
  153. /* Set PTEL register */
  154. pteval &= _PAGE_FLAGS_HARDWARE_MASK; /* drop software flags */
  155. #ifdef CONFIG_SH_WRITETHROUGH
  156. pteval |= _PAGE_WT;
  157. #endif
  158. /* conveniently, we want all the software flags to be 0 anyway */
  159. ctrl_outl(pteval, MMU_PTEL);
  160. /* Load the TLB */
  161. asm volatile("ldtlb": /* no output */ : /* no input */ : "memory");
  162. local_irq_restore(flags);
  163. }