tlb-flush.c

/*
 * TLB flushing operations for SH with an MMU.
 *
 * Copyright (C) 1999 Niibe Yutaka
 * Copyright (C) 2003 - 2006 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/mm.h>
#include <linux/io.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
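
/*
 * Flush the TLB entry for a single user page.  If the mm still has a
 * live context on this CPU, the page is flushed under that mm's ASID,
 * temporarily switching to it when the mm is not the current one and
 * restoring the previous ASID afterwards.
 */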
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	unsigned int cpu = smp_processor_id();

	if (vma->vm_mm && cpu_context(cpu, vma->vm_mm) != NO_CONTEXT) {
		unsigned long flags;
		unsigned long asid;
		unsigned long saved_asid = MMU_NO_ASID;

		asid = cpu_asid(cpu, vma->vm_mm);
		page &= PAGE_MASK;

		local_irq_save(flags);
		if (vma->vm_mm != current->mm) {
			saved_asid = get_asid();
			set_asid(asid);
		}
		local_flush_tlb_one(asid, page);
		if (saved_asid != MMU_NO_ASID)
			set_asid(saved_asid);
		local_irq_restore(flags);
	}
}
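
/*
 * Flush the TLB entries covering a user virtual address range.  Flushing
 * more than a quarter of the TLB page by page costs more than it saves,
 * so past that point we simply invalidate the mm's context and let a
 * fresh ASID be allocated instead.
 */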
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
			   unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned int cpu = smp_processor_id();

	if (cpu_context(cpu, mm) != NO_CONTEXT) {
		unsigned long flags;
		int size;

		local_irq_save(flags);
		size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
		if (size > (MMU_NTLB_ENTRIES / 4)) { /* Too many TLB entries to flush */
			cpu_context(cpu, mm) = NO_CONTEXT;
			if (mm == current->mm)
				activate_context(mm, cpu);
		} else {
			unsigned long asid;
			unsigned long saved_asid = MMU_NO_ASID;

			asid = cpu_asid(cpu, mm);
			start &= PAGE_MASK;
			end += (PAGE_SIZE - 1);
			end &= PAGE_MASK;
			if (mm != current->mm) {
				saved_asid = get_asid();
				set_asid(asid);
			}
			while (start < end) {
				local_flush_tlb_one(asid, start);
				start += PAGE_SIZE;
			}
			if (saved_asid != MMU_NO_ASID)
				set_asid(saved_asid);
		}
		local_irq_restore(flags);
	}
}
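
/*
 * Flush the TLB entries covering a kernel virtual address range.  The
 * same "too many entries" heuristic applies: beyond a quarter of the
 * TLB we fall back to a full flush.  Kernel pages are flushed under
 * init_mm's ASID.
 */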
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned int cpu = smp_processor_id();
	unsigned long flags;
	int size;

	local_irq_save(flags);
	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	if (size > (MMU_NTLB_ENTRIES / 4)) { /* Too many TLB entries to flush */
		local_flush_tlb_all();
	} else {
		unsigned long asid;
		unsigned long saved_asid = get_asid();

		asid = cpu_asid(cpu, &init_mm);
		start &= PAGE_MASK;
		end += (PAGE_SIZE - 1);
		end &= PAGE_MASK;
		set_asid(asid);
		while (start < end) {
			local_flush_tlb_one(asid, start);
			start += PAGE_SIZE;
		}
		set_asid(saved_asid);
	}
	local_irq_restore(flags);
}
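
/*
 * Flush all TLB entries belonging to an mm on this CPU.
 */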
void local_flush_tlb_mm(struct mm_struct *mm)
{
	unsigned int cpu = smp_processor_id();

	/* Invalidate all TLB entries of this process. */
	/* Instead of invalidating each entry, we get a new MMU context. */
	if (cpu_context(cpu, mm) != NO_CONTEXT) {
		unsigned long flags;

		local_irq_save(flags);
		cpu_context(cpu, mm) = NO_CONTEXT;
		if (mm == current->mm)
			activate_context(mm, cpu);
		local_irq_restore(flags);
	}
}
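
/*
 * Flush the entire TLB on this CPU by setting the flush bit in MMUCR.
 */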
void local_flush_tlb_all(void)
{
	unsigned long flags, status;

	/*
	 * Flush all the TLB.
	 *
	 * Write to the MMU control register's bit:
	 *	TF-bit for SH-3, TI-bit for SH-4.
	 * It's the same position, bit #2.
	 */
	local_irq_save(flags);
	status = ctrl_inl(MMUCR);
	status |= 0x04;
	ctrl_outl(status, MMUCR);
	ctrl_barrier();
	local_irq_restore(flags);
}
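
/*
 * Preload a freshly established PTE into the TLB.  Called from the fault
 * path once the page tables have been updated: write back any dirty
 * D-cache lines for the page, then program PTEH/PTEL (and PTEA where the
 * CPU has one) and issue "ldtlb" to load the entry.
 */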
void update_mmu_cache(struct vm_area_struct *vma,
		      unsigned long address, pte_t pte)
{
	unsigned long flags;
	unsigned long pteval;
	unsigned long vpn;
	struct page *page;
	unsigned long pfn = pte_pfn(pte);
	struct address_space *mapping;

	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);
	mapping = page_mapping(page);
	if (mapping) {
		unsigned long phys = pte_val(pte) & PTE_PHYS_MASK;
		int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags);

		if (dirty)
			__flush_wback_region((void *)P1SEGADDR(phys),
					     PAGE_SIZE);
	}

	local_irq_save(flags);

	/* Set PTEH register */
	vpn = (address & MMU_VPN_MASK) | get_asid();
	ctrl_outl(vpn, MMU_PTEH);

	pteval = pte_val(pte);

#ifdef CONFIG_CPU_HAS_PTEA
	/* Set PTEA register */
	/* TODO: make this look less hacky */
	ctrl_outl(((pteval >> 28) & 0xe) | (pteval & 0x1), MMU_PTEA);
#endif

	/* Set PTEL register */
	pteval &= _PAGE_FLAGS_HARDWARE_MASK; /* drop software flags */
#if defined(CONFIG_SH_WRITETHROUGH) && defined(CONFIG_CPU_SH4)
	pteval |= _PAGE_WT;
#endif
	/* conveniently, we want all the software flags to be 0 anyway */
	ctrl_outl(pteval, MMU_PTEL);

	/* Load the TLB */
	asm volatile("ldtlb": /* no output */ : /* no input */ : "memory");
	local_irq_restore(flags);
}