tlbflush.h

#ifndef _ASM_M32R_TLBFLUSH_H
#define _ASM_M32R_TLBFLUSH_H

#include <asm/m32r.h>

/*
 * TLB flushing:
 *
 *  - flush_tlb() flushes the current mm struct TLBs
 *  - flush_tlb_all() flushes all processes' TLBs
 *  - flush_tlb_mm(mm) flushes the specified mm context's TLBs
 *  - flush_tlb_page(vma, vmaddr) flushes one page
 *  - flush_tlb_range(vma, start, end) flushes a range of pages
 *  - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
 *  - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
 */
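/*
 * Illustrative usage (comment added; not part of the original header):
 * generic mm code typically calls flush_tlb_page(vma, addr) after changing
 * a single user PTE and flush_tlb_range(vma, start, end) after tearing down
 * the mappings for [start, end).  The macros below route those calls to the
 * local or SMP implementations depending on the kernel configuration.
 */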
extern void local_flush_tlb_all(void);
extern void local_flush_tlb_mm(struct mm_struct *);
extern void local_flush_tlb_page(struct vm_area_struct *, unsigned long);
extern void local_flush_tlb_range(struct vm_area_struct *, unsigned long,
	unsigned long);

#ifndef CONFIG_SMP
#ifdef CONFIG_MMU
#define flush_tlb_all()			local_flush_tlb_all()
#define flush_tlb_mm(mm)		local_flush_tlb_mm(mm)
#define flush_tlb_page(vma, page)	local_flush_tlb_page(vma, page)
#define flush_tlb_range(vma, start, end) \
	local_flush_tlb_range(vma, start, end)
#define flush_tlb_kernel_range(start, end)	local_flush_tlb_all()
#else	/* CONFIG_MMU */
#define flush_tlb_all()				do { } while (0)
#define flush_tlb_mm(mm)			do { } while (0)
#define flush_tlb_page(vma, vmaddr)		do { } while (0)
#define flush_tlb_range(vma, start, end)	do { } while (0)
#endif	/* CONFIG_MMU */
#else	/* CONFIG_SMP */
extern void smp_flush_tlb_all(void);
extern void smp_flush_tlb_mm(struct mm_struct *);
extern void smp_flush_tlb_page(struct vm_area_struct *, unsigned long);
extern void smp_flush_tlb_range(struct vm_area_struct *, unsigned long,
	unsigned long);

#define flush_tlb_all()			smp_flush_tlb_all()
#define flush_tlb_mm(mm)		smp_flush_tlb_mm(mm)
#define flush_tlb_page(vma, page)	smp_flush_tlb_page(vma, page)
#define flush_tlb_range(vma, start, end) \
	smp_flush_tlb_range(vma, start, end)
#define flush_tlb_kernel_range(start, end)	smp_flush_tlb_all()
#endif	/* CONFIG_SMP */
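/*
 * __flush_tlb_page(): invalidate the TLB entries for one virtual address.
 * Descriptive summary of the asm below (comment added for readability; see
 * <asm/m32r.h> for the exact register offsets): the address is written to
 * the MSVA register, a TLB search is started by writing 1 to MTOP, MTOP is
 * polled until the hardware reports completion, and the matched entries,
 * whose locations are read back from MIDXI and the data-side index register
 * that follows it, are then invalidated by storing zero into them.
 */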
static __inline__ void __flush_tlb_page(unsigned long page)
{
	unsigned int tmpreg0, tmpreg1, tmpreg2;

	__asm__ __volatile__ (
		"seth	%0, #high(%4)	\n\t"
		"st	%3, @(%5, %0)	\n\t"
		"ldi	%1, #1		\n\t"
		"st	%1, @(%6, %0)	\n\t"
		"add3	%1, %0, %7	\n\t"
		".fillinsn		\n"
		"1:			\n\t"
		"ld	%2, @(%6, %0)	\n\t"
		"bnez	%2, 1b		\n\t"
		"ld	%0, @%1+	\n\t"
		"ld	%1, @%1		\n\t"
		"st	%2, @+%0	\n\t"
		"st	%2, @+%1	\n\t"
		: "=&r" (tmpreg0), "=&r" (tmpreg1), "=&r" (tmpreg2)
		: "r" (page), "i" (MMU_REG_BASE), "i" (MSVA_offset),
		  "i" (MTOP_offset), "i" (MIDXI_offset)
		: "memory"
	);
}
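/*
 * __flush_tlb_all(): invalidate every TLB entry. Summary of the asm below
 * (comment added for readability): the command value 0xc, which presumably
 * selects both the instruction and data TLBs, is written to the MTOP (TLB
 * operation) register, and MTOP is then polled until it reads zero, i.e.
 * until the hardware has finished the flush.
 */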
static __inline__ void __flush_tlb_all(void)
{
	unsigned int tmpreg0, tmpreg1;

	__asm__ __volatile__ (
		"seth	%0, #high(%2)	\n\t"
		"or3	%0, %0, #low(%2)	\n\t"
		"ldi	%1, #0xc	\n\t"
		"st	%1, @%0		\n\t"
		".fillinsn		\n"
		"1:			\n\t"
		"ld	%1, @%0		\n\t"
		"bnez	%1, 1b		\n\t"
		: "=&r" (tmpreg0), "=&r" (tmpreg1)
		: "i" (MTOP) : "memory"
	);
}
#define flush_tlb_pgtables(mm, start, end)	do { } while (0)

extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);

#endif	/* _ASM_M32R_TLBFLUSH_H */