tlbflush.h — MN10300 TLB flushing functions (3.1 KB)
  1. /* MN10300 TLB flushing functions
  2. *
  3. * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
  4. * Written by David Howells (dhowells@redhat.com)
  5. *
  6. * This program is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU General Public Licence
  8. * as published by the Free Software Foundation; either version
  9. * 2 of the Licence, or (at your option) any later version.
  10. */
  11. #ifndef _ASM_TLBFLUSH_H
  12. #define _ASM_TLBFLUSH_H
  13. #include <asm/processor.h>
/*
 * Per-CPU TLB bookkeeping: records which mm's mappings this CPU's TLB
 * currently holds, plus a state flag.
 * NOTE(review): the valid values of @state are defined elsewhere
 * (presumably the lazy-TLB state constants) -- confirm against the
 * arch's SMP TLB code.
 */
struct tlb_state {
	struct mm_struct *active_mm;	/* mm whose entries are loaded on this CPU */
	int state;			/* TLB state flag for this CPU */
};
DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);
  19. /**
  20. * local_flush_tlb - Flush the current MM's entries from the local CPU's TLBs
  21. */
/**
 * local_flush_tlb - Flush the current MM's entries from the local CPU's TLBs
 *
 * Read the MMU control register, OR in the instruction- and data-TLB
 * invalidate bits (MMUCTR_IIV | MMUCTR_DIV) and write the result back,
 * invalidating both TLBs on this CPU.  The "memory" clobber stops the
 * compiler from reordering memory accesses across the flush.
 */
static inline void local_flush_tlb(void)
{
	int w;
	asm volatile(
		" mov %1,%0 \n"		/* w = MMUCTR */
		" or %2,%0 \n"		/* w |= MMUCTR_IIV | MMUCTR_DIV */
		" mov %0,%1 \n"		/* MMUCTR = w -> invalidate both TLBs */
		: "=d"(w)
		: "m"(MMUCTR), "i"(MMUCTR_IIV|MMUCTR_DIV)
		: "cc", "memory");
}
  33. /**
  34. * local_flush_tlb_all - Flush all entries from the local CPU's TLBs
  35. */
/**
 * local_flush_tlb_all - Flush all entries from the local CPU's TLBs
 *
 * The hardware flush performed by local_flush_tlb() already invalidates
 * the whole of both TLBs, so flushing "all" is the same operation.
 */
static inline void local_flush_tlb_all(void)
{
	local_flush_tlb();
}
  40. /**
  41. * local_flush_tlb_one - Flush one entry from the local CPU's TLBs
  42. */
/**
 * local_flush_tlb_one - Flush one entry from the local CPU's TLBs
 * @addr: Address of the entry to flush (deliberately unused)
 *
 * No single-entry invalidate is attempted here: the whole TLB is dumped
 * via local_flush_tlb() instead, which over-flushes but is always correct.
 */
static inline void local_flush_tlb_one(unsigned long addr)
{
	local_flush_tlb();
}
  47. /**
  48. * local_flush_tlb_page - Flush a page's entry from the local CPU's TLBs
  49. * @mm: The MM to flush for
  50. * @addr: The address of the target page in RAM (not its page struct)
  51. */
/**
 * local_flush_tlb_page - Flush a page's entry from the local CPU's TLBs
 * @mm: The MM to flush for
 * @addr: The address of the target page in RAM (not its page struct)
 *
 * Write the page's (masked) virtual address -- plus, under
 * CONFIG_MN10300_TLB_USE_PIDR, the mm's per-CPU TLB PID -- into the
 * instruction and data PTE-upper registers, then clear the matching
 * PTE-lower register if its valid bit is set, invalidating just that
 * entry.  Runs with IRQs off so the probe/clear sequence on the TLB
 * registers is not disturbed by an interrupt.
 */
static inline
void local_flush_tlb_page(struct mm_struct *mm, unsigned long addr)
{
	unsigned long pteu, flags, cnx;

	addr &= PAGE_MASK;

	local_irq_save(flags);

	cnx = 1;
#ifdef CONFIG_MN10300_TLB_USE_PIDR
	/* presumably a zero PID means no context allocated on this CPU,
	 * hence nothing to flush -- TODO confirm against mmu_context code */
	cnx = mm->context.tlbpid[smp_processor_id()];
#endif
	if (cnx) {
		pteu = addr;
#ifdef CONFIG_MN10300_TLB_USE_PIDR
		pteu |= cnx & xPTEU_PID;	/* tag lookup with the mm's PID */
#endif
		IPTEU = pteu;	/* select entry in the instruction TLB */
		DPTEU = pteu;	/* select entry in the data TLB */
		if (IPTEL & xPTEL_V)
			IPTEL = 0;	/* matching I-TLB entry valid: clear it */
		if (DPTEL & xPTEL_V)
			DPTEL = 0;	/* matching D-TLB entry valid: clear it */
	}
	local_irq_restore(flags);
}
  76. /*
  77. * TLB flushing:
  78. *
  79. * - flush_tlb() flushes the current mm struct TLBs
  80. * - flush_tlb_all() flushes all processes TLBs
  81. * - flush_tlb_mm(mm) flushes the specified mm context TLB's
  82. * - flush_tlb_page(vma, vmaddr) flushes one page
  83. * - flush_tlb_range(mm, start, end) flushes a range of pages
  84. * - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
  85. */
/*
 * All of the bulk operations below fall back to a full local TLB flush;
 * only flush_tlb_page() is targeted.  The preempt_disable()/enable()
 * pairs keep the task on one CPU while the (local-only) flush runs.
 */
#define flush_tlb_all() \
do { \
	preempt_disable(); \
	local_flush_tlb_all(); \
	preempt_enable(); \
} while (0)

/* Flushing a single mm also just dumps the whole local TLB */
#define flush_tlb_mm(mm) \
do { \
	preempt_disable(); \
	local_flush_tlb_all(); \
	preempt_enable(); \
} while (0)

/* __s/__e evaluate start/end exactly once (for any side effects) but are
 * otherwise unused: the whole TLB is flushed regardless of the range */
#define flush_tlb_range(vma, start, end) \
do { \
	unsigned long __s __attribute__((unused)) = (start); \
	unsigned long __e __attribute__((unused)) = (end); \
	preempt_disable(); \
	local_flush_tlb_all(); \
	preempt_enable(); \
} while (0)

/* The only targeted flush: invalidate just this page's I/D TLB entries */
#define flush_tlb_page(vma, addr) local_flush_tlb_page((vma)->vm_mm, addr)
#define flush_tlb() flush_tlb_all()

/* Kernel-range flush: evaluate the bounds once, then flush everything */
#define flush_tlb_kernel_range(start, end) \
do { \
	unsigned long __s __attribute__((unused)) = (start); \
	unsigned long __e __attribute__((unused)) = (end); \
	flush_tlb_all(); \
} while (0)

/* No-op: nothing extra to do when page tables are torn down */
#define flush_tlb_pgtables(mm, start, end) do {} while (0)
  115. #endif /* _ASM_TLBFLUSH_H */