hazards.h

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2003, 2004 Ralf Baechle
 */
#ifndef _ASM_HAZARDS_H
#define _ASM_HAZARDS_H

#include <linux/config.h>

#ifdef __ASSEMBLY__

	.macro	_ssnop
	sll	$0, $0, 1
	.endm

	.macro	_ehb
	sll	$0, $0, 3
	.endm

/*
 * RM9000 hazards. When the JTLB is updated by tlbwi or tlbwr, a subsequent
 * use of the JTLB for instructions should not occur for 4 cpu cycles and use
 * for data translations should not occur for 3 cpu cycles.
 */
#ifdef CONFIG_CPU_RM9000

	.macro	mtc0_tlbw_hazard
	.set	push
	.set	mips32
	_ssnop; _ssnop; _ssnop; _ssnop
	.set	pop
	.endm

	.macro	tlbw_eret_hazard
	.set	push
	.set	mips32
	_ssnop; _ssnop; _ssnop; _ssnop
	.set	pop
	.endm

#else
/*
 * The taken branch will result in a two cycle penalty for the two killed
 * instructions on R4000 / R4400. Other processors only have a single cycle
 * hazard, so this is a nice trick to get optimal code for a range of
 * processors.
 */
	.macro	mtc0_tlbw_hazard
	b	. + 8
	.endm

	.macro	tlbw_eret_hazard
	.endm
#endif
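
/*
 * Illustrative sketch, not part of the original header: a handwritten TLB
 * refill path would typically use these macros to bracket the CP0 writes
 * and the TLB write instruction, roughly (CP0 register names as defined in
 * <asm/mipsregs.h>):
 *
 *	mtc0	k0, CP0_ENTRYLO0
 *	mtc0	k1, CP0_ENTRYLO1
 *	mtc0_tlbw_hazard
 *	tlbwr
 *	tlbw_eret_hazard
 *	eret
 *
 * so the EntryLo values are visible to tlbwr and the TLB write has settled
 * before eret returns to the faulting instruction.
 */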

/*
 * mtc0->mfc0 hazard
 * The 24K has a 2 cycle mtc0/mfc0 execution hazard.
 * It is a MIPS32R2 processor so ehb will clear the hazard.
 */
#ifdef CONFIG_CPU_MIPSR2
/*
 * Use a macro for ehb unless explicit support for MIPSR2 is enabled
 */
#define irq_enable_hazard \
	_ehb

#define irq_disable_hazard \
	_ehb

#elif defined(CONFIG_CPU_R10000) || defined(CONFIG_CPU_RM9000)

/*
 * R10000 rocks - all hazards handled in hardware, so this becomes a nobrainer.
 */
#define irq_enable_hazard
#define irq_disable_hazard

#else

/*
 * Classic MIPS needs 1 - 3 nops or ssnops
 */
#define irq_enable_hazard
#define irq_disable_hazard \
	_ssnop; _ssnop; _ssnop

#endif
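
/*
 * Illustrative sketch, not part of the original header: assembler code that
 * clears IE in c0_status places the barrier before anything that depends on
 * interrupts actually being off (CP0_STATUS as defined in <asm/mipsregs.h>):
 *
 *	mfc0	t0, CP0_STATUS
 *	ori	t0, 1
 *	xori	t0, 1			# clear IE
 *	mtc0	t0, CP0_STATUS
 *	irq_disable_hazard
 */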

#else /* __ASSEMBLY__ */

__asm__(
	"	.macro	_ssnop				\n\t"
	"	sll	$0, $0, 1			\n\t"
	"	.endm					\n\t"
	"						\n\t"
	"	.macro	_ehb				\n\t"
	"	sll	$0, $0, 3			\n\t"
	"	.endm					\n\t");

#ifdef CONFIG_CPU_RM9000

/*
 * RM9000 hazards. When the JTLB is updated by tlbwi or tlbwr, a subsequent
 * use of the JTLB for instructions should not occur for 4 cpu cycles and use
 * for data translations should not occur for 3 cpu cycles.
 */
#define mtc0_tlbw_hazard() \
	__asm__ __volatile__( \
		".set\tmips32\n\t" \
		"_ssnop; _ssnop; _ssnop; _ssnop\n\t" \
		".set\tmips0")

#define tlbw_use_hazard() \
	__asm__ __volatile__( \
		".set\tmips32\n\t" \
		"_ssnop; _ssnop; _ssnop; _ssnop\n\t" \
		".set\tmips0")

#define back_to_back_c0_hazard()	do { } while (0)

#else

/*
 * Overkill warning ...
 */
#define mtc0_tlbw_hazard() \
	__asm__ __volatile__( \
		".set noreorder\n\t" \
		"nop; nop; nop; nop; nop; nop;\n\t" \
		".set reorder\n\t")

#define tlbw_use_hazard() \
	__asm__ __volatile__( \
		".set noreorder\n\t" \
		"nop; nop; nop; nop; nop; nop;\n\t" \
		".set reorder\n\t")

#define back_to_back_c0_hazard() \
	__asm__ __volatile__( \
		"	.set noreorder			\n" \
		"	nop; nop; nop			\n" \
		"	.set reorder			\n")

#endif
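
/*
 * Illustrative sketch, not part of the original header: C code doing TLB
 * maintenance (e.g. arch/mips/mm/tlb-r4k.c) brackets an indexed TLB write
 * with these barriers, roughly (accessors from <asm/mipsregs.h>, variable
 * names purely illustrative):
 *
 *	write_c0_entryhi(entryhi);
 *	write_c0_index(idx);
 *	mtc0_tlbw_hazard();
 *	tlb_write_indexed();
 *	tlbw_use_hazard();
 */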

/*
 * mtc0->mfc0 hazard
 * The 24K has a 2 cycle mtc0/mfc0 execution hazard.
 * It is a MIPS32R2 processor so ehb will clear the hazard.
 */
#ifdef CONFIG_CPU_MIPSR2
/*
 * Use a macro for ehb unless explicit support for MIPSR2 is enabled
 */
__asm__(
	"	.macro\tirq_enable_hazard		\n\t"
	"	_ehb					\n\t"
	"	.endm					\n\t"
	"						\n\t"
	"	.macro\tirq_disable_hazard		\n\t"
	"	_ehb					\n\t"
	"	.endm");

#define irq_enable_hazard() \
	__asm__ __volatile__( \
		"_ehb\t\t\t\t# irq_enable_hazard")

#define irq_disable_hazard() \
	__asm__ __volatile__( \
		"_ehb\t\t\t\t# irq_disable_hazard")

#define back_to_back_c0_hazard() \
	__asm__ __volatile__( \
		"_ehb\t\t\t\t# back_to_back_c0_hazard")

#elif defined(CONFIG_CPU_R10000) || defined(CONFIG_CPU_RM9000)

/*
 * R10000 rocks - all hazards handled in hardware, so this becomes a nobrainer.
 */
__asm__(
	"	.macro\tirq_enable_hazard		\n\t"
	"	.endm					\n\t"
	"						\n\t"
	"	.macro\tirq_disable_hazard		\n\t"
	"	.endm");

#define irq_enable_hazard()	do { } while (0)
#define irq_disable_hazard()	do { } while (0)
#define back_to_back_c0_hazard()	do { } while (0)

#else

/*
 * Default for classic MIPS processors. Assume worst case hazards but don't
 * care about the irq_enable_hazard - sooner or later the hardware will
 * enable it and we don't care when exactly.
 */
__asm__(
	"	#					\n\t"
	"	# There is a hazard but we do not care	\n\t"
	"	#					\n\t"
	"	.macro\tirq_enable_hazard		\n\t"
	"	.endm					\n\t"
	"						\n\t"
	"	.macro\tirq_disable_hazard		\n\t"
	"	_ssnop; _ssnop; _ssnop			\n\t"
	"	.endm");

#define irq_enable_hazard()	do { } while (0)
#define irq_disable_hazard() \
	__asm__ __volatile__( \
		"_ssnop; _ssnop; _ssnop;\t\t# irq_disable_hazard")

#define back_to_back_c0_hazard() \
	__asm__ __volatile__( \
		"	.set noreorder			\n" \
		"	nop; nop; nop			\n" \
		"	.set reorder			\n")

#endif
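
/*
 * Illustrative sketch, not part of the original header: back_to_back_c0_hazard()
 * separates an mtc0 from a dependent mfc0, e.g. when checking whether a value
 * written to a CP0 register stuck (accessors from <asm/mipsregs.h>):
 *
 *	write_c0_config(cfg);
 *	back_to_back_c0_hazard();
 *	cfg = read_c0_config();
 */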

#endif /* __ASSEMBLY__ */

#endif /* _ASM_HAZARDS_H */