/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2003, 2004 Ralf Baechle <ralf@linux-mips.org>
 * Copyright (C) MIPS Technologies, Inc.
 *   written by Ralf Baechle <ralf@linux-mips.org>
 */
  10. #ifndef _ASM_HAZARDS_H
  11. #define _ASM_HAZARDS_H
  12. #ifdef __ASSEMBLY__
  13. .macro _ssnop
  14. sll $0, $0, 1
  15. .endm
  16. .macro _ehb
  17. sll $0, $0, 3
  18. .endm
  19. /*
  20. * RM9000 hazards. When the JTLB is updated by tlbwi or tlbwr, a subsequent
  21. * use of the JTLB for instructions should not occur for 4 cpu cycles and use
  22. * for data translations should not occur for 3 cpu cycles.
  23. */
  24. #ifdef CONFIG_CPU_RM9000
  25. .macro mtc0_tlbw_hazard
  26. .set push
  27. .set mips32
  28. _ssnop; _ssnop; _ssnop; _ssnop
  29. .set pop
  30. .endm
  31. .macro tlbw_eret_hazard
  32. .set push
  33. .set mips32
  34. _ssnop; _ssnop; _ssnop; _ssnop
  35. .set pop
  36. .endm
  37. #else
  38. /*
  39. * The taken branch will result in a two cycle penalty for the two killed
  40. * instructions on R4000 / R4400. Other processors only have a single cycle
  41. * hazard so this is nice trick to have an optimal code for a range of
  42. * processors.
  43. */
  44. .macro mtc0_tlbw_hazard
  45. b . + 8
  46. .endm
  47. .macro tlbw_eret_hazard
  48. .endm
  49. #endif
  50. /*
  51. * mtc0->mfc0 hazard
  52. * The 24K has a 2 cycle mtc0/mfc0 execution hazard.
  53. * It is a MIPS32R2 processor so ehb will clear the hazard.
  54. */
  55. #ifdef CONFIG_CPU_MIPSR2
  56. /*
  57. * Use a macro for ehb unless explicit support for MIPSR2 is enabled
  58. */
  59. #define irq_enable_hazard \
  60. _ehb
  61. #define irq_disable_hazard \
  62. _ehb
  63. #elif defined(CONFIG_CPU_R10000) || defined(CONFIG_CPU_RM9000)
  64. /*
  65. * R10000 rocks - all hazards handled in hardware, so this becomes a nobrainer.
  66. */
  67. #define irq_enable_hazard
  68. #define irq_disable_hazard
  69. #else
  70. /*
  71. * Classic MIPS needs 1 - 3 nops or ssnops
  72. */
  73. #define irq_enable_hazard
  74. #define irq_disable_hazard \
  75. _ssnop; _ssnop; _ssnop
  76. #endif
  77. #else /* __ASSEMBLY__ */
  78. __asm__(
  79. " .macro _ssnop \n"
  80. " sll $0, $0, 1 \n"
  81. " .endm \n"
  82. " \n"
  83. " .macro _ehb \n"
  84. " sll $0, $0, 3 \n"
  85. " .endm \n");
  86. #ifdef CONFIG_CPU_RM9000
  87. /*
  88. * RM9000 hazards. When the JTLB is updated by tlbwi or tlbwr, a subsequent
  89. * use of the JTLB for instructions should not occur for 4 cpu cycles and use
  90. * for data translations should not occur for 3 cpu cycles.
  91. */
  92. #define mtc0_tlbw_hazard() \
  93. __asm__ __volatile__( \
  94. " .set mips32 \n" \
  95. " _ssnop \n" \
  96. " _ssnop \n" \
  97. " _ssnop \n" \
  98. " _ssnop \n" \
  99. " .set mips0 \n")
  100. #define tlbw_use_hazard() \
  101. __asm__ __volatile__( \
  102. " .set mips32 \n" \
  103. " _ssnop \n" \
  104. " _ssnop \n" \
  105. " _ssnop \n" \
  106. " _ssnop \n" \
  107. " .set mips0 \n")
  108. #else
  109. /*
  110. * Overkill warning ...
  111. */
  112. #define mtc0_tlbw_hazard() \
  113. __asm__ __volatile__( \
  114. " .set noreorder \n" \
  115. " nop \n" \
  116. " nop \n" \
  117. " nop \n" \
  118. " nop \n" \
  119. " nop \n" \
  120. " nop \n" \
  121. " .set reorder \n")
  122. #define tlbw_use_hazard() \
  123. __asm__ __volatile__( \
  124. " .set noreorder \n" \
  125. " nop \n" \
  126. " nop \n" \
  127. " nop \n" \
  128. " nop \n" \
  129. " nop \n" \
  130. " nop \n" \
  131. " .set reorder \n")
  132. #endif
  133. /*
  134. * Interrupt enable/disable hazards
  135. * Some processors have hazards when modifying
  136. * the status register to change the interrupt state
  137. */
  138. #ifdef CONFIG_CPU_MIPSR2
  139. __asm__(" .macro irq_enable_hazard \n"
  140. " _ehb \n"
  141. " .endm \n"
  142. " \n"
  143. " .macro irq_disable_hazard \n"
  144. " _ehb \n"
  145. " .endm \n");
  146. #elif defined(CONFIG_CPU_R10000) || defined(CONFIG_CPU_RM9000)
  147. /*
  148. * R10000 rocks - all hazards handled in hardware, so this becomes a nobrainer.
  149. */
  150. __asm__(
  151. " .macro irq_enable_hazard \n"
  152. " .endm \n"
  153. " \n"
  154. " .macro irq_disable_hazard \n"
  155. " .endm \n");
  156. #else
  157. /*
  158. * Default for classic MIPS processors. Assume worst case hazards but don't
  159. * care about the irq_enable_hazard - sooner or later the hardware will
  160. * enable it and we don't care when exactly.
  161. */
  162. __asm__(
  163. " # \n"
  164. " # There is a hazard but we do not care \n"
  165. " # \n"
  166. " .macro\tirq_enable_hazard \n"
  167. " .endm \n"
  168. " \n"
  169. " .macro\tirq_disable_hazard \n"
  170. " _ssnop \n"
  171. " _ssnop \n"
  172. " _ssnop \n"
  173. " .endm \n");
  174. #endif
  175. #define irq_enable_hazard() \
  176. __asm__ __volatile__("irq_enable_hazard")
  177. #define irq_disable_hazard() \
  178. __asm__ __volatile__("irq_disable_hazard")
  179. /*
  180. * Back-to-back hazards -
  181. *
  182. * What is needed to separate a move to cp0 from a subsequent read from the
  183. * same cp0 register?
  184. */
  185. #ifdef CONFIG_CPU_MIPSR2
  186. __asm__(" .macro back_to_back_c0_hazard \n"
  187. " _ehb \n"
  188. " .endm \n");
  189. #elif defined(CONFIG_CPU_R10000) || defined(CONFIG_CPU_RM9000) || \
  190. defined(CONFIG_CPU_SB1)
  191. __asm__(" .macro back_to_back_c0_hazard \n"
  192. " .endm \n");
  193. #else
  194. __asm__(" .macro back_to_back_c0_hazard \n"
  195. " .set noreorder \n"
  196. " _ssnop \n"
  197. " _ssnop \n"
  198. " _ssnop \n"
  199. " .set reorder \n"
  200. " .endm");
  201. #endif
  202. #define back_to_back_c0_hazard() \
  203. __asm__ __volatile__("back_to_back_c0_hazard")
  204. /*
  205. * Instruction execution hazard
  206. */
  207. #ifdef CONFIG_CPU_MIPSR2
  208. /*
  209. * gcc has a tradition of misscompiling the previous construct using the
  210. * address of a label as argument to inline assembler. Gas otoh has the
  211. * annoying difference between la and dla which are only usable for 32-bit
  212. * rsp. 64-bit code, so can't be used without conditional compilation.
  213. * The alterantive is switching the assembler to 64-bit code which happens
  214. * to work right even for 32-bit code ...
  215. */
  216. #define instruction_hazard() \
  217. do { \
  218. unsigned long tmp; \
  219. \
  220. __asm__ __volatile__( \
  221. " .set mips64r2 \n" \
  222. " dla %0, 1f \n" \
  223. " jr.hb %0 \n" \
  224. " .set mips0 \n" \
  225. "1: \n" \
  226. : "=r" (tmp)); \
  227. } while (0)
  228. #else
  229. #define instruction_hazard() do { } while (0)
  230. #endif
  231. extern void mips_ihb(void);
  232. #endif /* __ASSEMBLY__ */
  233. #endif /* _ASM_HAZARDS_H */