hazards.h

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2003, 2004 Ralf Baechle <ralf@linux-mips.org>
 * Copyright (C) MIPS Technologies, Inc.
 *   written by Ralf Baechle <ralf@linux-mips.org>
 */
#ifndef _ASM_HAZARDS_H
#define _ASM_HAZARDS_H

#include <linux/config.h>

#ifdef __ASSEMBLY__
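/*
 * "sll $0, $0, 1" is the architectural encoding of SSNOP and "sll $0, $0, 3"
 * is the encoding of EHB; spelling them out this way presumably keeps the
 * macros below usable with toolchains that do not know those mnemonics.
 */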
	.macro	_ssnop
	sll	$0, $0, 1
	.endm

	.macro	_ehb
	sll	$0, $0, 3
	.endm

/*
 * RM9000 hazards.  When the JTLB is updated by tlbwi or tlbwr, a subsequent
 * use of the JTLB for instructions should not occur for 4 cpu cycles and use
 * for data translations should not occur for 3 cpu cycles.
 */
#ifdef CONFIG_CPU_RM9000

	.macro	mtc0_tlbw_hazard
	.set	push
	.set	mips32
	_ssnop; _ssnop; _ssnop; _ssnop
	.set	pop
	.endm

	.macro	tlbw_eret_hazard
	.set	push
	.set	mips32
	_ssnop; _ssnop; _ssnop; _ssnop
	.set	pop
	.endm

#else
/*
 * The taken branch will result in a two cycle penalty for the two killed
 * instructions on R4000 / R4400.  Other processors only have a single cycle
 * hazard, so this is a nice trick to get optimal code for a range of
 * processors.
 */
	.macro	mtc0_tlbw_hazard
	b	. + 8
	.endm

	.macro	tlbw_eret_hazard
	.endm

#endif
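/*
 * Illustrative use from assembler code (a sketch, not taken from this file;
 * CP0_ENTRYHI and the surrounding refill code are assumed):
 *
 *	mtc0	k1, CP0_ENTRYHI
 *	mtc0_tlbw_hazard
 *	tlbwr
 *	tlbw_eret_hazard
 *	eret
 */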
/*
 * mtc0->mfc0 hazard
 * The 24K has a 2 cycle mtc0/mfc0 execution hazard.
 * It is a MIPS32R2 processor so ehb will clear the hazard.
 */

#ifdef CONFIG_CPU_MIPSR2
/*
 * Use the _ehb macro instead of the ehb mnemonic so this still assembles
 * when the toolchain has no explicit MIPSR2 support.
 */
#define irq_enable_hazard						\
	_ehb

#define irq_disable_hazard						\
	_ehb

#elif defined(CONFIG_CPU_R10000) || defined(CONFIG_CPU_RM9000)

/*
 * R10000 rocks - all hazards handled in hardware, so this becomes a no-brainer.
 */
#define irq_enable_hazard

#define irq_disable_hazard

#else

/*
 * Classic MIPS needs 1 - 3 nops or ssnops
 */
#define irq_enable_hazard

#define irq_disable_hazard						\
	_ssnop; _ssnop; _ssnop

#endif
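/*
 * Illustrative use from assembler code (a sketch; CP0_STATUS is assumed to
 * come from <asm/mipsregs.h>): after clearing IE in c0_status,
 *
 *	mtc0	t0, CP0_STATUS
 *	irq_disable_hazard
 *
 * must come before any code that relies on interrupts being disabled.
 */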
#else /* __ASSEMBLY__ */

__asm__(
	"	.macro	_ssnop					\n"
	"	sll	$0, $0, 1				\n"
	"	.endm						\n"
	"							\n"
	"	.macro	_ehb					\n"
	"	sll	$0, $0, 3				\n"
	"	.endm						\n");
#ifdef CONFIG_CPU_RM9000

/*
 * RM9000 hazards.  When the JTLB is updated by tlbwi or tlbwr, a subsequent
 * use of the JTLB for instructions should not occur for 4 cpu cycles and use
 * for data translations should not occur for 3 cpu cycles.
 */
#define mtc0_tlbw_hazard()						\
	__asm__ __volatile__(						\
		"	.set	mips32				\n"	\
		"	_ssnop					\n"	\
		"	_ssnop					\n"	\
		"	_ssnop					\n"	\
		"	_ssnop					\n"	\
		"	.set	mips0				\n")

#define tlbw_use_hazard()						\
	__asm__ __volatile__(						\
		"	.set	mips32				\n"	\
		"	_ssnop					\n"	\
		"	_ssnop					\n"	\
		"	_ssnop					\n"	\
		"	_ssnop					\n"	\
		"	.set	mips0				\n")

#else

/*
 * Overkill warning ...
 */
#define mtc0_tlbw_hazard()						\
	__asm__ __volatile__(						\
		"	.set	noreorder			\n"	\
		"	nop					\n"	\
		"	nop					\n"	\
		"	nop					\n"	\
		"	nop					\n"	\
		"	nop					\n"	\
		"	nop					\n"	\
		"	.set	reorder				\n")

#define tlbw_use_hazard()						\
	__asm__ __volatile__(						\
		"	.set	noreorder			\n"	\
		"	nop					\n"	\
		"	nop					\n"	\
		"	nop					\n"	\
		"	nop					\n"	\
		"	nop					\n"	\
		"	nop					\n"	\
		"	.set	reorder				\n")

#endif
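/*
 * Illustrative C usage (a sketch; write_c0_entryhi() and tlb_write_indexed()
 * are assumed helpers from elsewhere in the MIPS tree, not defined here):
 *
 *	write_c0_entryhi(entryhi);
 *	mtc0_tlbw_hazard();
 *	tlb_write_indexed();
 *	tlbw_use_hazard();
 */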
/*
 * Interrupt enable/disable hazards
 *
 * Some processors have hazards when modifying the status register to
 * change the interrupt state.
 */
#ifdef CONFIG_CPU_MIPSR2

__asm__(
	"	.macro	irq_enable_hazard			\n"
	"	_ehb						\n"
	"	.endm						\n"
	"							\n"
	"	.macro	irq_disable_hazard			\n"
	"	_ehb						\n"
	"	.endm						\n");
#elif defined(CONFIG_CPU_R10000) || defined(CONFIG_CPU_RM9000)

/*
 * R10000 rocks - all hazards handled in hardware, so this becomes a no-brainer.
 */
__asm__(
	"	.macro	irq_enable_hazard			\n"
	"	.endm						\n"
	"							\n"
	"	.macro	irq_disable_hazard			\n"
	"	.endm						\n");
#else

/*
 * Default for classic MIPS processors.  Assume worst case hazards but don't
 * bother with irq_enable_hazard - sooner or later the hardware will enable
 * interrupts and it does not matter exactly when.
 */
__asm__(
	"	#						\n"
	"	# There is a hazard but we do not care		\n"
	"	#						\n"
	"	.macro	irq_enable_hazard			\n"
	"	.endm						\n"
	"							\n"
	"	.macro	irq_disable_hazard			\n"
	"	_ssnop						\n"
	"	_ssnop						\n"
	"	_ssnop						\n"
	"	.endm						\n");

#endif
#define irq_enable_hazard()						\
	__asm__ __volatile__("irq_enable_hazard")

#define irq_disable_hazard()						\
	__asm__ __volatile__("irq_disable_hazard")
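/*
 * Illustrative C usage (a sketch; read_c0_status()/write_c0_status() and
 * ST0_IE are assumed to come from <asm/mipsregs.h>):
 *
 *	write_c0_status(read_c0_status() & ~ST0_IE);
 *	irq_disable_hazard();
 */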
/*
 * Back-to-back hazards -
 *
 * What is needed to separate a move to cp0 from a subsequent read from the
 * same cp0 register?
 */
#ifdef CONFIG_CPU_MIPSR2

__asm__(
	"	.macro	back_to_back_c0_hazard			\n"
	"	_ehb						\n"
	"	.endm						\n");

#elif defined(CONFIG_CPU_R10000) || defined(CONFIG_CPU_RM9000) || \
      defined(CONFIG_CPU_SB1)

__asm__(
	"	.macro	back_to_back_c0_hazard			\n"
	"	.endm						\n");

#else

__asm__(
	"	.macro	back_to_back_c0_hazard			\n"
	"	.set	noreorder				\n"
	"	_ssnop						\n"
	"	_ssnop						\n"
	"	_ssnop						\n"
	"	.set	reorder					\n"
	"	.endm");

#endif

#define back_to_back_c0_hazard()					\
	__asm__ __volatile__("back_to_back_c0_hazard")
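/*
 * Illustrative C usage (a sketch; the c0_compare accessors are assumed to
 * come from <asm/mipsregs.h>): a write followed by a read of the same cp0
 * register needs the barrier in between,
 *
 *	write_c0_compare(cnt);
 *	back_to_back_c0_hazard();
 *	cnt = read_c0_compare();
 */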
/*
 * Instruction execution hazard
 */
#ifdef CONFIG_CPU_MIPSR2

/*
 * gcc has a tradition of miscompiling the previous construct using the
 * address of a label as argument to inline assembler.  Gas, on the other
 * hand, has the annoying difference between la and dla which are only
 * usable for 32-bit resp. 64-bit code, so they can't be used without
 * conditional compilation.  The alternative is switching the assembler to
 * 64-bit code, which happens to work right even for 32-bit code ...
 */
#define instruction_hazard()						\
do {									\
	unsigned long tmp;						\
									\
	__asm__ __volatile__(						\
	"	.set	mips64r2				\n"	\
	"	dla	%0, 1f					\n"	\
	"	jr.hb	%0					\n"	\
	"	.set	mips0					\n"	\
	"1:							\n"	\
	: "=r" (tmp));							\
} while (0)

#else

#define instruction_hazard() do { } while (0)

#endif
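/*
 * Illustrative C usage (a sketch; flush_icache_range() is assumed to come
 * from <asm/cacheflush.h>): after new instructions have been written to
 * memory and the caches made coherent, make sure the pipeline does not
 * still hold stale instructions:
 *
 *	flush_icache_range(start, end);
 *	instruction_hazard();
 */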
extern void mips_ihb(void);

#endif /* __ASSEMBLY__ */

#endif /* _ASM_HAZARDS_H */