/* genex.S — MIPS general exception handlers */
  1. /*
  2. * This file is subject to the terms and conditions of the GNU General Public
  3. * License. See the file "COPYING" in the main directory of this archive
  4. * for more details.
  5. *
  6. * Copyright (C) 1994 - 2000, 2001, 2003 Ralf Baechle
  7. * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
  8. * Copyright (C) 2002, 2007 Maciej W. Rozycki
  9. * Copyright (C) 2001, 2012 MIPS Technologies, Inc. All rights reserved.
  10. */
  11. #include <linux/init.h>
  12. #include <asm/asm.h>
  13. #include <asm/asmmacro.h>
  14. #include <asm/cacheops.h>
  15. #include <asm/irqflags.h>
  16. #include <asm/regdef.h>
  17. #include <asm/fpregdef.h>
  18. #include <asm/mipsregs.h>
  19. #include <asm/stackframe.h>
  20. #include <asm/war.h>
  21. #include <asm/thread_info.h>
#ifdef CONFIG_MIPS_MT_SMTC
/*
 * PANIC_PIC(msg) - position-independent trampoline into panic().
 *
 * Loads the address of the message (label 8, presumably emitted by the
 * TEXT() macro below — confirm against asm.h) into a0 and jumps to
 * panic() through AT, so no absolute relocations are required.  Should
 * panic() ever return, spin forever at label 9.
 */
#define PANIC_PIC(msg) \
		.set	push; \
		.set	nomicromips; \
		.set	reorder; \
		PTR_LA	a0,8f; \
		.set	noat; \
		PTR_LA	AT, panic; \
		jr	AT; \
9:		b	9b; \
		.set	pop; \
		TEXT(msg)
#endif
	__INIT

/*
 * General exception vector for all other CPUs.
 *
 * Be careful when changing this, it has to be at most 128 bytes
 * to fit into space reserved for the exception handler.
 *
 * Decodes Cause.ExcCode and tail-jumps through the exception_handlers[]
 * dispatch table; runs with .set noat because AT may hold live user state.
 */
NESTED(except_vec3_generic, 0, sp)
	.set	push
	.set	noat
#if R5432_CP0_INTERRUPT_WAR
	mfc0	k0, CP0_INDEX		/* dummy CP0 read — R5432 erratum
					   workaround (see the WAR symbol) */
#endif
	mfc0	k1, CP0_CAUSE
	andi	k1, k1, 0x7c		/* k1 = Cause.ExcCode << 2 (bits 6:2) */
#ifdef CONFIG_64BIT
	dsll	k1, k1, 1		/* table entries are 8 bytes on 64-bit */
#endif
	PTR_L	k0, exception_handlers(k1)
	jr	k0			/* tail-jump to registered handler */
	.set	pop
	END(except_vec3_generic)
/*
 * General exception handler for CPUs with virtual coherency exception.
 *
 * Be careful when changing this, it has to be at most 256 (as a special
 * exception) bytes to fit into space reserved for the exception handler.
 *
 * Fast-paths the two virtual-coherency codes (VCED = 31, VCEI = 14) and
 * falls through to the normal exception_handlers[] dispatch otherwise.
 */
NESTED(except_vec3_r4000, 0, sp)
	.set	push
	.set	mips3
	.set	noat
	mfc0	k1, CP0_CAUSE
	li	k0, 31<<2		/* ExcCode 31: VCE on data access */
	andi	k1, k1, 0x7c		/* k1 = Cause.ExcCode << 2 */
	.set	push
	.set	noreorder
	.set	nomacro
	beq	k1, k0, handle_vced
	 li	k0, 14<<2		/* (delay slot) ExcCode 14: VCE on ifetch */
	beq	k1, k0, handle_vcei
#ifdef CONFIG_64BIT
	 dsll	k1, k1, 1		/* (delay slot) scale for 8-byte entries;
					   harmless if the branch is taken — the
					   VCEI path rewrites k1 before use */
#endif
	.set	pop
	PTR_L	k0, exception_handlers(k1)
	jr	k0

	/*
	 * Big shit, we now may have two dirty primary cache lines for the same
	 * physical address.  We can safely invalidate the line pointed to by
	 * c0_badvaddr because after return from this exception handler the
	 * load / store will be re-executed.
	 */
handle_vced:
	MFC0	k0, CP0_BADVADDR	/* faulting virtual address */
	li	k1, -4			# Is this ...
	and	k0, k1			# ... really needed?
	mtc0	zero, CP0_TAGLO
	cache	Index_Store_Tag_D, (k0)	  /* kill the primary D-cache line */
	cache	Hit_Writeback_Inv_SD, (k0) /* flush matching secondary line */
#ifdef CONFIG_PROC_FS
	PTR_LA	k0, vced_count		/* bump /proc VCED statistic */
	lw	k1, (k0)
	addiu	k1, 1
	sw	k1, (k0)
#endif
	eret

handle_vcei:
	MFC0	k0, CP0_BADVADDR
	cache	Hit_Writeback_Inv_SD, (k0)	# also cleans pi
#ifdef CONFIG_PROC_FS
	PTR_LA	k0, vcei_count		/* bump /proc VCEI statistic */
	lw	k1, (k0)
	addiu	k1, 1
	sw	k1, (k0)
#endif
	eret
	.set	pop
	END(except_vec3_r4000)
	__FINIT

	.align	5	/* 32 byte rollback region */
/*
 * Idle entry: re-check TIF_NEED_RESCHED, then execute WAIT.  The whole
 * check-and-wait sequence lives inside one aligned 32-byte "rollback
 * region"; BUILD_ROLLBACK_PROLOGUE below restarts interrupted execution
 * at the top of this region, closing the race between testing the flag
 * and entering WAIT.
 */
LEAF(__r4k_wait)
	.set	push
	.set	noreorder
	/* start of rollback region */
	LONG_L	t0, TI_FLAGS($28)	/* $28/gp = thread_info in kernel mode */
	nop
	andi	t0, _TIF_NEED_RESCHED
	bnez	t0, 1f			/* work pending — skip the WAIT */
	 nop
	nop
	nop				/* padding keeps WAIT inside the region */
#ifdef CONFIG_CPU_MICROMIPS
	nop
	nop
	nop
	nop
#endif
	.set	mips3
	wait
	/* end of rollback region (the region size must be power of two) */
1:
	jr	ra
	 nop
	.set	pop
	END(__r4k_wait)
/*
 * Emit rollback_<handler>: an alternate entry that, when the exception
 * hit inside __r4k_wait's 32-byte rollback region, rewinds EPC to the
 * start of that region so the need_resched test is re-run before any
 * further WAIT.  Falls through into the normal handler either way.
 */
	.macro	BUILD_ROLLBACK_PROLOGUE handler
	FEXPORT(rollback_\handler)
	.set	push
	.set	noat
	MFC0	k0, CP0_EPC
	PTR_LA	k1, __r4k_wait
	ori	k0, 0x1f	/* 32 byte rollback region */
	xori	k0, 0x1f	/* k0 = EPC rounded down to 32-byte boundary */
	bne	k0, k1, 9f	/* outside the region: leave EPC alone */
	MTC0	k0, CP0_EPC	/* inside: restart __r4k_wait from the top */
9:
	.set	pop
	.endm
	.align	5
	BUILD_ROLLBACK_PROLOGUE handle_int
/*
 * Common hardware-interrupt entry: save state, mask interrupts, then
 * tail-call plat_irq_dispatch with ra preset to ret_from_irq.
 */
NESTED(handle_int, PT_SIZE, sp)
#ifdef CONFIG_TRACE_IRQFLAGS
	/*
	 * Check to see if the interrupted code has just disabled
	 * interrupts and ignore this interrupt for now if so.
	 *
	 * local_irq_disable() disables interrupts and then calls
	 * trace_hardirqs_off() to track the state.  If an interrupt is taken
	 * after interrupts are disabled but before the state is updated
	 * it will appear to restore_all that it is incorrectly returning with
	 * interrupts disabled.
	 */
	.set	push
	.set	noat
	mfc0	k0, CP0_STATUS
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
	and	k0, ST0_IEP		/* R3000: "previous" IE bit */
	bnez	k0, 1f
	mfc0	k0, CP0_EPC
	.set	noreorder
	j	k0			/* resume interrupted code */
	 rfe				/* (delay slot) pop IE/KU status stack */
#else
	and	k0, ST0_IE
	bnez	k0, 1f
	eret				/* IE was clear: drop the interrupt */
#endif
1:
	.set	pop
#endif
	SAVE_ALL
	CLI				/* interrupts off, kernel mode */
	TRACE_IRQS_OFF

	LONG_L	s0, TI_REGS($28)	/* preserve previous TI_REGS ... */
	LONG_S	sp, TI_REGS($28)	/* ... and point it at this frame */
	PTR_LA	ra, ret_from_irq	/* dispatcher "returns" there */
	PTR_LA	v0, plat_irq_dispatch
	jr	v0
#ifdef CONFIG_CPU_MICROMIPS
	nop
#endif
	END(handle_int)
	__INIT

/*
 * Special interrupt vector for MIPS64 ISA & embedded MIPS processors.
 * This is a dedicated interrupt exception vector which reduces the
 * interrupt processing overhead.  The jump instruction will be replaced
 * at the initialization time.
 *
 * Be careful when changing this, it has to be at most 128 bytes
 * to fit into space reserved for the exception handler.
 */
NESTED(except_vec4, 0, sp)
1:	j	1b			/* Dummy, will be replaced */
	END(except_vec4)
/*
 * EJTAG debug exception handler.
 * The EJTAG debug exception entry point is 0xbfc00480, which
 * normally is in the boot PROM, so the boot PROM must do an
 * unconditional jump to this vector.
 */
NESTED(except_vec_ejtag_debug, 0, sp)
	j	ejtag_debug_handler	/* real handler lives outside the vector */
#ifdef CONFIG_CPU_MICROMIPS
	nop
#endif
	END(except_vec_ejtag_debug)
	__FINIT

/*
 * Vectored interrupt handler.
 * This prototype is copied to ebase + n*IntCtl.VS and patched
 * to invoke the handler.
 *
 * The lui/ori pair below is patched at runtime with the per-vector
 * handler address, which is passed to except_vec_vi_handler in v0.
 */
	BUILD_ROLLBACK_PROLOGUE except_vec_vi
NESTED(except_vec_vi, 0, sp)
	SAVE_SOME
	SAVE_AT
	.set	push
	.set	noreorder
#ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * To keep from blindly blocking *all* interrupts
	 * during service by SMTC kernel, we also want to
	 * pass the IM value to be cleared.
	 */
	FEXPORT(except_vec_vi_mori)
	ori	a0, $0, 0		/* patched with the IM bits to clear */
#endif /* CONFIG_MIPS_MT_SMTC */
	PTR_LA	v1, except_vec_vi_handler
	FEXPORT(except_vec_vi_lui)
	lui	v0, 0		/* Patched */
	jr	v1
	FEXPORT(except_vec_vi_ori)
	 ori	v0, 0		/* Patched (branch delay slot) */
	.set	pop
	END(except_vec_vi)
EXPORT(except_vec_vi_end)
/*
 * Common Vectored Interrupt code
 * Complete the register saves and invoke the handler which is passed in $v0.
 */
NESTED(except_vec_vi_handler, 0, sp)
	SAVE_TEMP
	SAVE_STATIC
#ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * SMTC has an interesting problem that interrupts are level-triggered,
	 * and the CLI macro will clear EXL, potentially causing a duplicate
	 * interrupt service invocation.  So we need to clear the associated
	 * IM bit of Status prior to doing CLI, and restore it after the
	 * service routine has been invoked - we must assume that the
	 * service routine will have cleared the state, and any active
	 * level represents a new or otherwise unserviced event...
	 */
	mfc0	t1, CP0_STATUS
	and	t0, a0, t1		/* t0 = IM bits to clear (a0 was set by
					   the patched except_vec_vi_mori) */
#ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP
	mfc0	t2, CP0_TCCONTEXT	/* record masked IM bits in TCContext */
	or	t2, t0, t2
	mtc0	t2, CP0_TCCONTEXT
#endif /* CONFIG_MIPS_MT_SMTC_IM_BACKSTOP */
	xor	t1, t1, t0		/* drop those IM bits from Status */
	mtc0	t1, CP0_STATUS
	_ehb				/* hazard barrier before CLI */
#endif /* CONFIG_MIPS_MT_SMTC */
	CLI
#ifdef CONFIG_TRACE_IRQFLAGS
	move	s0, v0			/* preserve v0 (and a0 for SMTC) */
#ifdef CONFIG_MIPS_MT_SMTC
	move	s1, a0			/* across the TRACE_IRQS_OFF call */
#endif
	TRACE_IRQS_OFF
#ifdef CONFIG_MIPS_MT_SMTC
	move	a0, s1
#endif
	move	v0, s0
#endif

	LONG_L	s0, TI_REGS($28)	/* preserve previous TI_REGS ... */
	LONG_S	sp, TI_REGS($28)	/* ... and point it at this frame */
	PTR_LA	ra, ret_from_irq
	jr	v0			/* dispatch to the patched-in handler */
	END(except_vec_vi_handler)
/*
 * EJTAG debug exception handler.
 *
 * Only SDBBP-triggered debug exceptions are serviced; everything else
 * returns immediately via DERET.  k0 is stashed in the DESAVE scratch
 * register, k1 in a static buffer (see ejtag_debug_buffer below).
 */
NESTED(ejtag_debug_handler, PT_SIZE, sp)
	.set	push
	.set	noat
	MTC0	k0, CP0_DESAVE		/* free up k0 via the scratch reg */
	mfc0	k0, CP0_DEBUG

	sll	k0, k0, 30	# Check for SDBBP.
	bgez	k0, ejtag_return	/* Debug bit 1 clear: not an SDBBP */

	PTR_LA	k0, ejtag_debug_buffer	/* spill k1 so SAVE_ALL may use it */
	LONG_S	k1, 0(k0)
	SAVE_ALL
	move	a0, sp			/* arg0 = pt_regs */
	jal	ejtag_exception_handler
	RESTORE_ALL
	PTR_LA	k0, ejtag_debug_buffer
	LONG_L	k1, 0(k0)

ejtag_return:
	MFC0	k0, CP0_DESAVE
	.set	mips32
	deret				/* return from debug mode */
	.set	pop
	END(ejtag_debug_handler)
/*
 * This buffer is reserved for the use of the EJTAG debug
 * handler: one long word used to spill k1 around SAVE_ALL.
 * NOTE(review): a single shared slot, not per-CPU — verify that debug
 * exceptions cannot nest or occur concurrently on another CPU.
 */
	.data
EXPORT(ejtag_debug_buffer)
	.fill	LONGSIZE
	.previous
	__INIT

/*
 * NMI debug exception handler for MIPS reference boards.
 * The NMI debug exception entry point is 0xbfc00000, which
 * normally is in the boot PROM, so the boot PROM must do a
 * unconditional jump to this vector.
 */
NESTED(except_vec_nmi, 0, sp)
	j	nmi_handler		/* real handler lives outside the vector */
#ifdef CONFIG_CPU_MICROMIPS
	nop
#endif
	END(except_vec_nmi)
	__FINIT

/*
 * NMI body: save full state and hand a pt_regs pointer to the C-level
 * nmi_exception_handler(), then ERET back should it return.
 */
NESTED(nmi_handler, PT_SIZE, sp)
	.set	push
	.set	noat
	SAVE_ALL
	move	a0, sp			/* arg0 = pt_regs */
	jal	nmi_exception_handler
	RESTORE_ALL
	.set	mips3
	eret
	.set	pop
	END(nmi_handler)
/* No pre-dispatch fixup at all (used by the daddi_ov handler below). */
	.macro	__build_clear_none
	.endm
/* Re-enable interrupts before calling the C handler. */
	.macro	__build_clear_sti
	TRACE_IRQS_ON
	STI
	.endm
/* Keep interrupts hard-disabled while the C handler runs. */
	.macro	__build_clear_cli
	CLI
	TRACE_IRQS_OFF
	.endm
/*
 * FPE fixup: pass the FP control/status word to the handler in a1 and
 * clear the FCSR cause bits (bits 17:12) so the exception does not
 * immediately re-trap on return.  Interrupts are then re-enabled.
 */
	.macro	__build_clear_fpe
	.set	push
	/* gas fails to assemble cfc1 for some archs (octeon).*/ \
	.set	mips1
	cfc1	a1, fcr31		/* a1 = FCSR for do_fpe() */
	li	a2, ~(0x3f << 12)	/* mask of the six cause bits */
	and	a2, a1
	ctc1	a2, fcr31		/* write back with causes cleared */
	.set	pop
	TRACE_IRQS_ON
	STI
	.endm
/* Address-error fixup: record BadVAddr in pt_regs for do_ade(). */
	.macro	__build_clear_ade
	MFC0	t0, CP0_BADVADDR
	PTR_S	t0, PT_BVADDR(sp)
	KMODE
	.endm
/* Silent variant: no diagnostic print before dispatching to C. */
	.macro	__BUILD_silent exception
	.endm
/* Gas tries to parse the PRINT argument as a string containing
   string escapes and emits bogus warnings if it believes to
   recognize an unknown escape code.  So make the arguments
   start with an n and gas will believe \n is ok ... */
	.macro	__BUILD_verbose nexception
	LONG_L	a1, PT_EPC(sp)		/* a1 = EPC from the saved frame */
#ifdef CONFIG_32BIT
	PRINT("Got \nexception at %08lx\012")
#endif
#ifdef CONFIG_64BIT
	PRINT("Got \nexception at %016lx\012")
#endif
	.endm
  397. .macro __BUILD_count exception
  398. LONG_L t0,exception_count_\exception
  399. LONG_ADDIU t0, 1
  400. LONG_S t0,exception_count_\exception
  401. .comm exception_count\exception, 8, 8
  402. .endm
/*
 * Emit the common body for handle_<exception>:
 *   - save full register state;
 *   - run the per-exception fixup __build_clear_<clear> (gas .macro names
 *     are case-insensitive, hence the mixed-case invocation below);
 *   - optionally print a diagnostic (__BUILD_verbose / __BUILD_silent);
 *   - tail-call the C handler do_<handler> with a0 = pt_regs and ra
 *     preset to ret_from_exception.
 */
	.macro	__BUILD_HANDLER exception handler clear verbose ext
	.align	5
	NESTED(handle_\exception, PT_SIZE, sp)
	.set	noat
	SAVE_ALL
	FEXPORT(handle_\exception\ext)
	__BUILD_clear_\clear
	.set	at
	__BUILD_\verbose \exception
	move	a0, sp			/* arg0 = pt_regs */
	PTR_LA	ra, ret_from_exception
	j	do_\handler
	END(handle_\exception)
	.endm
/* Standard wrapper: the FEXPORTed inner entry gets the "_int" suffix. */
	.macro	BUILD_HANDLER exception handler clear verbose
	__BUILD_HANDLER	\exception \handler \clear \verbose _int
	.endm
/*
 * Instantiate the standard handlers.  The trailing comment on each line
 * is the Cause.ExcCode the handler serves.
 */
	BUILD_HANDLER adel ade ade silent		/* #4	*/
	BUILD_HANDLER ades ade ade silent		/* #5	*/
	BUILD_HANDLER ibe be cli silent			/* #6	*/
	BUILD_HANDLER dbe be cli silent			/* #7	*/
	BUILD_HANDLER bp bp sti silent			/* #9	*/
	BUILD_HANDLER ri ri sti silent			/* #10	*/
	BUILD_HANDLER cpu cpu sti silent		/* #11	*/
	BUILD_HANDLER ov ov sti silent			/* #12	*/
	BUILD_HANDLER tr tr sti silent			/* #13	*/
	BUILD_HANDLER fpe fpe fpe silent		/* #15	*/
	BUILD_HANDLER mdmx mdmx sti silent		/* #22	*/
#ifdef CONFIG_HARDWARE_WATCHPOINTS
	/*
	 * For watch, interrupts will be enabled after the watch
	 * registers are read.
	 */
	BUILD_HANDLER watch watch cli silent		/* #23	*/
#else
	BUILD_HANDLER watch watch sti verbose		/* #23	*/
#endif
	BUILD_HANDLER mcheck mcheck cli verbose		/* #24	*/
	BUILD_HANDLER mt mt sti silent			/* #25	*/
	BUILD_HANDLER dsp dsp sti silent		/* #26	*/
	BUILD_HANDLER reserved reserved sti verbose	/* others */
	.align	5
/*
 * RI fast path for VIVT-cache CPUs: before emulating rdhwr we must be
 * sure the TLB actually maps EPC (probing avoids a nested TLB refill
 * with k0/k1 live).  If the probe misses, take the full slow path.
 */
LEAF(handle_ri_rdhwr_vivt)
#ifdef CONFIG_MIPS_MT_SMTC
	PANIC_PIC("handle_ri_rdhwr_vivt called")
#else
	.set	push
	.set	noat
	.set	noreorder
	/* check if TLB contains a entry for EPC */
	MFC0	k1, CP0_ENTRYHI
	andi	k1, 0xff	/* ASID_MASK */	/* NOTE(review): hard-coded
						   8-bit ASID mask — verify
						   against the CPU's ASID width */
	MFC0	k0, CP0_EPC
	PTR_SRL	k0, _PAGE_SHIFT + 1	/* strip page offset + even/odd bit */
	PTR_SLL	k0, _PAGE_SHIFT + 1	/* ... leaving the VPN2 of EPC */
	or	k1, k0
	MTC0	k1, CP0_ENTRYHI		/* probe for (VPN2, current ASID) */
	mtc0_tlbw_hazard
	tlbp
	tlb_probe_hazard
	mfc0	k1, CP0_INDEX
	.set	pop
	bltz	k1, handle_ri	/* slow path */	/* Index < 0: no TLB match */
	/* fall thru */
#endif
	END(handle_ri_rdhwr_vivt)
/*
 * RI fast path: if the faulting instruction is "rdhwr v1,$29" (the TLS
 * pointer read used by userland), emulate it by loading TI_TP_VALUE from
 * thread_info into v1 and skipping the instruction; otherwise fall back
 * to the generic handle_ri.
 */
LEAF(handle_ri_rdhwr)
	.set	push
	.set	noat
	.set	noreorder
	/* MIPS32:    0x7c03e83b: rdhwr v1,$29 */
	/* microMIPS: 0x007d6b3c: rdhwr v1,$29 */
	MFC0	k1, CP0_EPC
#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_CPU_MIPS64_R2)
	and	k0, k1, 1		/* EPC bit 0 set => microMIPS mode */
	beqz	k0, 1f
	 xor	k1, k0			/* (delay slot) clear the ISA bit */
	lhu	k0, (k1)		/* fetch the 32-bit microMIPS insn */
	lhu	k1, 2(k1)		/* as two halfwords */
	ins	k1, k0, 16, 16
	lui	k0, 0x007d		/* k0 = microMIPS rdhwr v1,$29 */
	b	docheck
	 ori	k0, 0x6b3c
1:
	lui	k0, 0x7c03		/* k0 = MIPS32 rdhwr v1,$29 */
	lw	k1, (k1)
	ori	k0, 0xe83b
#else
	andi	k0, k1, 1
	bnez	k0, handle_ri		/* odd EPC without microMIPS: punt */
	 lui	k0, 0x7c03
	lw	k1, (k1)		/* k1 = faulting instruction word */
	ori	k0, 0xe83b
#endif
	.set	reorder
docheck:
	bne	k0, k1, handle_ri	/* if not ours */

isrdhwr:
	/* The insn is rdhwr.  No need to check CAUSE.BD here. */
	get_saved_sp	/* k1 := current_thread_info */
	.set	noreorder
	MFC0	k0, CP0_EPC
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
	ori	k1, _THREAD_MASK
	xori	k1, _THREAD_MASK	/* round down to thread_info base */
	LONG_L	v1, TI_TP_VALUE(k1)	/* v1 = TLS pointer: the rdhwr result */
	LONG_ADDIU	k0, 4		/* skip the emulated instruction */
	jr	k0
	 rfe				/* (delay slot) pop IE/KU status stack */
#else
#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
	LONG_ADDIU	k0, 4		/* stall on $k0 */
#else
	.set	at=v1
	LONG_ADDIU	k0, 4
	.set	noat
#endif
	MTC0	k0, CP0_EPC		/* resume past the rdhwr */
	/* I hope three instructions between MTC0 and ERET are enough... */
	ori	k1, _THREAD_MASK
	xori	k1, _THREAD_MASK	/* round down to thread_info base */
	LONG_L	v1, TI_TP_VALUE(k1)	/* v1 = TLS pointer: the rdhwr result */
	.set	mips3
	eret
	.set	mips0
#endif
	.set	pop
	END(handle_ri_rdhwr)
#ifdef CONFIG_64BIT
/* A temporary overflow handler used by check_daddi(). */
	__INIT

	BUILD_HANDLER daddi_ov daddi_ov none silent	/* #12 */
#endif