/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 99, 2001 Ralf Baechle
 * Copyright (C) 1994, 1995, 1996 Paul M. Antoine.
 * Copyright (C) 1999 Silicon Graphics, Inc.
 * Copyright (C) 2007 Maciej W. Rozycki
 */
#ifndef _ASM_STACKFRAME_H
#define _ASM_STACKFRAME_H

#include <linux/threads.h>

#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/mipsregs.h>
#include <asm/asm-offsets.h>

/*
 * For SMTC kernel, global IE should be left set, and interrupts
 * controlled exclusively via IXMT.
 */
#ifdef CONFIG_MIPS_MT_SMTC
#define STATMASK 0x1e
#elif defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
#define STATMASK 0x3f
#else
#define STATMASK 0x1f
#endif
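/*
 * Illustrative note (not in the original file): STATMASK selects the
 * low Status bits that the CLI/STI/KMODE macros below force to known
 * values.  0x1f covers IE, EXL, ERL and the two KSU bits on generic
 * MIPS32/MIPS64; 0x1e is the same set minus IE, since SMTC leaves the
 * global IE bit alone and masks interrupts per-TC via TCStatus.IXMT;
 * 0x3f covers the R3000/TX39-style three-deep KU/IE mode stack
 * (KUo/IEo, KUp/IEp, KUc/IEc in bits 5..0).
 */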
#ifdef CONFIG_MIPS_MT_SMTC
#include <asm/mipsmtregs.h>
#endif /* CONFIG_MIPS_MT_SMTC */

        .macro  SAVE_AT
        .set    push
        .set    noat
        LONG_S  $1, PT_R1(sp)
        .set    pop
        .endm
        .macro  SAVE_TEMP
#ifdef CONFIG_CPU_HAS_SMARTMIPS
        /* each mflhxu pops LO and shifts HI/ACX down, so this reads LO, HI, ACX in turn */
        mflhxu  v1
        LONG_S  v1, PT_LO(sp)
        mflhxu  v1
        LONG_S  v1, PT_HI(sp)
        mflhxu  v1
        LONG_S  v1, PT_ACX(sp)
#else
        mfhi    v1
#endif
#ifdef CONFIG_32BIT
        LONG_S  $8, PT_R8(sp)
        LONG_S  $9, PT_R9(sp)
#endif
        LONG_S  $10, PT_R10(sp)
        LONG_S  $11, PT_R11(sp)
        LONG_S  $12, PT_R12(sp)
#ifndef CONFIG_CPU_HAS_SMARTMIPS
        /* the stores above hide the mfhi latency; now spill HI and read LO */
        LONG_S  v1, PT_HI(sp)
        mflo    v1
#endif
        LONG_S  $13, PT_R13(sp)
        LONG_S  $14, PT_R14(sp)
        LONG_S  $15, PT_R15(sp)
        LONG_S  $24, PT_R24(sp)
#ifndef CONFIG_CPU_HAS_SMARTMIPS
        LONG_S  v1, PT_LO(sp)
#endif
        .endm
        .macro  SAVE_STATIC
        LONG_S  $16, PT_R16(sp)
        LONG_S  $17, PT_R17(sp)
        LONG_S  $18, PT_R18(sp)
        LONG_S  $19, PT_R19(sp)
        LONG_S  $20, PT_R20(sp)
        LONG_S  $21, PT_R21(sp)
        LONG_S  $22, PT_R22(sp)
        LONG_S  $23, PT_R23(sp)
        LONG_S  $30, PT_R30(sp)
        .endm
#ifdef CONFIG_SMP
#ifdef CONFIG_MIPS_MT_SMTC
#define PTEBASE_SHIFT   19      /* TCBIND */
#define CPU_ID_REG CP0_TCBIND
#define CPU_ID_MFC0 mfc0
#elif defined(CONFIG_MIPS_PGD_C0_CONTEXT)
#define PTEBASE_SHIFT   48      /* XCONTEXT */
#define CPU_ID_REG CP0_XCONTEXT
#define CPU_ID_MFC0 MFC0
#else
#define PTEBASE_SHIFT   23      /* CONTEXT */
#define CPU_ID_REG CP0_CONTEXT
#define CPU_ID_MFC0 MFC0
#endif
        .macro  get_saved_sp    /* SMP variation */
        CPU_ID_MFC0     k0, CPU_ID_REG
#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
        lui     k1, %hi(kernelsp)
#else
        /* build the 64-bit address of kernelsp 16 bits at a time */
        lui     k1, %highest(kernelsp)
        daddiu  k1, %higher(kernelsp)
        dsll    k1, 16
        daddiu  k1, %hi(kernelsp)
        dsll    k1, 16
#endif
        LONG_SRL        k0, PTEBASE_SHIFT
        LONG_ADDU       k1, k0
        LONG_L  k1, %lo(kernelsp)(k1)
        .endm

        .macro  set_saved_sp stackp temp temp2
        CPU_ID_MFC0     \temp, CPU_ID_REG
        LONG_SRL        \temp, PTEBASE_SHIFT
        LONG_S  \stackp, kernelsp(\temp)
        .endm
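/*
 * Illustrative note (not in the original file): the CPU/TC number is
 * parked in the PTEBASE field of the register named above, positioned
 * so that shifting right by PTEBASE_SHIFT directly yields a byte
 * offset into the kernelsp[] array.  For example, under SMTC the
 * TCBIND.CurTC field starts at bit 21, so "LONG_SRL k0, 19" leaves
 * tc_number * 4 in k0 -- exactly the offset of that TC's slot in an
 * array of 32-bit longs.  No multiply is needed on this hot path.
 */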
#else
        .macro  get_saved_sp    /* Uniprocessor variation */
#ifdef CONFIG_CPU_LOONGSON2F
        /*
         * Clear BTB (branch target buffer) and forbid RAS (return address
         * stack) to work around the out-of-order issue in Loongson2F
         * via its diagnostic register.
         */
        move    k0, ra
        jal     1f
        nop
1:      jal     1f
        nop
1:      jal     1f
        nop
1:      jal     1f
        nop
1:      move    ra, k0
        li      k0, 3
        mtc0    k0, $22
#endif /* CONFIG_CPU_LOONGSON2F */
#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
        lui     k1, %hi(kernelsp)
#else
        lui     k1, %highest(kernelsp)
        daddiu  k1, %higher(kernelsp)
        dsll    k1, k1, 16
        daddiu  k1, %hi(kernelsp)
        dsll    k1, k1, 16
#endif
        LONG_L  k1, %lo(kernelsp)(k1)
        .endm

        .macro  set_saved_sp stackp temp temp2
        LONG_S  \stackp, kernelsp
        .endm
#endif
        .macro  SAVE_SOME
        .set    push
        .set    noat
        .set    reorder
        mfc0    k0, CP0_STATUS
        sll     k0, 3           /* extract cu0 bit into the sign bit */
        .set    noreorder
        bltz    k0, 8f
         move   k1, sp
        .set    reorder
        /* Called from user mode, new stack. */
        get_saved_sp
#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
8:      move    k0, sp
        PTR_SUBU sp, k1, PT_SIZE
#else
        .set    at=k0
8:      PTR_SUBU k1, PT_SIZE
        .set    noat
        move    k0, sp
        move    sp, k1
#endif
        LONG_S  k0, PT_R29(sp)
        LONG_S  $3, PT_R3(sp)
        /*
         * You might think that you don't need to save $0,
         * but the FPU emulator and gdb remote debug stub
         * need it to operate correctly
         */
        LONG_S  $0, PT_R0(sp)
        mfc0    v1, CP0_STATUS
        LONG_S  $2, PT_R2(sp)
#ifdef CONFIG_MIPS_MT_SMTC
        /*
         * Ideally, these instructions would be shuffled in
         * to cover the pipeline delay.
         */
        .set    mips32
        mfc0    v1, CP0_TCSTATUS
        .set    mips0
        LONG_S  v1, PT_TCSTATUS(sp)
#endif /* CONFIG_MIPS_MT_SMTC */
        LONG_S  $4, PT_R4(sp)
        LONG_S  $5, PT_R5(sp)
        LONG_S  v1, PT_STATUS(sp)
        mfc0    v1, CP0_CAUSE
        LONG_S  $6, PT_R6(sp)
        LONG_S  $7, PT_R7(sp)
        LONG_S  v1, PT_CAUSE(sp)
        MFC0    v1, CP0_EPC
#ifdef CONFIG_64BIT
        LONG_S  $8, PT_R8(sp)
        LONG_S  $9, PT_R9(sp)
#endif
        LONG_S  $25, PT_R25(sp)
        LONG_S  $28, PT_R28(sp)
        LONG_S  $31, PT_R31(sp)
        LONG_S  v1, PT_EPC(sp)
        ori     $28, sp, _THREAD_MASK   /* $28 = sp & ~_THREAD_MASK, */
        xori    $28, _THREAD_MASK       /* i.e. the current thread_info */
#ifdef CONFIG_CPU_CAVIUM_OCTEON
        .set    mips64
        pref    0, 0($28)               /* Prefetch the current pointer */
        pref    0, PT_R31(sp)           /* Prefetch $31 (ra) */
        /*
         * The Octeon multiplier state is affected by general multiply
         * instructions.  It must be saved here, before kernel code has
         * a chance to corrupt it.
         */
        jal     octeon_mult_save
        LONG_L  v1, 0($28)              /* Load the current pointer */
        /* Restore $31 (ra), which was clobbered by the jal */
        LONG_L  ra, PT_R31(sp)
        pref    0, 0(v1)                /* Prefetch the current thread */
#endif
        .set    pop
        .endm
        .macro  SAVE_ALL
        SAVE_SOME
        SAVE_AT
        SAVE_TEMP
        SAVE_STATIC
        .endm
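/*
 * Illustrative usage sketch (not part of this header; handle_example
 * and do_example are hypothetical names, NESTED/END come from
 * <asm/asm.h>):
 *
 *      NESTED(handle_example, PT_SIZE, sp)
 *      SAVE_ALL                        # spill full frame to pt_regs
 *      CLI                             # kernel mode, interrupts off
 *      move    a0, sp                  # pass struct pt_regs *
 *      jal     do_example              # hypothetical C-level handler
 *      j       ret_from_exception      # common exit path in entry.S
 *      END(handle_example)
 */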
        .macro  RESTORE_AT
        .set    push
        .set    noat
        LONG_L  $1, PT_R1(sp)
        .set    pop
        .endm

        .macro  RESTORE_TEMP
#ifdef CONFIG_CPU_HAS_SMARTMIPS
        /* each mtlhx pushes LO into HI and HI into ACX, so feed ACX first and LO last */
        LONG_L  $24, PT_ACX(sp)
        mtlhx   $24
        LONG_L  $24, PT_HI(sp)
        mtlhx   $24
        LONG_L  $24, PT_LO(sp)
        mtlhx   $24
#else
        LONG_L  $24, PT_LO(sp)
        mtlo    $24
        LONG_L  $24, PT_HI(sp)
        mthi    $24
#endif
#ifdef CONFIG_32BIT
        LONG_L  $8, PT_R8(sp)
        LONG_L  $9, PT_R9(sp)
#endif
        LONG_L  $10, PT_R10(sp)
        LONG_L  $11, PT_R11(sp)
        LONG_L  $12, PT_R12(sp)
        LONG_L  $13, PT_R13(sp)
        LONG_L  $14, PT_R14(sp)
        LONG_L  $15, PT_R15(sp)
        LONG_L  $24, PT_R24(sp)
        .endm
        .macro  RESTORE_STATIC
        LONG_L  $16, PT_R16(sp)
        LONG_L  $17, PT_R17(sp)
        LONG_L  $18, PT_R18(sp)
        LONG_L  $19, PT_R19(sp)
        LONG_L  $20, PT_R20(sp)
        LONG_L  $21, PT_R21(sp)
        LONG_L  $22, PT_R22(sp)
        LONG_L  $23, PT_R23(sp)
        LONG_L  $30, PT_R30(sp)
        .endm
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)

        .macro  RESTORE_SOME
        .set    push
        .set    reorder
        .set    noat
        mfc0    a0, CP0_STATUS
        li      v1, 0xff00
        ori     a0, STATMASK
        xori    a0, STATMASK
        mtc0    a0, CP0_STATUS
        and     a0, v1
        LONG_L  v0, PT_STATUS(sp)
        nor     v1, $0, v1
        and     v0, v1
        or      v0, a0
        mtc0    v0, CP0_STATUS
        LONG_L  $31, PT_R31(sp)
        LONG_L  $28, PT_R28(sp)
        LONG_L  $25, PT_R25(sp)
        LONG_L  $7, PT_R7(sp)
        LONG_L  $6, PT_R6(sp)
        LONG_L  $5, PT_R5(sp)
        LONG_L  $4, PT_R4(sp)
        LONG_L  $3, PT_R3(sp)
        LONG_L  $2, PT_R2(sp)
        .set    pop
        .endm

        .macro  RESTORE_SP_AND_RET
        .set    push
        .set    noreorder
        LONG_L  k0, PT_EPC(sp)
        LONG_L  sp, PT_R29(sp)
        jr      k0
         rfe
        .set    pop
        .endm
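/*
 * Illustrative note (not in the original file): the R3000 has no eret,
 * and rfe does not branch by itself -- it only pops the KU/IE mode
 * stack.  Placing it in the delay slot of "jr k0" makes the jump to
 * the saved EPC and the mode restore take effect together.
 */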
#else
        .macro  RESTORE_SOME
        .set    push
        .set    reorder
        .set    noat
#ifdef CONFIG_MIPS_MT_SMTC
        .set    mips32r2
        /*
         * We need to make sure the read-modify-write
         * of Status below isn't perturbed by an interrupt
         * or cross-TC access, so we need to do at least a DMT,
         * protected by an interrupt-inhibit.  But setting IXMT
         * also creates a few-cycle window where an IPI could
         * be queued and not be detected before potentially
         * returning to a WAIT or user-mode loop.  It must be
         * replayed.
         *
         * We're in the middle of a context switch, and
         * we can't dispatch it directly without trashing
         * some registers, so we'll try to detect this unlikely
         * case and program a software interrupt in the VPE,
         * as would be done for a cross-VPE IPI.  To accommodate
         * the handling of that case, we're doing a DVPE instead
         * of just a DMT here to protect against other threads.
         * This is a lot of cruft to cover a tiny window.
         * If you can find a better design, implement it!
         */
        mfc0    v0, CP0_TCSTATUS
        ori     v0, TCSTATUS_IXMT
        mtc0    v0, CP0_TCSTATUS
        _ehb
        DVPE    5       # dvpe a1
        jal     mips_ihb
#endif /* CONFIG_MIPS_MT_SMTC */
#ifdef CONFIG_CPU_CAVIUM_OCTEON
        /* Restore the Octeon multiplier state */
        jal     octeon_mult_restore
#endif
        mfc0    a0, CP0_STATUS
        ori     a0, STATMASK
        xori    a0, STATMASK
        mtc0    a0, CP0_STATUS
        li      v1, 0xff00
        and     a0, v1
        LONG_L  v0, PT_STATUS(sp)
        nor     v1, $0, v1
        and     v0, v1
        or      v0, a0
        mtc0    v0, CP0_STATUS
#ifdef CONFIG_MIPS_MT_SMTC
        /*
         * Only after EXL/ERL have been restored to Status can we
         * restore TCStatus.IXMT.
         */
        LONG_L  v1, PT_TCSTATUS(sp)
        _ehb
        mfc0    a0, CP0_TCSTATUS
        andi    v1, TCSTATUS_IXMT
        bnez    v1, 0f
        /*
         * We'd like to detect any IPIs queued in the tiny window
         * above and request a software interrupt to service them
         * when we ERET.
         *
         * Computing the offset into the IPIQ array of the executing
         * TC's IPI queue in-line would be tedious.  We use part of
         * the TCContext register to hold 16 bits of offset that we
         * can add in-line to find the queue head.
         */
        mfc0    v0, CP0_TCCONTEXT
        la      a2, IPIQ
        srl     v0, v0, 16
        addu    a2, a2, v0
        LONG_L  v0, 0(a2)
        beqz    v0, 0f
        /*
         * If we have a queue, provoke dispatch within the VPE by setting C_SW1
         */
        mfc0    v0, CP0_CAUSE
        ori     v0, v0, C_SW1
        mtc0    v0, CP0_CAUSE
0:
        /*
         * This test should really never branch but
         * let's be prudent here.  Having atomized
         * the shared register modifications, we can
         * now EVPE, and must do so before interrupts
         * are potentially re-enabled.
         */
        andi    a1, a1, MVPCONTROL_EVP
        beqz    a1, 1f
        evpe
1:
        /* We know that TCStatus.IXMT should be set from above */
        xori    a0, a0, TCSTATUS_IXMT
        or      a0, a0, v1
        mtc0    a0, CP0_TCSTATUS
        _ehb
        .set    mips0
#endif /* CONFIG_MIPS_MT_SMTC */
        LONG_L  v1, PT_EPC(sp)
        MTC0    v1, CP0_EPC
        LONG_L  $31, PT_R31(sp)
        LONG_L  $28, PT_R28(sp)
        LONG_L  $25, PT_R25(sp)
#ifdef CONFIG_64BIT
        LONG_L  $8, PT_R8(sp)
        LONG_L  $9, PT_R9(sp)
#endif
        LONG_L  $7, PT_R7(sp)
        LONG_L  $6, PT_R6(sp)
        LONG_L  $5, PT_R5(sp)
        LONG_L  $4, PT_R4(sp)
        LONG_L  $3, PT_R3(sp)
        LONG_L  $2, PT_R2(sp)
        .set    pop
        .endm
        .macro  RESTORE_SP_AND_RET
        LONG_L  sp, PT_R29(sp)
        .set    mips3
        eret
        .set    mips0
        .endm
#endif

        .macro  RESTORE_SP
        LONG_L  sp, PT_R29(sp)
        .endm

        .macro  RESTORE_ALL
        RESTORE_TEMP
        RESTORE_STATIC
        RESTORE_AT
        RESTORE_SOME
        RESTORE_SP
        .endm

        .macro  RESTORE_ALL_AND_RET
        RESTORE_TEMP
        RESTORE_STATIC
        RESTORE_AT
        RESTORE_SOME
        RESTORE_SP_AND_RET
        .endm
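/*
 * Illustrative usage (not from this file): with interrupts disabled
 * and sp still pointing at the saved struct pt_regs, a handler can
 * unwind and leave in one step, e.g.
 *
 *      RESTORE_ALL_AND_RET     # refill GPRs, reload sp, eret/rfe to EPC
 *
 * The order inside the macro matters: every slot is addressed
 * relative to sp, so the frame pointer is consumed (the final sp
 * reload and return) only after every other register has been
 * refilled.
 */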
/*
 * Move to kernel mode and disable interrupts.
 * Set cp0 enable bit as a sign that we're running on the kernel stack
 */
        .macro  CLI
#if !defined(CONFIG_MIPS_MT_SMTC)
        mfc0    t0, CP0_STATUS
        li      t1, ST0_CU0 | STATMASK
        or      t0, t1
        xori    t0, STATMASK
        mtc0    t0, CP0_STATUS
#else /* CONFIG_MIPS_MT_SMTC */
        /*
         * For SMTC, we need to set privilege
         * and disable interrupts only for the
         * current TC, using the TCStatus register.
         */
        mfc0    t0, CP0_TCSTATUS
        /* Fortunately CU 0 is in the same place in both registers */
        /* Set TCU0, TMX, TKSU (for later inversion) and IXMT */
        li      t1, ST0_CU0 | 0x08001c00
        or      t0, t1
        /* Clear TKSU, leave IXMT */
        xori    t0, 0x00001800
        mtc0    t0, CP0_TCSTATUS
        _ehb
        /* We need to leave the global IE bit set, but clear EXL... */
        mfc0    t0, CP0_STATUS
        ori     t0, ST0_EXL | ST0_ERL
        xori    t0, ST0_EXL | ST0_ERL
        mtc0    t0, CP0_STATUS
#endif /* CONFIG_MIPS_MT_SMTC */
        irq_disable_hazard
        .endm
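/*
 * Worked example of the or/xori idiom above (illustrative, not in the
 * original file): with STATMASK == 0x1f, "or t0, t1" forces CU0 plus
 * the IE/EXL/ERL/KSU bits all to 1, and the following "xori t0, 0x1f"
 * flips exactly those five low bits back to 0.  The net effect -- CU0
 * set, kernel mode, interrupts and exception levels off -- is reached
 * in two ALU ops regardless of the bits' previous values.
 */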
/*
 * Move to kernel mode and enable interrupts.
 * Set cp0 enable bit as a sign that we're running on the kernel stack
 */
        .macro  STI
#if !defined(CONFIG_MIPS_MT_SMTC)
        mfc0    t0, CP0_STATUS
        li      t1, ST0_CU0 | STATMASK
        or      t0, t1
        xori    t0, STATMASK & ~1
        mtc0    t0, CP0_STATUS
#else /* CONFIG_MIPS_MT_SMTC */
        /*
         * For SMTC, we need to set privilege
         * and enable interrupts only for the
         * current TC, using the TCStatus register.
         */
        _ehb
        mfc0    t0, CP0_TCSTATUS
        /* Fortunately CU 0 is in the same place in both registers */
        /* Set TCU0, TKSU (for later inversion) and IXMT */
        li      t1, ST0_CU0 | 0x08001c00
        or      t0, t1
        /* Clear TKSU *and* IXMT */
        xori    t0, 0x00001c00
        mtc0    t0, CP0_TCSTATUS
        _ehb
        /* We need to leave the global IE bit set, but clear EXL... */
        mfc0    t0, CP0_STATUS
        ori     t0, ST0_EXL
        xori    t0, ST0_EXL
        mtc0    t0, CP0_STATUS
        /* irq_enable_hazard below should expand to EHB for 24K/34K CPUs */
#endif /* CONFIG_MIPS_MT_SMTC */
        irq_enable_hazard
        .endm
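/*
 * Illustrative note (not in the original file): STI differs from CLI
 * only in the xori mask.  Masking with "STATMASK & ~1" leaves bit 0
 * (IE) set after the or/xori pair, so EXL/ERL/KSU are still cleared
 * but interrupts come back on; e.g. with STATMASK == 0x1f the or sets
 * 0x1f and the "xori 0x1e" clears everything except IE.
 */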
/*
 * Just move to kernel mode and leave interrupts as they are.  Note
 * for the R3000 this means copying the previous enable from IEp.
 * Set cp0 enable bit as a sign that we're running on the kernel stack
 */
        .macro  KMODE
#ifdef CONFIG_MIPS_MT_SMTC
        /*
         * This gets baroque in SMTC.  We want to
         * protect the non-atomic clearing of EXL
         * with DMT/EMT, but we don't want to take
         * an interrupt while DMT is still in effect.
         */
        /* KMODE gets invoked from both reorder and noreorder code */
        .set    push
        .set    mips32r2
        .set    noreorder
        mfc0    v0, CP0_TCSTATUS
        andi    v1, v0, TCSTATUS_IXMT
        ori     v0, TCSTATUS_IXMT
        mtc0    v0, CP0_TCSTATUS
        _ehb
        DMT     2       # dmt v0
        /*
         * We don't know a priori if ra is "live"
         */
        move    t0, ra
        jal     mips_ihb
        nop     /* delay slot */
        move    ra, t0
#endif /* CONFIG_MIPS_MT_SMTC */
        mfc0    t0, CP0_STATUS
        li      t1, ST0_CU0 | (STATMASK & ~1)
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
        andi    t2, t0, ST0_IEP
        srl     t2, 2
        or      t0, t2
#endif
        or      t0, t1
        xori    t0, STATMASK & ~1
        mtc0    t0, CP0_STATUS
#ifdef CONFIG_MIPS_MT_SMTC
        _ehb
        andi    v0, v0, VPECONTROL_TE
        beqz    v0, 2f
        nop     /* delay slot */
        emt
2:
        mfc0    v0, CP0_TCSTATUS
        /* Clear IXMT, then OR in previous value */
        ori     v0, TCSTATUS_IXMT
        xori    v0, TCSTATUS_IXMT
        or      v0, v1, v0
        mtc0    v0, CP0_TCSTATUS
        /*
         * irq_disable_hazard below should expand to EHB
         * on 24K/34K CPUs
         */
        .set    pop
#endif /* CONFIG_MIPS_MT_SMTC */
        irq_disable_hazard
        .endm
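/*
 * Illustrative note on the R3000/TX39 special case in KMODE (not in
 * the original file): those cores keep a three-deep KU/IE stack in
 * Status instead of EXL.  The "andi t2, t0, ST0_IEP; srl t2, 2;
 * or t0, t2" sequence copies the previous interrupt enable (IEp,
 * bit 2) down into the current one (IEc, bit 0), which is how
 * "leave interrupts as they are" is implemented there.
 */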
#endif /* _ASM_STACKFRAME_H */