/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 99, 2001 Ralf Baechle
 * Copyright (C) 1994, 1995, 1996 Paul M. Antoine.
 * Copyright (C) 1999 Silicon Graphics, Inc.
 * Copyright (C) 2007 Maciej W. Rozycki
 */
#ifndef _ASM_STACKFRAME_H
#define _ASM_STACKFRAME_H

#include <linux/threads.h>

#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/mipsregs.h>
#include <asm/asm-offsets.h>

/*
 * For SMTC kernel, global IE should be left set, and interrupts
 * controlled exclusively via IXMT.
 */
#ifdef CONFIG_MIPS_MT_SMTC
#define STATMASK 0x1e
#elif defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
#define STATMASK 0x3f
#else
#define STATMASK 0x1f
#endif

#ifdef CONFIG_MIPS_MT_SMTC
#include <asm/mipsmtregs.h>
#endif /* CONFIG_MIPS_MT_SMTC */
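
/*
 * Save the assembler temporary ($1/$at); .set noat keeps the assembler
 * from silently clobbering it while we store it.
 */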
	.macro	SAVE_AT
	.set	push
	.set	noat
	LONG_S	$1, PT_R1(sp)
	.set	pop
	.endm
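
/*
 * Save the caller-saved temporaries ($8-$15, $24) together with the
 * multiply/divide state: HI/LO, or the full SmartMIPS ACX/HI/LO
 * accumulator where available.
 */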
	.macro	SAVE_TEMP
#ifdef CONFIG_CPU_HAS_SMARTMIPS
	mflhxu	v1
	LONG_S	v1, PT_LO(sp)
	mflhxu	v1
	LONG_S	v1, PT_HI(sp)
	mflhxu	v1
	LONG_S	v1, PT_ACX(sp)
#else
	mfhi	v1
#endif
#ifdef CONFIG_32BIT
	LONG_S	$8, PT_R8(sp)
	LONG_S	$9, PT_R9(sp)
#endif
	LONG_S	$10, PT_R10(sp)
	LONG_S	$11, PT_R11(sp)
	LONG_S	$12, PT_R12(sp)
#ifndef CONFIG_CPU_HAS_SMARTMIPS
	LONG_S	v1, PT_HI(sp)
	mflo	v1
#endif
	LONG_S	$13, PT_R13(sp)
	LONG_S	$14, PT_R14(sp)
	LONG_S	$15, PT_R15(sp)
	LONG_S	$24, PT_R24(sp)
#ifndef CONFIG_CPU_HAS_SMARTMIPS
	LONG_S	v1, PT_LO(sp)
#endif
#ifdef CONFIG_CPU_CAVIUM_OCTEON
	/*
	 * The Octeon multiplier state is clobbered by ordinary
	 * multiply instructions, so it must be saved here, before
	 * any kernel code has a chance to corrupt it.
	 */
	jal	octeon_mult_save
#endif
	.endm
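
/* Save the callee-saved registers: $16-$23 (s0-s7) and $30 (fp/s8). */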
	.macro	SAVE_STATIC
	LONG_S	$16, PT_R16(sp)
	LONG_S	$17, PT_R17(sp)
	LONG_S	$18, PT_R18(sp)
	LONG_S	$19, PT_R19(sp)
	LONG_S	$20, PT_R20(sp)
	LONG_S	$21, PT_R21(sp)
	LONG_S	$22, PT_R22(sp)
	LONG_S	$23, PT_R23(sp)
	LONG_S	$30, PT_R30(sp)
	.endm
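
/*
 * On SMP each CPU keeps its kernel stack pointer in the kernelsp[]
 * array.  The index is derived from a per-CPU CP0 register (TCBind,
 * XContext or Context, depending on configuration), shifted right by
 * PTEBASE_SHIFT so it can be added directly as a byte offset.
 */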
#ifdef CONFIG_SMP
#ifdef CONFIG_MIPS_MT_SMTC
#define PTEBASE_SHIFT	19	/* TCBIND */
#define CPU_ID_REG	CP0_TCBIND
#define CPU_ID_MFC0	mfc0
#elif defined(CONFIG_MIPS_PGD_C0_CONTEXT)
#define PTEBASE_SHIFT	48	/* XCONTEXT */
#define CPU_ID_REG	CP0_XCONTEXT
#define CPU_ID_MFC0	MFC0
#else
#define PTEBASE_SHIFT	23	/* CONTEXT */
#define CPU_ID_REG	CP0_CONTEXT
#define CPU_ID_MFC0	MFC0
#endif
	.macro	get_saved_sp	/* SMP variation */
	CPU_ID_MFC0	k0, CPU_ID_REG
#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
	lui	k1, %hi(kernelsp)
#else
	lui	k1, %highest(kernelsp)
	daddiu	k1, %higher(kernelsp)
	dsll	k1, 16
	daddiu	k1, %hi(kernelsp)
	dsll	k1, 16
#endif
	LONG_SRL	k0, PTEBASE_SHIFT
	LONG_ADDU	k1, k0
	LONG_L	k1, %lo(kernelsp)(k1)
	.endm
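
/*
 * Record the current kernel stack pointer in this CPU's kernelsp[]
 * slot, e.g. when switching to another thread's stack.
 */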
	.macro	set_saved_sp stackp temp temp2
	CPU_ID_MFC0	\temp, CPU_ID_REG
	LONG_SRL	\temp, PTEBASE_SHIFT
	LONG_S	\stackp, kernelsp(\temp)
	.endm
#else
	.macro	get_saved_sp	/* Uniprocessor variation */
#ifdef CONFIG_CPU_JUMP_WORKAROUNDS
	/*
	 * Clear the BTB (branch target buffer) and forbid the RAS
	 * (return address stack) via the diagnostic register, to work
	 * around the out-of-order issue of the Loongson2F.
	 */
	move	k0, ra
	jal	1f
	 nop
1:	jal	1f
	 nop
1:	jal	1f
	 nop
1:	jal	1f
	 nop
1:	move	ra, k0
	li	k0, 3
	mtc0	k0, $22
#endif /* CONFIG_CPU_JUMP_WORKAROUNDS */
#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
	lui	k1, %hi(kernelsp)
#else
	lui	k1, %highest(kernelsp)
	daddiu	k1, %higher(kernelsp)
	dsll	k1, k1, 16
	daddiu	k1, %hi(kernelsp)
	dsll	k1, k1, 16
#endif
	LONG_L	k1, %lo(kernelsp)(k1)
	.endm

	.macro	set_saved_sp stackp temp temp2
	LONG_S	\stackp, kernelsp
	.endm
#endif
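
/*
 * Save the registers that every exception handler needs: decide
 * whether we trapped from user or kernel mode (Status.CU0 serves as
 * the "on kernel stack" marker), switch to the kernel stack if coming
 * from user mode, then save the volatile state plus Status/Cause/EPC.
 */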
	.macro	SAVE_SOME
	.set	push
	.set	noat
	.set	reorder
	mfc0	k0, CP0_STATUS
	sll	k0, 3		/* extract cu0 bit */
	.set	noreorder
	bltz	k0, 8f
	 move	k1, sp
	.set	reorder
	/* Called from user mode, new stack. */
	get_saved_sp
#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
8:	move	k0, sp
	PTR_SUBU sp, k1, PT_SIZE
#else
	.set	at=k0
8:	PTR_SUBU k1, PT_SIZE
	.set	noat
	move	k0, sp
	move	sp, k1
#endif
	LONG_S	k0, PT_R29(sp)
	LONG_S	$3, PT_R3(sp)
	/*
	 * You might think that you don't need to save $0,
	 * but the FPU emulator and gdb remote debug stub
	 * need it to operate correctly.
	 */
	LONG_S	$0, PT_R0(sp)
	mfc0	v1, CP0_STATUS
	LONG_S	$2, PT_R2(sp)
	LONG_S	v1, PT_STATUS(sp)
#ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * Ideally, these instructions would be shuffled in
	 * to cover the pipeline delay.
	 */
	.set	mips32
	mfc0	k0, CP0_TCSTATUS
	.set	mips0
	LONG_S	k0, PT_TCSTATUS(sp)
#endif /* CONFIG_MIPS_MT_SMTC */
	LONG_S	$4, PT_R4(sp)
	mfc0	v1, CP0_CAUSE
	LONG_S	$5, PT_R5(sp)
	LONG_S	v1, PT_CAUSE(sp)
	LONG_S	$6, PT_R6(sp)
	MFC0	v1, CP0_EPC
	LONG_S	$7, PT_R7(sp)
#ifdef CONFIG_64BIT
	LONG_S	$8, PT_R8(sp)
	LONG_S	$9, PT_R9(sp)
#endif
	LONG_S	v1, PT_EPC(sp)
	LONG_S	$25, PT_R25(sp)
	LONG_S	$28, PT_R28(sp)
	LONG_S	$31, PT_R31(sp)
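	/* Point $28 (gp) at thread_info: mask sp down to the stack base */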
	ori	$28, sp, _THREAD_MASK
	xori	$28, _THREAD_MASK
#ifdef CONFIG_CPU_CAVIUM_OCTEON
	.set	mips64
	pref	0, 0($28)	/* Prefetch the current pointer */
#endif
	.set	pop
	.endm
	.macro	SAVE_ALL
	SAVE_SOME
	SAVE_AT
	SAVE_TEMP
	SAVE_STATIC
	.endm
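
/*
 * Typical use (sketch only): a low-level handler saves everything on
 * entry and returns via RESTORE_ALL_AND_RET, e.g.
 *
 *	NESTED(handle_foo, PT_SIZE, sp)		# "handle_foo" is a made-up name
 *	SAVE_ALL
 *	CLI
 *	...
 *	RESTORE_ALL_AND_RET
 *	END(handle_foo)
 */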
	.macro	RESTORE_AT
	.set	push
	.set	noat
	LONG_L	$1, PT_R1(sp)
	.set	pop
	.endm
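
/*
 * Restore the temporaries and the multiply/divide state; the inverse
 * of SAVE_TEMP.
 */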
	.macro	RESTORE_TEMP
#ifdef CONFIG_CPU_CAVIUM_OCTEON
	/* Restore the Octeon multiplier state */
	jal	octeon_mult_restore
#endif
#ifdef CONFIG_CPU_HAS_SMARTMIPS
	LONG_L	$24, PT_ACX(sp)
	mtlhx	$24
	LONG_L	$24, PT_HI(sp)
	mtlhx	$24
	LONG_L	$24, PT_LO(sp)
	mtlhx	$24
#else
	LONG_L	$24, PT_LO(sp)
	mtlo	$24
	LONG_L	$24, PT_HI(sp)
	mthi	$24
#endif
#ifdef CONFIG_32BIT
	LONG_L	$8, PT_R8(sp)
	LONG_L	$9, PT_R9(sp)
#endif
	LONG_L	$10, PT_R10(sp)
	LONG_L	$11, PT_R11(sp)
	LONG_L	$12, PT_R12(sp)
	LONG_L	$13, PT_R13(sp)
	LONG_L	$14, PT_R14(sp)
	LONG_L	$15, PT_R15(sp)
	LONG_L	$24, PT_R24(sp)
	.endm
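
/* Restore the callee-saved registers; the inverse of SAVE_STATIC. */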
	.macro	RESTORE_STATIC
	LONG_L	$16, PT_R16(sp)
	LONG_L	$17, PT_R17(sp)
	LONG_L	$18, PT_R18(sp)
	LONG_L	$19, PT_R19(sp)
	LONG_L	$20, PT_R20(sp)
	LONG_L	$21, PT_R21(sp)
	LONG_L	$22, PT_R22(sp)
	LONG_L	$23, PT_R23(sp)
	LONG_L	$30, PT_R30(sp)
	.endm
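
/*
 * The R3000/TX39 have no ERET; exception return is a jump through k0
 * with RFE in the delay slot, so they get their own variants of
 * RESTORE_SOME and RESTORE_SP_AND_RET.
 */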
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)

	.macro	RESTORE_SOME
	.set	push
	.set	reorder
	.set	noat
	mfc0	a0, CP0_STATUS
	li	v1, 0xff00
	ori	a0, STATMASK
	xori	a0, STATMASK
	mtc0	a0, CP0_STATUS
	and	a0, v1
	LONG_L	v0, PT_STATUS(sp)
	nor	v1, $0, v1
	and	v0, v1
	or	v0, a0
	mtc0	v0, CP0_STATUS
	LONG_L	$31, PT_R31(sp)
	LONG_L	$28, PT_R28(sp)
	LONG_L	$25, PT_R25(sp)
	LONG_L	$7, PT_R7(sp)
	LONG_L	$6, PT_R6(sp)
	LONG_L	$5, PT_R5(sp)
	LONG_L	$4, PT_R4(sp)
	LONG_L	$3, PT_R3(sp)
	LONG_L	$2, PT_R2(sp)
	.set	pop
	.endm

	.macro	RESTORE_SP_AND_RET
	.set	push
	.set	noreorder
	LONG_L	k0, PT_EPC(sp)
	LONG_L	sp, PT_R29(sp)
	jr	k0
	 rfe
	.set	pop
	.endm
#else
	.macro	RESTORE_SOME
	.set	push
	.set	reorder
	.set	noat
#ifdef CONFIG_MIPS_MT_SMTC
	.set	mips32r2
	/*
	 * We need to make sure the read-modify-write
	 * of Status below isn't perturbed by an interrupt
	 * or cross-TC access, so we need to do at least a DMT,
	 * protected by an interrupt-inhibit.  But setting IXMT
	 * also creates a few-cycle window where an IPI could
	 * be queued and not be detected before potentially
	 * returning to a WAIT or user-mode loop.  It must be
	 * replayed.
	 *
	 * We're in the middle of a context switch, and
	 * we can't dispatch it directly without trashing
	 * some registers, so we'll try to detect this unlikely
	 * case and program a software interrupt in the VPE,
	 * as would be done for a cross-VPE IPI.  To accommodate
	 * the handling of that case, we're doing a DVPE instead
	 * of just a DMT here to protect against other threads.
	 * This is a lot of cruft to cover a tiny window.
	 * If you can find a better design, implement it!
	 */
	mfc0	v0, CP0_TCSTATUS
	ori	v0, TCSTATUS_IXMT
	mtc0	v0, CP0_TCSTATUS
	_ehb
	DVPE	5				# dvpe a1
	jal	mips_ihb
#endif /* CONFIG_MIPS_MT_SMTC */
	mfc0	a0, CP0_STATUS
	ori	a0, STATMASK
	xori	a0, STATMASK
	mtc0	a0, CP0_STATUS
	li	v1, 0xff00
	and	a0, v1
	LONG_L	v0, PT_STATUS(sp)
	nor	v1, $0, v1
	and	v0, v1
	or	v0, a0
	mtc0	v0, CP0_STATUS
#ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * Only after EXL/ERL have been restored to status can we
	 * restore TCStatus.IXMT.
	 */
	LONG_L	v1, PT_TCSTATUS(sp)
	_ehb
	mfc0	a0, CP0_TCSTATUS
	andi	v1, TCSTATUS_IXMT
	bnez	v1, 0f
	/*
	 * We'd like to detect any IPIs queued in the tiny window
	 * above and request a software interrupt to service them
	 * when we ERET.
	 *
	 * Computing the offset into the IPIQ array of the executing
	 * TC's IPI queue in-line would be tedious.  We use part of
	 * the TCContext register to hold 16 bits of offset that we
	 * can add in-line to find the queue head.
	 */
	mfc0	v0, CP0_TCCONTEXT
	la	a2, IPIQ
	srl	v0, v0, 16
	addu	a2, a2, v0
	LONG_L	v0, 0(a2)
	beqz	v0, 0f
	/*
	 * If we have a queue, provoke dispatch within the VPE by setting C_SW1
	 */
	mfc0	v0, CP0_CAUSE
	ori	v0, v0, C_SW1
	mtc0	v0, CP0_CAUSE
0:
	/*
	 * This test should really never branch but
	 * let's be prudent here.  Having atomized
	 * the shared register modifications, we can
	 * now EVPE, and must do so before interrupts
	 * are potentially re-enabled.
	 */
	andi	a1, a1, MVPCONTROL_EVP
	beqz	a1, 1f
	evpe
1:
	/* We know that TCStatus.IXMT should be set from above */
	xori	a0, a0, TCSTATUS_IXMT
	or	a0, a0, v1
	mtc0	a0, CP0_TCSTATUS
	_ehb
	.set	mips0
#endif /* CONFIG_MIPS_MT_SMTC */
	LONG_L	v1, PT_EPC(sp)
	MTC0	v1, CP0_EPC
	LONG_L	$31, PT_R31(sp)
	LONG_L	$28, PT_R28(sp)
	LONG_L	$25, PT_R25(sp)
#ifdef CONFIG_64BIT
	LONG_L	$8, PT_R8(sp)
	LONG_L	$9, PT_R9(sp)
#endif
	LONG_L	$7, PT_R7(sp)
	LONG_L	$6, PT_R6(sp)
	LONG_L	$5, PT_R5(sp)
	LONG_L	$4, PT_R4(sp)
	LONG_L	$3, PT_R3(sp)
	LONG_L	$2, PT_R2(sp)
	.set	pop
	.endm

	.macro	RESTORE_SP_AND_RET
	LONG_L	sp, PT_R29(sp)
	.set	mips3
	eret
	.set	mips0
	.endm
#endif
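
/*
 * Full tear-down: RESTORE_ALL reloads every register saved by SAVE_ALL
 * but stays in the kernel; RESTORE_ALL_AND_RET additionally returns
 * from the exception (ERET, or jr+rfe on R3000-class CPUs).
 */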
	.macro	RESTORE_SP
	LONG_L	sp, PT_R29(sp)
	.endm

	.macro	RESTORE_ALL
	RESTORE_TEMP
	RESTORE_STATIC
	RESTORE_AT
	RESTORE_SOME
	RESTORE_SP
	.endm

	.macro	RESTORE_ALL_AND_RET
	RESTORE_TEMP
	RESTORE_STATIC
	RESTORE_AT
	RESTORE_SOME
	RESTORE_SP_AND_RET
	.endm

/*
 * Move to kernel mode and disable interrupts.
 * Set the cp0 enable bit as a sign that we're running on the kernel
 * stack.
 */
	.macro	CLI
#if !defined(CONFIG_MIPS_MT_SMTC)
	mfc0	t0, CP0_STATUS
	li	t1, ST0_CU0 | STATMASK
	or	t0, t1
	xori	t0, STATMASK
	mtc0	t0, CP0_STATUS
#else /* CONFIG_MIPS_MT_SMTC */
	/*
	 * For SMTC, we need to set privilege
	 * and disable interrupts only for the
	 * current TC, using the TCStatus register.
	 */
	mfc0	t0, CP0_TCSTATUS
	/* Fortunately CU 0 is in the same place in both registers */
	/* Set TCU0, TMX, TKSU (for later inversion) and IXMT */
	li	t1, ST0_CU0 | 0x08001c00
	or	t0, t1
	/* Clear TKSU, leave IXMT */
	xori	t0, 0x00001800
	mtc0	t0, CP0_TCSTATUS
	_ehb
	/* We need to leave the global IE bit set, but clear EXL... */
	mfc0	t0, CP0_STATUS
	ori	t0, ST0_EXL | ST0_ERL
	xori	t0, ST0_EXL | ST0_ERL
	mtc0	t0, CP0_STATUS
#endif /* CONFIG_MIPS_MT_SMTC */
	irq_disable_hazard
	.endm

/*
 * Move to kernel mode and enable interrupts.
 * Set the cp0 enable bit as a sign that we're running on the kernel
 * stack.
 */
	.macro	STI
#if !defined(CONFIG_MIPS_MT_SMTC)
	mfc0	t0, CP0_STATUS
	li	t1, ST0_CU0 | STATMASK
	or	t0, t1
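	/* STATMASK & ~1 spares bit 0, so the interrupt-enable bit stays set */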
	xori	t0, STATMASK & ~1
	mtc0	t0, CP0_STATUS
#else /* CONFIG_MIPS_MT_SMTC */
	/*
	 * For SMTC, we need to set privilege
	 * and enable interrupts only for the
	 * current TC, using the TCStatus register.
	 */
	_ehb
	mfc0	t0, CP0_TCSTATUS
	/* Fortunately CU 0 is in the same place in both registers */
	/* Set TCU0, TKSU (for later inversion) and IXMT */
	li	t1, ST0_CU0 | 0x08001c00
	or	t0, t1
	/* Clear TKSU *and* IXMT */
	xori	t0, 0x00001c00
	mtc0	t0, CP0_TCSTATUS
	_ehb
	/* We need to leave the global IE bit set, but clear EXL... */
	mfc0	t0, CP0_STATUS
	ori	t0, ST0_EXL
	xori	t0, ST0_EXL
	mtc0	t0, CP0_STATUS
	/* irq_enable_hazard below should expand to EHB for 24K/34K cpus */
#endif /* CONFIG_MIPS_MT_SMTC */
	irq_enable_hazard
	.endm

/*
 * Just move to kernel mode and leave interrupts as they are.  Note
 * that for the R3000 this means copying the previous enable from IEp.
 * Set the cp0 enable bit as a sign that we're running on the kernel
 * stack.
 */
	.macro	KMODE
#ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * This gets baroque in SMTC.  We want to
	 * protect the non-atomic clearing of EXL
	 * with DMT/EMT, but we don't want to take
	 * an interrupt while DMT is still in effect.
	 */
	/* KMODE gets invoked from both reorder and noreorder code */
	.set	push
	.set	mips32r2
	.set	noreorder
	mfc0	v0, CP0_TCSTATUS
	andi	v1, v0, TCSTATUS_IXMT
	ori	v0, TCSTATUS_IXMT
	mtc0	v0, CP0_TCSTATUS
	_ehb
	DMT	2				# dmt v0
	/*
	 * We don't know a priori if ra is "live"
	 */
	move	t0, ra
	jal	mips_ihb
	 nop	/* delay slot */
	move	ra, t0
#endif /* CONFIG_MIPS_MT_SMTC */
	mfc0	t0, CP0_STATUS
	li	t1, ST0_CU0 | (STATMASK & ~1)
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
	andi	t2, t0, ST0_IEP
	srl	t2, 2
	or	t0, t2
#endif
	or	t0, t1
	xori	t0, STATMASK & ~1
	mtc0	t0, CP0_STATUS
#ifdef CONFIG_MIPS_MT_SMTC
	_ehb
	andi	v0, v0, VPECONTROL_TE
	beqz	v0, 2f
	 nop	/* delay slot */
	emt
2:
	mfc0	v0, CP0_TCSTATUS
	/* Clear IXMT, then OR in previous value */
	ori	v0, TCSTATUS_IXMT
	xori	v0, TCSTATUS_IXMT
	or	v0, v1, v0
	mtc0	v0, CP0_TCSTATUS
	/*
	 * irq_disable_hazard below should expand to EHB
	 * on 24K/34K CPUs
	 */
	.set	pop
#endif /* CONFIG_MIPS_MT_SMTC */
	irq_disable_hazard
	.endm

#endif /* _ASM_STACKFRAME_H */