/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 99, 2001 Ralf Baechle
 * Copyright (C) 1994, 1995, 1996 Paul M. Antoine.
 * Copyright (C) 1999 Silicon Graphics, Inc.
 * Copyright (C) 2007 Maciej W. Rozycki
 */
#ifndef _ASM_STACKFRAME_H
#define _ASM_STACKFRAME_H

#include <linux/threads.h>

#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/mipsregs.h>
#include <asm/asm-offsets.h>
/*
 * For SMTC kernel, global IE should be left set, and interrupts
 * controlled exclusively via IXMT.
 */
#ifdef CONFIG_MIPS_MT_SMTC
#define STATMASK 0x1e
#elif defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
#define STATMASK 0x3f
#else
#define STATMASK 0x1f
#endif

#ifdef CONFIG_MIPS_MT_SMTC
#include <asm/mipsmtregs.h>
#endif /* CONFIG_MIPS_MT_SMTC */
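
/*
 * $1 is the assembler temporary (AT); ".set noat" stops the
 * assembler from silently generating code that uses it while we
 * save and restore it by hand.
 */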
        .macro  SAVE_AT
        .set    push
        .set    noat
        LONG_S  $1, PT_R1(sp)
        .set    pop
        .endm
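
/*
 * Save the caller-clobbered temporaries together with the
 * multiply/divide state.  On SmartMIPS, successive mflhxu reads
 * shift LO, HI and the extra ACX accumulator bits out in turn.
 * On 64-bit kernels $8 and $9 are argument registers and are
 * saved by SAVE_SOME instead.
 */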
        .macro  SAVE_TEMP
#ifdef CONFIG_CPU_HAS_SMARTMIPS
        mflhxu  v1
        LONG_S  v1, PT_LO(sp)
        mflhxu  v1
        LONG_S  v1, PT_HI(sp)
        mflhxu  v1
        LONG_S  v1, PT_ACX(sp)
#else
        mfhi    v1
        LONG_S  v1, PT_HI(sp)
        mflo    v1
        LONG_S  v1, PT_LO(sp)
#endif
#ifdef CONFIG_32BIT
        LONG_S  $8, PT_R8(sp)
        LONG_S  $9, PT_R9(sp)
#endif
        LONG_S  $10, PT_R10(sp)
        LONG_S  $11, PT_R11(sp)
        LONG_S  $12, PT_R12(sp)
        LONG_S  $13, PT_R13(sp)
        LONG_S  $14, PT_R14(sp)
        LONG_S  $15, PT_R15(sp)
        LONG_S  $24, PT_R24(sp)
        .endm
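
/*
 * Save the callee-saved registers: s0-s7 ($16-$23) and fp ($30).
 * Only paths that may context-switch or examine the full register
 * set need these.
 */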
        .macro  SAVE_STATIC
        LONG_S  $16, PT_R16(sp)
        LONG_S  $17, PT_R17(sp)
        LONG_S  $18, PT_R18(sp)
        LONG_S  $19, PT_R19(sp)
        LONG_S  $20, PT_R20(sp)
        LONG_S  $21, PT_R21(sp)
        LONG_S  $22, PT_R22(sp)
        LONG_S  $23, PT_R23(sp)
        LONG_S  $30, PT_R30(sp)
        .endm
#ifdef CONFIG_SMP
#ifdef CONFIG_MIPS_MT_SMTC
#define PTEBASE_SHIFT   19      /* TCBIND */
#else
#define PTEBASE_SHIFT   23      /* CONTEXT */
#endif
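
/*
 * kernelsp[] holds one saved kernel stack pointer per CPU (per TC
 * on SMTC).  The upper bits of CP0 Context (PTEBase), or of TCBind
 * on SMTC, encode the CPU/TC number; shifting them down by
 * PTEBASE_SHIFT yields a byte offset into the array.
 */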
        .macro  get_saved_sp    /* SMP variation */
#ifdef CONFIG_MIPS_MT_SMTC
        mfc0    k0, CP0_TCBIND
#else
        MFC0    k0, CP0_CONTEXT
#endif
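/*
 * Without SYM32, a 64-bit address cannot be formed with a single
 * lui; it is assembled from 16-bit pieces (%highest, %higher, %hi,
 * and finally %lo in the load below) with shifts in between.
 */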
#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
        lui     k1, %hi(kernelsp)
#else
        lui     k1, %highest(kernelsp)
        daddiu  k1, %higher(kernelsp)
        dsll    k1, 16
        daddiu  k1, %hi(kernelsp)
        dsll    k1, 16
#endif
        LONG_SRL  k0, PTEBASE_SHIFT
        LONG_ADDU k1, k0
        LONG_L  k1, %lo(kernelsp)(k1)
        .endm
        .macro  set_saved_sp stackp temp temp2
#ifdef CONFIG_MIPS_MT_SMTC
        mfc0    \temp, CP0_TCBIND
#else
        MFC0    \temp, CP0_CONTEXT
#endif
        LONG_SRL  \temp, PTEBASE_SHIFT
        LONG_S  \stackp, kernelsp(\temp)
        .endm
#else
        .macro  get_saved_sp    /* Uniprocessor variation */
#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
        lui     k1, %hi(kernelsp)
#else
        lui     k1, %highest(kernelsp)
        daddiu  k1, %higher(kernelsp)
        dsll    k1, k1, 16
        daddiu  k1, %hi(kernelsp)
        dsll    k1, k1, 16
#endif
        LONG_L  k1, %lo(kernelsp)(k1)
        .endm

        .macro  set_saved_sp stackp temp temp2
        LONG_S  \stackp, kernelsp
        .endm
#endif
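
/*
 * Save the scratch registers and switch to the kernel stack.  The
 * CU0 bit of Status doubles as the "already on the kernel stack"
 * flag: shifting it (bit 28) left by 3 moves it into the sign bit,
 * so bltz skips the stack switch when the trap came from kernel
 * mode.
 */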
        .macro  SAVE_SOME
        .set    push
        .set    noat
        .set    reorder
        mfc0    k0, CP0_STATUS
        sll     k0, 3           /* extract cu0 bit */
        .set    noreorder
        bltz    k0, 8f
         move   k1, sp
        .set    reorder
        /* Called from user mode, new stack. */
        get_saved_sp
#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
8:      move    k0, sp
        PTR_SUBU sp, k1, PT_SIZE
#else
        .set    at=k0
8:      PTR_SUBU k1, PT_SIZE
        .set    noat
        move    k0, sp
        move    sp, k1
#endif
        LONG_S  k0, PT_R29(sp)
        LONG_S  $3, PT_R3(sp)
        /*
         * You might think that you don't need to save $0,
         * but the FPU emulator and gdb remote debug stub
         * need it to operate correctly.
         */
        LONG_S  $0, PT_R0(sp)
        mfc0    v1, CP0_STATUS
        LONG_S  $2, PT_R2(sp)
        LONG_S  v1, PT_STATUS(sp)
#ifdef CONFIG_MIPS_MT_SMTC
        /*
         * Ideally, these instructions would be shuffled in
         * to cover the pipeline delay.
         */
        .set    mips32
        mfc0    v1, CP0_TCSTATUS
        .set    mips0
        LONG_S  v1, PT_TCSTATUS(sp)
#endif /* CONFIG_MIPS_MT_SMTC */
        LONG_S  $4, PT_R4(sp)
        mfc0    v1, CP0_CAUSE
        LONG_S  $5, PT_R5(sp)
        LONG_S  v1, PT_CAUSE(sp)
        LONG_S  $6, PT_R6(sp)
        MFC0    v1, CP0_EPC
        LONG_S  $7, PT_R7(sp)
#ifdef CONFIG_64BIT
        LONG_S  $8, PT_R8(sp)
        LONG_S  $9, PT_R9(sp)
#endif
        LONG_S  v1, PT_EPC(sp)
        LONG_S  $25, PT_R25(sp)
        LONG_S  $28, PT_R28(sp)
        LONG_S  $31, PT_R31(sp)
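        /*
         * The ori/xori pair with _THREAD_MASK rounds sp down to
         * the THREAD_SIZE boundary, leaving the thread_info
         * pointer in $28 (gp).
         */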
        ori     $28, sp, _THREAD_MASK
        xori    $28, _THREAD_MASK
        .set    pop
        .endm

        .macro  SAVE_ALL
        SAVE_SOME
        SAVE_AT
        SAVE_TEMP
        SAVE_STATIC
        .endm
        .macro  RESTORE_AT
        .set    push
        .set    noat
        LONG_L  $1, PT_R1(sp)
        .set    pop
        .endm

        .macro  RESTORE_TEMP
#ifdef CONFIG_CPU_HAS_SMARTMIPS
        LONG_L  $24, PT_ACX(sp)
        mtlhx   $24
        LONG_L  $24, PT_HI(sp)
        mtlhx   $24
        LONG_L  $24, PT_LO(sp)
        mtlhx   $24
#else
        LONG_L  $24, PT_LO(sp)
        mtlo    $24
        LONG_L  $24, PT_HI(sp)
        mthi    $24
#endif
#ifdef CONFIG_32BIT
        LONG_L  $8, PT_R8(sp)
        LONG_L  $9, PT_R9(sp)
#endif
        LONG_L  $10, PT_R10(sp)
        LONG_L  $11, PT_R11(sp)
        LONG_L  $12, PT_R12(sp)
        LONG_L  $13, PT_R13(sp)
        LONG_L  $14, PT_R14(sp)
        LONG_L  $15, PT_R15(sp)
        LONG_L  $24, PT_R24(sp)
        .endm

        .macro  RESTORE_STATIC
        LONG_L  $16, PT_R16(sp)
        LONG_L  $17, PT_R17(sp)
        LONG_L  $18, PT_R18(sp)
        LONG_L  $19, PT_R19(sp)
        LONG_L  $20, PT_R20(sp)
        LONG_L  $21, PT_R21(sp)
        LONG_L  $22, PT_R22(sp)
        LONG_L  $23, PT_R23(sp)
        LONG_L  $30, PT_R30(sp)
        .endm
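
/*
 * The R3000/TX39 return path differs from later CPUs: there is no
 * eret, so the exception is unwound with a jr to the saved EPC,
 * with an rfe in the delay slot to pop the Status KU/IE stack.
 */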
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)

        .macro  RESTORE_SOME
        .set    push
        .set    reorder
        .set    noat
        mfc0    a0, CP0_STATUS
        li      v1, 0xff00
        ori     a0, STATMASK
        xori    a0, STATMASK
        mtc0    a0, CP0_STATUS
        and     a0, v1
        LONG_L  v0, PT_STATUS(sp)
        nor     v1, $0, v1
        and     v0, v1
        or      v0, a0
        mtc0    v0, CP0_STATUS
        LONG_L  $31, PT_R31(sp)
        LONG_L  $28, PT_R28(sp)
        LONG_L  $25, PT_R25(sp)
        LONG_L  $7, PT_R7(sp)
        LONG_L  $6, PT_R6(sp)
        LONG_L  $5, PT_R5(sp)
        LONG_L  $4, PT_R4(sp)
        LONG_L  $3, PT_R3(sp)
        LONG_L  $2, PT_R2(sp)
        .set    pop
        .endm

        .macro  RESTORE_SP_AND_RET
        .set    push
        .set    noreorder
        LONG_L  k0, PT_EPC(sp)
        LONG_L  sp, PT_R29(sp)
        jr      k0
         rfe
        .set    pop
        .endm

#else
        .macro  RESTORE_SOME
        .set    push
        .set    reorder
        .set    noat
#ifdef CONFIG_MIPS_MT_SMTC
        .set    mips32r2
        /*
         * We need to make sure the read-modify-write
         * of Status below isn't perturbed by an interrupt
         * or cross-TC access, so we need to do at least a DMT,
         * protected by an interrupt-inhibit.  But setting IXMT
         * also creates a few-cycle window where an IPI could
         * be queued and not be detected before potentially
         * returning to a WAIT or user-mode loop.  It must be
         * replayed.
         *
         * We're in the middle of a context switch, and
         * we can't dispatch it directly without trashing
         * some registers, so we'll try to detect this unlikely
         * case and program a software interrupt in the VPE,
         * as would be done for a cross-VPE IPI.  To accommodate
         * the handling of that case, we're doing a DVPE instead
         * of just a DMT here to protect against other threads.
         * This is a lot of cruft to cover a tiny window.
         * If you can find a better design, implement it!
         */
        mfc0    v0, CP0_TCSTATUS
        ori     v0, TCSTATUS_IXMT
        mtc0    v0, CP0_TCSTATUS
        _ehb
        DVPE    5                       # dvpe a1
        jal     mips_ihb
#endif /* CONFIG_MIPS_MT_SMTC */
        mfc0    a0, CP0_STATUS
        ori     a0, STATMASK
        xori    a0, STATMASK
        mtc0    a0, CP0_STATUS
        li      v1, 0xff00
        and     a0, v1
        LONG_L  v0, PT_STATUS(sp)
        nor     v1, $0, v1
        and     v0, v1
        or      v0, a0
        mtc0    v0, CP0_STATUS
#ifdef CONFIG_MIPS_MT_SMTC
        /*
         * Only after EXL/ERL have been restored to Status can we
         * restore TCStatus.IXMT.
         */
        LONG_L  v1, PT_TCSTATUS(sp)
        _ehb
        mfc0    a0, CP0_TCSTATUS
        andi    v1, TCSTATUS_IXMT
        bnez    v1, 0f

        /*
         * We'd like to detect any IPIs queued in the tiny window
         * above and request a software interrupt to service them
         * when we ERET.
         *
         * Computing the offset into the IPIQ array of the executing
         * TC's IPI queue in-line would be tedious.  We use part of
         * the TCContext register to hold 16 bits of offset that we
         * can add in-line to find the queue head.
         */
        mfc0    v0, CP0_TCCONTEXT
        la      a2, IPIQ
        srl     v0, v0, 16
        addu    a2, a2, v0
        LONG_L  v0, 0(a2)
        beqz    v0, 0f
        /*
         * If we have a queue, provoke dispatch within the VPE by
         * setting C_SW1.
         */
        mfc0    v0, CP0_CAUSE
        ori     v0, v0, C_SW1
        mtc0    v0, CP0_CAUSE
0:
        /*
         * This test should really never branch but
         * let's be prudent here.  Having atomized
         * the shared register modifications, we can
         * now EVPE, and must do so before interrupts
         * are potentially re-enabled.
         */
        andi    a1, a1, MVPCONTROL_EVP
        beqz    a1, 1f
        evpe
1:
        /* We know that TCStatus.IXMT should be set from above */
        xori    a0, a0, TCSTATUS_IXMT
        or      a0, a0, v1
        mtc0    a0, CP0_TCSTATUS
        _ehb
        .set    mips0
#endif /* CONFIG_MIPS_MT_SMTC */
        LONG_L  v1, PT_EPC(sp)
        MTC0    v1, CP0_EPC
        LONG_L  $31, PT_R31(sp)
        LONG_L  $28, PT_R28(sp)
        LONG_L  $25, PT_R25(sp)
#ifdef CONFIG_64BIT
        LONG_L  $8, PT_R8(sp)
        LONG_L  $9, PT_R9(sp)
#endif
        LONG_L  $7, PT_R7(sp)
        LONG_L  $6, PT_R6(sp)
        LONG_L  $5, PT_R5(sp)
        LONG_L  $4, PT_R4(sp)
        LONG_L  $3, PT_R3(sp)
        LONG_L  $2, PT_R2(sp)
        .set    pop
        .endm
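
/*
 * eret returns to the EPC restored above and clears Status.EXL in
 * a single step; unlike the jr/rfe pair on the R3000 it has no
 * delay slot.
 */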
        .macro  RESTORE_SP_AND_RET
        LONG_L  sp, PT_R29(sp)
        .set    mips3
        eret
        .set    mips0
        .endm

#endif
        .macro  RESTORE_SP
        LONG_L  sp, PT_R29(sp)
        .endm
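
/*
 * Note the restore order: RESTORE_SOME runs last because it
 * clobbers a0, v0 and v1 while rebuilding Status, and sp itself
 * is only reloaded once nothing more is needed from the frame.
 */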
        .macro  RESTORE_ALL
        RESTORE_TEMP
        RESTORE_STATIC
        RESTORE_AT
        RESTORE_SOME
        RESTORE_SP
        .endm

        .macro  RESTORE_ALL_AND_RET
        RESTORE_TEMP
        RESTORE_STATIC
        RESTORE_AT
        RESTORE_SOME
        RESTORE_SP_AND_RET
        .endm
/*
 * Move to kernel mode and disable interrupts.
 * Set cp0 enable bit as sign that we're running on the kernel stack
 */
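/*
 * CLI, STI and KMODE share one idiom: OR Status with CU0 plus the
 * whole STATMASK, forcing those bits to a known-set state, then
 * XOR off the bits that must end up clear.  Setting before
 * clearing makes the xori immediates independent of the bits'
 * previous values.
 */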
        .macro  CLI
#if !defined(CONFIG_MIPS_MT_SMTC)
        mfc0    t0, CP0_STATUS
        li      t1, ST0_CU0 | STATMASK
        or      t0, t1
        xori    t0, STATMASK
        mtc0    t0, CP0_STATUS
#else /* CONFIG_MIPS_MT_SMTC */
        /*
         * For SMTC, we need to set privilege
         * and disable interrupts only for the
         * current TC, using the TCStatus register.
         */
        mfc0    t0, CP0_TCSTATUS
        /* Fortunately CU 0 is in the same place in both registers */
        /* Set TCU0, TMX, TKSU (for later inversion) and IXMT */
        li      t1, ST0_CU0 | 0x08001c00
        or      t0, t1
        /* Clear TKSU, leave IXMT */
        xori    t0, 0x00001800
        mtc0    t0, CP0_TCSTATUS
        _ehb
        /* We need to leave the global IE bit set, but clear EXL... */
        mfc0    t0, CP0_STATUS
        ori     t0, ST0_EXL | ST0_ERL
        xori    t0, ST0_EXL | ST0_ERL
        mtc0    t0, CP0_STATUS
#endif /* CONFIG_MIPS_MT_SMTC */
        irq_disable_hazard
        .endm
/*
 * Move to kernel mode and enable interrupts.
 * Set cp0 enable bit as sign that we're running on the kernel stack
 */
        .macro  STI
#if !defined(CONFIG_MIPS_MT_SMTC)
        mfc0    t0, CP0_STATUS
        li      t1, ST0_CU0 | STATMASK
        or      t0, t1
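        /*
         * Masking bit 0 (IE) out of the xori leaves IE set by the
         * or above, so interrupts come back on.
         */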
        xori    t0, STATMASK & ~1
        mtc0    t0, CP0_STATUS
#else /* CONFIG_MIPS_MT_SMTC */
        /*
         * For SMTC, we need to set privilege
         * and enable interrupts only for the
         * current TC, using the TCStatus register.
         */
        _ehb
        mfc0    t0, CP0_TCSTATUS
        /* Fortunately CU 0 is in the same place in both registers */
        /* Set TCU0, TKSU (for later inversion) and IXMT */
        li      t1, ST0_CU0 | 0x08001c00
        or      t0, t1
        /* Clear TKSU *and* IXMT */
        xori    t0, 0x00001c00
        mtc0    t0, CP0_TCSTATUS
        _ehb
        /* We need to leave the global IE bit set, but clear EXL... */
        mfc0    t0, CP0_STATUS
        ori     t0, ST0_EXL
        xori    t0, ST0_EXL
        mtc0    t0, CP0_STATUS
        /* irq_enable_hazard below should expand to EHB for 24K/34K CPUs */
#endif /* CONFIG_MIPS_MT_SMTC */
        irq_enable_hazard
        .endm
/*
 * Just move to kernel mode and leave interrupts as they are.  Note
 * for the R3000 this means copying the previous enable from IEp.
 * Set cp0 enable bit as sign that we're running on the kernel stack
 */
        .macro  KMODE
#ifdef CONFIG_MIPS_MT_SMTC
        /*
         * This gets baroque in SMTC.  We want to
         * protect the non-atomic clearing of EXL
         * with DMT/EMT, but we don't want to take
         * an interrupt while DMT is still in effect.
         */

        /* KMODE gets invoked from both reorder and noreorder code */
        .set    push
        .set    mips32r2
        .set    noreorder
        mfc0    v0, CP0_TCSTATUS
        andi    v1, v0, TCSTATUS_IXMT
        ori     v0, TCSTATUS_IXMT
        mtc0    v0, CP0_TCSTATUS
        _ehb
        DMT     2                       # dmt   v0
        /*
         * We don't know a priori if ra is "live"
         */
        move    t0, ra
        jal     mips_ihb
        nop     /* delay slot */
        move    ra, t0
#endif /* CONFIG_MIPS_MT_SMTC */
        mfc0    t0, CP0_STATUS
        li      t1, ST0_CU0 | (STATMASK & ~1)
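        /*
         * On R3000-class CPUs the previous interrupt enable lives
         * in IEp (bit 2); copy it down to the IEc position (bit 0)
         * so the new Status value preserves it.
         */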
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
        andi    t2, t0, ST0_IEP
        srl     t2, 2
        or      t0, t2
#endif
        or      t0, t1
        xori    t0, STATMASK & ~1
        mtc0    t0, CP0_STATUS
#ifdef CONFIG_MIPS_MT_SMTC
        _ehb
        andi    v0, v0, VPECONTROL_TE
        beqz    v0, 2f
        nop     /* delay slot */
        emt
2:
        mfc0    v0, CP0_TCSTATUS
        /* Clear IXMT, then OR in previous value */
        ori     v0, TCSTATUS_IXMT
        xori    v0, TCSTATUS_IXMT
        or      v0, v1, v0
        mtc0    v0, CP0_TCSTATUS
        /*
         * irq_disable_hazard below should expand to EHB
         * on 24K/34K CPUs
         */
        .set    pop
#endif /* CONFIG_MIPS_MT_SMTC */
        irq_disable_hazard
        .endm

#endif /* _ASM_STACKFRAME_H */