/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 99, 2001 Ralf Baechle
 * Copyright (C) 1994, 1995, 1996 Paul M. Antoine.
 * Copyright (C) 1999 Silicon Graphics, Inc.
 * Copyright (C) 2007 Maciej W. Rozycki
 */
#ifndef _ASM_STACKFRAME_H
#define _ASM_STACKFRAME_H

#include <linux/threads.h>

#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/mipsregs.h>
#include <asm/asm-offsets.h>

/*
 * For SMTC kernel, global IE should be left set, and interrupts
 * controlled exclusively via IXMT.
 */
#ifdef CONFIG_MIPS_MT_SMTC
#define STATMASK 0x1e
#elif defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
#define STATMASK 0x3f
#else
#define STATMASK 0x1f
#endif

#ifdef CONFIG_MIPS_MT_SMTC
#include <asm/mipsmtregs.h>
#endif /* CONFIG_MIPS_MT_SMTC */
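
/*
 * Save the assembler temporary register $1 (at) into the pt_regs
 * frame. ".set noat" is required because the assembler otherwise
 * reserves at for its own synthesized instruction sequences.
 */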
	.macro	SAVE_AT
	.set	push
	.set	noat
	LONG_S	$1, PT_R1(sp)
	.set	pop
	.endm
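
/*
 * Save the caller-saved temporaries together with the multiply/divide
 * state. With SmartMIPS, each mflhxu pops one word of the extended
 * LO/HI/ACX accumulator. Without it, the mfhi/mflo reads are
 * interleaved with unrelated register stores, presumably to hide the
 * HI/LO access latency of older pipelines.
 */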
	.macro	SAVE_TEMP
#ifdef CONFIG_CPU_HAS_SMARTMIPS
	mflhxu	v1
	LONG_S	v1, PT_LO(sp)
	mflhxu	v1
	LONG_S	v1, PT_HI(sp)
	mflhxu	v1
	LONG_S	v1, PT_ACX(sp)
#else
	mfhi	v1
#endif
#ifdef CONFIG_32BIT
	LONG_S	$8, PT_R8(sp)
	LONG_S	$9, PT_R9(sp)
#endif
	LONG_S	$10, PT_R10(sp)
	LONG_S	$11, PT_R11(sp)
	LONG_S	$12, PT_R12(sp)
#ifndef CONFIG_CPU_HAS_SMARTMIPS
	LONG_S	v1, PT_HI(sp)
	mflo	v1
#endif
	LONG_S	$13, PT_R13(sp)
	LONG_S	$14, PT_R14(sp)
	LONG_S	$15, PT_R15(sp)
	LONG_S	$24, PT_R24(sp)
#ifndef CONFIG_CPU_HAS_SMARTMIPS
	LONG_S	v1, PT_LO(sp)
#endif
	.endm
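
/*
 * Save the callee-saved registers $16-$23 (s0-s7) and $30 (fp/s8),
 * which SAVE_SOME and SAVE_TEMP leave untouched.
 */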
	.macro	SAVE_STATIC
	LONG_S	$16, PT_R16(sp)
	LONG_S	$17, PT_R17(sp)
	LONG_S	$18, PT_R18(sp)
	LONG_S	$19, PT_R19(sp)
	LONG_S	$20, PT_R20(sp)
	LONG_S	$21, PT_R21(sp)
	LONG_S	$22, PT_R22(sp)
	LONG_S	$23, PT_R23(sp)
	LONG_S	$30, PT_R30(sp)
	.endm
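
/*
 * kernelsp[] holds the kernel stack pointer for each CPU (each TC on
 * SMTC). The CPU/TC number lives in the upper bits of the CONTEXT
 * (respectively TCBIND) register, placed so that shifting right by
 * PTEBASE_SHIFT yields the byte offset of this CPU's slot in the array.
 */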
#ifdef CONFIG_SMP
#ifdef CONFIG_MIPS_MT_SMTC
#define PTEBASE_SHIFT	19	/* TCBIND */
#else
#define PTEBASE_SHIFT	23	/* CONTEXT */
#endif
	.macro	get_saved_sp	/* SMP variation */
#ifdef CONFIG_MIPS_MT_SMTC
	mfc0	k0, CP0_TCBIND
#else
	MFC0	k0, CP0_CONTEXT
#endif
#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
	lui	k1, %hi(kernelsp)
#else
	lui	k1, %highest(kernelsp)
	daddiu	k1, %higher(kernelsp)
	dsll	k1, 16
	daddiu	k1, %hi(kernelsp)
	dsll	k1, 16
#endif
	LONG_SRL	k0, PTEBASE_SHIFT
	LONG_ADDU	k1, k0
	LONG_L	k1, %lo(kernelsp)(k1)
	.endm
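
/*
 * set_saved_sp records a new kernel stack pointer in this CPU's/TC's
 * kernelsp[] slot. \temp2 is accepted but unused in both variants,
 * apparently to keep the call signature uniform across configurations.
 */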
	.macro	set_saved_sp stackp temp temp2
#ifdef CONFIG_MIPS_MT_SMTC
	mfc0	\temp, CP0_TCBIND
#else
	MFC0	\temp, CP0_CONTEXT
#endif
	LONG_SRL	\temp, PTEBASE_SHIFT
	LONG_S	\stackp, kernelsp(\temp)
	.endm
#else
	.macro	get_saved_sp	/* Uniprocessor variation */
#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
	lui	k1, %hi(kernelsp)
#else
	lui	k1, %highest(kernelsp)
	daddiu	k1, %higher(kernelsp)
	dsll	k1, k1, 16
	daddiu	k1, %hi(kernelsp)
	dsll	k1, k1, 16
#endif
	LONG_L	k1, %lo(kernelsp)(k1)
	.endm

	.macro	set_saved_sp stackp temp temp2
	LONG_S	\stackp, kernelsp
	.endm
#endif
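
/*
 * Save enough state to enter C code. Status.CU0 doubles as the
 * "already on the kernel stack" flag: if it is clear we came from
 * user mode and must switch to this CPU's kernel stack via
 * get_saved_sp; if it is set we simply nest a new frame on the
 * current stack.
 */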
	.macro	SAVE_SOME
	.set	push
	.set	noat
	.set	reorder
	mfc0	k0, CP0_STATUS
	sll	k0, 3		/* extract cu0 bit */
	.set	noreorder
	bltz	k0, 8f
	 move	k1, sp
	.set	reorder
	/* Called from user mode, new stack. */
	get_saved_sp
#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
8:	move	k0, sp
	PTR_SUBU sp, k1, PT_SIZE
#else
	.set	at=k0
8:	PTR_SUBU k1, PT_SIZE
	.set	noat
	move	k0, sp
	move	sp, k1
#endif
	LONG_S	k0, PT_R29(sp)
	LONG_S	$3, PT_R3(sp)
	/*
	 * You might think that you don't need to save $0,
	 * but the FPU emulator and gdb remote debug stub
	 * need it to operate correctly
	 */
	LONG_S	$0, PT_R0(sp)
	mfc0	v1, CP0_STATUS
	LONG_S	$2, PT_R2(sp)
#ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * Ideally, these instructions would be shuffled in
	 * to cover the pipeline delay.
	 */
	.set	mips32
	mfc0	v1, CP0_TCSTATUS
	.set	mips0
	LONG_S	v1, PT_TCSTATUS(sp)
#endif /* CONFIG_MIPS_MT_SMTC */
	LONG_S	$4, PT_R4(sp)
	LONG_S	$5, PT_R5(sp)
	LONG_S	v1, PT_STATUS(sp)
	mfc0	v1, CP0_CAUSE
	LONG_S	$6, PT_R6(sp)
	LONG_S	$7, PT_R7(sp)
	LONG_S	v1, PT_CAUSE(sp)
	MFC0	v1, CP0_EPC
#ifdef CONFIG_64BIT
	LONG_S	$8, PT_R8(sp)
	LONG_S	$9, PT_R9(sp)
#endif
	LONG_S	$25, PT_R25(sp)
	LONG_S	$28, PT_R28(sp)
	LONG_S	$31, PT_R31(sp)
	LONG_S	v1, PT_EPC(sp)
	ori	$28, sp, _THREAD_MASK
	xori	$28, _THREAD_MASK
#ifdef CONFIG_CPU_CAVIUM_OCTEON
	.set	mips64
	pref	0, 0($28)	/* Prefetch the current pointer */
	pref	0, PT_R31(sp)	/* Prefetch $31 (ra) */
	/*
	 * The Octeon multiplier state is affected by general multiply
	 * instructions. It must be saved here, before any kernel code
	 * might corrupt it.
	 */
	jal	octeon_mult_save
	LONG_L	v1, 0($28)	/* Load the current pointer */
	/* Restore $31 (ra), which the jal clobbered */
	LONG_L	ra, PT_R31(sp)
	pref	0, 0(v1)	/* Prefetch the current thread */
#endif
	.set	pop
	.endm
	.macro	SAVE_ALL
	SAVE_SOME
	SAVE_AT
	SAVE_TEMP
	SAVE_STATIC
	.endm

	.macro	RESTORE_AT
	.set	push
	.set	noat
	LONG_L	$1, PT_R1(sp)
	.set	pop
	.endm
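
/*
 * Restore the temporaries and the multiply/divide state saved by
 * SAVE_TEMP. With SmartMIPS, successive mtlhx pushes rebuild the
 * ACX/HI/LO accumulator in the reverse order of the mflhxu pops above.
 */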
	.macro	RESTORE_TEMP
#ifdef CONFIG_CPU_HAS_SMARTMIPS
	LONG_L	$24, PT_ACX(sp)
	mtlhx	$24
	LONG_L	$24, PT_HI(sp)
	mtlhx	$24
	LONG_L	$24, PT_LO(sp)
	mtlhx	$24
#else
	LONG_L	$24, PT_LO(sp)
	mtlo	$24
	LONG_L	$24, PT_HI(sp)
	mthi	$24
#endif
#ifdef CONFIG_32BIT
	LONG_L	$8, PT_R8(sp)
	LONG_L	$9, PT_R9(sp)
#endif
	LONG_L	$10, PT_R10(sp)
	LONG_L	$11, PT_R11(sp)
	LONG_L	$12, PT_R12(sp)
	LONG_L	$13, PT_R13(sp)
	LONG_L	$14, PT_R14(sp)
	LONG_L	$15, PT_R15(sp)
	LONG_L	$24, PT_R24(sp)
	.endm

	.macro	RESTORE_STATIC
	LONG_L	$16, PT_R16(sp)
	LONG_L	$17, PT_R17(sp)
	LONG_L	$18, PT_R18(sp)
	LONG_L	$19, PT_R19(sp)
	LONG_L	$20, PT_R20(sp)
	LONG_L	$21, PT_R21(sp)
	LONG_L	$22, PT_R22(sp)
	LONG_L	$23, PT_R23(sp)
	LONG_L	$30, PT_R30(sp)
	.endm
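
/*
 * Two variants of the return path follow: R3000-class CPUs restore
 * the pre-exception state with rfe, executed in the delay slot of
 * the jr back to EPC, while later CPUs use eret, which reloads the
 * PC from EPC by itself.
 */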
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
	.macro	RESTORE_SOME
	.set	push
	.set	reorder
	.set	noat
	mfc0	a0, CP0_STATUS
	li	v1, 0xff00
	ori	a0, STATMASK
	xori	a0, STATMASK
	mtc0	a0, CP0_STATUS
	and	a0, v1
	LONG_L	v0, PT_STATUS(sp)
	nor	v1, $0, v1
	and	v0, v1
	or	v0, a0
	mtc0	v0, CP0_STATUS
	LONG_L	$31, PT_R31(sp)
	LONG_L	$28, PT_R28(sp)
	LONG_L	$25, PT_R25(sp)
	LONG_L	$7, PT_R7(sp)
	LONG_L	$6, PT_R6(sp)
	LONG_L	$5, PT_R5(sp)
	LONG_L	$4, PT_R4(sp)
	LONG_L	$3, PT_R3(sp)
	LONG_L	$2, PT_R2(sp)
	.set	pop
	.endm

	.macro	RESTORE_SP_AND_RET
	.set	push
	.set	noreorder
	LONG_L	k0, PT_EPC(sp)
	LONG_L	sp, PT_R29(sp)
	jr	k0
	 rfe
	.set	pop
	.endm
#else
	.macro	RESTORE_SOME
	.set	push
	.set	reorder
	.set	noat
#ifdef CONFIG_MIPS_MT_SMTC
	.set	mips32r2
	/*
	 * We need to make sure the read-modify-write
	 * of Status below isn't perturbed by an interrupt
	 * or cross-TC access, so we need to do at least a DMT,
	 * protected by an interrupt-inhibit. But setting IXMT
	 * also creates a few-cycle window where an IPI could
	 * be queued and not be detected before potentially
	 * returning to a WAIT or user-mode loop. It must be
	 * replayed.
	 *
	 * We're in the middle of a context switch, and
	 * we can't dispatch it directly without trashing
	 * some registers, so we'll try to detect this unlikely
	 * case and program a software interrupt in the VPE,
	 * as would be done for a cross-VPE IPI. To accommodate
	 * the handling of that case, we're doing a DVPE instead
	 * of just a DMT here to protect against other threads.
	 * This is a lot of cruft to cover a tiny window.
	 * If you can find a better design, implement it!
	 */
	mfc0	v0, CP0_TCSTATUS
	ori	v0, TCSTATUS_IXMT
	mtc0	v0, CP0_TCSTATUS
	_ehb
	DVPE	5			# dvpe a1
	jal	mips_ihb
#endif /* CONFIG_MIPS_MT_SMTC */
#ifdef CONFIG_CPU_CAVIUM_OCTEON
	/* Restore the Octeon multiplier state */
	jal	octeon_mult_restore
#endif
	mfc0	a0, CP0_STATUS
	ori	a0, STATMASK
	xori	a0, STATMASK
	mtc0	a0, CP0_STATUS
	li	v1, 0xff00
	and	a0, v1
	LONG_L	v0, PT_STATUS(sp)
	nor	v1, $0, v1
	and	v0, v1
	or	v0, a0
	mtc0	v0, CP0_STATUS
#ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * Only after EXL/ERL have been restored to Status can we
	 * restore TCStatus.IXMT.
	 */
	LONG_L	v1, PT_TCSTATUS(sp)
	_ehb
	mfc0	a0, CP0_TCSTATUS
	andi	v1, TCSTATUS_IXMT
	bnez	v1, 0f
	/*
	 * We'd like to detect any IPIs queued in the tiny window
	 * above and request a software interrupt to service them
	 * when we ERET.
	 *
	 * Computing the offset into the IPIQ array of the executing
	 * TC's IPI queue in-line would be tedious. We use part of
	 * the TCContext register to hold 16 bits of offset that we
	 * can add in-line to find the queue head.
	 */
	mfc0	v0, CP0_TCCONTEXT
	la	a2, IPIQ
	srl	v0, v0, 16
	addu	a2, a2, v0
	LONG_L	v0, 0(a2)
	beqz	v0, 0f
	/*
	 * If we have a queue, provoke dispatch within the VPE by setting C_SW1
	 */
	mfc0	v0, CP0_CAUSE
	ori	v0, v0, C_SW1
	mtc0	v0, CP0_CAUSE
0:
	/*
	 * This test should really never branch but
	 * let's be prudent here. Having atomized
	 * the shared register modifications, we can
	 * now EVPE, and must do so before interrupts
	 * are potentially re-enabled.
	 */
	andi	a1, a1, MVPCONTROL_EVP
	beqz	a1, 1f
	evpe
1:
	/* We know that TCStatus.IXMT should be set from above */
	xori	a0, a0, TCSTATUS_IXMT
	or	a0, a0, v1
	mtc0	a0, CP0_TCSTATUS
	_ehb
	.set	mips0
#endif /* CONFIG_MIPS_MT_SMTC */
	LONG_L	v1, PT_EPC(sp)
	MTC0	v1, CP0_EPC
	LONG_L	$31, PT_R31(sp)
	LONG_L	$28, PT_R28(sp)
	LONG_L	$25, PT_R25(sp)
#ifdef CONFIG_64BIT
	LONG_L	$8, PT_R8(sp)
	LONG_L	$9, PT_R9(sp)
#endif
	LONG_L	$7, PT_R7(sp)
	LONG_L	$6, PT_R6(sp)
	LONG_L	$5, PT_R5(sp)
	LONG_L	$4, PT_R4(sp)
	LONG_L	$3, PT_R3(sp)
	LONG_L	$2, PT_R2(sp)
	.set	pop
	.endm
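
/*
 * eret reloads the PC from EPC (written above) and clears Status.EXL;
 * unlike the R3000 jr/rfe pair it has no delay slot.
 */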
	.macro	RESTORE_SP_AND_RET
	LONG_L	sp, PT_R29(sp)
	.set	mips3
	eret
	.set	mips0
	.endm
#endif

	.macro	RESTORE_SP
	LONG_L	sp, PT_R29(sp)
	.endm

	.macro	RESTORE_ALL
	RESTORE_TEMP
	RESTORE_STATIC
	RESTORE_AT
	RESTORE_SOME
	RESTORE_SP
	.endm

	.macro	RESTORE_ALL_AND_RET
	RESTORE_TEMP
	RESTORE_STATIC
	RESTORE_AT
	RESTORE_SOME
	RESTORE_SP_AND_RET
	.endm
/*
 * Move to kernel mode and disable interrupts.
 * Set the cp0 enable bit as a sign that we're running on the kernel stack.
 */
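/*
 * Note the ori/xori idiom used here and in STI/KMODE: ori first sets
 * every STATMASK bit, so the following xori clears exactly those bits
 * (or, with STATMASK & ~1, all of them except Status.IE) regardless of
 * their previous values.
 */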
	.macro	CLI
#if !defined(CONFIG_MIPS_MT_SMTC)
	mfc0	t0, CP0_STATUS
	li	t1, ST0_CU0 | STATMASK
	or	t0, t1
	xori	t0, STATMASK
	mtc0	t0, CP0_STATUS
#else /* CONFIG_MIPS_MT_SMTC */
	/*
	 * For SMTC, we need to set privilege
	 * and disable interrupts only for the
	 * current TC, using the TCStatus register.
	 */
	mfc0	t0, CP0_TCSTATUS
	/* Fortunately CU 0 is in the same place in both registers */
	/* Set TCU0, TMX, TKSU (for later inversion) and IXMT */
	li	t1, ST0_CU0 | 0x08001c00
	or	t0, t1
	/* Clear TKSU, leave IXMT */
	xori	t0, 0x00001800
	mtc0	t0, CP0_TCSTATUS
	_ehb
	/* We need to leave the global IE bit set, but clear EXL and ERL */
	mfc0	t0, CP0_STATUS
	ori	t0, ST0_EXL | ST0_ERL
	xori	t0, ST0_EXL | ST0_ERL
	mtc0	t0, CP0_STATUS
#endif /* CONFIG_MIPS_MT_SMTC */
	irq_disable_hazard
	.endm
/*
 * Move to kernel mode and enable interrupts.
 * Set the cp0 enable bit as a sign that we're running on the kernel stack.
 */
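/*
 * STI differs from CLI only in the final mask: the xori with
 * STATMASK & ~1 clears KSU/ERL/EXL but leaves Status.IE (bit 0)
 * set, which is what actually re-enables interrupts.
 */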
	.macro	STI
#if !defined(CONFIG_MIPS_MT_SMTC)
	mfc0	t0, CP0_STATUS
	li	t1, ST0_CU0 | STATMASK
	or	t0, t1
	xori	t0, STATMASK & ~1
	mtc0	t0, CP0_STATUS
#else /* CONFIG_MIPS_MT_SMTC */
	/*
	 * For SMTC, we need to set privilege
	 * and enable interrupts only for the
	 * current TC, using the TCStatus register.
	 */
	_ehb
	mfc0	t0, CP0_TCSTATUS
	/* Fortunately CU 0 is in the same place in both registers */
	/* Set TCU0, TKSU (for later inversion) and IXMT */
	li	t1, ST0_CU0 | 0x08001c00
	or	t0, t1
	/* Clear TKSU *and* IXMT */
	xori	t0, 0x00001c00
	mtc0	t0, CP0_TCSTATUS
	_ehb
	/* We need to leave the global IE bit set, but clear EXL */
	mfc0	t0, CP0_STATUS
	ori	t0, ST0_EXL
	xori	t0, ST0_EXL
	mtc0	t0, CP0_STATUS
	/* irq_enable_hazard below should expand to EHB for 24K/34K CPUs */
#endif /* CONFIG_MIPS_MT_SMTC */
	irq_enable_hazard
	.endm
/*
 * Just move to kernel mode and leave interrupts as they are. Note
 * for the R3000 this means copying the previous enable from IEp.
 * Set the cp0 enable bit as a sign that we're running on the kernel stack.
 */
	.macro	KMODE
#ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * This gets baroque in SMTC. We want to
	 * protect the non-atomic clearing of EXL
	 * with DMT/EMT, but we don't want to take
	 * an interrupt while DMT is still in effect.
	 */

	/* KMODE gets invoked from both reorder and noreorder code */
	.set	push
	.set	mips32r2
	.set	noreorder
	mfc0	v0, CP0_TCSTATUS
	andi	v1, v0, TCSTATUS_IXMT
	ori	v0, TCSTATUS_IXMT
	mtc0	v0, CP0_TCSTATUS
	_ehb
	DMT	2			# dmt v0
	/*
	 * We don't know a priori if ra is "live"
	 */
	move	t0, ra
	jal	mips_ihb
	 nop	/* delay slot */
	move	ra, t0
#endif /* CONFIG_MIPS_MT_SMTC */
	mfc0	t0, CP0_STATUS
	li	t1, ST0_CU0 | (STATMASK & ~1)
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
	andi	t2, t0, ST0_IEP
	srl	t2, 2
	or	t0, t2
#endif
	or	t0, t1
	xori	t0, STATMASK & ~1
	mtc0	t0, CP0_STATUS
#ifdef CONFIG_MIPS_MT_SMTC
	_ehb
	andi	v0, v0, VPECONTROL_TE
	beqz	v0, 2f
	 nop	/* delay slot */
	emt
2:
	mfc0	v0, CP0_TCSTATUS
	/* Clear IXMT, then OR in previous value */
	ori	v0, TCSTATUS_IXMT
	xori	v0, TCSTATUS_IXMT
	or	v0, v1, v0
	mtc0	v0, CP0_TCSTATUS
	/*
	 * irq_disable_hazard below should expand to EHB
	 * on 24K/34K CPUs
	 */
	.set	pop
#endif /* CONFIG_MIPS_MT_SMTC */
	irq_disable_hazard
	.endm

#endif /* _ASM_STACKFRAME_H */