/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 99, 2001 Ralf Baechle
 * Copyright (C) 1994, 1995, 1996 Paul M. Antoine.
 * Copyright (C) 1999 Silicon Graphics, Inc.
 * Copyright (C) 2007 Maciej W. Rozycki
 */
#ifndef _ASM_STACKFRAME_H
#define _ASM_STACKFRAME_H

#include <linux/threads.h>

#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/mipsregs.h>
#include <asm/asm-offsets.h>

/*
 * For SMTC kernel, global IE should be left set, and interrupts
 * controlled exclusively via IXMT.
 */
#ifdef CONFIG_MIPS_MT_SMTC
#define STATMASK 0x1e
#elif defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
#define STATMASK 0x3f
#else
#define STATMASK 0x1f
#endif

#ifdef CONFIG_MIPS_MT_SMTC
#include <asm/mipsmtregs.h>
#endif /* CONFIG_MIPS_MT_SMTC */
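
/*
 * Save the assembler temporary, $1 ($at).  ".set noat" keeps the
 * assembler from using $1 itself while we store it.
 */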
        .macro  SAVE_AT
        .set    push
        .set    noat
        LONG_S  $1, PT_R1(sp)
        .set    pop
        .endm
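
/*
 * Save the caller-saved temporaries not covered by SAVE_SOME, plus the
 * HI/LO multiply state.  On SmartMIPS each mflhxu pops one word of the
 * LO/HI/ACX accumulator chain, so three of them save LO, HI and ACX.
 */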
        .macro  SAVE_TEMP
#ifdef CONFIG_CPU_HAS_SMARTMIPS
        mflhxu  v1
        LONG_S  v1, PT_LO(sp)
        mflhxu  v1
        LONG_S  v1, PT_HI(sp)
        mflhxu  v1
        LONG_S  v1, PT_ACX(sp)
#else
        mfhi    v1
        LONG_S  v1, PT_HI(sp)
        mflo    v1
        LONG_S  v1, PT_LO(sp)
#endif
#ifdef CONFIG_32BIT
        LONG_S  $8, PT_R8(sp)
        LONG_S  $9, PT_R9(sp)
#endif
        LONG_S  $10, PT_R10(sp)
        LONG_S  $11, PT_R11(sp)
        LONG_S  $12, PT_R12(sp)
        LONG_S  $13, PT_R13(sp)
        LONG_S  $14, PT_R14(sp)
        LONG_S  $15, PT_R15(sp)
        LONG_S  $24, PT_R24(sp)
        .endm
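
/*
 * Save the callee-saved registers: s0-s7 ($16-$23) and s8/fp ($30).
 */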
        .macro  SAVE_STATIC
        LONG_S  $16, PT_R16(sp)
        LONG_S  $17, PT_R17(sp)
        LONG_S  $18, PT_R18(sp)
        LONG_S  $19, PT_R19(sp)
        LONG_S  $20, PT_R20(sp)
        LONG_S  $21, PT_R21(sp)
        LONG_S  $22, PT_R22(sp)
        LONG_S  $23, PT_R23(sp)
        LONG_S  $30, PT_R30(sp)
        .endm

#ifdef CONFIG_SMP
#ifdef CONFIG_MIPS_MT_SMTC
#define PTEBASE_SHIFT   19      /* TCBIND */
#else
#define PTEBASE_SHIFT   23      /* CONTEXT */
#endif
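
/*
 * On SMP each CPU's saved kernel stack pointer lives in the kernelsp[]
 * array.  The CPU (or TC) number is recovered from the CP0 Context (or
 * TCBind) register, where it was planted at boot, and PTEBASE_SHIFT
 * turns it into an offset into that array.
 */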
        .macro  get_saved_sp    /* SMP variation */
#ifdef CONFIG_MIPS_MT_SMTC
        mfc0    k0, CP0_TCBIND
#else
        MFC0    k0, CP0_CONTEXT
#endif
#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
        lui     k1, %hi(kernelsp)
#else
        lui     k1, %highest(kernelsp)
        daddiu  k1, %higher(kernelsp)
        dsll    k1, 16
        daddiu  k1, %hi(kernelsp)
        dsll    k1, 16
#endif
        LONG_SRL  k0, PTEBASE_SHIFT
        LONG_ADDU k1, k0
        LONG_L  k1, %lo(kernelsp)(k1)
        .endm

        .macro  set_saved_sp stackp temp temp2
#ifdef CONFIG_MIPS_MT_SMTC
        mfc0    \temp, CP0_TCBIND
#else
        MFC0    \temp, CP0_CONTEXT
#endif
        LONG_SRL  \temp, PTEBASE_SHIFT
        LONG_S  \stackp, kernelsp(\temp)
        .endm
#else
        .macro  get_saved_sp    /* Uniprocessor variation */
#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
        lui     k1, %hi(kernelsp)
#else
        lui     k1, %highest(kernelsp)
        daddiu  k1, %higher(kernelsp)
        dsll    k1, k1, 16
        daddiu  k1, %hi(kernelsp)
        dsll    k1, k1, 16
#endif
        LONG_L  k1, %lo(kernelsp)(k1)
        .endm

        .macro  set_saved_sp stackp temp temp2
        LONG_S  \stackp, kernelsp
        .endm
#endif
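
/*
 * Save the scratch state the kernel always needs.  Shifting Status left
 * by 3 moves the CU0 bit (bit 28) into the sign bit, so a single bltz
 * decides whether we trapped from kernel mode (keep the current sp) or
 * from user mode (pick up this CPU's kernel stack via get_saved_sp).
 */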
        .macro  SAVE_SOME
        .set    push
        .set    noat
        .set    reorder
        mfc0    k0, CP0_STATUS
        sll     k0, 3           /* extract cu0 bit */
        .set    noreorder
        bltz    k0, 8f
         move   k1, sp
        .set    reorder
        /* Called from user mode, new stack. */
        get_saved_sp
#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
8:      move    k0, sp
        PTR_SUBU sp, k1, PT_SIZE
#else
        .set    at=k0
8:      PTR_SUBU k1, PT_SIZE
        .set    noat
        move    k0, sp
        move    sp, k1
#endif
        LONG_S  k0, PT_R29(sp)
        LONG_S  $3, PT_R3(sp)
        /*
         * You might think that you don't need to save $0,
         * but the FPU emulator and gdb remote debug stub
         * need it to operate correctly.
         */
        LONG_S  $0, PT_R0(sp)
        mfc0    v1, CP0_STATUS
        LONG_S  $2, PT_R2(sp)
        LONG_S  v1, PT_STATUS(sp)
#ifdef CONFIG_MIPS_MT_SMTC
        /*
         * Ideally, these instructions would be shuffled in
         * to cover the pipeline delay.
         */
        .set    mips32
        mfc0    v1, CP0_TCSTATUS
        .set    mips0
        LONG_S  v1, PT_TCSTATUS(sp)
#endif /* CONFIG_MIPS_MT_SMTC */
        LONG_S  $4, PT_R4(sp)
        mfc0    v1, CP0_CAUSE
        LONG_S  $5, PT_R5(sp)
        LONG_S  v1, PT_CAUSE(sp)
        LONG_S  $6, PT_R6(sp)
        MFC0    v1, CP0_EPC
        LONG_S  $7, PT_R7(sp)
#ifdef CONFIG_64BIT
        LONG_S  $8, PT_R8(sp)
        LONG_S  $9, PT_R9(sp)
#endif
        LONG_S  v1, PT_EPC(sp)
        LONG_S  $25, PT_R25(sp)
        LONG_S  $28, PT_R28(sp)
        LONG_S  $31, PT_R31(sp)
        ori     $28, sp, _THREAD_MASK
        xori    $28, _THREAD_MASK
#ifdef CONFIG_CPU_CAVIUM_OCTEON
        .set    mips64
        pref    0, 0($28)       /* Prefetch the current pointer */
        pref    0, PT_R31(sp)   /* Prefetch $31 (ra) */
        /*
         * The Octeon multiplier state is affected by general multiply
         * instructions.  It must be saved before any kernel code might
         * corrupt it.
         */
        jal     octeon_mult_save
        LONG_L  v1, 0($28)      /* Load the current pointer */
        /* Restore $31 (ra) that was changed by the jal */
        LONG_L  ra, PT_R31(sp)
        pref    0, 0(v1)        /* Prefetch the current thread */
#endif
        .set    pop
        .endm

        .macro  SAVE_ALL
        SAVE_SOME
        SAVE_AT
        SAVE_TEMP
        SAVE_STATIC
        .endm

        .macro  RESTORE_AT
        .set    push
        .set    noat
        LONG_L  $1, PT_R1(sp)
        .set    pop
        .endm
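
/*
 * The RESTORE_* macros mirror their SAVE_* counterparts.  On SmartMIPS
 * each mtlhx pushes one word back into the ACX/HI/LO chain, so writing
 * ACX, HI, then LO rebuilds the full accumulator.
 */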
        .macro  RESTORE_TEMP
#ifdef CONFIG_CPU_HAS_SMARTMIPS
        LONG_L  $24, PT_ACX(sp)
        mtlhx   $24
        LONG_L  $24, PT_HI(sp)
        mtlhx   $24
        LONG_L  $24, PT_LO(sp)
        mtlhx   $24
#else
        LONG_L  $24, PT_LO(sp)
        mtlo    $24
        LONG_L  $24, PT_HI(sp)
        mthi    $24
#endif
#ifdef CONFIG_32BIT
        LONG_L  $8, PT_R8(sp)
        LONG_L  $9, PT_R9(sp)
#endif
        LONG_L  $10, PT_R10(sp)
        LONG_L  $11, PT_R11(sp)
        LONG_L  $12, PT_R12(sp)
        LONG_L  $13, PT_R13(sp)
        LONG_L  $14, PT_R14(sp)
        LONG_L  $15, PT_R15(sp)
        LONG_L  $24, PT_R24(sp)
        .endm

        .macro  RESTORE_STATIC
        LONG_L  $16, PT_R16(sp)
        LONG_L  $17, PT_R17(sp)
        LONG_L  $18, PT_R18(sp)
        LONG_L  $19, PT_R19(sp)
        LONG_L  $20, PT_R20(sp)
        LONG_L  $21, PT_R21(sp)
        LONG_L  $22, PT_R22(sp)
        LONG_L  $23, PT_R23(sp)
        LONG_L  $30, PT_R30(sp)
        .endm
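
/*
 * R3000-class CPUs have no ERET; their return path restores EPC into k0
 * and uses "jr" with an "rfe" in the branch delay slot.  Later CPUs use
 * the eret-based variant further below.
 */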
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)

        .macro  RESTORE_SOME
        .set    push
        .set    reorder
        .set    noat
        mfc0    a0, CP0_STATUS
        li      v1, 0xff00
        ori     a0, STATMASK
        xori    a0, STATMASK
        mtc0    a0, CP0_STATUS
        and     a0, v1
        LONG_L  v0, PT_STATUS(sp)
        nor     v1, $0, v1
        and     v0, v1
        or      v0, a0
        mtc0    v0, CP0_STATUS
        LONG_L  $31, PT_R31(sp)
        LONG_L  $28, PT_R28(sp)
        LONG_L  $25, PT_R25(sp)
        LONG_L  $7, PT_R7(sp)
        LONG_L  $6, PT_R6(sp)
        LONG_L  $5, PT_R5(sp)
        LONG_L  $4, PT_R4(sp)
        LONG_L  $3, PT_R3(sp)
        LONG_L  $2, PT_R2(sp)
        .set    pop
        .endm

        .macro  RESTORE_SP_AND_RET
        .set    push
        .set    noreorder
        LONG_L  k0, PT_EPC(sp)
        LONG_L  sp, PT_R29(sp)
        jr      k0
         rfe
        .set    pop
        .endm

#else
        .macro  RESTORE_SOME
        .set    push
        .set    reorder
        .set    noat
#ifdef CONFIG_MIPS_MT_SMTC
        .set    mips32r2
        /*
         * We need to make sure the read-modify-write
         * of Status below isn't perturbed by an interrupt
         * or cross-TC access, so we need to do at least a DMT,
         * protected by an interrupt-inhibit.  But setting IXMT
         * also creates a few-cycle window where an IPI could
         * be queued and not be detected before potentially
         * returning to a WAIT or user-mode loop.  It must be
         * replayed.
         *
         * We're in the middle of a context switch, and
         * we can't dispatch it directly without trashing
         * some registers, so we'll try to detect this unlikely
         * case and program a software interrupt in the VPE,
         * as would be done for a cross-VPE IPI.  To accommodate
         * the handling of that case, we're doing a DVPE instead
         * of just a DMT here to protect against other threads.
         * This is a lot of cruft to cover a tiny window.
         * If you can find a better design, implement it!
         */
        mfc0    v0, CP0_TCSTATUS
        ori     v0, TCSTATUS_IXMT
        mtc0    v0, CP0_TCSTATUS
        _ehb
        DVPE    5                       # dvpe a1
        jal     mips_ihb
#endif /* CONFIG_MIPS_MT_SMTC */
#ifdef CONFIG_CPU_CAVIUM_OCTEON
        /* Restore the Octeon multiplier state */
        jal     octeon_mult_restore
#endif
        mfc0    a0, CP0_STATUS
        ori     a0, STATMASK
        xori    a0, STATMASK
        mtc0    a0, CP0_STATUS
        li      v1, 0xff00
        and     a0, v1
        LONG_L  v0, PT_STATUS(sp)
        nor     v1, $0, v1
        and     v0, v1
        or      v0, a0
        mtc0    v0, CP0_STATUS
#ifdef CONFIG_MIPS_MT_SMTC
        /*
         * Only after EXL/ERL have been restored to Status can we
         * restore TCStatus.IXMT.
         */
        LONG_L  v1, PT_TCSTATUS(sp)
        _ehb
        mfc0    a0, CP0_TCSTATUS
        andi    v1, TCSTATUS_IXMT
        bnez    v1, 0f
        /*
         * We'd like to detect any IPIs queued in the tiny window
         * above and request a software interrupt to service them
         * when we ERET.
         *
         * Computing the offset into the IPIQ array of the executing
         * TC's IPI queue in-line would be tedious.  We use part of
         * the TCContext register to hold 16 bits of offset that we
         * can add in-line to find the queue head.
         */
        mfc0    v0, CP0_TCCONTEXT
        la      a2, IPIQ
        srl     v0, v0, 16
        addu    a2, a2, v0
        LONG_L  v0, 0(a2)
        beqz    v0, 0f
        /*
         * If we have a queue, provoke dispatch within the VPE by
         * setting C_SW1.
         */
        mfc0    v0, CP0_CAUSE
        ori     v0, v0, C_SW1
        mtc0    v0, CP0_CAUSE
0:
        /*
         * This test should really never branch but
         * let's be prudent here.  Having atomized
         * the shared register modifications, we can
         * now EVPE, and must do so before interrupts
         * are potentially re-enabled.
         */
        andi    a1, a1, MVPCONTROL_EVP
        beqz    a1, 1f
        evpe
1:
        /* We know that TCStatus.IXMT should be set from above */
        xori    a0, a0, TCSTATUS_IXMT
        or      a0, a0, v1
        mtc0    a0, CP0_TCSTATUS
        _ehb
        .set    mips0
#endif /* CONFIG_MIPS_MT_SMTC */
        LONG_L  v1, PT_EPC(sp)
        MTC0    v1, CP0_EPC
        LONG_L  $31, PT_R31(sp)
        LONG_L  $28, PT_R28(sp)
        LONG_L  $25, PT_R25(sp)
#ifdef CONFIG_64BIT
        LONG_L  $8, PT_R8(sp)
        LONG_L  $9, PT_R9(sp)
#endif
        LONG_L  $7, PT_R7(sp)
        LONG_L  $6, PT_R6(sp)
        LONG_L  $5, PT_R5(sp)
        LONG_L  $4, PT_R4(sp)
        LONG_L  $3, PT_R3(sp)
        LONG_L  $2, PT_R2(sp)
        .set    pop
        .endm

        .macro  RESTORE_SP_AND_RET
        LONG_L  sp, PT_R29(sp)
        .set    mips3
        eret
        .set    mips0
        .endm

#endif

        .macro  RESTORE_SP
        LONG_L  sp, PT_R29(sp)
        .endm

        .macro  RESTORE_ALL
        RESTORE_TEMP
        RESTORE_STATIC
        RESTORE_AT
        RESTORE_SOME
        RESTORE_SP
        .endm

        .macro  RESTORE_ALL_AND_RET
        RESTORE_TEMP
        RESTORE_STATIC
        RESTORE_AT
        RESTORE_SOME
        RESTORE_SP_AND_RET
        .endm

/*
 * Move to kernel mode and disable interrupts.
 * Set the cp0 enable bit as a sign that we're running on the kernel stack.
 */
        .macro  CLI
#if !defined(CONFIG_MIPS_MT_SMTC)
        mfc0    t0, CP0_STATUS
        li      t1, ST0_CU0 | STATMASK
        or      t0, t1
        xori    t0, STATMASK
        mtc0    t0, CP0_STATUS
#else /* CONFIG_MIPS_MT_SMTC */
        /*
         * For SMTC, we need to set privilege
         * and disable interrupts only for the
         * current TC, using the TCStatus register.
         */
        mfc0    t0, CP0_TCSTATUS
        /* Fortunately CU 0 is in the same place in both registers */
        /* Set TCU0, TMX, TKSU (for later inversion) and IXMT */
        li      t1, ST0_CU0 | 0x08001c00
        or      t0, t1
        /* Clear TKSU, leave IXMT */
        xori    t0, 0x00001800
        mtc0    t0, CP0_TCSTATUS
        _ehb
        /* We need to leave the global IE bit set, but clear EXL... */
        mfc0    t0, CP0_STATUS
        ori     t0, ST0_EXL | ST0_ERL
        xori    t0, ST0_EXL | ST0_ERL
        mtc0    t0, CP0_STATUS
#endif /* CONFIG_MIPS_MT_SMTC */
        irq_disable_hazard
        .endm

/*
 * Move to kernel mode and enable interrupts.
 * Set the cp0 enable bit as a sign that we're running on the kernel stack.
 */
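/*
 * Bit 0 of Status is the interrupt-enable bit, so masking with
 * "STATMASK & ~1" below clears EXL, ERL and KSU while leaving IE set.
 */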
        .macro  STI
#if !defined(CONFIG_MIPS_MT_SMTC)
        mfc0    t0, CP0_STATUS
        li      t1, ST0_CU0 | STATMASK
        or      t0, t1
        xori    t0, STATMASK & ~1
        mtc0    t0, CP0_STATUS
#else /* CONFIG_MIPS_MT_SMTC */
        /*
         * For SMTC, we need to set privilege
         * and enable interrupts only for the
         * current TC, using the TCStatus register.
         */
        _ehb
        mfc0    t0, CP0_TCSTATUS
        /* Fortunately CU 0 is in the same place in both registers */
        /* Set TCU0, TKSU (for later inversion) and IXMT */
        li      t1, ST0_CU0 | 0x08001c00
        or      t0, t1
        /* Clear TKSU *and* IXMT */
        xori    t0, 0x00001c00
        mtc0    t0, CP0_TCSTATUS
        _ehb
        /* We need to leave the global IE bit set, but clear EXL... */
        mfc0    t0, CP0_STATUS
        ori     t0, ST0_EXL
        xori    t0, ST0_EXL
        mtc0    t0, CP0_STATUS
        /* irq_enable_hazard below should expand to EHB for 24K/34K CPUs */
#endif /* CONFIG_MIPS_MT_SMTC */
        irq_enable_hazard
        .endm

/*
 * Just move to kernel mode and leave interrupts as they are.  Note
 * for the R3000 this means copying the previous enable from IEp.
 * Set the cp0 enable bit as a sign that we're running on the kernel stack.
 */
        .macro  KMODE
#ifdef CONFIG_MIPS_MT_SMTC
        /*
         * This gets baroque in SMTC.  We want to
         * protect the non-atomic clearing of EXL
         * with DMT/EMT, but we don't want to take
         * an interrupt while DMT is still in effect.
         */
        /* KMODE gets invoked from both reorder and noreorder code */
        .set    push
        .set    mips32r2
        .set    noreorder
        mfc0    v0, CP0_TCSTATUS
        andi    v1, v0, TCSTATUS_IXMT
        ori     v0, TCSTATUS_IXMT
        mtc0    v0, CP0_TCSTATUS
        _ehb
        DMT     2                       # dmt v0
        /*
         * We don't know a priori if ra is "live"
         */
        move    t0, ra
        jal     mips_ihb
         nop    /* delay slot */
        move    ra, t0
#endif /* CONFIG_MIPS_MT_SMTC */
        mfc0    t0, CP0_STATUS
        li      t1, ST0_CU0 | (STATMASK & ~1)
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
        andi    t2, t0, ST0_IEP
        srl     t2, 2
        or      t0, t2
#endif
        or      t0, t1
        xori    t0, STATMASK & ~1
        mtc0    t0, CP0_STATUS
#ifdef CONFIG_MIPS_MT_SMTC
        _ehb
        andi    v0, v0, VPECONTROL_TE
        beqz    v0, 2f
         nop    /* delay slot */
        emt
2:
        mfc0    v0, CP0_TCSTATUS
        /* Clear IXMT, then OR in previous value */
        ori     v0, TCSTATUS_IXMT
        xori    v0, TCSTATUS_IXMT
        or      v0, v1, v0
        mtc0    v0, CP0_TCSTATUS
        /*
         * irq_disable_hazard below should expand to EHB
         * on 24K/34K CPUs
         */
        .set    pop
#endif /* CONFIG_MIPS_MT_SMTC */
        irq_disable_hazard
        .endm

#endif /* _ASM_STACKFRAME_H */