stackframe.h

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 99, 2001 Ralf Baechle
 * Copyright (C) 1994, 1995, 1996 Paul M. Antoine.
 * Copyright (C) 1999 Silicon Graphics, Inc.
 * Copyright (C) 2007 Maciej W. Rozycki
 */
#ifndef _ASM_STACKFRAME_H
#define _ASM_STACKFRAME_H

#include <linux/threads.h>

#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/mipsregs.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>

/*
 * For the SMTC kernel, global IE should be left set, and interrupts
 * controlled exclusively via IXMT.
 */
#ifdef CONFIG_MIPS_MT_SMTC
#define STATMASK 0x1e
#elif defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
#define STATMASK 0x3f
#else
#define STATMASK 0x1f
#endif
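/*
 * For reference: on R3000-class CPUs, 0x3f covers the KUo/IEo, KUp/IEp
 * and KUc/IEc pairs in Status bits 5..0.  On other CPUs, 0x1f covers
 * KSU (bits 4:3), ERL (2), EXL (1) and IE (0); the SMTC mask of 0x1e
 * is the same minus IE, which must stay set as noted above.
 */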
#ifdef CONFIG_MIPS_MT_SMTC
#include <asm/mipsmtregs.h>
#endif /* CONFIG_MIPS_MT_SMTC */

	.macro SAVE_AT
	.set push
	.set noat
	LONG_S $1, PT_R1(sp)
	.set pop
	.endm
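/*
 * SAVE_TEMP spills the caller-saved temporaries plus the multiply/divide
 * state.  With SmartMIPS the extended accumulator is drained via three
 * mflhxu ops (LO, HI, ACX); otherwise HI and LO are read early and only
 * stored several instructions later, presumably to hide the HI/LO read
 * latency of some cores.
 */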
	.macro SAVE_TEMP
#ifdef CONFIG_CPU_HAS_SMARTMIPS
	mflhxu v1
	LONG_S v1, PT_LO(sp)
	mflhxu v1
	LONG_S v1, PT_HI(sp)
	mflhxu v1
	LONG_S v1, PT_ACX(sp)
#else
	mfhi v1
#endif
#ifdef CONFIG_32BIT
	LONG_S $8, PT_R8(sp)
	LONG_S $9, PT_R9(sp)
#endif
	LONG_S $10, PT_R10(sp)
	LONG_S $11, PT_R11(sp)
	LONG_S $12, PT_R12(sp)
#ifndef CONFIG_CPU_HAS_SMARTMIPS
	LONG_S v1, PT_HI(sp)
	mflo v1
#endif
	LONG_S $13, PT_R13(sp)
	LONG_S $14, PT_R14(sp)
	LONG_S $15, PT_R15(sp)
	LONG_S $24, PT_R24(sp)
#ifndef CONFIG_CPU_HAS_SMARTMIPS
	LONG_S v1, PT_LO(sp)
#endif
#ifdef CONFIG_CPU_CAVIUM_OCTEON
	/*
	 * The Octeon multiplier state is affected by general
	 * multiply instructions.  It must be saved before
	 * kernel code might corrupt it.
	 */
	jal octeon_mult_save
#endif
	.endm
	.macro SAVE_STATIC
	LONG_S $16, PT_R16(sp)
	LONG_S $17, PT_R17(sp)
	LONG_S $18, PT_R18(sp)
	LONG_S $19, PT_R19(sp)
	LONG_S $20, PT_R20(sp)
	LONG_S $21, PT_R21(sp)
	LONG_S $22, PT_R22(sp)
	LONG_S $23, PT_R23(sp)
	LONG_S $30, PT_R30(sp)
	.endm

#ifdef CONFIG_SMP
	.macro get_saved_sp	/* SMP variation */
	ASM_CPUID_MFC0 k0, ASM_SMP_CPUID_REG
#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
	lui k1, %hi(kernelsp)
#else
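	/*
	 * A full 64-bit address has to be built piecewise: %highest
	 * loads bits 63..48, %higher adds bits 47..32, and after two
	 * 16-bit shifts %hi and the %lo in the final load supply the
	 * remaining bits of kernelsp's address.
	 */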
	lui k1, %highest(kernelsp)
	daddiu k1, %higher(kernelsp)
	dsll k1, 16
	daddiu k1, %hi(kernelsp)
	dsll k1, 16
#endif
	LONG_SRL k0, SMP_CPUID_PTRSHIFT
	LONG_ADDU k1, k0
	LONG_L k1, %lo(kernelsp)(k1)
	.endm

	.macro set_saved_sp stackp temp temp2
	ASM_CPUID_MFC0 \temp, ASM_SMP_CPUID_REG
	LONG_SRL \temp, SMP_CPUID_PTRSHIFT
	LONG_S \stackp, kernelsp(\temp)
	.endm
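/*
 * Roughly equivalent C, as a sketch (kernelsp is a per-CPU array of
 * saved kernel stack pointers defined elsewhere in the kernel;
 * SMP_CPUID_PTRSHIFT turns the CPUID register value into a
 * pointer-sized array offset):
 *
 *	extern unsigned long kernelsp[NR_CPUS];
 *	k1 = kernelsp[cpu];		// get_saved_sp
 *	kernelsp[cpu] = stackp;		// set_saved_sp
 *
 * The uniprocessor variants below do the same with a single variable.
 */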
#else /* !CONFIG_SMP */
	.macro get_saved_sp	/* Uniprocessor variation */
#ifdef CONFIG_CPU_JUMP_WORKAROUNDS
	/*
	 * Clear the BTB (branch target buffer) and forbid the RAS
	 * (return address stack) to work around the out-of-order
	 * issue in Loongson2F via its diagnostic register.
	 */
	move k0, ra
	jal 1f
	nop
1:	jal 1f
	nop
1:	jal 1f
	nop
1:	jal 1f
	nop
1:	move ra, k0
	li k0, 3
	mtc0 k0, $22
#endif /* CONFIG_CPU_JUMP_WORKAROUNDS */
#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
	lui k1, %hi(kernelsp)
#else
	lui k1, %highest(kernelsp)
	daddiu k1, %higher(kernelsp)
	dsll k1, k1, 16
	daddiu k1, %hi(kernelsp)
	dsll k1, k1, 16
#endif
	LONG_L k1, %lo(kernelsp)(k1)
	.endm

	.macro set_saved_sp stackp temp temp2
	LONG_S \stackp, kernelsp
	.endm
#endif
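/*
 * SAVE_SOME saves the registers an exception handler is allowed to
 * clobber.  Status.CU0 (bit 28) doubles as the "already on the kernel
 * stack" marker (see CLI/STI/KMODE below): shifting it left by 3 moves
 * it into the sign bit, so bltz takes the branch when we trapped from
 * kernel mode and sp can be reused as-is.
 */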
	.macro SAVE_SOME
	.set push
	.set noat
	.set reorder
	mfc0 k0, CP0_STATUS
	sll k0, 3		/* extract cu0 bit */
	.set noreorder
	bltz k0, 8f
	move k1, sp
	.set reorder
	/* Called from user mode, new stack. */
	get_saved_sp
#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
8:	move k0, sp
	PTR_SUBU sp, k1, PT_SIZE
#else
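	/*
	 * With the DADDI workarounds enabled, PTR_SUBU expands to a
	 * multi-instruction sequence that needs $at as scratch, so
	 * point $at at k0, which is free to clobber here.
	 */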
	.set at=k0
8:	PTR_SUBU k1, PT_SIZE
	.set noat
	move k0, sp
	move sp, k1
#endif
	LONG_S k0, PT_R29(sp)
	LONG_S $3, PT_R3(sp)
	/*
	 * You might think that you don't need to save $0,
	 * but the FPU emulator and gdb remote debug stub
	 * need it to operate correctly.
	 */
	LONG_S $0, PT_R0(sp)
	mfc0 v1, CP0_STATUS
	LONG_S $2, PT_R2(sp)
	LONG_S v1, PT_STATUS(sp)
#ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * Ideally, these instructions would be shuffled in
	 * to cover the pipeline delay.
	 */
	.set mips32
	mfc0 k0, CP0_TCSTATUS
	.set mips0
	LONG_S k0, PT_TCSTATUS(sp)
#endif /* CONFIG_MIPS_MT_SMTC */
	LONG_S $4, PT_R4(sp)
	mfc0 v1, CP0_CAUSE
	LONG_S $5, PT_R5(sp)
	LONG_S v1, PT_CAUSE(sp)
	LONG_S $6, PT_R6(sp)
	MFC0 v1, CP0_EPC
	LONG_S $7, PT_R7(sp)
#ifdef CONFIG_64BIT
	LONG_S $8, PT_R8(sp)
	LONG_S $9, PT_R9(sp)
#endif
	LONG_S v1, PT_EPC(sp)
	LONG_S $25, PT_R25(sp)
	LONG_S $28, PT_R28(sp)
	LONG_S $31, PT_R31(sp)
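	/*
	 * thread_info sits at the bottom of the kernel stack, so
	 * rounding sp down to a _THREAD_SIZE boundary (ori then xori
	 * with _THREAD_MASK) leaves the current thread_info pointer
	 * in gp ($28).
	 */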
	ori $28, sp, _THREAD_MASK
	xori $28, _THREAD_MASK
#ifdef CONFIG_CPU_CAVIUM_OCTEON
	.set mips64
	pref 0, 0($28)		/* Prefetch the current pointer */
#endif
	.set pop
	.endm

	.macro SAVE_ALL
	SAVE_SOME
	SAVE_AT
	SAVE_TEMP
	SAVE_STATIC
	.endm

	.macro RESTORE_AT
	.set push
	.set noat
	LONG_L $1, PT_R1(sp)
	.set pop
	.endm
	.macro RESTORE_TEMP
#ifdef CONFIG_CPU_CAVIUM_OCTEON
	/* Restore the Octeon multiplier state */
	jal octeon_mult_restore
#endif
#ifdef CONFIG_CPU_HAS_SMARTMIPS
	LONG_L $24, PT_ACX(sp)
	mtlhx $24
	LONG_L $24, PT_HI(sp)
	mtlhx $24
	LONG_L $24, PT_LO(sp)
	mtlhx $24
#else
	LONG_L $24, PT_LO(sp)
	mtlo $24
	LONG_L $24, PT_HI(sp)
	mthi $24
#endif
#ifdef CONFIG_32BIT
	LONG_L $8, PT_R8(sp)
	LONG_L $9, PT_R9(sp)
#endif
	LONG_L $10, PT_R10(sp)
	LONG_L $11, PT_R11(sp)
	LONG_L $12, PT_R12(sp)
	LONG_L $13, PT_R13(sp)
	LONG_L $14, PT_R14(sp)
	LONG_L $15, PT_R15(sp)
	LONG_L $24, PT_R24(sp)
	.endm
	.macro RESTORE_STATIC
	LONG_L $16, PT_R16(sp)
	LONG_L $17, PT_R17(sp)
	LONG_L $18, PT_R18(sp)
	LONG_L $19, PT_R19(sp)
	LONG_L $20, PT_R20(sp)
	LONG_L $21, PT_R21(sp)
	LONG_L $22, PT_R22(sp)
	LONG_L $23, PT_R23(sp)
	LONG_L $30, PT_R30(sp)
	.endm

#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)

	.macro RESTORE_SOME
	.set push
	.set reorder
	.set noat
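	/*
	 * The interrupt mask may have been modified while we were in
	 * the kernel, so keep the live IM bits (0xff00) from the
	 * current Status and merge everything else from the saved
	 * Status on the stack.
	 */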
	mfc0 a0, CP0_STATUS
	li v1, 0xff00
	ori a0, STATMASK
	xori a0, STATMASK
	mtc0 a0, CP0_STATUS
	and a0, v1
	LONG_L v0, PT_STATUS(sp)
	nor v1, $0, v1
	and v0, v1
	or v0, a0
	mtc0 v0, CP0_STATUS
	LONG_L $31, PT_R31(sp)
	LONG_L $28, PT_R28(sp)
	LONG_L $25, PT_R25(sp)
	LONG_L $7, PT_R7(sp)
	LONG_L $6, PT_R6(sp)
	LONG_L $5, PT_R5(sp)
	LONG_L $4, PT_R4(sp)
	LONG_L $3, PT_R3(sp)
	LONG_L $2, PT_R2(sp)
	.set pop
	.endm

	.macro RESTORE_SP_AND_RET
	.set push
	.set noreorder
	LONG_L k0, PT_EPC(sp)
	LONG_L sp, PT_R29(sp)
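	/*
	 * rfe sits in the jr delay slot so the KUp/IEp bits pop back
	 * into KUc/IEc atomically with the return to the interrupted
	 * instruction.
	 */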
	jr k0
	rfe
	.set pop
	.endm

#else
	.macro RESTORE_SOME
	.set push
	.set reorder
	.set noat
#ifdef CONFIG_MIPS_MT_SMTC
	.set mips32r2
	/*
	 * We need to make sure the read-modify-write
	 * of Status below isn't perturbed by an interrupt
	 * or cross-TC access, so we need to do at least a DMT,
	 * protected by an interrupt-inhibit.  But setting IXMT
	 * also creates a few-cycle window where an IPI could
	 * be queued and not be detected before potentially
	 * returning to a WAIT or user-mode loop.  It must be
	 * replayed.
	 *
	 * We're in the middle of a context switch, and
	 * we can't dispatch it directly without trashing
	 * some registers, so we'll try to detect this unlikely
	 * case and program a software interrupt in the VPE,
	 * as would be done for a cross-VPE IPI.  To accommodate
	 * the handling of that case, we're doing a DVPE instead
	 * of just a DMT here to protect against other threads.
	 * This is a lot of cruft to cover a tiny window.
	 * If you can find a better design, implement it!
	 */
	mfc0 v0, CP0_TCSTATUS
	ori v0, TCSTATUS_IXMT
	mtc0 v0, CP0_TCSTATUS
	_ehb
	DVPE 5			# dvpe a1
	jal mips_ihb
#endif /* CONFIG_MIPS_MT_SMTC */
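	/*
	 * As in the R3000 variant above: keep the live IM bits from
	 * the current Status and take everything else from the saved
	 * Status.
	 */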
	mfc0 a0, CP0_STATUS
	ori a0, STATMASK
	xori a0, STATMASK
	mtc0 a0, CP0_STATUS
	li v1, 0xff00
	and a0, v1
	LONG_L v0, PT_STATUS(sp)
	nor v1, $0, v1
	and v0, v1
	or v0, a0
	mtc0 v0, CP0_STATUS
#ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * Only after EXL/ERL have been restored to Status can we
	 * restore TCStatus.IXMT.
	 */
	LONG_L v1, PT_TCSTATUS(sp)
	_ehb
	mfc0 a0, CP0_TCSTATUS
	andi v1, TCSTATUS_IXMT
	bnez v1, 0f
	/*
	 * We'd like to detect any IPIs queued in the tiny window
	 * above and request a software interrupt to service them
	 * when we ERET.
	 *
	 * Computing the offset into the IPIQ array of the executing
	 * TC's IPI queue in-line would be tedious.  We use part of
	 * the TCContext register to hold 16 bits of offset that we
	 * can add in-line to find the queue head.
	 */
	mfc0 v0, CP0_TCCONTEXT
	la a2, IPIQ
	srl v0, v0, 16
	addu a2, a2, v0
	LONG_L v0, 0(a2)
	beqz v0, 0f
	/*
	 * If we have a queue, provoke dispatch within the VPE by
	 * setting C_SW1
	 */
	mfc0 v0, CP0_CAUSE
	ori v0, v0, C_SW1
	mtc0 v0, CP0_CAUSE
0:
	/*
	 * This test should really never branch but
	 * let's be prudent here.  Having atomized
	 * the shared register modifications, we can
	 * now EVPE, and must do so before interrupts
	 * are potentially re-enabled.
	 */
	andi a1, a1, MVPCONTROL_EVP
	beqz a1, 1f
	evpe
1:
	/* We know that TCStatus.IXMT should be set from above */
	xori a0, a0, TCSTATUS_IXMT
	or a0, a0, v1
	mtc0 a0, CP0_TCSTATUS
	_ehb
	.set mips0
#endif /* CONFIG_MIPS_MT_SMTC */
	LONG_L v1, PT_EPC(sp)
	MTC0 v1, CP0_EPC
	LONG_L $31, PT_R31(sp)
	LONG_L $28, PT_R28(sp)
	LONG_L $25, PT_R25(sp)
#ifdef CONFIG_64BIT
	LONG_L $8, PT_R8(sp)
	LONG_L $9, PT_R9(sp)
#endif
	LONG_L $7, PT_R7(sp)
	LONG_L $6, PT_R6(sp)
	LONG_L $5, PT_R5(sp)
	LONG_L $4, PT_R4(sp)
	LONG_L $3, PT_R3(sp)
	LONG_L $2, PT_R2(sp)
	.set pop
	.endm

	.macro RESTORE_SP_AND_RET
	LONG_L sp, PT_R29(sp)
	.set mips3
	eret
	.set mips0
	.endm
#endif
	.macro RESTORE_SP
	LONG_L sp, PT_R29(sp)
	.endm

	.macro RESTORE_ALL
	RESTORE_TEMP
	RESTORE_STATIC
	RESTORE_AT
	RESTORE_SOME
	RESTORE_SP
	.endm

	.macro RESTORE_ALL_AND_RET
	RESTORE_TEMP
	RESTORE_STATIC
	RESTORE_AT
	RESTORE_SOME
	RESTORE_SP_AND_RET
	.endm

/*
 * Move to kernel mode and disable interrupts.
 * Set the cp0 enable bit as a sign that we're running on the kernel
 * stack.
 */
	.macro CLI
#if !defined(CONFIG_MIPS_MT_SMTC)
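	/*
	 * or-ing in ST0_CU0 | STATMASK and then xori-ing STATMASK back
	 * out forces the STATMASK bits (including IE) to zero whatever
	 * their previous state, while leaving CU0 set as the
	 * kernel-stack marker.
	 */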
	mfc0 t0, CP0_STATUS
	li t1, ST0_CU0 | STATMASK
	or t0, t1
	xori t0, STATMASK
	mtc0 t0, CP0_STATUS
#else /* CONFIG_MIPS_MT_SMTC */
	/*
	 * For SMTC, we need to set the privilege level
	 * and disable interrupts only for the
	 * current TC, using the TCStatus register.
	 */
	mfc0 t0, CP0_TCSTATUS
	/* Fortunately CU 0 is in the same place in both registers */
	/* Set TCU0, TMX, TKSU (for later inversion) and IXMT */
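	/*
	 * For reference, 0x08001c00 decodes as TMX (bit 27), TKSU
	 * (bits 12:11) and IXMT (bit 10); the 0x00001800 below is
	 * just the TKSU field.
	 */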
	li t1, ST0_CU0 | 0x08001c00
	or t0, t1
	/* Clear TKSU, leave IXMT */
	xori t0, 0x00001800
	mtc0 t0, CP0_TCSTATUS
	_ehb
	/* We need to leave the global IE bit set, but clear EXL... */
	mfc0 t0, CP0_STATUS
	ori t0, ST0_EXL | ST0_ERL
	xori t0, ST0_EXL | ST0_ERL
	mtc0 t0, CP0_STATUS
#endif /* CONFIG_MIPS_MT_SMTC */
	irq_disable_hazard
	.endm

/*
 * Move to kernel mode and enable interrupts.
 * Set the cp0 enable bit as a sign that we're running on the kernel
 * stack.
 */
	.macro STI
#if !defined(CONFIG_MIPS_MT_SMTC)
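	/*
	 * Same trick as in CLI, except the xori mask of STATMASK & ~1
	 * leaves bit 0 (IE) set, so interrupts come back on.
	 */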
	mfc0 t0, CP0_STATUS
	li t1, ST0_CU0 | STATMASK
	or t0, t1
	xori t0, STATMASK & ~1
	mtc0 t0, CP0_STATUS
#else /* CONFIG_MIPS_MT_SMTC */
	/*
	 * For SMTC, we need to set the privilege level
	 * and enable interrupts only for the
	 * current TC, using the TCStatus register.
	 */
	_ehb
	mfc0 t0, CP0_TCSTATUS
	/* Fortunately CU 0 is in the same place in both registers */
	/* Set TCU0, TKSU (for later inversion) and IXMT */
	li t1, ST0_CU0 | 0x08001c00
	or t0, t1
	/* Clear TKSU *and* IXMT */
	xori t0, 0x00001c00
	mtc0 t0, CP0_TCSTATUS
	_ehb
	/* We need to leave the global IE bit set, but clear EXL... */
	mfc0 t0, CP0_STATUS
	ori t0, ST0_EXL
	xori t0, ST0_EXL
	mtc0 t0, CP0_STATUS
	/* irq_enable_hazard below should expand to EHB for 24K/34K CPUs */
#endif /* CONFIG_MIPS_MT_SMTC */
	irq_enable_hazard
	.endm

/*
 * Just move to kernel mode and leave interrupts as they are.  Note
 * that for the R3000 this means copying the previous enable from IEp.
 * Set the cp0 enable bit as a sign that we're running on the kernel
 * stack.
 */
	.macro KMODE
#ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * This gets baroque in SMTC.  We want to
	 * protect the non-atomic clearing of EXL
	 * with DMT/EMT, but we don't want to take
	 * an interrupt while DMT is still in effect.
	 */

	/* KMODE gets invoked from both reorder and noreorder code */
	.set push
	.set mips32r2
	.set noreorder
	mfc0 v0, CP0_TCSTATUS
	andi v1, v0, TCSTATUS_IXMT
	ori v0, TCSTATUS_IXMT
	mtc0 v0, CP0_TCSTATUS
	_ehb
	DMT 2			# dmt v0
	/*
	 * We don't know a priori if ra is "live"
	 */
	move t0, ra
	jal mips_ihb
	nop			/* delay slot */
	move ra, t0
#endif /* CONFIG_MIPS_MT_SMTC */
	mfc0 t0, CP0_STATUS
	li t1, ST0_CU0 | (STATMASK & ~1)
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
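	/*
	 * On R3000-class CPUs the previous interrupt enable is held in
	 * IEp (bit 2); copy it down into IEc (bit 0) so the current
	 * enable state survives the mask-and-clear below.
	 */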
	andi t2, t0, ST0_IEP
	srl t2, 2
	or t0, t2
#endif
	or t0, t1
	xori t0, STATMASK & ~1
	mtc0 t0, CP0_STATUS
#ifdef CONFIG_MIPS_MT_SMTC
	_ehb
	andi v0, v0, VPECONTROL_TE
	beqz v0, 2f
	nop			/* delay slot */
	emt
2:
	mfc0 v0, CP0_TCSTATUS
	/* Clear IXMT, then OR in previous value */
	ori v0, TCSTATUS_IXMT
	xori v0, TCSTATUS_IXMT
	or v0, v1, v0
	mtc0 v0, CP0_TCSTATUS
	/*
	 * irq_disable_hazard below should expand to EHB
	 * on 24K/34K CPUs
	 */
	.set pop
#endif /* CONFIG_MIPS_MT_SMTC */
	irq_disable_hazard
	.endm

#endif /* _ASM_STACKFRAME_H */