stackframe.h 10 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 99, 2001 Ralf Baechle
 * Copyright (C) 1994, 1995, 1996 Paul M. Antoine.
 * Copyright (C) 1999 Silicon Graphics, Inc.
 */
  10. #ifndef _ASM_STACKFRAME_H
  11. #define _ASM_STACKFRAME_H
  12. #include <linux/threads.h>
  13. #include <asm/asm.h>
  14. #include <asm/asmmacro.h>
  15. #include <asm/mipsregs.h>
  16. #include <asm/asm-offsets.h>
  17. #ifdef CONFIG_MIPS_MT_SMTC
  18. #include <asm/mipsmtregs.h>
  19. #endif /* CONFIG_MIPS_MT_SMTC */
  20. .macro SAVE_AT
  21. .set push
  22. .set noat
  23. LONG_S $1, PT_R1(sp)
  24. .set pop
  25. .endm
  26. .macro SAVE_TEMP
  27. mfhi v1
  28. #ifdef CONFIG_32BIT
  29. LONG_S $8, PT_R8(sp)
  30. LONG_S $9, PT_R9(sp)
  31. #endif
  32. LONG_S v1, PT_HI(sp)
  33. mflo v1
  34. LONG_S $10, PT_R10(sp)
  35. LONG_S $11, PT_R11(sp)
  36. LONG_S v1, PT_LO(sp)
  37. LONG_S $12, PT_R12(sp)
  38. LONG_S $13, PT_R13(sp)
  39. LONG_S $14, PT_R14(sp)
  40. LONG_S $15, PT_R15(sp)
  41. LONG_S $24, PT_R24(sp)
  42. .endm
  43. .macro SAVE_STATIC
  44. LONG_S $16, PT_R16(sp)
  45. LONG_S $17, PT_R17(sp)
  46. LONG_S $18, PT_R18(sp)
  47. LONG_S $19, PT_R19(sp)
  48. LONG_S $20, PT_R20(sp)
  49. LONG_S $21, PT_R21(sp)
  50. LONG_S $22, PT_R22(sp)
  51. LONG_S $23, PT_R23(sp)
  52. LONG_S $30, PT_R30(sp)
  53. .endm
  54. #ifdef CONFIG_SMP
  55. #ifdef CONFIG_MIPS_MT_SMTC
  56. #define PTEBASE_SHIFT 19 /* TCBIND */
  57. #else
  58. #define PTEBASE_SHIFT 23 /* CONTEXT */
  59. #endif
  60. .macro get_saved_sp /* SMP variation */
  61. #ifdef CONFIG_MIPS_MT_SMTC
  62. mfc0 k0, CP0_TCBIND
  63. #else
  64. MFC0 k0, CP0_CONTEXT
  65. #endif
  66. #if defined(CONFIG_BUILD_ELF64) || (defined(CONFIG_64BIT) && __GNUC__ < 4)
  67. lui k1, %highest(kernelsp)
  68. daddiu k1, %higher(kernelsp)
  69. dsll k1, 16
  70. daddiu k1, %hi(kernelsp)
  71. dsll k1, 16
  72. #else
  73. lui k1, %hi(kernelsp)
  74. #endif
  75. LONG_SRL k0, PTEBASE_SHIFT
  76. LONG_ADDU k1, k0
  77. LONG_L k1, %lo(kernelsp)(k1)
  78. .endm
  79. .macro set_saved_sp stackp temp temp2
  80. #ifdef CONFIG_MIPS_MT_SMTC
  81. mfc0 \temp, CP0_TCBIND
  82. #else
  83. MFC0 \temp, CP0_CONTEXT
  84. #endif
  85. LONG_SRL \temp, PTEBASE_SHIFT
  86. LONG_S \stackp, kernelsp(\temp)
  87. .endm
  88. #else
  89. .macro get_saved_sp /* Uniprocessor variation */
  90. #if defined(CONFIG_BUILD_ELF64) || (defined(CONFIG_64BIT) && __GNUC__ < 4)
  91. lui k1, %highest(kernelsp)
  92. daddiu k1, %higher(kernelsp)
  93. dsll k1, k1, 16
  94. daddiu k1, %hi(kernelsp)
  95. dsll k1, k1, 16
  96. #else
  97. lui k1, %hi(kernelsp)
  98. #endif
  99. LONG_L k1, %lo(kernelsp)(k1)
  100. .endm
  101. .macro set_saved_sp stackp temp temp2
  102. LONG_S \stackp, kernelsp
  103. .endm
  104. #endif
  105. .macro SAVE_SOME
  106. .set push
  107. .set noat
  108. .set reorder
  109. mfc0 k0, CP0_STATUS
  110. sll k0, 3 /* extract cu0 bit */
  111. .set noreorder
  112. bltz k0, 8f
  113. move k1, sp
  114. .set reorder
  115. /* Called from user mode, new stack. */
  116. get_saved_sp
  117. 8: move k0, sp
  118. PTR_SUBU sp, k1, PT_SIZE
  119. LONG_S k0, PT_R29(sp)
  120. LONG_S $3, PT_R3(sp)
  121. /*
  122. * You might think that you don't need to save $0,
  123. * but the FPU emulator and gdb remote debug stub
  124. * need it to operate correctly
  125. */
  126. LONG_S $0, PT_R0(sp)
  127. mfc0 v1, CP0_STATUS
  128. LONG_S $2, PT_R2(sp)
  129. LONG_S v1, PT_STATUS(sp)
  130. #ifdef CONFIG_MIPS_MT_SMTC
  131. /*
  132. * Ideally, these instructions would be shuffled in
  133. * to cover the pipeline delay.
  134. */
  135. .set mips32
  136. mfc0 v1, CP0_TCSTATUS
  137. .set mips0
  138. LONG_S v1, PT_TCSTATUS(sp)
  139. #endif /* CONFIG_MIPS_MT_SMTC */
  140. LONG_S $4, PT_R4(sp)
  141. mfc0 v1, CP0_CAUSE
  142. LONG_S $5, PT_R5(sp)
  143. LONG_S v1, PT_CAUSE(sp)
  144. LONG_S $6, PT_R6(sp)
  145. MFC0 v1, CP0_EPC
  146. LONG_S $7, PT_R7(sp)
  147. #ifdef CONFIG_64BIT
  148. LONG_S $8, PT_R8(sp)
  149. LONG_S $9, PT_R9(sp)
  150. #endif
  151. LONG_S v1, PT_EPC(sp)
  152. LONG_S $25, PT_R25(sp)
  153. LONG_S $28, PT_R28(sp)
  154. LONG_S $31, PT_R31(sp)
  155. ori $28, sp, _THREAD_MASK
  156. xori $28, _THREAD_MASK
  157. .set pop
  158. .endm
  159. .macro SAVE_ALL
  160. SAVE_SOME
  161. SAVE_AT
  162. SAVE_TEMP
  163. SAVE_STATIC
  164. .endm
  165. .macro RESTORE_AT
  166. .set push
  167. .set noat
  168. LONG_L $1, PT_R1(sp)
  169. .set pop
  170. .endm
  171. .macro RESTORE_TEMP
  172. LONG_L $24, PT_LO(sp)
  173. #ifdef CONFIG_32BIT
  174. LONG_L $8, PT_R8(sp)
  175. LONG_L $9, PT_R9(sp)
  176. #endif
  177. mtlo $24
  178. LONG_L $24, PT_HI(sp)
  179. LONG_L $10, PT_R10(sp)
  180. LONG_L $11, PT_R11(sp)
  181. mthi $24
  182. LONG_L $12, PT_R12(sp)
  183. LONG_L $13, PT_R13(sp)
  184. LONG_L $14, PT_R14(sp)
  185. LONG_L $15, PT_R15(sp)
  186. LONG_L $24, PT_R24(sp)
  187. .endm
  188. .macro RESTORE_STATIC
  189. LONG_L $16, PT_R16(sp)
  190. LONG_L $17, PT_R17(sp)
  191. LONG_L $18, PT_R18(sp)
  192. LONG_L $19, PT_R19(sp)
  193. LONG_L $20, PT_R20(sp)
  194. LONG_L $21, PT_R21(sp)
  195. LONG_L $22, PT_R22(sp)
  196. LONG_L $23, PT_R23(sp)
  197. LONG_L $30, PT_R30(sp)
  198. .endm
  199. #if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
  200. .macro RESTORE_SOME
  201. .set push
  202. .set reorder
  203. .set noat
  204. mfc0 a0, CP0_STATUS
  205. ori a0, 0x1f
  206. xori a0, 0x1f
  207. mtc0 a0, CP0_STATUS
  208. li v1, 0xff00
  209. and a0, v1
  210. LONG_L v0, PT_STATUS(sp)
  211. nor v1, $0, v1
  212. and v0, v1
  213. or v0, a0
  214. mtc0 v0, CP0_STATUS
  215. LONG_L $31, PT_R31(sp)
  216. LONG_L $28, PT_R28(sp)
  217. LONG_L $25, PT_R25(sp)
  218. #ifdef CONFIG_64BIT
  219. LONG_L $8, PT_R8(sp)
  220. LONG_L $9, PT_R9(sp)
  221. #endif
  222. LONG_L $7, PT_R7(sp)
  223. LONG_L $6, PT_R6(sp)
  224. LONG_L $5, PT_R5(sp)
  225. LONG_L $4, PT_R4(sp)
  226. LONG_L $3, PT_R3(sp)
  227. LONG_L $2, PT_R2(sp)
  228. .set pop
  229. .endm
  230. .macro RESTORE_SP_AND_RET
  231. .set push
  232. .set noreorder
  233. LONG_L k0, PT_EPC(sp)
  234. LONG_L sp, PT_R29(sp)
  235. jr k0
  236. rfe
  237. .set pop
  238. .endm
  239. #else
  240. /*
  241. * For SMTC kernel, global IE should be left set, and interrupts
  242. * controlled exclusively via IXMT.
  243. */
  244. #ifdef CONFIG_MIPS_MT_SMTC
  245. #define STATMASK 0x1e
  246. #else
  247. #define STATMASK 0x1f
  248. #endif
  249. .macro RESTORE_SOME
  250. .set push
  251. .set reorder
  252. .set noat
  253. #ifdef CONFIG_MIPS_MT_SMTC
  254. .set mips32r2
  255. /*
  256. * This may not really be necessary if ints are already
  257. * inhibited here.
  258. */
  259. mfc0 v0, CP0_TCSTATUS
  260. ori v0, TCSTATUS_IXMT
  261. mtc0 v0, CP0_TCSTATUS
  262. _ehb
  263. DMT 5 # dmt a1
  264. jal mips_ihb
  265. #endif /* CONFIG_MIPS_MT_SMTC */
  266. mfc0 a0, CP0_STATUS
  267. ori a0, STATMASK
  268. xori a0, STATMASK
  269. mtc0 a0, CP0_STATUS
  270. li v1, 0xff00
  271. and a0, v1
  272. LONG_L v0, PT_STATUS(sp)
  273. nor v1, $0, v1
  274. and v0, v1
  275. or v0, a0
  276. mtc0 v0, CP0_STATUS
  277. #ifdef CONFIG_MIPS_MT_SMTC
  278. /*
  279. * Only after EXL/ERL have been restored to status can we
  280. * restore TCStatus.IXMT.
  281. */
  282. LONG_L v1, PT_TCSTATUS(sp)
  283. _ehb
  284. mfc0 v0, CP0_TCSTATUS
  285. andi v1, TCSTATUS_IXMT
  286. /* We know that TCStatua.IXMT should be set from above */
  287. xori v0, v0, TCSTATUS_IXMT
  288. or v0, v0, v1
  289. mtc0 v0, CP0_TCSTATUS
  290. _ehb
  291. andi a1, a1, VPECONTROL_TE
  292. beqz a1, 1f
  293. emt
  294. 1:
  295. .set mips0
  296. #endif /* CONFIG_MIPS_MT_SMTC */
  297. LONG_L v1, PT_EPC(sp)
  298. MTC0 v1, CP0_EPC
  299. LONG_L $31, PT_R31(sp)
  300. LONG_L $28, PT_R28(sp)
  301. LONG_L $25, PT_R25(sp)
  302. #ifdef CONFIG_64BIT
  303. LONG_L $8, PT_R8(sp)
  304. LONG_L $9, PT_R9(sp)
  305. #endif
  306. LONG_L $7, PT_R7(sp)
  307. LONG_L $6, PT_R6(sp)
  308. LONG_L $5, PT_R5(sp)
  309. LONG_L $4, PT_R4(sp)
  310. LONG_L $3, PT_R3(sp)
  311. LONG_L $2, PT_R2(sp)
  312. .set pop
  313. .endm
  314. .macro RESTORE_SP_AND_RET
  315. LONG_L sp, PT_R29(sp)
  316. .set mips3
  317. eret
  318. .set mips0
  319. .endm
  320. #endif
  321. .macro RESTORE_SP
  322. LONG_L sp, PT_R29(sp)
  323. .endm
  324. .macro RESTORE_ALL
  325. RESTORE_TEMP
  326. RESTORE_STATIC
  327. RESTORE_AT
  328. RESTORE_SOME
  329. RESTORE_SP
  330. .endm
  331. .macro RESTORE_ALL_AND_RET
  332. RESTORE_TEMP
  333. RESTORE_STATIC
  334. RESTORE_AT
  335. RESTORE_SOME
  336. RESTORE_SP_AND_RET
  337. .endm
  338. /*
  339. * Move to kernel mode and disable interrupts.
  340. * Set cp0 enable bit as sign that we're running on the kernel stack
  341. */
  342. .macro CLI
  343. #if !defined(CONFIG_MIPS_MT_SMTC)
  344. mfc0 t0, CP0_STATUS
  345. li t1, ST0_CU0 | 0x1f
  346. or t0, t1
  347. xori t0, 0x1f
  348. mtc0 t0, CP0_STATUS
  349. #else /* CONFIG_MIPS_MT_SMTC */
  350. /*
  351. * For SMTC, we need to set privilege
  352. * and disable interrupts only for the
  353. * current TC, using the TCStatus register.
  354. */
  355. mfc0 t0,CP0_TCSTATUS
  356. /* Fortunately CU 0 is in the same place in both registers */
  357. /* Set TCU0, TMX, TKSU (for later inversion) and IXMT */
  358. li t1, ST0_CU0 | 0x08001c00
  359. or t0,t1
  360. /* Clear TKSU, leave IXMT */
  361. xori t0, 0x00001800
  362. mtc0 t0, CP0_TCSTATUS
  363. _ehb
  364. /* We need to leave the global IE bit set, but clear EXL...*/
  365. mfc0 t0, CP0_STATUS
  366. ori t0, ST0_EXL | ST0_ERL
  367. xori t0, ST0_EXL | ST0_ERL
  368. mtc0 t0, CP0_STATUS
  369. #endif /* CONFIG_MIPS_MT_SMTC */
  370. irq_disable_hazard
  371. .endm
  372. /*
  373. * Move to kernel mode and enable interrupts.
  374. * Set cp0 enable bit as sign that we're running on the kernel stack
  375. */
  376. .macro STI
  377. #if !defined(CONFIG_MIPS_MT_SMTC)
  378. mfc0 t0, CP0_STATUS
  379. li t1, ST0_CU0 | 0x1f
  380. or t0, t1
  381. xori t0, 0x1e
  382. mtc0 t0, CP0_STATUS
  383. #else /* CONFIG_MIPS_MT_SMTC */
  384. /*
  385. * For SMTC, we need to set privilege
  386. * and enable interrupts only for the
  387. * current TC, using the TCStatus register.
  388. */
  389. _ehb
  390. mfc0 t0,CP0_TCSTATUS
  391. /* Fortunately CU 0 is in the same place in both registers */
  392. /* Set TCU0, TKSU (for later inversion) and IXMT */
  393. li t1, ST0_CU0 | 0x08001c00
  394. or t0,t1
  395. /* Clear TKSU *and* IXMT */
  396. xori t0, 0x00001c00
  397. mtc0 t0, CP0_TCSTATUS
  398. _ehb
  399. /* We need to leave the global IE bit set, but clear EXL...*/
  400. mfc0 t0, CP0_STATUS
  401. ori t0, ST0_EXL
  402. xori t0, ST0_EXL
  403. mtc0 t0, CP0_STATUS
  404. /* irq_enable_hazard below should expand to EHB for 24K/34K cpus */
  405. #endif /* CONFIG_MIPS_MT_SMTC */
  406. irq_enable_hazard
  407. .endm
  408. /*
  409. * Just move to kernel mode and leave interrupts as they are.
  410. * Set cp0 enable bit as sign that we're running on the kernel stack
  411. */
  412. .macro KMODE
  413. #ifdef CONFIG_MIPS_MT_SMTC
  414. /*
  415. * This gets baroque in SMTC. We want to
  416. * protect the non-atomic clearing of EXL
  417. * with DMT/EMT, but we don't want to take
  418. * an interrupt while DMT is still in effect.
  419. */
  420. /* KMODE gets invoked from both reorder and noreorder code */
  421. .set push
  422. .set mips32r2
  423. .set noreorder
  424. mfc0 v0, CP0_TCSTATUS
  425. andi v1, v0, TCSTATUS_IXMT
  426. ori v0, TCSTATUS_IXMT
  427. mtc0 v0, CP0_TCSTATUS
  428. _ehb
  429. DMT 2 # dmt v0
  430. /*
  431. * We don't know a priori if ra is "live"
  432. */
  433. move t0, ra
  434. jal mips_ihb
  435. nop /* delay slot */
  436. move ra, t0
  437. #endif /* CONFIG_MIPS_MT_SMTC */
  438. mfc0 t0, CP0_STATUS
  439. li t1, ST0_CU0 | 0x1e
  440. or t0, t1
  441. xori t0, 0x1e
  442. mtc0 t0, CP0_STATUS
  443. #ifdef CONFIG_MIPS_MT_SMTC
  444. _ehb
  445. andi v0, v0, VPECONTROL_TE
  446. beqz v0, 2f
  447. nop /* delay slot */
  448. emt
  449. 2:
  450. mfc0 v0, CP0_TCSTATUS
  451. /* Clear IXMT, then OR in previous value */
  452. ori v0, TCSTATUS_IXMT
  453. xori v0, TCSTATUS_IXMT
  454. or v0, v1, v0
  455. mtc0 v0, CP0_TCSTATUS
  456. /*
  457. * irq_disable_hazard below should expand to EHB
  458. * on 24K/34K CPUS
  459. */
  460. .set pop
  461. #endif /* CONFIG_MIPS_MT_SMTC */
  462. irq_disable_hazard
  463. .endm
  464. #endif /* _ASM_STACKFRAME_H */