/* arch/arc/include/asm/entry.h */
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Vineetg: March 2009 (Supporting 2 levels of Interrupts)
 *  Stack switching code can no longer reliably rely on the fact that
 *  if we are NOT in user mode, stack is switched to kernel mode.
 *  e.g. L2 IRQ interrupted a L1 ISR which had not yet completed
 *  its prologue, including stack switching from user mode
 *
 * Vineetg: Aug 28th 2008: Bug #94984
 *  -Zero Overhead Loop Context should be cleared when entering IRQ/Excp/Trap
 *   Normally CPU does this automatically; however when doing FAKE rtie,
 *   we also need to explicitly do this. The problem in macros
 *   FAKE_RET_FROM_EXCPN and FAKE_RET_FROM_EXCPN_LOCK_IRQ was that this bit
 *   was being "CLEARED" rather than "SET". Actually "SET" clears ZOL context
 *
 * Vineetg: May 5th 2008
 *  -Modified CALLEE_REG save/restore macros to handle the fact that
 *   r25 contains the kernel current task ptr
 *  -Defined Stack Switching Macro to be reused in all intr/excp handlers
 *  -Shaved off 11 instructions from RESTORE_ALL_INT1 by using the
 *   address write-back load ld.ab instead of separate ld/add instructions
 *
 * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
 */
  30. #ifndef __ASM_ARC_ENTRY_H
  31. #define __ASM_ARC_ENTRY_H
  32. #ifdef __ASSEMBLY__
  33. #include <asm/unistd.h> /* For NR_syscalls defination */
  34. #include <asm/asm-offsets.h>
  35. #include <asm/arcregs.h>
  36. #include <asm/ptrace.h>
  37. #include <asm/processor.h> /* For VMALLOC_START */
  38. #include <asm/thread_info.h> /* For THREAD_SIZE */
  39. /* Note on the LD/ST addr modes with addr reg wback
  40. *
  41. * LD.a same as LD.aw
  42. *
  43. * LD.a reg1, [reg2, x] => Pre Incr
  44. * Eff Addr for load = [reg2 + x]
  45. *
  46. * LD.ab reg1, [reg2, x] => Post Incr
  47. * Eff Addr for load = [reg2]
  48. */
  49. .macro PUSH reg
  50. st.a \reg, [sp, -4]
  51. .endm
  52. .macro PUSHAX aux
  53. lr r9, [\aux]
  54. PUSH r9
  55. .endm
  56. .macro POP reg
  57. ld.ab \reg, [sp, 4]
  58. .endm
  59. .macro POPAX aux
  60. POP r9
  61. sr r9, [\aux]
  62. .endm
  63. /*--------------------------------------------------------------
  64. * Helpers to save/restore Scratch Regs:
  65. * used by Interrupt/Exception Prologue/Epilogue
  66. *-------------------------------------------------------------*/
  67. .macro SAVE_R0_TO_R12
  68. PUSH r0
  69. PUSH r1
  70. PUSH r2
  71. PUSH r3
  72. PUSH r4
  73. PUSH r5
  74. PUSH r6
  75. PUSH r7
  76. PUSH r8
  77. PUSH r9
  78. PUSH r10
  79. PUSH r11
  80. PUSH r12
  81. .endm
  82. .macro RESTORE_R12_TO_R0
  83. POP r12
  84. POP r11
  85. POP r10
  86. POP r9
  87. POP r8
  88. POP r7
  89. POP r6
  90. POP r5
  91. POP r4
  92. POP r3
  93. POP r2
  94. POP r1
  95. POP r0
  96. .endm
  97. /*--------------------------------------------------------------
  98. * Helpers to save/restore callee-saved regs:
  99. * used by several macros below
  100. *-------------------------------------------------------------*/
  101. .macro SAVE_R13_TO_R24
  102. PUSH r13
  103. PUSH r14
  104. PUSH r15
  105. PUSH r16
  106. PUSH r17
  107. PUSH r18
  108. PUSH r19
  109. PUSH r20
  110. PUSH r21
  111. PUSH r22
  112. PUSH r23
  113. PUSH r24
  114. .endm
  115. .macro RESTORE_R24_TO_R13
  116. POP r24
  117. POP r23
  118. POP r22
  119. POP r21
  120. POP r20
  121. POP r19
  122. POP r18
  123. POP r17
  124. POP r16
  125. POP r15
  126. POP r14
  127. POP r13
  128. .endm
  129. /*--------------------------------------------------------------
  130. * Collect User Mode callee regs as struct callee_regs - needed by
  131. * fork/do_signal/unaligned-access-emulation.
  132. * (By default only scratch regs are saved on entry to kernel)
  133. *
  134. * Special handling for r25 if used for caching Task Pointer.
  135. * It would have been saved in task->thread.user_r25 already, but to keep
  136. * the interface same it is copied into regular r25 placeholder in
  137. * struct callee_regs.
  138. *-------------------------------------------------------------*/
  139. .macro SAVE_CALLEE_SAVED_USER
  140. SAVE_R13_TO_R24
  141. #ifdef CONFIG_ARC_CURR_IN_REG
  142. ; Retrieve orig r25 and save it on stack
  143. ld r12, [r25, TASK_THREAD + THREAD_USER_R25]
  144. st.a r12, [sp, -4]
  145. #else
  146. PUSH r25
  147. #endif
  148. .endm
  149. /*--------------------------------------------------------------
  150. * Save kernel Mode callee regs at the time of Contect Switch.
  151. *
  152. * Special handling for r25 if used for caching Task Pointer.
  153. * Kernel simply skips saving it since it will be loaded with
  154. * incoming task pointer anyways
  155. *-------------------------------------------------------------*/
  156. .macro SAVE_CALLEE_SAVED_KERNEL
  157. SAVE_R13_TO_R24
  158. #ifdef CONFIG_ARC_CURR_IN_REG
  159. sub sp, sp, 4
  160. #else
  161. PUSH r25
  162. #endif
  163. .endm
  164. /*--------------------------------------------------------------
  165. * Opposite of SAVE_CALLEE_SAVED_KERNEL
  166. *-------------------------------------------------------------*/
  167. .macro RESTORE_CALLEE_SAVED_KERNEL
  168. #ifdef CONFIG_ARC_CURR_IN_REG
  169. add sp, sp, 4 /* skip usual r25 placeholder */
  170. #else
  171. POP r25
  172. #endif
  173. RESTORE_R24_TO_R13
  174. .endm
  175. /*--------------------------------------------------------------
  176. * Opposite of SAVE_CALLEE_SAVED_USER
  177. *
  178. * ptrace tracer or unaligned-access fixup might have changed a user mode
  179. * callee reg which is saved back to usual r25 storage location
  180. *-------------------------------------------------------------*/
  181. .macro RESTORE_CALLEE_SAVED_USER
  182. #ifdef CONFIG_ARC_CURR_IN_REG
  183. ld.ab r12, [sp, 4]
  184. st r12, [r25, TASK_THREAD + THREAD_USER_R25]
  185. #else
  186. POP r25
  187. #endif
  188. RESTORE_R24_TO_R13
  189. .endm
  190. /*--------------------------------------------------------------
  191. * Super FAST Restore callee saved regs by simply re-adjusting SP
  192. *-------------------------------------------------------------*/
  193. .macro DISCARD_CALLEE_SAVED_USER
  194. add sp, sp, SZ_CALLEE_REGS
  195. .endm
  196. /*--------------------------------------------------------------
  197. * Restore User mode r25 saved in task_struct->thread.user_r25
  198. *-------------------------------------------------------------*/
  199. .macro RESTORE_USER_R25
  200. ld r25, [r25, TASK_THREAD + THREAD_USER_R25]
  201. .endm
  202. /*-------------------------------------------------------------
  203. * given a tsk struct, get to the base of it's kernel mode stack
  204. * tsk->thread_info is really a PAGE, whose bottom hoists stack
  205. * which grows upwards towards thread_info
  206. *------------------------------------------------------------*/
  207. .macro GET_TSK_STACK_BASE tsk, out
  208. /* Get task->thread_info (this is essentially start of a PAGE) */
  209. ld \out, [\tsk, TASK_THREAD_INFO]
  210. /* Go to end of page where stack begins (grows upwards) */
  211. add2 \out, \out, (THREAD_SIZE)/4
  212. .endm
  213. /*--------------------------------------------------------------
  214. * Switch to Kernel Mode stack if SP points to User Mode stack
  215. *
  216. * Entry : r9 contains pre-IRQ/exception/trap status32
  217. * Exit : SP is set to kernel mode stack pointer
  218. * If CURR_IN_REG, r25 set to "current" task pointer
  219. * Clobbers: r9
  220. *-------------------------------------------------------------*/
  221. .macro SWITCH_TO_KERNEL_STK
  222. /* User Mode when this happened ? Yes: Proceed to switch stack */
  223. bbit1 r9, STATUS_U_BIT, 88f
  224. /* OK we were already in kernel mode when this event happened, thus can
  225. * assume SP is kernel mode SP. _NO_ need to do any stack switching
  226. */
  227. #ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS
  228. /* However....
  229. * If Level 2 Interrupts enabled, we may end up with a corner case:
  230. * 1. User Task executing
  231. * 2. L1 IRQ taken, ISR starts (CPU auto-switched to KERNEL mode)
  232. * 3. But before it could switch SP from USER to KERNEL stack
  233. * a L2 IRQ "Interrupts" L1
  234. * Thay way although L2 IRQ happened in Kernel mode, stack is still
  235. * not switched.
  236. * To handle this, we may need to switch stack even if in kernel mode
  237. * provided SP has values in range of USER mode stack ( < 0x7000_0000 )
  238. */
  239. brlo sp, VMALLOC_START, 88f
  240. /* TODO: vineetg:
  241. * We need to be a bit more cautious here. What if a kernel bug in
  242. * L1 ISR, caused SP to go whaco (some small value which looks like
  243. * USER stk) and then we take L2 ISR.
  244. * Above brlo alone would treat it as a valid L1-L2 sceanrio
  245. * instead of shouting alound
  246. * The only feasible way is to make sure this L2 happened in
  247. * L1 prelogue ONLY i.e. ilink2 is less than a pre-set marker in
  248. * L1 ISR before it switches stack
  249. */
  250. #endif
  251. /* Save Pre Intr/Exception KERNEL MODE SP on kernel stack
  252. * safe-keeping not really needed, but it keeps the epilogue code
  253. * (SP restore) simpler/uniform.
  254. */
  255. b.d 66f
  256. mov r9, sp
  257. 88: /*------Intr/Ecxp happened in user mode, "switch" stack ------ */
  258. GET_CURR_TASK_ON_CPU r9
  259. #ifdef CONFIG_ARC_CURR_IN_REG
  260. /* If current task pointer cached in r25, time to
  261. * -safekeep USER r25 in task->thread_struct->user_r25
  262. * -load r25 with current task ptr
  263. */
  264. st.as r25, [r9, (TASK_THREAD + THREAD_USER_R25)/4]
  265. mov r25, r9
  266. #endif
  267. /* With current tsk in r9, get it's kernel mode stack base */
  268. GET_TSK_STACK_BASE r9, r9
  269. 66:
  270. /* Save Pre Intr/Exception User SP on kernel stack */
  271. st.a sp, [r9, -12] ; Make room for orig_r0 and orig_r8
  272. /* CAUTION:
  273. * SP should be set at the very end when we are done with everything
  274. * In case of 2 levels of interrupt we depend on value of SP to assume
  275. * that everything else is done (loading r25 etc)
  276. */
  277. /* set SP to point to kernel mode stack */
  278. mov sp, r9
  279. /* ----- Stack Switched to kernel Mode, Now save REG FILE ----- */
  280. .endm
  281. /*------------------------------------------------------------
  282. * "FAKE" a rtie to return from CPU Exception context
  283. * This is to re-enable Exceptions within exception
  284. * Look at EV_ProtV to see how this is actually used
  285. *-------------------------------------------------------------*/
  286. .macro FAKE_RET_FROM_EXCPN reg
  287. ld \reg, [sp, PT_status32]
  288. bic \reg, \reg, (STATUS_U_MASK|STATUS_DE_MASK)
  289. bset \reg, \reg, STATUS_L_BIT
  290. sr \reg, [erstatus]
  291. mov \reg, 55f
  292. sr \reg, [eret]
  293. rtie
  294. 55:
  295. .endm
  296. /*
  297. * @reg [OUT] &thread_info of "current"
  298. */
  299. .macro GET_CURR_THR_INFO_FROM_SP reg
  300. bic \reg, sp, (THREAD_SIZE - 1)
  301. .endm
  302. /*
  303. * @reg [OUT] thread_info->flags of "current"
  304. */
  305. .macro GET_CURR_THR_INFO_FLAGS reg
  306. GET_CURR_THR_INFO_FROM_SP \reg
  307. ld \reg, [\reg, THREAD_INFO_FLAGS]
  308. .endm
  309. /*--------------------------------------------------------------
  310. * For early Exception Prologue, a core reg is temporarily needed to
  311. * code the rest of prolog (stack switching). This is done by stashing
  312. * it to memory (non-SMP case) or SCRATCH0 Aux Reg (SMP).
  313. *
  314. * Before saving the full regfile - this reg is restored back, only
  315. * to be saved again on kernel mode stack, as part of ptregs.
  316. *-------------------------------------------------------------*/
  317. .macro EXCPN_PROLOG_FREEUP_REG reg
  318. #ifdef CONFIG_SMP
  319. sr \reg, [ARC_REG_SCRATCH_DATA0]
  320. #else
  321. st \reg, [@ex_saved_reg1]
  322. #endif
  323. .endm
  324. .macro EXCPN_PROLOG_RESTORE_REG reg
  325. #ifdef CONFIG_SMP
  326. lr \reg, [ARC_REG_SCRATCH_DATA0]
  327. #else
  328. ld \reg, [@ex_saved_reg1]
  329. #endif
  330. .endm
  331. /*--------------------------------------------------------------
  332. * Save all registers used by Exceptions (TLB Miss, Prot-V, Mem err etc)
  333. * Requires SP to be already switched to kernel mode Stack
  334. * sp points to the next free element on the stack at exit of this macro.
  335. * Registers are pushed / popped in the order defined in struct ptregs
  336. * in asm/ptrace.h
  337. * Note that syscalls are implemented via TRAP which is also a exception
  338. * from CPU's point of view
  339. *-------------------------------------------------------------*/
  340. .macro SAVE_ALL_EXCEPTION marker
  341. st \marker, [sp, 8] /* orig_r8 */
  342. st r0, [sp, 4] /* orig_r0, needed only for sys calls */
  343. /* Restore r9 used to code the early prologue */
  344. EXCPN_PROLOG_RESTORE_REG r9
  345. SAVE_R0_TO_R12
  346. PUSH gp
  347. PUSH fp
  348. PUSH blink
  349. PUSHAX eret
  350. PUSHAX erstatus
  351. PUSH lp_count
  352. PUSHAX lp_end
  353. PUSHAX lp_start
  354. PUSHAX erbta
  355. .endm
  356. /*--------------------------------------------------------------
  357. * Save scratch regs for exceptions
  358. *-------------------------------------------------------------*/
  359. .macro SAVE_ALL_SYS
  360. SAVE_ALL_EXCEPTION orig_r8_IS_EXCPN
  361. .endm
  362. /*--------------------------------------------------------------
  363. * Save scratch regs for sys calls
  364. *-------------------------------------------------------------*/
  365. .macro SAVE_ALL_TRAP
  366. /*
  367. * Setup pt_regs->orig_r8.
  368. * Encode syscall number (r8) in upper short word of event type (r9)
  369. * N.B. #1: This is already endian safe (see ptrace.h)
  370. * #2: Only r9 can be used as scratch as it is already clobbered
  371. * and it's contents are no longer needed by the latter part
  372. * of exception prologue
  373. */
  374. lsl r9, r8, 16
  375. or r9, r9, orig_r8_IS_SCALL
  376. SAVE_ALL_EXCEPTION r9
  377. .endm
  378. /*--------------------------------------------------------------
  379. * Restore all registers used by system call or Exceptions
  380. * SP should always be pointing to the next free stack element
  381. * when entering this macro.
  382. *
  383. * NOTE:
  384. *
  385. * It is recommended that lp_count/ilink1/ilink2 not be used as a dest reg
  386. * for memory load operations. If used in that way interrupts are deffered
  387. * by hardware and that is not good.
  388. *-------------------------------------------------------------*/
  389. .macro RESTORE_ALL_SYS
  390. POPAX erbta
  391. POPAX lp_start
  392. POPAX lp_end
  393. POP r9
  394. mov lp_count, r9 ;LD to lp_count is not allowed
  395. POPAX erstatus
  396. POPAX eret
  397. POP blink
  398. POP fp
  399. POP gp
  400. RESTORE_R12_TO_R0
  401. ld sp, [sp] /* restore original sp */
  402. /* orig_r0 and orig_r8 skipped automatically */
  403. .endm
  404. /*--------------------------------------------------------------
  405. * Save all registers used by interrupt handlers.
  406. *-------------------------------------------------------------*/
  407. .macro SAVE_ALL_INT1
  408. /* restore original r9 to be saved as part of reg-file */
  409. #ifdef CONFIG_SMP
  410. lr r9, [ARC_REG_SCRATCH_DATA0]
  411. #else
  412. ld r9, [@int1_saved_reg]
  413. #endif
  414. /* now we are ready to save the remaining context :) */
  415. st orig_r8_IS_IRQ1, [sp, 8] /* Event Type */
  416. st 0, [sp, 4] /* orig_r0 , N/A for IRQ */
  417. SAVE_R0_TO_R12
  418. PUSH gp
  419. PUSH fp
  420. PUSH blink
  421. PUSH ilink1
  422. PUSHAX status32_l1
  423. PUSH lp_count
  424. PUSHAX lp_end
  425. PUSHAX lp_start
  426. PUSHAX bta_l1
  427. .endm
  428. .macro SAVE_ALL_INT2
  429. /* TODO-vineetg: SMP we can't use global nor can we use
  430. * SCRATCH0 as we do for int1 because while int1 is using
  431. * it, int2 can come
  432. */
  433. /* retsore original r9 , saved in sys_saved_r9 */
  434. ld r9, [@int2_saved_reg]
  435. /* now we are ready to save the remaining context :) */
  436. st orig_r8_IS_IRQ2, [sp, 8] /* Event Type */
  437. st 0, [sp, 4] /* orig_r0 , N/A for IRQ */
  438. SAVE_R0_TO_R12
  439. PUSH gp
  440. PUSH fp
  441. PUSH blink
  442. PUSH ilink2
  443. PUSHAX status32_l2
  444. PUSH lp_count
  445. PUSHAX lp_end
  446. PUSHAX lp_start
  447. PUSHAX bta_l2
  448. .endm
  449. /*--------------------------------------------------------------
  450. * Restore all registers used by interrupt handlers.
  451. *
  452. * NOTE:
  453. *
  454. * It is recommended that lp_count/ilink1/ilink2 not be used as a dest reg
  455. * for memory load operations. If used in that way interrupts are deffered
  456. * by hardware and that is not good.
  457. *-------------------------------------------------------------*/
  458. .macro RESTORE_ALL_INT1
  459. POPAX bta_l1
  460. POPAX lp_start
  461. POPAX lp_end
  462. POP r9
  463. mov lp_count, r9 ;LD to lp_count is not allowed
  464. POPAX status32_l1
  465. POP ilink1
  466. POP blink
  467. POP fp
  468. POP gp
  469. RESTORE_R12_TO_R0
  470. ld sp, [sp] /* restore original sp */
  471. /* orig_r0 and orig_r8 skipped automatically */
  472. .endm
  473. .macro RESTORE_ALL_INT2
  474. POPAX bta_l2
  475. POPAX lp_start
  476. POPAX lp_end
  477. POP r9
  478. mov lp_count, r9 ;LD to lp_count is not allowed
  479. POPAX status32_l2
  480. POP ilink2
  481. POP blink
  482. POP fp
  483. POP gp
  484. RESTORE_R12_TO_R0
  485. ld sp, [sp] /* restore original sp */
  486. /* orig_r0 and orig_r8 skipped automatically */
  487. .endm
  488. /* Get CPU-ID of this core */
  489. .macro GET_CPU_ID reg
  490. lr \reg, [identity]
  491. lsr \reg, \reg, 8
  492. bmsk \reg, \reg, 7
  493. .endm
  494. #ifdef CONFIG_SMP
  495. /*-------------------------------------------------
  496. * Retrieve the current running task on this CPU
  497. * 1. Determine curr CPU id.
  498. * 2. Use it to index into _current_task[ ]
  499. */
  500. .macro GET_CURR_TASK_ON_CPU reg
  501. GET_CPU_ID \reg
  502. ld.as \reg, [@_current_task, \reg]
  503. .endm
  504. /*-------------------------------------------------
  505. * Save a new task as the "current" task on this CPU
  506. * 1. Determine curr CPU id.
  507. * 2. Use it to index into _current_task[ ]
  508. *
  509. * Coded differently than GET_CURR_TASK_ON_CPU (which uses LD.AS)
  510. * because ST r0, [r1, offset] can ONLY have s9 @offset
  511. * while LD can take s9 (4 byte insn) or LIMM (8 byte insn)
  512. */
  513. .macro SET_CURR_TASK_ON_CPU tsk, tmp
  514. GET_CPU_ID \tmp
  515. add2 \tmp, @_current_task, \tmp
  516. st \tsk, [\tmp]
  517. #ifdef CONFIG_ARC_CURR_IN_REG
  518. mov r25, \tsk
  519. #endif
  520. .endm
  521. #else /* Uniprocessor implementation of macros */
  522. .macro GET_CURR_TASK_ON_CPU reg
  523. ld \reg, [@_current_task]
  524. .endm
  525. .macro SET_CURR_TASK_ON_CPU tsk, tmp
  526. st \tsk, [@_current_task]
  527. #ifdef CONFIG_ARC_CURR_IN_REG
  528. mov r25, \tsk
  529. #endif
  530. .endm
  531. #endif /* SMP / UNI */
  532. /* ------------------------------------------------------------------
  533. * Get the ptr to some field of Current Task at @off in task struct
  534. * -Uses r25 for Current task ptr if that is enabled
  535. */
  536. #ifdef CONFIG_ARC_CURR_IN_REG
  537. .macro GET_CURR_TASK_FIELD_PTR off, reg
  538. add \reg, r25, \off
  539. .endm
  540. #else
  541. .macro GET_CURR_TASK_FIELD_PTR off, reg
  542. GET_CURR_TASK_ON_CPU \reg
  543. add \reg, \reg, \off
  544. .endm
  545. #endif /* CONFIG_ARC_CURR_IN_REG */
  546. #endif /* __ASSEMBLY__ */
  547. #endif /* __ASM_ARC_ENTRY_H */