/* arch/arc/include/asm/entry.h */
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Vineetg: March 2009 (Supporting 2 levels of Interrupts)
 *  Stack switching code can no longer reliably rely on the fact that
 *  if we are NOT in user mode, the stack is switched to kernel mode.
 *  e.g. an L2 IRQ interrupted an L1 ISR which had not yet completed
 *  its prologue, including the stack switch from user mode
 *
 * Vineetg: Aug 28th 2008: Bug #94984
 *  -Zero Overhead Loop context should be cleared when entering IRQ/Excp/Trap
 *   Normally the CPU does this automatically; however when doing a FAKE rtie,
 *   we also need to do this explicitly. The problem in the macros
 *   FAKE_RET_FROM_EXCPN and FAKE_RET_FROM_EXCPN_LOCK_IRQ was that this bit
 *   was being "CLEARED" rather than "SET". Actually "SET" clears the ZOL context
 *
 * Vineetg: May 5th 2008
 *  -Modified CALLEE_REG save/restore macros to handle the fact that
 *   r25 contains the kernel current task ptr
 *  -Defined the stack-switching macro to be reused in all intr/excp handlers
 *  -Shaved off 11 instructions from RESTORE_ALL_INT1 by using the
 *   address-writeback load ld.ab instead of separate ld/add instructions
 *
 * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
 */
#ifndef __ASM_ARC_ENTRY_H
#define __ASM_ARC_ENTRY_H

#ifdef __ASSEMBLY__
#include <asm/unistd.h>		/* For NR_syscalls definition */
#include <asm/asm-offsets.h>
#include <asm/arcregs.h>
#include <asm/ptrace.h>
#include <asm/processor.h>	/* For VMALLOC_START */
#include <asm/thread_info.h>	/* For THREAD_SIZE */
/* Note on the LD/ST addressing modes with address-register writeback
 *
 * LD.a is the same as LD.aw
 *
 * LD.a  reg1, [reg2, x]	=> Pre-increment
 *	Effective address for load = [reg2 + x]
 *
 * LD.ab reg1, [reg2, x]	=> Post-increment
 *	Effective address for load = [reg2]
 */
  49. .macro PUSH reg
  50. st.a \reg, [sp, -4]
  51. .endm
  52. .macro PUSHAX aux
  53. lr r9, [\aux]
  54. PUSH r9
  55. .endm
  56. .macro POP reg
  57. ld.ab \reg, [sp, 4]
  58. .endm
  59. .macro POPAX aux
  60. POP r9
  61. sr r9, [\aux]
  62. .endm
  63. /*--------------------------------------------------------------
  64. * Helpers to save/restore Scratch Regs:
  65. * used by Interrupt/Exception Prologue/Epilogue
  66. *-------------------------------------------------------------*/
  67. .macro SAVE_R0_TO_R12
  68. PUSH r0
  69. PUSH r1
  70. PUSH r2
  71. PUSH r3
  72. PUSH r4
  73. PUSH r5
  74. PUSH r6
  75. PUSH r7
  76. PUSH r8
  77. PUSH r9
  78. PUSH r10
  79. PUSH r11
  80. PUSH r12
  81. .endm
  82. .macro RESTORE_R12_TO_R0
  83. POP r12
  84. POP r11
  85. POP r10
  86. POP r9
  87. POP r8
  88. POP r7
  89. POP r6
  90. POP r5
  91. POP r4
  92. POP r3
  93. POP r2
  94. POP r1
  95. POP r0
  96. #ifdef CONFIG_ARC_CURR_IN_REG
  97. ld r25, [sp, 12]
  98. #endif
  99. .endm
  100. /*--------------------------------------------------------------
  101. * Helpers to save/restore callee-saved regs:
  102. * used by several macros below
  103. *-------------------------------------------------------------*/
  104. .macro SAVE_R13_TO_R24
  105. PUSH r13
  106. PUSH r14
  107. PUSH r15
  108. PUSH r16
  109. PUSH r17
  110. PUSH r18
  111. PUSH r19
  112. PUSH r20
  113. PUSH r21
  114. PUSH r22
  115. PUSH r23
  116. PUSH r24
  117. .endm
  118. .macro RESTORE_R24_TO_R13
  119. POP r24
  120. POP r23
  121. POP r22
  122. POP r21
  123. POP r20
  124. POP r19
  125. POP r18
  126. POP r17
  127. POP r16
  128. POP r15
  129. POP r14
  130. POP r13
  131. .endm
  132. #define OFF_USER_R25_FROM_R24 (SZ_CALLEE_REGS + SZ_PT_REGS - 8)/4
  133. /*--------------------------------------------------------------
  134. * Collect User Mode callee regs as struct callee_regs - needed by
  135. * fork/do_signal/unaligned-access-emulation.
  136. * (By default only scratch regs are saved on entry to kernel)
  137. *
  138. * Special handling for r25 if used for caching Task Pointer.
  139. * It would have been saved in task->thread.user_r25 already, but to keep
  140. * the interface same it is copied into regular r25 placeholder in
  141. * struct callee_regs.
  142. *-------------------------------------------------------------*/
  143. .macro SAVE_CALLEE_SAVED_USER
  144. SAVE_R13_TO_R24
  145. #ifdef CONFIG_ARC_CURR_IN_REG
  146. ; Retrieve orig r25 and save it on stack
  147. ld.as r12, [sp, OFF_USER_R25_FROM_R24]
  148. st.a r12, [sp, -4]
  149. #else
  150. PUSH r25
  151. #endif
  152. .endm
  153. /*--------------------------------------------------------------
  154. * Save kernel Mode callee regs at the time of Contect Switch.
  155. *
  156. * Special handling for r25 if used for caching Task Pointer.
  157. * Kernel simply skips saving it since it will be loaded with
  158. * incoming task pointer anyways
  159. *-------------------------------------------------------------*/
  160. .macro SAVE_CALLEE_SAVED_KERNEL
  161. SAVE_R13_TO_R24
  162. #ifdef CONFIG_ARC_CURR_IN_REG
  163. sub sp, sp, 4
  164. #else
  165. PUSH r25
  166. #endif
  167. .endm
  168. /*--------------------------------------------------------------
  169. * Opposite of SAVE_CALLEE_SAVED_KERNEL
  170. *-------------------------------------------------------------*/
  171. .macro RESTORE_CALLEE_SAVED_KERNEL
  172. #ifdef CONFIG_ARC_CURR_IN_REG
  173. add sp, sp, 4 /* skip usual r25 placeholder */
  174. #else
  175. POP r25
  176. #endif
  177. RESTORE_R24_TO_R13
  178. .endm
  179. /*--------------------------------------------------------------
  180. * Opposite of SAVE_CALLEE_SAVED_USER
  181. *
  182. * ptrace tracer or unaligned-access fixup might have changed a user mode
  183. * callee reg which is saved back to usual r25 storage location
  184. *-------------------------------------------------------------*/
  185. .macro RESTORE_CALLEE_SAVED_USER
  186. #ifdef CONFIG_ARC_CURR_IN_REG
  187. ld.ab r12, [sp, 4]
  188. st.as r12, [sp, OFF_USER_R25_FROM_R24]
  189. #else
  190. POP r25
  191. #endif
  192. RESTORE_R24_TO_R13
  193. .endm
  194. /*--------------------------------------------------------------
  195. * Super FAST Restore callee saved regs by simply re-adjusting SP
  196. *-------------------------------------------------------------*/
  197. .macro DISCARD_CALLEE_SAVED_USER
  198. add sp, sp, SZ_CALLEE_REGS
  199. .endm
  200. /*-------------------------------------------------------------
  201. * given a tsk struct, get to the base of it's kernel mode stack
  202. * tsk->thread_info is really a PAGE, whose bottom hoists stack
  203. * which grows upwards towards thread_info
  204. *------------------------------------------------------------*/
  205. .macro GET_TSK_STACK_BASE tsk, out
  206. /* Get task->thread_info (this is essentially start of a PAGE) */
  207. ld \out, [\tsk, TASK_THREAD_INFO]
  208. /* Go to end of page where stack begins (grows upwards) */
  209. add2 \out, \out, (THREAD_SIZE)/4
  210. .endm
  211. /*--------------------------------------------------------------
  212. * Switch to Kernel Mode stack if SP points to User Mode stack
  213. *
  214. * Entry : r9 contains pre-IRQ/exception/trap status32
  215. * Exit : SP is set to kernel mode stack pointer
  216. * If CURR_IN_REG, r25 set to "current" task pointer
  217. * Clobbers: r9
  218. *-------------------------------------------------------------*/
  219. .macro SWITCH_TO_KERNEL_STK
  220. /* User Mode when this happened ? Yes: Proceed to switch stack */
  221. bbit1 r9, STATUS_U_BIT, 88f
  222. /* OK we were already in kernel mode when this event happened, thus can
  223. * assume SP is kernel mode SP. _NO_ need to do any stack switching
  224. */
  225. #ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS
  226. /* However....
  227. * If Level 2 Interrupts enabled, we may end up with a corner case:
  228. * 1. User Task executing
  229. * 2. L1 IRQ taken, ISR starts (CPU auto-switched to KERNEL mode)
  230. * 3. But before it could switch SP from USER to KERNEL stack
  231. * a L2 IRQ "Interrupts" L1
  232. * Thay way although L2 IRQ happened in Kernel mode, stack is still
  233. * not switched.
  234. * To handle this, we may need to switch stack even if in kernel mode
  235. * provided SP has values in range of USER mode stack ( < 0x7000_0000 )
  236. */
  237. brlo sp, VMALLOC_START, 88f
  238. /* TODO: vineetg:
  239. * We need to be a bit more cautious here. What if a kernel bug in
  240. * L1 ISR, caused SP to go whaco (some small value which looks like
  241. * USER stk) and then we take L2 ISR.
  242. * Above brlo alone would treat it as a valid L1-L2 sceanrio
  243. * instead of shouting alound
  244. * The only feasible way is to make sure this L2 happened in
  245. * L1 prelogue ONLY i.e. ilink2 is less than a pre-set marker in
  246. * L1 ISR before it switches stack
  247. */
  248. #endif
  249. /* Save Pre Intr/Exception KERNEL MODE SP on kernel stack
  250. * safe-keeping not really needed, but it keeps the epilogue code
  251. * (SP restore) simpler/uniform.
  252. */
  253. b.d 66f
  254. mov r9, sp
  255. 88: /*------Intr/Ecxp happened in user mode, "switch" stack ------ */
  256. GET_CURR_TASK_ON_CPU r9
  257. /* With current tsk in r9, get it's kernel mode stack base */
  258. GET_TSK_STACK_BASE r9, r9
  259. 66:
  260. #ifdef CONFIG_ARC_CURR_IN_REG
  261. /*
  262. * Treat r25 as scratch reg, save it on stack first
  263. * Load it with current task pointer
  264. */
  265. st r25, [r9, -4]
  266. GET_CURR_TASK_ON_CPU r25
  267. #endif
  268. /* Save Pre Intr/Exception User SP on kernel stack */
  269. st.a sp, [r9, -16] ; Make room for orig_r0, orig_r8, user_r25
  270. /* CAUTION:
  271. * SP should be set at the very end when we are done with everything
  272. * In case of 2 levels of interrupt we depend on value of SP to assume
  273. * that everything else is done (loading r25 etc)
  274. */
  275. /* set SP to point to kernel mode stack */
  276. mov sp, r9
  277. /* ----- Stack Switched to kernel Mode, Now save REG FILE ----- */
  278. .endm
  279. /*------------------------------------------------------------
  280. * "FAKE" a rtie to return from CPU Exception context
  281. * This is to re-enable Exceptions within exception
  282. * Look at EV_ProtV to see how this is actually used
  283. *-------------------------------------------------------------*/
  284. .macro FAKE_RET_FROM_EXCPN reg
  285. ld \reg, [sp, PT_status32]
  286. bic \reg, \reg, (STATUS_U_MASK|STATUS_DE_MASK)
  287. bset \reg, \reg, STATUS_L_BIT
  288. sr \reg, [erstatus]
  289. mov \reg, 55f
  290. sr \reg, [eret]
  291. rtie
  292. 55:
  293. .endm
  294. /*
  295. * @reg [OUT] &thread_info of "current"
  296. */
  297. .macro GET_CURR_THR_INFO_FROM_SP reg
  298. bic \reg, sp, (THREAD_SIZE - 1)
  299. .endm
  300. /*
  301. * @reg [OUT] thread_info->flags of "current"
  302. */
  303. .macro GET_CURR_THR_INFO_FLAGS reg
  304. GET_CURR_THR_INFO_FROM_SP \reg
  305. ld \reg, [\reg, THREAD_INFO_FLAGS]
  306. .endm
  307. /*--------------------------------------------------------------
  308. * For early Exception Prologue, a core reg is temporarily needed to
  309. * code the rest of prolog (stack switching). This is done by stashing
  310. * it to memory (non-SMP case) or SCRATCH0 Aux Reg (SMP).
  311. *
  312. * Before saving the full regfile - this reg is restored back, only
  313. * to be saved again on kernel mode stack, as part of ptregs.
  314. *-------------------------------------------------------------*/
  315. .macro EXCPN_PROLOG_FREEUP_REG reg
  316. #ifdef CONFIG_SMP
  317. sr \reg, [ARC_REG_SCRATCH_DATA0]
  318. #else
  319. st \reg, [@ex_saved_reg1]
  320. #endif
  321. .endm
  322. .macro EXCPN_PROLOG_RESTORE_REG reg
  323. #ifdef CONFIG_SMP
  324. lr \reg, [ARC_REG_SCRATCH_DATA0]
  325. #else
  326. ld \reg, [@ex_saved_reg1]
  327. #endif
  328. .endm
  329. /*--------------------------------------------------------------
  330. * Save all registers used by Exceptions (TLB Miss, Prot-V, Mem err etc)
  331. * Requires SP to be already switched to kernel mode Stack
  332. * sp points to the next free element on the stack at exit of this macro.
  333. * Registers are pushed / popped in the order defined in struct ptregs
  334. * in asm/ptrace.h
  335. * Note that syscalls are implemented via TRAP which is also a exception
  336. * from CPU's point of view
  337. *-------------------------------------------------------------*/
  338. .macro SAVE_ALL_EXCEPTION marker
  339. st \marker, [sp, 8] /* orig_r8 */
  340. st r0, [sp, 4] /* orig_r0, needed only for sys calls */
  341. /* Restore r9 used to code the early prologue */
  342. EXCPN_PROLOG_RESTORE_REG r9
  343. SAVE_R0_TO_R12
  344. PUSH gp
  345. PUSH fp
  346. PUSH blink
  347. PUSHAX eret
  348. PUSHAX erstatus
  349. PUSH lp_count
  350. PUSHAX lp_end
  351. PUSHAX lp_start
  352. PUSHAX erbta
  353. .endm
  354. /*--------------------------------------------------------------
  355. * Save scratch regs for exceptions
  356. *-------------------------------------------------------------*/
  357. .macro SAVE_ALL_SYS
  358. SAVE_ALL_EXCEPTION orig_r8_IS_EXCPN
  359. .endm
  360. /*--------------------------------------------------------------
  361. * Save scratch regs for sys calls
  362. *-------------------------------------------------------------*/
  363. .macro SAVE_ALL_TRAP
  364. /*
  365. * Setup pt_regs->orig_r8.
  366. * Encode syscall number (r8) in upper short word of event type (r9)
  367. * N.B. #1: This is already endian safe (see ptrace.h)
  368. * #2: Only r9 can be used as scratch as it is already clobbered
  369. * and it's contents are no longer needed by the latter part
  370. * of exception prologue
  371. */
  372. lsl r9, r8, 16
  373. or r9, r9, orig_r8_IS_SCALL
  374. SAVE_ALL_EXCEPTION r9
  375. .endm
  376. /*--------------------------------------------------------------
  377. * Restore all registers used by system call or Exceptions
  378. * SP should always be pointing to the next free stack element
  379. * when entering this macro.
  380. *
  381. * NOTE:
  382. *
  383. * It is recommended that lp_count/ilink1/ilink2 not be used as a dest reg
  384. * for memory load operations. If used in that way interrupts are deffered
  385. * by hardware and that is not good.
  386. *-------------------------------------------------------------*/
  387. .macro RESTORE_ALL_SYS
  388. POPAX erbta
  389. POPAX lp_start
  390. POPAX lp_end
  391. POP r9
  392. mov lp_count, r9 ;LD to lp_count is not allowed
  393. POPAX erstatus
  394. POPAX eret
  395. POP blink
  396. POP fp
  397. POP gp
  398. RESTORE_R12_TO_R0
  399. ld sp, [sp] /* restore original sp */
  400. /* orig_r0, orig_r8, user_r25 skipped automatically */
  401. .endm
  402. /*--------------------------------------------------------------
  403. * Save all registers used by interrupt handlers.
  404. *-------------------------------------------------------------*/
  405. .macro SAVE_ALL_INT1
  406. /* restore original r9 to be saved as part of reg-file */
  407. #ifdef CONFIG_SMP
  408. lr r9, [ARC_REG_SCRATCH_DATA0]
  409. #else
  410. ld r9, [@int1_saved_reg]
  411. #endif
  412. /* now we are ready to save the remaining context :) */
  413. st orig_r8_IS_IRQ1, [sp, 8] /* Event Type */
  414. st 0, [sp, 4] /* orig_r0 , N/A for IRQ */
  415. SAVE_R0_TO_R12
  416. PUSH gp
  417. PUSH fp
  418. PUSH blink
  419. PUSH ilink1
  420. PUSHAX status32_l1
  421. PUSH lp_count
  422. PUSHAX lp_end
  423. PUSHAX lp_start
  424. PUSHAX bta_l1
  425. .endm
  426. .macro SAVE_ALL_INT2
  427. /* TODO-vineetg: SMP we can't use global nor can we use
  428. * SCRATCH0 as we do for int1 because while int1 is using
  429. * it, int2 can come
  430. */
  431. /* retsore original r9 , saved in sys_saved_r9 */
  432. ld r9, [@int2_saved_reg]
  433. /* now we are ready to save the remaining context :) */
  434. st orig_r8_IS_IRQ2, [sp, 8] /* Event Type */
  435. st 0, [sp, 4] /* orig_r0 , N/A for IRQ */
  436. SAVE_R0_TO_R12
  437. PUSH gp
  438. PUSH fp
  439. PUSH blink
  440. PUSH ilink2
  441. PUSHAX status32_l2
  442. PUSH lp_count
  443. PUSHAX lp_end
  444. PUSHAX lp_start
  445. PUSHAX bta_l2
  446. .endm
  447. /*--------------------------------------------------------------
  448. * Restore all registers used by interrupt handlers.
  449. *
  450. * NOTE:
  451. *
  452. * It is recommended that lp_count/ilink1/ilink2 not be used as a dest reg
  453. * for memory load operations. If used in that way interrupts are deffered
  454. * by hardware and that is not good.
  455. *-------------------------------------------------------------*/
  456. .macro RESTORE_ALL_INT1
  457. POPAX bta_l1
  458. POPAX lp_start
  459. POPAX lp_end
  460. POP r9
  461. mov lp_count, r9 ;LD to lp_count is not allowed
  462. POPAX status32_l1
  463. POP ilink1
  464. POP blink
  465. POP fp
  466. POP gp
  467. RESTORE_R12_TO_R0
  468. ld sp, [sp] /* restore original sp */
  469. /* orig_r0, orig_r8, user_r25 skipped automatically */
  470. .endm
  471. .macro RESTORE_ALL_INT2
  472. POPAX bta_l2
  473. POPAX lp_start
  474. POPAX lp_end
  475. POP r9
  476. mov lp_count, r9 ;LD to lp_count is not allowed
  477. POPAX status32_l2
  478. POP ilink2
  479. POP blink
  480. POP fp
  481. POP gp
  482. RESTORE_R12_TO_R0
  483. ld sp, [sp] /* restore original sp */
  484. /* orig_r0, orig_r8, user_r25 skipped automatically */
  485. .endm
  486. /* Get CPU-ID of this core */
  487. .macro GET_CPU_ID reg
  488. lr \reg, [identity]
  489. lsr \reg, \reg, 8
  490. bmsk \reg, \reg, 7
  491. .endm
  492. #ifdef CONFIG_SMP
  493. /*-------------------------------------------------
  494. * Retrieve the current running task on this CPU
  495. * 1. Determine curr CPU id.
  496. * 2. Use it to index into _current_task[ ]
  497. */
  498. .macro GET_CURR_TASK_ON_CPU reg
  499. GET_CPU_ID \reg
  500. ld.as \reg, [@_current_task, \reg]
  501. .endm
  502. /*-------------------------------------------------
  503. * Save a new task as the "current" task on this CPU
  504. * 1. Determine curr CPU id.
  505. * 2. Use it to index into _current_task[ ]
  506. *
  507. * Coded differently than GET_CURR_TASK_ON_CPU (which uses LD.AS)
  508. * because ST r0, [r1, offset] can ONLY have s9 @offset
  509. * while LD can take s9 (4 byte insn) or LIMM (8 byte insn)
  510. */
  511. .macro SET_CURR_TASK_ON_CPU tsk, tmp
  512. GET_CPU_ID \tmp
  513. add2 \tmp, @_current_task, \tmp
  514. st \tsk, [\tmp]
  515. #ifdef CONFIG_ARC_CURR_IN_REG
  516. mov r25, \tsk
  517. #endif
  518. .endm
  519. #else /* Uniprocessor implementation of macros */
  520. .macro GET_CURR_TASK_ON_CPU reg
  521. ld \reg, [@_current_task]
  522. .endm
  523. .macro SET_CURR_TASK_ON_CPU tsk, tmp
  524. st \tsk, [@_current_task]
  525. #ifdef CONFIG_ARC_CURR_IN_REG
  526. mov r25, \tsk
  527. #endif
  528. .endm
  529. #endif /* SMP / UNI */
  530. /* ------------------------------------------------------------------
  531. * Get the ptr to some field of Current Task at @off in task struct
  532. * -Uses r25 for Current task ptr if that is enabled
  533. */
  534. #ifdef CONFIG_ARC_CURR_IN_REG
  535. .macro GET_CURR_TASK_FIELD_PTR off, reg
  536. add \reg, r25, \off
  537. .endm
  538. #else
  539. .macro GET_CURR_TASK_FIELD_PTR off, reg
  540. GET_CURR_TASK_ON_CPU \reg
  541. add \reg, \reg, \off
  542. .endm
  543. #endif /* CONFIG_ARC_CURR_IN_REG */
#endif  /* __ASSEMBLY__ */

#endif  /* __ASM_ARC_ENTRY_H */