/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Vineetg: March 2009 (Supporting 2 levels of Interrupts)
 *  Stack switching code can no longer rely on the assumption that if we are
 *  NOT in user mode, the stack has already been switched to kernel mode;
 *  e.g. an L2 IRQ can interrupt an L1 ISR which has not yet completed
 *  its prologue, including the stack switch from user mode
 *
 * Vineetg: Aug 28th 2008: Bug #94984
 *  -Zero Overhead Loop Context should be cleared when entering IRQ/Excp/Trap
 *   Normally the CPU does this automatically; however when doing a FAKE rtie,
 *   we also need to do this explicitly. The problem in the macros
 *   FAKE_RET_FROM_EXCPN and FAKE_RET_FROM_EXCPN_LOCK_IRQ was that this bit
 *   was being "CLEARED" rather than "SET". Actually "SET" clears the ZOL context
 *
 * Vineetg: May 5th 2008
 *  -Modified CALLEE_REG save/restore macros to handle the fact that
 *   r25 contains the kernel current task ptr
 *  -Defined a Stack Switching Macro to be reused in all intr/excp handlers
 *  -Shaved off 11 instructions from RESTORE_ALL_INT1 by using the
 *   address write-back load (ld.ab) instead of separate ld/add instructions
 *
 * Amit Bhor, Sameer Dhavale: Codito Technologies 2004
 */
#ifndef __ASM_ARC_ENTRY_H
#define __ASM_ARC_ENTRY_H

#ifdef __ASSEMBLY__
#include <asm/unistd.h>		/* For NR_syscalls definition */
#include <asm/asm-offsets.h>
#include <asm/arcregs.h>
#include <asm/ptrace.h>
#include <asm/processor.h>	/* For VMALLOC_START */
#include <asm/thread_info.h>	/* For THREAD_SIZE */
/* Note on the LD/ST addr modes with addr reg wback
 *
 * LD.a same as LD.aw
 *
 * LD.a    reg1, [reg2, x]  => Pre Incr
 *	Eff Addr for load = [reg2 + x]
 *
 * LD.ab   reg1, [reg2, x]  => Post Incr
 *	Eff Addr for load = [reg2]
 */
.macro PUSH reg
	st.a	\reg, [sp, -4]
.endm

.macro PUSHAX aux
	lr	r9, [\aux]
	PUSH	r9
.endm

.macro POP reg
	ld.ab	\reg, [sp, 4]
.endm

.macro POPAX aux
	POP	r9
	sr	r9, [\aux]
.endm
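
/* Example expansions of the helpers above:
 *   PUSH r13  ->  st.a  r13, [sp, -4]   ; SP pre-decremented by 4, then store
 *   POP  r13  ->  ld.ab r13, [sp, 4]    ; load via SP, then SP post-incremented
 * i.e. a full-descending stack built from the write-back addressing modes
 * described in the note above.
 */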
/*--------------------------------------------------------------
 * Helpers to save/restore Scratch Regs:
 * used by Interrupt/Exception Prologue/Epilogue
 *-------------------------------------------------------------*/
.macro SAVE_R0_TO_R12
	PUSH	r0
	PUSH	r1
	PUSH	r2
	PUSH	r3
	PUSH	r4
	PUSH	r5
	PUSH	r6
	PUSH	r7
	PUSH	r8
	PUSH	r9
	PUSH	r10
	PUSH	r11
	PUSH	r12
.endm

.macro RESTORE_R12_TO_R0
	POP	r12
	POP	r11
	POP	r10
	POP	r9
	POP	r8
	POP	r7
	POP	r6
	POP	r5
	POP	r4
	POP	r3
	POP	r2
	POP	r1
	POP	r0

#ifdef CONFIG_ARC_CURR_IN_REG
	ld	r25, [sp, 12]
#endif
.endm
/*--------------------------------------------------------------
 * Helpers to save/restore callee-saved regs:
 * used by several macros below
 *-------------------------------------------------------------*/
.macro SAVE_R13_TO_R24
	PUSH	r13
	PUSH	r14
	PUSH	r15
	PUSH	r16
	PUSH	r17
	PUSH	r18
	PUSH	r19
	PUSH	r20
	PUSH	r21
	PUSH	r22
	PUSH	r23
	PUSH	r24
.endm

.macro RESTORE_R24_TO_R13
	POP	r24
	POP	r23
	POP	r22
	POP	r21
	POP	r20
	POP	r19
	POP	r18
	POP	r17
	POP	r16
	POP	r15
	POP	r14
	POP	r13
.endm
#define OFF_USER_R25_FROM_R24	(SZ_CALLEE_REGS + SZ_PT_REGS - 8)/4
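
/* Word offset (the /4 matches the 32-bit scaling applied by the ld.as/st.as
 * users below) from SP, after r13-r24 have been pushed, up to the user_r25
 * word at the very top of pt_regs. The "-8" falls out of that layout: only 12
 * of the 13 callee_regs words are below SP at that point, and user_r25 is the
 * last word of pt_regs rather than one past it. The exact frame layout is
 * dictated by struct pt_regs/callee_regs via asm-offsets.
 */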
/*--------------------------------------------------------------
 * Collect User Mode callee regs as struct callee_regs - needed by
 * fork/do_signal/unaligned-access-emulation.
 * (By default only scratch regs are saved on entry to kernel)
 *
 * Special handling for r25 if used for caching Task Pointer.
 * It would have been saved in task->thread.user_r25 already, but to keep
 * the interface the same it is copied into the regular r25 placeholder in
 * struct callee_regs.
 *-------------------------------------------------------------*/
.macro SAVE_CALLEE_SAVED_USER
	SAVE_R13_TO_R24

#ifdef CONFIG_ARC_CURR_IN_REG
	; Retrieve orig r25 and save it on stack
	ld.as	r12, [sp, OFF_USER_R25_FROM_R24]
	st.a	r12, [sp, -4]
#else
	PUSH	r25
#endif
.endm
/*--------------------------------------------------------------
 * Save kernel Mode callee regs at the time of Context Switch.
 *
 * Special handling for r25 if used for caching Task Pointer.
 * Kernel simply skips saving it since it will be loaded with the
 * incoming task pointer anyway
 *-------------------------------------------------------------*/
.macro SAVE_CALLEE_SAVED_KERNEL
	SAVE_R13_TO_R24

#ifdef CONFIG_ARC_CURR_IN_REG
	sub	sp, sp, 4
#else
	PUSH	r25
#endif
.endm
/*--------------------------------------------------------------
 * Opposite of SAVE_CALLEE_SAVED_KERNEL
 *-------------------------------------------------------------*/
.macro RESTORE_CALLEE_SAVED_KERNEL

#ifdef CONFIG_ARC_CURR_IN_REG
	add	sp, sp, 4	/* skip usual r25 placeholder */
#else
	POP	r25
#endif
	RESTORE_R24_TO_R13
.endm

/*--------------------------------------------------------------
 * Opposite of SAVE_CALLEE_SAVED_USER
 *
 * A ptrace tracer or the unaligned-access fixup might have changed a user
 * mode callee reg, which is written back to its usual r25 storage location
 *-------------------------------------------------------------*/
.macro RESTORE_CALLEE_SAVED_USER

#ifdef CONFIG_ARC_CURR_IN_REG
	ld.ab	r12, [sp, 4]
	st.as	r12, [sp, OFF_USER_R25_FROM_R24]
#else
	POP	r25
#endif
	RESTORE_R24_TO_R13
.endm
/*--------------------------------------------------------------
 * Super FAST Restore callee saved regs by simply re-adjusting SP
 *-------------------------------------------------------------*/
.macro DISCARD_CALLEE_SAVED_USER
	add	sp, sp, SZ_CALLEE_REGS
.endm
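
/* SZ_CALLEE_REGS covers the 13 words pushed by SAVE_CALLEE_SAVED_* above
 * (r13-r24 plus the r25 placeholder), so the single add drops the whole
 * callee_regs frame without reloading anything.
 */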
/*-------------------------------------------------------------
 * given a tsk struct, get to the base of its kernel mode stack
 * tsk->thread_info is really a PAGE: thread_info sits at its bottom (lowest
 * address) while the stack begins at the top of the page and grows down
 * towards thread_info
 *------------------------------------------------------------*/
.macro GET_TSK_STACK_BASE tsk, out

	/* Get task->thread_info (this is essentially start of a PAGE) */
	ld	\out, [\tsk, TASK_THREAD_INFO]

	/* Go to end of page, which is where the stack begins */
	add2	\out, \out, (THREAD_SIZE)/4
.endm
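
/* add2 scales its last operand by 4 (out += (THREAD_SIZE/4) << 2), so the
 * macro effectively computes thread_info + THREAD_SIZE, i.e. the byte just
 * past the page, which is where the kernel mode SP starts out.
 */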
/*--------------------------------------------------------------
 * Switch to Kernel Mode stack if SP points to User Mode stack
 *
 * Entry   : r9 contains pre-IRQ/exception/trap status32
 * Exit    : SP is set to kernel mode stack pointer
 *           If CURR_IN_REG, r25 set to "current" task pointer
 * Clobbers: r9
 *-------------------------------------------------------------*/
.macro SWITCH_TO_KERNEL_STK

	/* User Mode when this happened? Yes: proceed to switch stack */
	bbit1	r9, STATUS_U_BIT, 88f

	/* OK we were already in kernel mode when this event happened, thus can
	 * assume SP is kernel mode SP. _NO_ need to do any stack switching
	 */

#ifdef CONFIG_ARC_COMPACT_IRQ_LEVELS
	/* However....
	 * If Level 2 interrupts are enabled, we may end up with a corner case:
	 * 1. User Task executing
	 * 2. L1 IRQ taken, ISR starts (CPU auto-switched to KERNEL mode)
	 * 3. But before it could switch SP from USER to KERNEL stack
	 *    an L2 IRQ "interrupts" L1
	 * That way, although the L2 IRQ happened in kernel mode, the stack is
	 * still not switched.
	 * To handle this, we may need to switch stack even if in kernel mode
	 * provided SP has values in range of USER mode stack (< 0x7000_0000)
	 */
	brlo	sp, VMALLOC_START, 88f

	/* TODO: vineetg:
	 * We need to be a bit more cautious here. What if a kernel bug in the
	 * L1 ISR caused SP to go whacko (some small value which looks like a
	 * USER stk) and then we take the L2 ISR.
	 * The brlo above alone would treat it as a valid L1-L2 scenario
	 * instead of shouting out loud.
	 * The only feasible way is to make sure this L2 happened in the
	 * L1 prologue ONLY, i.e. ilink2 is less than a pre-set marker in the
	 * L1 ISR before it switches stack
	 */
#endif

	/* Save Pre Intr/Exception KERNEL MODE SP on kernel stack
	 * safe-keeping not really needed, but it keeps the epilogue code
	 * (SP restore) simpler/uniform.
	 */
	b.d	66f
	mov	r9, sp

88: /*------ Intr/Excp happened in user mode, "switch" stack ------ */

	GET_CURR_TASK_ON_CPU   r9

	/* With current tsk in r9, get its kernel mode stack base */
	GET_TSK_STACK_BASE  r9, r9

66:
#ifdef CONFIG_ARC_CURR_IN_REG
	/*
	 * Treat r25 as scratch reg, save it on stack first
	 * Load it with current task pointer
	 */
	st	r25, [r9, -4]
	GET_CURR_TASK_ON_CPU   r25
#endif

	/* Save Pre Intr/Exception User SP on kernel stack */
	st.a	sp, [r9, -16]	; Make room for orig_r0, ECR, user_r25

	/* CAUTION:
	 * SP should be set at the very end when we are done with everything
	 * In case of 2 levels of interrupt we depend on the value of SP to
	 * assume that everything else is done (loading r25 etc)
	 */

	/* set SP to point to kernel mode stack */
	mov	sp, r9

	/* ----- Stack Switched to kernel Mode, Now save REG FILE ----- */
.endm
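
/* For illustration only: an exception vector in entry.S would typically
 * combine the pieces above roughly as (sketch, see the actual handlers):
 *
 *	EXCPN_PROLOG_FREEUP_REG r9	; free up r9 to a scratch location
 *	lr	r9, [erstatus]		; pre-exception STATUS32 for the U check
 *	SWITCH_TO_KERNEL_STK		; SP now on the kernel mode stack
 *	SAVE_ALL_SYS			; build the pt_regs frame
 */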
/*------------------------------------------------------------
 * "FAKE" an rtie to return from CPU Exception context
 * This is to re-enable Exceptions within an exception handler
 * Look at EV_ProtV to see how this is actually used
 *-------------------------------------------------------------*/
.macro FAKE_RET_FROM_EXCPN  reg

	ld	\reg, [sp, PT_status32]
	bic	\reg, \reg, (STATUS_U_MASK|STATUS_DE_MASK)
	bset	\reg, \reg, STATUS_L_BIT
	sr	\reg, [erstatus]
	mov	\reg, 55f
	sr	\reg, [eret]

	rtie
55:
.endm
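
/* What the fake rtie achieves: ERSTATUS is built from the saved STATUS32 with
 * U (user mode) and DE (delay slot) cleared and L set (which clears the ZOL
 * context, per the Aug 2008 note above), and ERET points at local label 55.
 * The rtie therefore "returns" to the very next instruction, still in kernel
 * mode, but no longer in exception state, so further exceptions can be taken.
 */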
/*
 * @reg [OUT] &thread_info of "current"
 */
.macro GET_CURR_THR_INFO_FROM_SP  reg
	bic	\reg, sp, (THREAD_SIZE - 1)
.endm
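
/* This works because the kernel stack and thread_info share one THREAD_SIZE
 * aligned region (see GET_TSK_STACK_BASE above), so masking off the low bits
 * of the kernel SP lands on the base of that region, i.e. on thread_info.
 */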
/*
 * @reg [OUT] thread_info->flags of "current"
 */
.macro GET_CURR_THR_INFO_FLAGS  reg
	GET_CURR_THR_INFO_FROM_SP  \reg
	ld	\reg, [\reg, THREAD_INFO_FLAGS]
.endm
/*--------------------------------------------------------------
 * For early Exception Prologue, a core reg is temporarily needed to
 * code the rest of the prologue (stack switching). This is done by stashing
 * it to memory (non-SMP case) or the SCRATCH0 Aux Reg (SMP).
 *
 * Before saving the full regfile - this reg is restored back, only
 * to be saved again on the kernel mode stack, as part of pt_regs.
 *-------------------------------------------------------------*/
.macro EXCPN_PROLOG_FREEUP_REG	reg
#ifdef CONFIG_SMP
	sr	\reg, [ARC_REG_SCRATCH_DATA0]
#else
	st	\reg, [@ex_saved_reg1]
#endif
.endm

.macro EXCPN_PROLOG_RESTORE_REG	reg
#ifdef CONFIG_SMP
	lr	\reg, [ARC_REG_SCRATCH_DATA0]
#else
	ld	\reg, [@ex_saved_reg1]
#endif
.endm
/*--------------------------------------------------------------
 * Save all registers used by Exceptions (TLB Miss, Prot-V, Mem err etc)
 * Requires SP to be already switched to kernel mode Stack
 * sp points to the next free element on the stack at exit of this macro.
 * Registers are pushed / popped in the order defined in struct pt_regs
 * in asm/ptrace.h
 * Note that syscalls are implemented via TRAP which is also an exception
 * from the CPU's point of view
 *-------------------------------------------------------------*/
.macro SAVE_ALL_SYS

	lr	r9, [ecr]
	st	r9, [sp, 8]	/* ECR */
	st	r0, [sp, 4]	/* orig_r0, needed only for sys calls */

	/* Restore r9 used to code the early prologue */
	EXCPN_PROLOG_RESTORE_REG  r9

	SAVE_R0_TO_R12
	PUSH	gp
	PUSH	fp
	PUSH	blink
	PUSHAX	eret
	PUSHAX	erstatus
	PUSH	lp_count
	PUSHAX	lp_end
	PUSHAX	lp_start
	PUSHAX	erbta
.endm
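
/* The [sp, 4] / [sp, 8] stores above rely on SWITCH_TO_KERNEL_STK having just
 * done "st.a sp, [r9, -16]": at this point [sp, 0] holds the pre-event SP,
 * [sp, 4] is the orig_r0 slot, [sp, 8] the ECR slot and [sp, 12] the user_r25
 * slot (the one RESTORE_R12_TO_R0 reloads r25 from).
 */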
/*--------------------------------------------------------------
 * Restore all registers used by system call or Exceptions
 * SP should always be pointing to the next free stack element
 * when entering this macro.
 *
 * NOTE:
 *
 * It is recommended that lp_count/ilink1/ilink2 not be used as a dest reg
 * for memory load operations. If used in that way interrupts are deferred
 * by hardware and that is not good.
 *-------------------------------------------------------------*/
.macro RESTORE_ALL_SYS

	POPAX	erbta
	POPAX	lp_start
	POPAX	lp_end
	POP	r9
	mov	lp_count, r9	; LD to lp_count is not allowed
	POPAX	erstatus
	POPAX	eret
	POP	blink
	POP	fp
	POP	gp
	RESTORE_R12_TO_R0

	ld	sp, [sp]	/* restore original sp */
	/* orig_r0, ECR, user_r25 skipped automatically */
.endm
/*--------------------------------------------------------------
 * Save all registers used by interrupt handlers.
 *-------------------------------------------------------------*/
.macro SAVE_ALL_INT1

	/* restore original r9 to be saved as part of reg-file */
#ifdef CONFIG_SMP
	lr	r9, [ARC_REG_SCRATCH_DATA0]
#else
	ld	r9, [@int1_saved_reg]
#endif

	/* now we are ready to save the remaining context :) */
	st	event_IRQ1, [sp, 8]	/* Dummy ECR */
	st	0, [sp, 4]		/* orig_r0, N/A for IRQ */
	SAVE_R0_TO_R12
	PUSH	gp
	PUSH	fp
	PUSH	blink
	PUSH	ilink1
	PUSHAX	status32_l1
	PUSH	lp_count
	PUSHAX	lp_end
	PUSHAX	lp_start
	PUSHAX	bta_l1
.endm
.macro SAVE_ALL_INT2

	/* TODO-vineetg: in SMP we can't use a global, nor can we use
	 * SCRATCH0 as we do for int1, because while int1 is using it
	 * an int2 can come in
	 */

	/* restore original r9, saved in @int2_saved_reg */
	ld	r9, [@int2_saved_reg]

	/* now we are ready to save the remaining context :) */
	st	event_IRQ2, [sp, 8]	/* Dummy ECR */
	st	0, [sp, 4]		/* orig_r0, N/A for IRQ */
	SAVE_R0_TO_R12
	PUSH	gp
	PUSH	fp
	PUSH	blink
	PUSH	ilink2
	PUSHAX	status32_l2
	PUSH	lp_count
	PUSHAX	lp_end
	PUSHAX	lp_start
	PUSHAX	bta_l2
.endm
/*--------------------------------------------------------------
 * Restore all registers used by interrupt handlers.
 *
 * NOTE:
 *
 * It is recommended that lp_count/ilink1/ilink2 not be used as a dest reg
 * for memory load operations. If used in that way interrupts are deferred
 * by hardware and that is not good.
 *-------------------------------------------------------------*/
.macro RESTORE_ALL_INT1

	POPAX	bta_l1
	POPAX	lp_start
	POPAX	lp_end
	POP	r9
	mov	lp_count, r9	; LD to lp_count is not allowed
	POPAX	status32_l1
	POP	ilink1
	POP	blink
	POP	fp
	POP	gp
	RESTORE_R12_TO_R0

	ld	sp, [sp]	/* restore original sp */
	/* orig_r0, ECR, user_r25 skipped automatically */
.endm
.macro RESTORE_ALL_INT2

	POPAX	bta_l2
	POPAX	lp_start
	POPAX	lp_end
	POP	r9
	mov	lp_count, r9	; LD to lp_count is not allowed
	POPAX	status32_l2
	POP	ilink2
	POP	blink
	POP	fp
	POP	gp
	RESTORE_R12_TO_R0

	ld	sp, [sp]	/* restore original sp */
	/* orig_r0, ECR, user_r25 skipped automatically */
.endm
/* Get CPU-ID of this core */
.macro  GET_CPU_ID  reg
	lr	\reg, [identity]
	lsr	\reg, \reg, 8
	bmsk	\reg, \reg, 7
.endm
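
/* The lsr/bmsk pair isolates bits [15:8] of the IDENTITY aux register, i.e.
 * the 8-bit core number, leaving it right-justified in \reg for use as an
 * index into per-cpu data such as _current_task[] below.
 */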
#ifdef CONFIG_SMP

/*-------------------------------------------------
 * Retrieve the current running task on this CPU
 * 1. Determine curr CPU id.
 * 2. Use it to index into _current_task[ ]
 */
.macro  GET_CURR_TASK_ON_CPU   reg
	GET_CPU_ID  \reg
	ld.as	\reg, [@_current_task, \reg]
.endm

/*-------------------------------------------------
 * Save a new task as the "current" task on this CPU
 * 1. Determine curr CPU id.
 * 2. Use it to index into _current_task[ ]
 *
 * Coded differently than GET_CURR_TASK_ON_CPU (which uses LD.AS)
 * because ST r0, [r1, offset] can ONLY have s9 @offset
 * while LD can take s9 (4 byte insn) or LIMM (8 byte insn)
 */
.macro  SET_CURR_TASK_ON_CPU    tsk, tmp
	GET_CPU_ID  \tmp
	add2	\tmp, @_current_task, \tmp
	st	\tsk, [\tmp]
#ifdef CONFIG_ARC_CURR_IN_REG
	mov	r25, \tsk
#endif
.endm
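
/* ld.as above scales the cpu-id by 4 automatically; the store path gets the
 * same effect explicitly via add2 (tmp = @_current_task + (cpu_id << 2)),
 * so both address &_current_task[cpu_id].
 */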
#else	/* Uniprocessor implementation of macros */

.macro  GET_CURR_TASK_ON_CPU    reg
	ld	\reg, [@_current_task]
.endm

.macro  SET_CURR_TASK_ON_CPU    tsk, tmp
	st	\tsk, [@_current_task]
#ifdef CONFIG_ARC_CURR_IN_REG
	mov	r25, \tsk
#endif
.endm

#endif /* SMP / UNI */
/* ------------------------------------------------------------------
 * Get the ptr to some field of Current Task at @off in task struct
 *  -Uses r25 for Current task ptr if that is enabled
 */
#ifdef CONFIG_ARC_CURR_IN_REG

.macro GET_CURR_TASK_FIELD_PTR  off,  reg
	add	\reg, r25, \off
.endm

#else

.macro GET_CURR_TASK_FIELD_PTR  off,  reg
	GET_CURR_TASK_ON_CPU  \reg
	add	\reg, \reg, \off
.endm

#endif	/* CONFIG_ARC_CURR_IN_REG */

#endif	/* __ASSEMBLY__ */

#endif	/* __ASM_ARC_ENTRY_H */