entry.S 30 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999
  1. /*
  2. * Low-level system-call handling, trap handlers and context-switching
  3. *
  4. * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
  5. * Copyright (C) 2008-2009 PetaLogix
  6. * Copyright (C) 2003 John Williams <jwilliams@itee.uq.edu.au>
  7. * Copyright (C) 2001,2002 NEC Corporation
  8. * Copyright (C) 2001,2002 Miles Bader <miles@gnu.org>
  9. *
  10. * This file is subject to the terms and conditions of the GNU General
  11. * Public License. See the file COPYING in the main directory of this
  12. * archive for more details.
  13. *
  14. * Written by Miles Bader <miles@gnu.org>
  15. * Heavily modified by John Williams for Microblaze
  16. */
  17. #include <linux/sys.h>
  18. #include <linux/linkage.h>
  19. #include <asm/entry.h>
  20. #include <asm/current.h>
  21. #include <asm/processor.h>
  22. #include <asm/exceptions.h>
  23. #include <asm/asm-offsets.h>
  24. #include <asm/thread_info.h>
  25. #include <asm/page.h>
  26. #include <asm/unistd.h>
  27. #include <linux/errno.h>
  28. #include <asm/signal.h>
#undef DEBUG
/* The size of a state save frame: struct pt_regs plus the argument space. */
#define STATE_SAVE_SIZE (PT_SIZE + STATE_SAVE_ARG_SPACE)
/* The offset of the struct pt_regs in a `state save frame' on the stack. */
#define PTO STATE_SAVE_ARG_SPACE /* 24 - the space reserved for args */
/* Declare a global, 4-byte-aligned entry point. */
#define C_ENTRY(name) .globl name; .align 4; name
/*
* Various ways of setting and clearing BIP in flags reg.
* This is mucky, but necessary using microblaze version that
* allows msr ops to write to BIP
*/
#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
/* CPU provides msrset/msrclr: MSR bits are changed directly, no scratch
 * register is clobbered (result discarded into r0). */
.macro clear_bip
msrclr r0, MSR_BIP
.endm
.macro set_bip
msrset r0, MSR_BIP
.endm
.macro clear_eip
msrclr r0, MSR_EIP
.endm
.macro set_ee
msrset r0, MSR_EE
.endm
.macro disable_irq
msrclr r0, MSR_IE
.endm
.macro enable_irq
msrset r0, MSR_IE
.endm
.macro set_ums
msrset r0, MSR_UMS
msrclr r0, MSR_VMS
.endm
.macro set_vms
msrclr r0, MSR_UMS
msrset r0, MSR_VMS
.endm
.macro clear_ums
msrclr r0, MSR_UMS
.endm
.macro clear_vms_ums
msrclr r0, MSR_VMS | MSR_UMS
.endm
#else
/* No MSR instructions configured: emulate with read-modify-write of rmsr.
 * NOTE: every macro in this variant clobbers r11. */
.macro clear_bip
mfs r11, rmsr
andi r11, r11, ~MSR_BIP
mts rmsr, r11
.endm
.macro set_bip
mfs r11, rmsr
ori r11, r11, MSR_BIP
mts rmsr, r11
.endm
.macro clear_eip
mfs r11, rmsr
andi r11, r11, ~MSR_EIP
mts rmsr, r11
.endm
.macro set_ee
mfs r11, rmsr
ori r11, r11, MSR_EE
mts rmsr, r11
.endm
.macro disable_irq
mfs r11, rmsr
andi r11, r11, ~MSR_IE
mts rmsr, r11
.endm
.macro enable_irq
mfs r11, rmsr
ori r11, r11, MSR_IE
mts rmsr, r11
.endm
/* NOTE(review): this variant sets VMS and clears UMS, i.e. it is identical
 * to set_vms below, while the msr-instruction variant of set_ums does the
 * opposite (sets UMS, clears VMS) — confirm which behavior is intended. */
.macro set_ums
mfs r11, rmsr
ori r11, r11, MSR_VMS
andni r11, r11, MSR_UMS
mts rmsr, r11
.endm
.macro set_vms
mfs r11, rmsr
ori r11, r11, MSR_VMS
andni r11, r11, MSR_UMS
mts rmsr, r11
.endm
.macro clear_ums
mfs r11, rmsr
andni r11, r11, MSR_UMS
mts rmsr,r11
.endm
.macro clear_vms_ums
mfs r11, rmsr
andni r11, r11, (MSR_VMS|MSR_UMS)
mts rmsr,r11
.endm
#endif
/* Define how to call high-level functions. With MMU, virtual mode must be
* enabled when calling the high-level function. Clobbers R11.
* VM_ON, VM_OFF, DO_JUMP_BIPCLR, DO_CALL
*/
/* turn on virtual protected mode save
* (the rted return latches the *MS save bits into the live UM/VM bits) */
#define VM_ON \
set_ums; \
rted r0, 2f; \
nop; \
2:
/* turn off virtual protected mode save and user mode save;
* rted continues at the physical address of the next line */
#define VM_OFF \
clear_vms_ums; \
rted r0, TOPHYS(1f); \
nop; \
1:
/* Store r2-r31 and the MSR into the pt_regs frame addressed by r1 (+PTO).
 * r1 itself and r0 are NOT stored here - callers store PT_R1/PT_R0 (and
 * overwrite PT_PC) as appropriate. r11 is saved before being reused as the
 * scratch register for the MSR read at the end. */
#define SAVE_REGS \
swi r2, r1, PTO+PT_R2; /* Save SDA */ \
swi r3, r1, PTO+PT_R3; \
swi r4, r1, PTO+PT_R4; \
swi r5, r1, PTO+PT_R5; \
swi r6, r1, PTO+PT_R6; \
swi r7, r1, PTO+PT_R7; \
swi r8, r1, PTO+PT_R8; \
swi r9, r1, PTO+PT_R9; \
swi r10, r1, PTO+PT_R10; \
swi r11, r1, PTO+PT_R11; /* save clobbered regs after rval */\
swi r12, r1, PTO+PT_R12; \
swi r13, r1, PTO+PT_R13; /* Save SDA2 */ \
swi r14, r1, PTO+PT_PC; /* PC, before IRQ/trap */ \
swi r15, r1, PTO+PT_R15; /* Save LP */ \
swi r16, r1, PTO+PT_R16; \
swi r17, r1, PTO+PT_R17; \
swi r18, r1, PTO+PT_R18; /* Save asm scratch reg */ \
swi r19, r1, PTO+PT_R19; \
swi r20, r1, PTO+PT_R20; \
swi r21, r1, PTO+PT_R21; \
swi r22, r1, PTO+PT_R22; \
swi r23, r1, PTO+PT_R23; \
swi r24, r1, PTO+PT_R24; \
swi r25, r1, PTO+PT_R25; \
swi r26, r1, PTO+PT_R26; \
swi r27, r1, PTO+PT_R27; \
swi r28, r1, PTO+PT_R28; \
swi r29, r1, PTO+PT_R29; \
swi r30, r1, PTO+PT_R30; \
swi r31, r1, PTO+PT_R31; /* Save current task reg */ \
mfs r11, rmsr; /* save MSR */ \
swi r11, r1, PTO+PT_MSR;
/* Reload the MSR and r2-r31 from the pt_regs frame addressed by r1 (+PTO).
 * MSR is restored first (through r11; r11 is then reloaded with its saved
 * value). r1/r0 are not touched - callers restore the stack pointer. */
#define RESTORE_REGS \
lwi r11, r1, PTO+PT_MSR; \
mts rmsr , r11; \
lwi r2, r1, PTO+PT_R2; /* restore SDA */ \
lwi r3, r1, PTO+PT_R3; \
lwi r4, r1, PTO+PT_R4; \
lwi r5, r1, PTO+PT_R5; \
lwi r6, r1, PTO+PT_R6; \
lwi r7, r1, PTO+PT_R7; \
lwi r8, r1, PTO+PT_R8; \
lwi r9, r1, PTO+PT_R9; \
lwi r10, r1, PTO+PT_R10; \
lwi r11, r1, PTO+PT_R11; /* restore clobbered regs after rval */\
lwi r12, r1, PTO+PT_R12; \
lwi r13, r1, PTO+PT_R13; /* restore SDA2 */ \
lwi r14, r1, PTO+PT_PC; /* RESTORE_LINK PC, before IRQ/trap */\
lwi r15, r1, PTO+PT_R15; /* restore LP */ \
lwi r16, r1, PTO+PT_R16; \
lwi r17, r1, PTO+PT_R17; \
lwi r18, r1, PTO+PT_R18; /* restore asm scratch reg */ \
lwi r19, r1, PTO+PT_R19; \
lwi r20, r1, PTO+PT_R20; \
lwi r21, r1, PTO+PT_R21; \
lwi r22, r1, PTO+PT_R22; \
lwi r23, r1, PTO+PT_R23; \
lwi r24, r1, PTO+PT_R24; \
lwi r25, r1, PTO+PT_R25; \
lwi r26, r1, PTO+PT_R26; \
lwi r27, r1, PTO+PT_R27; \
lwi r28, r1, PTO+PT_R28; \
lwi r29, r1, PTO+PT_R29; \
lwi r30, r1, PTO+PT_R30; \
lwi r31, r1, PTO+PT_R31; /* Restore cur task reg */
/* Full exception-entry state save. Picks the kernel stack (reloaded from
 * ENTRY_SP when already in kernel mode, or derived from the current task's
 * thread_info when coming from user mode), carves a STATE_SAVE_SIZE frame
 * at its physical address, runs SAVE_REGS, records PT_MODE (0 = was user,
 * non-zero = was kernel) and reloads CURRENT_TASK. Clobbers r11. */
#define SAVE_STATE \
swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* save stack */ \
/* See if already in kernel mode.*/ \
mfs r1, rmsr; \
andi r1, r1, MSR_UMS; \
bnei r1, 1f; \
/* Kernel-mode state save. */ \
/* Reload kernel stack-ptr. */ \
lwi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); \
/* FIXME: I can add these two lines to one */ \
/* tophys(r1,r1); */ \
/* addik r1, r1, -STATE_SAVE_SIZE; */ \
addik r1, r1, CONFIG_KERNEL_BASE_ADDR - CONFIG_KERNEL_START - STATE_SAVE_SIZE; \
SAVE_REGS \
brid 2f; \
swi r1, r1, PTO+PT_MODE; /* non-zero: was in kernel mode (delay slot) */ \
1: /* User-mode state save. */ \
lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */\
tophys(r1,r1); \
lwi r1, r1, TS_THREAD_INFO; /* get the thread info */ \
/* MS these three instructions can be added to one */ \
/* addik r1, r1, THREAD_SIZE; */ \
/* tophys(r1,r1); */ \
/* addik r1, r1, -STATE_SAVE_SIZE; */ \
addik r1, r1, THREAD_SIZE + CONFIG_KERNEL_BASE_ADDR - CONFIG_KERNEL_START - STATE_SAVE_SIZE; \
SAVE_REGS \
lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); \
swi r11, r1, PTO+PT_R1; /* Store user SP. */ \
swi r0, r1, PTO + PT_MODE; /* Was in user-mode. */ \
/* MS: I am clearing UMS even in case when I come from kernel space */ \
clear_ums; \
2: lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
.text
/*
* User trap.
*
* System calls are handled here.
*
* Syscall protocol:
* Syscall number in r12, args in r5-r10
* Return value in r3
*
* Trap entered via brki instruction, so BIP bit is set, and interrupts
* are masked. This is nice, means we don't have to CLI before state save
*/
C_ENTRY(_user_exception):
addi r14, r14, 4 /* return address is 4 byte after call */
swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
tophys(r1,r1);
lwi r1, r1, TS_THREAD_INFO; /* get stack from task_struct */
/* MS these three instructions can be added to one */
/* addik r1, r1, THREAD_SIZE; */
/* tophys(r1,r1); */
/* addik r1, r1, -STATE_SAVE_SIZE; */
addik r1, r1, THREAD_SIZE + CONFIG_KERNEL_BASE_ADDR - CONFIG_KERNEL_START - STATE_SAVE_SIZE;
SAVE_REGS
/* pre-clear the syscall return-value slots */
swi r0, r1, PTO + PT_R3
swi r0, r1, PTO + PT_R4
lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
swi r11, r1, PTO+PT_R1; /* Store user SP. */
clear_ums;
lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
/* Save away the syscall number. */
swi r12, r1, PTO+PT_R0;
tovirt(r1,r1)
/* where the trap should return need -8 to adjust for rtsd r15, 8*/
/* Jump to the appropriate function for the system call number in r12
* (r12 is not preserved), or return an error if r12 is not valid. The LP
* register should point to the location where
* the called function should return. [note that MAKE_SYS_CALL uses label 1] */
/* Step into virtual mode */
rtbd r0, 3f
nop
3:
/* syscall tracing: only taken when a _TIF_WORK_SYSCALL_MASK flag is set */
lwi r11, CURRENT_TASK, TS_THREAD_INFO /* get thread info */
lwi r11, r11, TI_FLAGS /* get flags in thread info */
andi r11, r11, _TIF_WORK_SYSCALL_MASK
beqi r11, 4f
addik r3, r0, -ENOSYS
swi r3, r1, PTO + PT_R3
brlid r15, do_syscall_trace_enter
addik r5, r1, PTO + PT_R0
# do_syscall_trace_enter returns the new syscall nr.
addk r12, r0, r3
/* the tracer may have changed the args - reload them from pt_regs */
lwi r5, r1, PTO+PT_R5;
lwi r6, r1, PTO+PT_R6;
lwi r7, r1, PTO+PT_R7;
lwi r8, r1, PTO+PT_R8;
lwi r9, r1, PTO+PT_R9;
lwi r10, r1, PTO+PT_R10;
4:
/* Jump to the appropriate function for the system call number in r12
* (r12 is not preserved), or return an error if r12 is not valid.
* The LP register should point to the location where the called function
* should return. [note that MAKE_SYS_CALL uses label 1] */
/* See if the system call number is valid */
addi r11, r12, -__NR_syscalls;
bgei r11,5f;
/* Figure out which function to use for this system call. */
/* Note Microblaze barrel shift is optional, so don't rely on it */
add r12, r12, r12; /* convert num -> ptr (num * 4 via two adds) */
add r12, r12, r12;
#ifdef DEBUG
/* Trace syscalls and store their counts in r0_ram */
lwi r3, r12, 0x400 + r0_ram
addi r3, r3, 1
swi r3, r12, 0x400 + r0_ram
#endif
# Find and jump into the syscall handler.
lwi r12, r12, sys_call_table
/* where the trap should return need -8 to adjust for rtsd r15, 8 */
addi r15, r0, ret_from_trap-8
bra r12
/* The syscall number is invalid, return an error. */
5:
rtsd r15, 8; /* looks like a normal subroutine return */
addi r3, r0, -ENOSYS;
/* Entry point used to return from a syscall/trap */
/* We re-enable BIP bit before state restore */
C_ENTRY(ret_from_trap):
swi r3, r1, PTO + PT_R3 /* store syscall return value */
swi r4, r1, PTO + PT_R4
/* We're returning to user mode, so check for various conditions that
* trigger rescheduling. */
/* FIXME: Restructure all these flag checks. */
lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
lwi r11, r11, TI_FLAGS; /* get flags in thread info */
andi r11, r11, _TIF_WORK_SYSCALL_MASK
beqi r11, 1f /* not being traced - skip */
brlid r15, do_syscall_trace_leave
addik r5, r1, PTO + PT_R0
1:
/* We're returning to user mode, so check for various conditions that
* trigger rescheduling. */
/* get thread info from current task */
lwi r11, CURRENT_TASK, TS_THREAD_INFO;
lwi r11, r11, TI_FLAGS; /* get flags in thread info */
andi r11, r11, _TIF_NEED_RESCHED;
beqi r11, 5f;
bralid r15, schedule; /* Call scheduler */
nop; /* delay slot */
/* Maybe handle a signal */
5: /* get thread info from current task*/
lwi r11, CURRENT_TASK, TS_THREAD_INFO;
lwi r11, r11, TI_FLAGS; /* get flags in thread info */
andi r11, r11, _TIF_SIGPENDING;
beqi r11, 1f; /* no signals pending - skip signal handling */
addik r5, r1, PTO; /* Arg 1: struct pt_regs *regs */
addi r7, r0, 1; /* Arg 3: int in_syscall */
bralid r15, do_signal; /* Handle any signals */
add r6, r0, r0; /* Arg 2: sigset_t *oldset (delay slot) */
/* Finally, return to user state. */
1: set_bip; /* Ints masked for state restore */
swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
VM_OFF;
tophys(r1,r1);
RESTORE_REGS;
addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */
lwi r1, r1, PT_R1 - PT_SIZE;/* Restore user stack pointer. */
TRAP_return: /* Make global symbol for debugging */
rtbd r14, 0; /* Instructions to return from an IRQ */
nop;
  372. /* These syscalls need access to the struct pt_regs on the stack, so we
  373. implement them in assembly (they're basically all wrappers anyway). */
  374. C_ENTRY(sys_fork_wrapper):
  375. addi r5, r0, SIGCHLD /* Arg 0: flags */
  376. lwi r6, r1, PTO+PT_R1 /* Arg 1: child SP (use parent's) */
  377. addik r7, r1, PTO /* Arg 2: parent context */
  378. add r8. r0, r0 /* Arg 3: (unused) */
  379. add r9, r0, r0; /* Arg 4: (unused) */
  380. brid do_fork /* Do real work (tail-call) */
  381. add r10, r0, r0; /* Arg 5: (unused) */
/* This is the initial entry point for a new child thread, with an appropriate
stack in place that makes it look as if the child is in the middle of a
syscall. This function is actually `returned to' from switch_thread
(copy_thread makes ret_from_fork the return address in each new thread's
saved context). */
C_ENTRY(ret_from_fork):
bralid r15, schedule_tail; /* ...which is schedule_tail's arg */
add r3, r5, r0; /* switch_thread returns the prev task */
/* ( in the delay slot ) */
brid ret_from_trap; /* Do normal trap return */
add r3, r0, r0; /* Child's fork call should return 0. */
/* vfork: tail-call the C helper, passing pt_regs in the delay slot. */
C_ENTRY(sys_vfork):
brid microblaze_vfork /* Do real work (tail-call) */
addik r5, r1, PTO /* Arg 0: struct pt_regs * (delay slot) */
/* clone: like fork, but the caller supplies flags (r5) and child SP (r6). */
C_ENTRY(sys_clone):
bnei r6, 1f; /* See if child SP arg (arg 1) is 0. */
lwi r6, r1, PTO + PT_R1; /* If so, use parent's stack ptr */
1: addik r7, r1, PTO; /* Arg 2: parent context */
add r8, r0, r0; /* Arg 3: (unused) */
add r9, r0, r0; /* Arg 4: (unused) */
brid do_fork /* Do real work (tail-call) */
add r10, r0, r0; /* Arg 5: (unused, delay slot) */
/* execve: tail-call the C helper with pt_regs appended as the 4th arg. */
C_ENTRY(sys_execve):
brid microblaze_execve; /* Do real work (tail-call).*/
addik r8, r1, PTO; /* add user context as 4th arg (delay slot) */
/* rt_sigreturn: tail-call the C handler with pt_regs as its only arg. */
C_ENTRY(sys_rt_sigreturn_wrapper):
brid sys_rt_sigreturn /* Do real work */
addik r5, r1, PTO; /* add user context as 1st arg (delay slot) */
/*
* HW EXCEPTION routine start
*/
/* Generic hardware exception: save state, collect ESR/FSR and hand off to
 * the C handler full_exception(regs, esr, fsr), returning via ret_from_exc. */
C_ENTRY(full_exception_trap):
/* adjust exception address for privileged instruction
* for finding where is it */
addik r17, r17, -4
SAVE_STATE /* Save registers */
/* PC, before IRQ/trap - this is one instruction above */
swi r17, r1, PTO+PT_PC;
tovirt(r1,r1)
/* FIXME this can be store directly in PT_ESR reg.
* I tested it but there is a fault */
/* where the trap should return need -8 to adjust for rtsd r15, 8 */
addik r15, r0, ret_from_exc - 8
mfs r6, resr /* Arg 1: ESR */
mfs r7, rfsr; /* Arg 2: save FSR */
mts rfsr, r0; /* Clear sticky fsr */
rted r0, full_exception
addik r5, r1, PTO /* parameter struct pt_regs * regs (delay slot) */
/*
* Unaligned data trap.
*
* Unaligned data trap last on 4k page is handled here.
*
* Trap entered via exception, so EE bit is set, and interrupts
* are masked. This is nice, means we don't have to CLI before state save
*
* The assembler routine is in "arch/microblaze/kernel/hw_exception_handler.S"
*/
C_ENTRY(unaligned_data_trap):
/* MS: I have to save r11 value and then restore it because
* set_bip, clear_eip, set_ee use r11 as temp register if MSR
* instructions are not used. We don't need to do if MSR instructions
* are used and they use r0 instead of r11.
* I am using ENTRY_SP which should be primary used only for stack
* pointer saving. */
swi r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
set_bip; /* equalize initial state for all possible entries */
clear_eip;
set_ee;
lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
SAVE_STATE /* Save registers.*/
/* PC, before IRQ/trap - this is one instruction above */
swi r17, r1, PTO+PT_PC;
tovirt(r1,r1)
/* where the trap should return need -8 to adjust for rtsd r15, 8 */
addik r15, r0, ret_from_exc-8
mfs r3, resr /* ESR */
mfs r4, rear /* EAR */
rtbd r0, _unaligned_data_exception
addik r7, r1, PTO /* parameter struct pt_regs * regs (delay slot) */
/*
* Page fault traps.
*
* If the real exception handler (from hw_exception_handler.S) didn't find
* the mapping for the process, then we're thrown here to handle such situation.
*
* Trap entered via exceptions, so EE bit is set, and interrupts
* are masked. This is nice, means we don't have to CLI before state save
*
* Build a standard exception frame for TLB Access errors. All TLB exceptions
* will bail out to this point if they can't resolve the lightweight TLB fault.
*
* The C function called is in "arch/microblaze/mm/fault.c", declared as:
* void do_page_fault(struct pt_regs *regs,
* unsigned long address,
* unsigned long error_code)
*/
/* data and instruction trap - which one it is gets resolved in fault.c */
C_ENTRY(page_fault_data_trap):
SAVE_STATE /* Save registers.*/
/* PC, before IRQ/trap - this is one instruction above */
swi r17, r1, PTO+PT_PC;
tovirt(r1,r1)
/* where the trap should return need -8 to adjust for rtsd r15, 8 */
addik r15, r0, ret_from_exc-8
mfs r6, rear /* parameter unsigned long address */
mfs r7, resr /* parameter unsigned long error_code */
rted r0, do_page_fault
addik r5, r1, PTO /* parameter struct pt_regs * regs (delay slot) */
/* Instruction-side page fault: same as the data trap above, but the
 * error_code passed to do_page_fault is forced to 0. */
C_ENTRY(page_fault_instr_trap):
SAVE_STATE /* Save registers.*/
/* PC, before IRQ/trap - this is one instruction above */
swi r17, r1, PTO+PT_PC;
tovirt(r1,r1)
/* where the trap should return need -8 to adjust for rtsd r15, 8 */
addik r15, r0, ret_from_exc-8
mfs r6, rear /* parameter unsigned long address */
ori r7, r0, 0 /* parameter unsigned long error_code */
rted r0, do_page_fault
addik r5, r1, PTO /* parameter struct pt_regs * regs (delay slot) */
/* Entry point used to return from an exception. */
C_ENTRY(ret_from_exc):
lwi r11, r1, PTO + PT_MODE;
bnei r11, 2f; /* See if returning to kernel mode, */
/* ... if so, skip resched &c. */
/* We're returning to user mode, so check for various conditions that
trigger rescheduling. */
lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
lwi r11, r11, TI_FLAGS; /* get flags in thread info */
andi r11, r11, _TIF_NEED_RESCHED;
beqi r11, 5f;
/* Call the scheduler before returning from a syscall/trap. */
bralid r15, schedule; /* Call scheduler */
nop; /* delay slot */
/* Maybe handle a signal */
5: lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
lwi r11, r11, TI_FLAGS; /* get flags in thread info */
andi r11, r11, _TIF_SIGPENDING;
beqi r11, 1f; /* no signals pending - skip signal handling */
/*
* Handle a signal return; Pending signals should be in r18.
*
* Not all registers are saved by the normal trap/interrupt entry
* points (for instance, call-saved registers (because the normal
* C-compiler calling sequence in the kernel makes sure they're
* preserved), and call-clobbered registers in the case of
* traps), but signal handlers may want to examine or change the
* complete register state. Here we save anything not saved by
* the normal entry sequence, so that it may be safely restored
* (in a possibly modified form) after do_signal returns. */
addik r5, r1, PTO; /* Arg 1: struct pt_regs *regs */
addi r7, r0, 0; /* Arg 3: int in_syscall */
bralid r15, do_signal; /* Handle any signals */
add r6, r0, r0; /* Arg 2: sigset_t *oldset (delay slot) */
/* Finally, return to user state. */
1: set_bip; /* Ints masked for state restore */
swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
VM_OFF;
tophys(r1,r1);
RESTORE_REGS;
addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */
lwi r1, r1, PT_R1 - PT_SIZE; /* Restore user stack pointer. */
bri 6f;
/* Return to kernel state. */
2: set_bip; /* Ints masked for state restore */
VM_OFF;
tophys(r1,r1);
RESTORE_REGS;
addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */
tovirt(r1,r1);
6:
EXC_return: /* Make global symbol for debugging */
rtbd r14, 0; /* Instructions to return from an IRQ */
nop;
/*
* HW EXCEPTION routine end
*/
/*
* Hardware maskable interrupts.
*
* The stack-pointer (r1) should have already been saved to the memory
* location PER_CPU(ENTRY_SP).
*/
C_ENTRY(_interrupt):
/* MS: we are in physical address */
/* Save registers, switch to proper stack, convert SP to virtual.*/
swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP))
/* MS: See if already in kernel mode. */
mfs r1, rmsr
nop
andi r1, r1, MSR_UMS
bnei r1, 1f
/* Kernel-mode state save. */
lwi r1, r0, TOPHYS(PER_CPU(ENTRY_SP))
tophys(r1,r1); /* MS: I have in r1 physical address where stack is */
/* save registers */
/* MS: Make room on the stack -> activation record */
addik r1, r1, -STATE_SAVE_SIZE;
SAVE_REGS
brid 2f;
swi r1, r1, PTO + PT_MODE; /* 0 - user mode, 1 - kernel mode (delay slot) */
1:
/* User-mode state save. */
/* MS: get the saved current */
lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
tophys(r1,r1);
lwi r1, r1, TS_THREAD_INFO;
addik r1, r1, THREAD_SIZE;
tophys(r1,r1);
/* save registers */
addik r1, r1, -STATE_SAVE_SIZE;
SAVE_REGS
/* calculate mode */
swi r0, r1, PTO + PT_MODE;
lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
swi r11, r1, PTO+PT_R1; /* store user SP */
clear_ums;
2:
lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
tovirt(r1,r1)
addik r15, r0, irq_call; /* return address for do_IRQ */
irq_call:rtbd r0, do_IRQ;
addik r5, r1, PTO; /* Arg 1: struct pt_regs * (delay slot) */
/* MS: we are in virtual mode */
ret_from_irq:
lwi r11, r1, PTO + PT_MODE;
bnei r11, 2f; /* returning to kernel mode - skip resched/signals */
lwi r11, CURRENT_TASK, TS_THREAD_INFO;
lwi r11, r11, TI_FLAGS; /* MS: get flags from thread info */
andi r11, r11, _TIF_NEED_RESCHED;
beqi r11, 5f
bralid r15, schedule;
nop; /* delay slot */
/* Maybe handle a signal */
5: lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* MS: get thread info */
lwi r11, r11, TI_FLAGS; /* get flags in thread info */
andi r11, r11, _TIF_SIGPENDING;
beqid r11, no_intr_resched /* no signals pending - go restore state */
/* Handle a signal return; Pending signals should be in r18. */
addi r7, r0, 0; /* Arg 3: int in_syscall (delay slot) */
addik r5, r1, PTO; /* Arg 1: struct pt_regs *regs */
bralid r15, do_signal; /* Handle any signals */
add r6, r0, r0; /* Arg 2: sigset_t *oldset (delay slot) */
/* Finally, return to user state. */
no_intr_resched:
/* Disable interrupts, we are now committed to the state restore */
disable_irq
swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE);
VM_OFF;
tophys(r1,r1);
RESTORE_REGS
addik r1, r1, STATE_SAVE_SIZE /* MS: Clean up stack space. */
lwi r1, r1, PT_R1 - PT_SIZE; /* restore user stack pointer */
bri 6f;
/* MS: Return to kernel state. */
2:
#ifdef CONFIG_PREEMPT
lwi r11, CURRENT_TASK, TS_THREAD_INFO;
/* MS: get preempt_count from thread info */
lwi r5, r11, TI_PREEMPT_COUNT;
bgti r5, restore; /* preemption disabled - just restore */
lwi r5, r11, TI_FLAGS; /* get flags in thread info */
andi r5, r5, _TIF_NEED_RESCHED;
beqi r5, restore /* if zero jump over */
preempt:
/* interrupts are off that's why I am calling preempt_schedule_irq */
bralid r15, preempt_schedule_irq
nop
lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
lwi r5, r11, TI_FLAGS; /* get flags in thread info */
andi r5, r5, _TIF_NEED_RESCHED;
bnei r5, preempt /* if non zero jump to resched */
restore:
#endif
VM_OFF /* MS: turn off MMU */
tophys(r1,r1)
RESTORE_REGS
addik r1, r1, STATE_SAVE_SIZE /* MS: Clean up stack space. */
tovirt(r1,r1);
6:
IRQ_return: /* MS: Make global symbol for debugging */
rtid r14, 0
nop
/*
* Debug trap for KGDB. Enter to _debug_exception by brki r16, 0x18
* and call handling function with saved pt_regs
*/
C_ENTRY(_debug_exception):
/* BIP bit is set on entry, no interrupts can occur */
swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP))
mfs r1, rmsr
nop
andi r1, r1, MSR_UMS
bnei r1, 1f
/* MS: Kernel-mode state save - kgdb */
lwi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* Reload kernel stack-ptr*/
/* BIP bit is set on entry, no interrupts can occur */
addik r1, r1, CONFIG_KERNEL_BASE_ADDR - CONFIG_KERNEL_START - STATE_SAVE_SIZE;
SAVE_REGS;
/* save all regs to pt_reg structure */
swi r0, r1, PTO+PT_R0; /* R0 must be saved too */
swi r14, r1, PTO+PT_R14 /* rewrite saved R14 value */
swi r16, r1, PTO+PT_PC; /* PC and r16 are the same */
/* save special purpose registers to pt_regs */
mfs r11, rear;
swi r11, r1, PTO+PT_EAR;
mfs r11, resr;
swi r11, r1, PTO+PT_ESR;
mfs r11, rfsr;
swi r11, r1, PTO+PT_FSR;
/* stack pointer is in physical address at it is decrease
* by STATE_SAVE_SIZE but we need to get correct R1 value */
addik r11, r1, CONFIG_KERNEL_START - CONFIG_KERNEL_BASE_ADDR + STATE_SAVE_SIZE;
swi r11, r1, PTO+PT_R1
/* MS: r31 - current pointer isn't changed */
tovirt(r1,r1)
#ifdef CONFIG_KGDB
addi r5, r1, PTO /* pass pt_reg address as the first arg */
la r15, r0, dbtrap_call; /* return address */
rtbd r0, microblaze_kgdb_break
nop;
#endif
/* MS: Place handler for brki from kernel space if KGDB is OFF.
* It is very unlikely that another brki instruction is called. */
bri 0
/* MS: User-mode state save - gdb */
1: lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
tophys(r1,r1);
lwi r1, r1, TS_THREAD_INFO; /* get the thread info */
addik r1, r1, THREAD_SIZE; /* calculate kernel stack pointer */
tophys(r1,r1);
addik r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */
SAVE_REGS;
swi r16, r1, PTO+PT_PC; /* Save LP */
swi r0, r1, PTO + PT_MODE; /* Was in user-mode. */
lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
swi r11, r1, PTO+PT_R1; /* Store user SP. */
lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
tovirt(r1,r1)
set_vms;
addik r5, r1, PTO; /* Arg 1: struct pt_regs * */
addik r15, r0, dbtrap_call; /* return address */
dbtrap_call: /* Return point for kernel/user entry + 8 because of rtsd r15, 8 */
rtbd r0, sw_exception
nop
/* MS: The first instruction for the second part of the gdb/kgdb */
set_bip; /* Ints masked for state restore */
lwi r11, r1, PTO + PT_MODE;
bnei r11, 2f; /* returning to kernel mode - skip resched/signals */
/* MS: Return to user space - gdb */
/* Get current task ptr into r11 */
lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
lwi r11, r11, TI_FLAGS; /* get flags in thread info */
andi r11, r11, _TIF_NEED_RESCHED;
beqi r11, 5f;
/* Call the scheduler before returning from a syscall/trap. */
bralid r15, schedule; /* Call scheduler */
nop; /* delay slot */
/* Maybe handle a signal */
5: lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
lwi r11, r11, TI_FLAGS; /* get flags in thread info */
andi r11, r11, _TIF_SIGPENDING;
beqi r11, 1f; /* no signals pending - skip signal handling */
addik r5, r1, PTO; /* Arg 1: struct pt_regs *regs */
addi r7, r0, 0; /* Arg 3: int in_syscall */
bralid r15, do_signal; /* Handle any signals */
add r6, r0, r0; /* Arg 2: sigset_t *oldset (delay slot) */
/* Finally, return to user state. */
1: swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
VM_OFF;
tophys(r1,r1);
/* MS: Restore all regs */
RESTORE_REGS
addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space */
lwi r1, r1, PT_R1 - PT_SIZE; /* Restore user stack pointer */
DBTRAP_return_user: /* MS: Make global symbol for debugging */
rtbd r16, 0; /* MS: Instructions to return from a debug trap */
nop;
/* MS: Return to kernel state - kgdb */
2: VM_OFF;
tophys(r1,r1);
/* MS: Restore all regs */
RESTORE_REGS
lwi r14, r1, PTO+PT_R14;
lwi r16, r1, PTO+PT_PC;
addik r1, r1, STATE_SAVE_SIZE; /* MS: Clean up stack space */
tovirt(r1,r1);
DBTRAP_return_kernel: /* MS: Make global symbol for debugging */
rtbd r16, 0; /* MS: Instructions to return from a debug trap */
nop;
/*
 * Context switch between two tasks.
 *
 * In:	r5 = thread_info of the outgoing task (TI_CPU_CONTEXT is read off it)
 *	r6 = thread_info of the incoming task
 * Out:	r3 = previous task pointer (CURRENT_TASK before the switch)
 *
 * Saves the outgoing task's callee-visible register file plus MSR/EAR/ESR/FSR
 * into its cpu_context, updates CURRENT_TASK (r31) and the per-CPU
 * CURRENT_SAVE slot, then loads the incoming task's cpu_context and returns
 * on the NEW task's stack via rtsd r15, 8.
 */
ENTRY(_switch_to)
	/* prepare return value */
	addk	r3, r0, CURRENT_TASK

	/* save registers in cpu_context */
	/* use r11 and r12, volatile registers, as temp register */
	/* give start of cpu_context for previous process */
	addik	r11, r5, TI_CPU_CONTEXT
	swi	r1, r11, CC_R1
	swi	r2, r11, CC_R2
	/* skip volatile registers.
	 * they are saved on stack when we jumped to _switch_to() */
	/* dedicated registers */
	swi	r13, r11, CC_R13
	swi	r14, r11, CC_R14
	swi	r15, r11, CC_R15
	swi	r16, r11, CC_R16
	swi	r17, r11, CC_R17
	swi	r18, r11, CC_R18
	/* save non-volatile registers */
	swi	r19, r11, CC_R19
	swi	r20, r11, CC_R20
	swi	r21, r11, CC_R21
	swi	r22, r11, CC_R22
	swi	r23, r11, CC_R23
	swi	r24, r11, CC_R24
	swi	r25, r11, CC_R25
	swi	r26, r11, CC_R26
	swi	r27, r11, CC_R27
	swi	r28, r11, CC_R28
	swi	r29, r11, CC_R29
	swi	r30, r11, CC_R30
	/* special purpose registers */
	mfs	r12, rmsr
	swi	r12, r11, CC_MSR
	mfs	r12, rear
	swi	r12, r11, CC_EAR
	mfs	r12, resr
	swi	r12, r11, CC_ESR
	mfs	r12, rfsr
	swi	r12, r11, CC_FSR

	/* update r31, the current-give me pointer to task which will be next */
	lwi	CURRENT_TASK, r6, TI_TASK
	/* stored it to current_save too */
	swi	CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE)

	/* get new process' cpu context and restore */
	/* give me start where start context of next task */
	addik	r11, r6, TI_CPU_CONTEXT

	/* non-volatile registers */
	lwi	r30, r11, CC_R30
	lwi	r29, r11, CC_R29
	lwi	r28, r11, CC_R28
	lwi	r27, r11, CC_R27
	lwi	r26, r11, CC_R26
	lwi	r25, r11, CC_R25
	lwi	r24, r11, CC_R24
	lwi	r23, r11, CC_R23
	lwi	r22, r11, CC_R22
	lwi	r21, r11, CC_R21
	lwi	r20, r11, CC_R20
	lwi	r19, r11, CC_R19
	/* dedicated registers */
	lwi	r18, r11, CC_R18
	lwi	r17, r11, CC_R17
	lwi	r16, r11, CC_R16
	lwi	r15, r11, CC_R15
	lwi	r14, r11, CC_R14
	lwi	r13, r11, CC_R13
	/* skip volatile registers */
	lwi	r2, r11, CC_R2
	lwi	r1, r11, CC_R1	/* from here on we run on the new task's stack */
	/* special purpose registers */
	lwi	r12, r11, CC_FSR
	mts	rfsr, r12
	lwi	r12, r11, CC_MSR
	mts	rmsr, r12	/* restore MSR last - may re-enable interrupts/VM */
	rtsd	r15, 8
	nop
/* Soft reset: hand control back to the first-stage bootloader (FS-BOOT),
 * which lives at physical address 0x70. Never returns. */
ENTRY(_reset)
	brai	0x70; /* Jump back to FS-boot */
/* Interrupt/exception vector table.
 * These are compiled and loaded into high memory, then
 * copied into place in mach_early_setup. Each entry is a single
 * branch-immediate to the physical address of the handler. */
	.section	.init.ivt, "ax"
	.org	0x0
	/* this is very important - here is the reset vector */
	/* in current MMU branch you don't care what is here - it is
	 * used from bootloader site - but this is correct for FS-BOOT */
	brai	0x70
	nop
	brai	TOPHYS(_user_exception);	/* syscall handler */
	brai	TOPHYS(_interrupt);		/* Interrupt handler */
	brai	TOPHYS(_debug_exception);	/* debug trap handler */
	brai	TOPHYS(_hw_exception_handler);	/* HW exception handler */
	.section	.rodata,"a"
#include "syscall_table.S"

syscall_table_size=(.-sys_call_table)

/* NUL-terminated trap-type names shown by the stack unwinder.
 * Note: the leading space in the last string is intentional output
 * formatting - do not "fix" it. */
type_SYSCALL:
	.ascii "SYSCALL\0"
type_IRQ:
	.ascii "IRQ\0"
type_IRQ_PREEMPT:
	.ascii "IRQ (PREEMPTED)\0"
type_SYSCALL_PREEMPT:
	.ascii " SYSCALL (PREEMPTED)\0"

	/*
	 * Trap decoding for stack unwinder
	 * Tuples are (start addr, end addr, string)
	 * If return address lies on [start addr, end addr],
	 * unwinder displays 'string'
	 */
	.align 4
.global microblaze_trap_handlers
microblaze_trap_handlers:
	/* Exact matches come first (start == end) */
	.word ret_from_trap; .word ret_from_trap ; .word type_SYSCALL
	.word ret_from_irq ; .word ret_from_irq ; .word type_IRQ
	/* Fuzzy matches go here (start < end ranges) */
	.word ret_from_irq ; .word no_intr_resched ; .word type_IRQ_PREEMPT
	.word ret_from_trap; .word TRAP_return ; .word type_SYSCALL_PREEMPT
	/* End of table - all-zero sentinel tuple */
	.word 0 ; .word 0 ; .word 0