/* entry.S */

/*
 * Low-level system-call handling, trap handlers and context-switching
 *
 * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2008-2009 PetaLogix
 * Copyright (C) 2003 John Williams <jwilliams@itee.uq.edu.au>
 * Copyright (C) 2001,2002 NEC Corporation
 * Copyright (C) 2001,2002 Miles Bader <miles@gnu.org>
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License. See the file COPYING in the main directory of this
 * archive for more details.
 *
 * Written by Miles Bader <miles@gnu.org>
 * Heavily modified by John Williams for Microblaze
 */

#include <linux/sys.h>
#include <linux/linkage.h>

#include <asm/entry.h>
#include <asm/current.h>
#include <asm/processor.h>
#include <asm/exceptions.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>

#include <asm/page.h>
#include <asm/unistd.h>

#include <linux/errno.h>
#include <asm/signal.h>

#undef DEBUG

/* The size of a state save frame. */
#define STATE_SAVE_SIZE	(PT_SIZE + STATE_SAVE_ARG_SPACE)

/* The offset of the struct pt_regs in a `state save frame' on the stack. */
#define PTO	STATE_SAVE_ARG_SPACE /* 24 the space for args */
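
/*
 * Rough picture of the state-save frame implied by the two defines above
 * (a sketch for orientation only; the exact field offsets come from the
 * asm headers included above):
 *
 *	r1 ->			+---------------------------------+
 *				| argument space (PTO bytes)      |
 *	r1 + PTO ->		+---------------------------------+
 *				| struct pt_regs (PT_SIZE bytes)  |
 *	r1 + STATE_SAVE_SIZE ->	+---------------------------------+
 */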
#define C_ENTRY(name)	.globl name; .align 4; name

/*
 * Various ways of setting and clearing BIP in flags reg.
 * This is mucky, but necessary when using a MicroBlaze version that
 * allows msr ops to write to BIP.
 */
#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
	.macro clear_bip
	msrclr r0, MSR_BIP
	.endm
	.macro set_bip
	msrset r0, MSR_BIP
	.endm
	.macro clear_eip
	msrclr r0, MSR_EIP
	.endm
	.macro set_ee
	msrset r0, MSR_EE
	.endm
	.macro disable_irq
	msrclr r0, MSR_IE
	.endm
	.macro enable_irq
	msrset r0, MSR_IE
	.endm
	.macro set_ums
	msrset r0, MSR_UMS
	msrclr r0, MSR_VMS
	.endm
	.macro set_vms
	msrclr r0, MSR_UMS
	msrset r0, MSR_VMS
	.endm
	.macro clear_ums
	msrclr r0, MSR_UMS
	.endm
	.macro clear_vms_ums
	msrclr r0, MSR_VMS | MSR_UMS
	.endm
#else
	.macro clear_bip
	mfs r11, rmsr
	andi r11, r11, ~MSR_BIP
	mts rmsr, r11
	.endm
	.macro set_bip
	mfs r11, rmsr
	ori r11, r11, MSR_BIP
	mts rmsr, r11
	.endm
	.macro clear_eip
	mfs r11, rmsr
	andi r11, r11, ~MSR_EIP
	mts rmsr, r11
	.endm
	.macro set_ee
	mfs r11, rmsr
	ori r11, r11, MSR_EE
	mts rmsr, r11
	.endm
	.macro disable_irq
	mfs r11, rmsr
	andi r11, r11, ~MSR_IE
	mts rmsr, r11
	.endm
	.macro enable_irq
	mfs r11, rmsr
	ori r11, r11, MSR_IE
	mts rmsr, r11
	.endm
	.macro set_ums
	mfs r11, rmsr
	ori r11, r11, MSR_VMS
	andni r11, r11, MSR_UMS
	mts rmsr, r11
	.endm
	.macro set_vms
	mfs r11, rmsr
	ori r11, r11, MSR_VMS
	andni r11, r11, MSR_UMS
	mts rmsr, r11
	.endm
	.macro clear_ums
	mfs r11, rmsr
	andni r11, r11, MSR_UMS
	mts rmsr, r11
	.endm
	.macro clear_vms_ums
	mfs r11, rmsr
	andni r11, r11, (MSR_VMS|MSR_UMS)
	mts rmsr, r11
	.endm
#endif
/* Define how to call high-level functions. With MMU, virtual mode must be
 * enabled when calling the high-level function. Clobbers R11.
 * VM_ON, VM_OFF, DO_JUMP_BIPCLR, DO_CALL
 */
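/*
 * A minimal usage sketch (a hypothetical call site, not taken from this
 * file; "some_c_function" is an invented name) of how the two macros below
 * are meant to bracket a call into C code from physical mode:
 *
 *	VM_ON				# switch to virtual mode (clobbers r11)
 *	bralid	r15, some_c_function	# call the high-level handler
 *	nop				# delay slot
 *	VM_OFF				# back to physical mode
 */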
/* turn on virtual protected mode save */
#define VM_ON \
	set_ums; \
	rted r0, 2f; \
	nop; \
2:

/* turn off virtual protected mode save and user mode save */
#define VM_OFF \
	clear_vms_ums; \
	rted r0, TOPHYS(1f); \
	nop; \
1:
#define SAVE_REGS \
	swi r2, r1, PTO+PT_R2;	/* Save SDA */ \
	swi r3, r1, PTO+PT_R3; \
	swi r4, r1, PTO+PT_R4; \
	swi r5, r1, PTO+PT_R5; \
	swi r6, r1, PTO+PT_R6; \
	swi r7, r1, PTO+PT_R7; \
	swi r8, r1, PTO+PT_R8; \
	swi r9, r1, PTO+PT_R9; \
	swi r10, r1, PTO+PT_R10; \
	swi r11, r1, PTO+PT_R11;	/* save clobbered regs after rval */ \
	swi r12, r1, PTO+PT_R12; \
	swi r13, r1, PTO+PT_R13;	/* Save SDA2 */ \
	swi r14, r1, PTO+PT_PC;		/* PC, before IRQ/trap */ \
	swi r15, r1, PTO+PT_R15;	/* Save LP */ \
	swi r18, r1, PTO+PT_R18;	/* Save asm scratch reg */ \
	swi r19, r1, PTO+PT_R19; \
	swi r20, r1, PTO+PT_R20; \
	swi r21, r1, PTO+PT_R21; \
	swi r22, r1, PTO+PT_R22; \
	swi r23, r1, PTO+PT_R23; \
	swi r24, r1, PTO+PT_R24; \
	swi r25, r1, PTO+PT_R25; \
	swi r26, r1, PTO+PT_R26; \
	swi r27, r1, PTO+PT_R27; \
	swi r28, r1, PTO+PT_R28; \
	swi r29, r1, PTO+PT_R29; \
	swi r30, r1, PTO+PT_R30; \
	swi r31, r1, PTO+PT_R31;	/* Save current task reg */ \
	mfs r11, rmsr;			/* save MSR */ \
	swi r11, r1, PTO+PT_MSR;

#define RESTORE_REGS \
	lwi r11, r1, PTO+PT_MSR; \
	mts rmsr, r11; \
	lwi r2, r1, PTO+PT_R2;	/* restore SDA */ \
	lwi r3, r1, PTO+PT_R3; \
	lwi r4, r1, PTO+PT_R4; \
	lwi r5, r1, PTO+PT_R5; \
	lwi r6, r1, PTO+PT_R6; \
	lwi r7, r1, PTO+PT_R7; \
	lwi r8, r1, PTO+PT_R8; \
	lwi r9, r1, PTO+PT_R9; \
	lwi r10, r1, PTO+PT_R10; \
	lwi r11, r1, PTO+PT_R11;	/* restore clobbered regs after rval */ \
	lwi r12, r1, PTO+PT_R12; \
	lwi r13, r1, PTO+PT_R13;	/* restore SDA2 */ \
	lwi r14, r1, PTO+PT_PC;		/* RESTORE_LINK PC, before IRQ/trap */ \
	lwi r15, r1, PTO+PT_R15;	/* restore LP */ \
	lwi r18, r1, PTO+PT_R18;	/* restore asm scratch reg */ \
	lwi r19, r1, PTO+PT_R19; \
	lwi r20, r1, PTO+PT_R20; \
	lwi r21, r1, PTO+PT_R21; \
	lwi r22, r1, PTO+PT_R22; \
	lwi r23, r1, PTO+PT_R23; \
	lwi r24, r1, PTO+PT_R24; \
	lwi r25, r1, PTO+PT_R25; \
	lwi r26, r1, PTO+PT_R26; \
	lwi r27, r1, PTO+PT_R27; \
	lwi r28, r1, PTO+PT_R28; \
	lwi r29, r1, PTO+PT_R29; \
	lwi r30, r1, PTO+PT_R30; \
	lwi r31, r1, PTO+PT_R31;	/* Restore cur task reg */
#define SAVE_STATE \
	swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* save stack */ \
	/* See if already in kernel mode. */ \
	mfs r1, rmsr; \
	andi r1, r1, MSR_UMS; \
	bnei r1, 1f; \
	/* Kernel-mode state save. */ \
	/* Reload kernel stack-ptr. */ \
	lwi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); \
	/* FIXME: these two lines could be combined into one: */ \
	/* tophys(r1,r1); */ \
	/* addik r1, r1, -STATE_SAVE_SIZE; */ \
	addik r1, r1, CONFIG_KERNEL_BASE_ADDR - CONFIG_KERNEL_START - STATE_SAVE_SIZE; \
	SAVE_REGS \
	brid 2f; \
	swi r1, r1, PTO+PT_MODE; \
1:	/* User-mode state save. */ \
	lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */ \
	tophys(r1,r1); \
	lwi r1, r1, TS_THREAD_INFO; /* get the thread info */ \
	/* MS: these three instructions could be combined into one: */ \
	/* addik r1, r1, THREAD_SIZE; */ \
	/* tophys(r1,r1); */ \
	/* addik r1, r1, -STATE_SAVE_SIZE; */ \
	addik r1, r1, THREAD_SIZE + CONFIG_KERNEL_BASE_ADDR - CONFIG_KERNEL_START - STATE_SAVE_SIZE; \
	SAVE_REGS \
	lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); \
	swi r11, r1, PTO+PT_R1; /* Store user SP. */ \
	swi r0, r1, PTO + PT_MODE; /* Was in user-mode. */ \
	/* MS: I am clearing UMS even when I come from kernel space */ \
	clear_ums; \
2:	lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
.text

/*
 * User trap.
 *
 * System calls are handled here.
 *
 * Syscall protocol:
 * Syscall number in r12, args in r5-r10
 * Return value in r3
 *
 * Trap entered via brki instruction, so BIP bit is set, and interrupts
 * are masked. This is nice, means we don't have to CLI before state save
 */
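/*
 * For illustration, a minimal sketch of what a user-space caller is assumed
 * to do to reach this handler (the real sequence normally lives in the C
 * library; __NR_getpid is just an example number from asm/unistd.h):
 *
 *	addik	r12, r0, __NR_getpid	# syscall number in r12
 *	brki	r14, 0x8		# trap; vector 0x8 in the IVT at the
 *					# end of this file branches here
 *	# on return, r3 holds the result; _user_exception adds 4 to r14
 *	# so execution resumes after the brki
 */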
C_ENTRY(_user_exception):
	addi r14, r14, 4	/* return address is 4 bytes after call */
	swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
	lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
	tophys(r1,r1);
	lwi r1, r1, TS_THREAD_INFO;	/* get stack from task_struct */
	/* MS: these three instructions could be combined into one: */
	/* addik r1, r1, THREAD_SIZE; */
	/* tophys(r1,r1); */
	/* addik r1, r1, -STATE_SAVE_SIZE; */
	addik r1, r1, THREAD_SIZE + CONFIG_KERNEL_BASE_ADDR - CONFIG_KERNEL_START - STATE_SAVE_SIZE;
	SAVE_REGS
	swi r0, r1, PTO + PT_R3
	swi r0, r1, PTO + PT_R4

	lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
	swi r11, r1, PTO+PT_R1;		/* Store user SP. */
	clear_ums;
	lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
	/* Save away the syscall number. */
	swi r12, r1, PTO+PT_R0;
	tovirt(r1,r1)

	/* where the trap should return: need -8 to adjust for rtsd r15, 8 */
	/* Jump to the appropriate function for the system call number in r12
	 * (r12 is not preserved), or return an error if r12 is not valid. The LP
	 * register should point to the location where
	 * the called function should return. [note that MAKE_SYS_CALL uses label 1] */

	/* Step into virtual mode */
	rtbd r0, 3f
	nop
3:
	lwi r11, CURRENT_TASK, TS_THREAD_INFO	/* get thread info */
	lwi r11, r11, TI_FLAGS			/* get flags in thread info */
	andi r11, r11, _TIF_WORK_SYSCALL_MASK
	beqi r11, 4f

	addik r3, r0, -ENOSYS
	swi r3, r1, PTO + PT_R3
	brlid r15, do_syscall_trace_enter
	addik r5, r1, PTO + PT_R0

	# do_syscall_trace_enter returns the new syscall nr.
	addk r12, r0, r3
	lwi r5, r1, PTO+PT_R5;
	lwi r6, r1, PTO+PT_R6;
	lwi r7, r1, PTO+PT_R7;
	lwi r8, r1, PTO+PT_R8;
	lwi r9, r1, PTO+PT_R9;
	lwi r10, r1, PTO+PT_R10;
4:
	/* Jump to the appropriate function for the system call number in r12
	 * (r12 is not preserved), or return an error if r12 is not valid.
	 * The LP register should point to the location where the called function
	 * should return. [note that MAKE_SYS_CALL uses label 1] */
	/* See if the system call number is valid */
	addi r11, r12, -__NR_syscalls;
	bgei r11, 5f;
	/* Figure out which function to use for this system call. */
	/* Note Microblaze barrel shift is optional, so don't rely on it */
	add r12, r12, r12;		/* convert num -> ptr */
	add r12, r12, r12;
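	/* r12 = syscall_nr * 4: a byte offset into sys_call_table, which holds
	 * 32-bit handler pointers; doubling twice avoids relying on the
	 * optional barrel shifter. */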
#ifdef DEBUG
	/* Trace syscalls and store them in r0_ram */
	lwi r3, r12, 0x400 + r0_ram
	addi r3, r3, 1
	swi r3, r12, 0x400 + r0_ram
#endif

	# Find and jump into the syscall handler.
	lwi r12, r12, sys_call_table
	/* where the trap should return: need -8 to adjust for rtsd r15, 8 */
	addi r15, r0, ret_from_trap-8
	bra r12

	/* The syscall number is invalid, return an error. */
5:
	rtsd r15, 8;		/* looks like a normal subroutine return */
	addi r3, r0, -ENOSYS;

/* Entry point used to return from a syscall/trap */
/* We re-enable BIP bit before state restore */
C_ENTRY(ret_from_trap):
	swi r3, r1, PTO + PT_R3
	swi r4, r1, PTO + PT_R4

	/* We're returning to user mode, so check for various conditions that
	 * trigger rescheduling. */
	/* FIXME: Restructure all these flag checks. */
	lwi r11, CURRENT_TASK, TS_THREAD_INFO;	/* get thread info */
	lwi r11, r11, TI_FLAGS;			/* get flags in thread info */
	andi r11, r11, _TIF_WORK_SYSCALL_MASK
	beqi r11, 1f

	brlid r15, do_syscall_trace_leave
	addik r5, r1, PTO + PT_R0
1:
	/* We're returning to user mode, so check for various conditions that
	 * trigger rescheduling. */
	/* get thread info from current task */
	lwi r11, CURRENT_TASK, TS_THREAD_INFO;
	lwi r11, r11, TI_FLAGS;		/* get flags in thread info */
	andi r11, r11, _TIF_NEED_RESCHED;
	beqi r11, 5f;

	bralid r15, schedule;	/* Call scheduler */
	nop;			/* delay slot */

	/* Maybe handle a signal */
5:	/* get thread info from current task */
	lwi r11, CURRENT_TASK, TS_THREAD_INFO;
	lwi r11, r11, TI_FLAGS;		/* get flags in thread info */
	andi r11, r11, _TIF_SIGPENDING;
	beqi r11, 1f;			/* Signals to handle, handle them */

	addik r5, r1, PTO;		/* Arg 1: struct pt_regs *regs */
	addi r7, r0, 1;			/* Arg 3: int in_syscall */
	bralid r15, do_signal;		/* Handle any signals */
	add r6, r0, r0;			/* Arg 2: sigset_t *oldset */

/* Finally, return to user state. */
1:	set_bip;			/* Ints masked for state restore */
	swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
	VM_OFF;
	tophys(r1,r1);
	RESTORE_REGS;
	addik r1, r1, STATE_SAVE_SIZE	/* Clean up stack space. */
	lwi r1, r1, PT_R1 - PT_SIZE;	/* Restore user stack pointer. */
TRAP_return:	/* Make global symbol for debugging */
	rtbd r14, 0;	/* Instructions to return from an IRQ */
	nop;

/* These syscalls need access to the struct pt_regs on the stack, so we
 * implement them in assembly (they're basically all wrappers anyway). */
C_ENTRY(sys_fork_wrapper):
	addi r5, r0, SIGCHLD		/* Arg 0: flags */
	lwi r6, r1, PTO+PT_R1		/* Arg 1: child SP (use parent's) */
	addik r7, r1, PTO		/* Arg 2: parent context */
	add r8, r0, r0			/* Arg 3: (unused) */
	add r9, r0, r0;			/* Arg 4: (unused) */
	brid do_fork			/* Do real work (tail-call) */
	add r10, r0, r0;		/* Arg 5: (unused) */

/* This is the initial entry point for a new child thread, with an appropriate
 * stack in place that makes it look like the child is in the middle of a
 * syscall. This function is actually `returned to' from switch_thread
 * (copy_thread makes ret_from_fork the return address in each new thread's
 * saved context). */
C_ENTRY(ret_from_fork):
	bralid r15, schedule_tail;	/* ...which is schedule_tail's arg */
	add r3, r5, r0;			/* switch_thread returns the prev task */
					/* ( in the delay slot ) */
	brid ret_from_trap;		/* Do normal trap return */
	add r3, r0, r0;			/* Child's fork call should return 0. */

C_ENTRY(sys_vfork):
	brid microblaze_vfork		/* Do real work (tail-call) */
	addik r5, r1, PTO

C_ENTRY(sys_clone):
	bnei r6, 1f;			/* See if child SP arg (arg 1) is 0. */
	lwi r6, r1, PTO + PT_R1;	/* If so, use parent's stack ptr */
1:	addik r7, r1, PTO;		/* Arg 2: parent context */
	add r8, r0, r0;			/* Arg 3: (unused) */
	add r9, r0, r0;			/* Arg 4: (unused) */
	brid do_fork			/* Do real work (tail-call) */
	add r10, r0, r0;		/* Arg 5: (unused) */

C_ENTRY(sys_execve):
	brid microblaze_execve;		/* Do real work (tail-call). */
	addik r8, r1, PTO;		/* add user context as 4th arg */

C_ENTRY(sys_rt_sigreturn_wrapper):
	brid sys_rt_sigreturn		/* Do real work */
	addik r5, r1, PTO;		/* add user context as 1st arg */
/*
 * HW EXCEPTION routine start
 */
C_ENTRY(full_exception_trap):
	/* adjust the exception address for a privileged instruction
	 * so we can find where it happened */
	addik r17, r17, -4
	SAVE_STATE			/* Save registers */
	/* PC, before IRQ/trap - this is one instruction above */
	swi r17, r1, PTO+PT_PC;
	tovirt(r1,r1)
	/* FIXME: this could be stored directly in the PT_ESR reg.
	 * I tested it but there is a fault */
	/* where the trap should return: need -8 to adjust for rtsd r15, 8 */
	addik r15, r0, ret_from_exc - 8
	mfs r6, resr
	mfs r7, rfsr;			/* save FSR */
	mts rfsr, r0;			/* Clear sticky fsr */
	rted r0, full_exception
	addik r5, r1, PTO		/* parameter struct pt_regs * regs */

/*
 * Unaligned data trap.
 *
 * Unaligned data trap last on 4k page is handled here.
 *
 * Trap entered via exception, so EE bit is set, and interrupts
 * are masked. This is nice, means we don't have to CLI before state save
 *
 * The assembler routine is in "arch/microblaze/kernel/hw_exception_handler.S"
 */
C_ENTRY(unaligned_data_trap):
	/* MS: I have to save the r11 value and then restore it because
	 * set_bip, clear_eip and set_ee use r11 as a temp register if MSR
	 * instructions are not used. We don't need to do this if MSR
	 * instructions are used; they use r0 instead of r11.
	 * I am using ENTRY_SP, which should primarily be used only for
	 * stack pointer saving. */
	swi r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
	set_bip;	/* equalize initial state for all possible entries */
	clear_eip;
	set_ee;
	lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
	SAVE_STATE			/* Save registers. */
	/* PC, before IRQ/trap - this is one instruction above */
	swi r17, r1, PTO+PT_PC;
	tovirt(r1,r1)
	/* where the trap should return: need -8 to adjust for rtsd r15, 8 */
	addik r15, r0, ret_from_exc-8
	mfs r3, resr			/* ESR */
	mfs r4, rear			/* EAR */
	rtbd r0, _unaligned_data_exception
	addik r7, r1, PTO		/* parameter struct pt_regs * regs */
/*
 * Page fault traps.
 *
 * If the real exception handler (from hw_exception_handler.S) didn't find
 * the mapping for the process, then we end up here to handle that situation.
 *
 * Trap entered via exceptions, so EE bit is set, and interrupts
 * are masked. This is nice, means we don't have to CLI before state save
 *
 * Build a standard exception frame for TLB Access errors. All TLB exceptions
 * will bail out to this point if they can't resolve the lightweight TLB fault.
 *
 * The C function called is in "arch/microblaze/mm/fault.c", declared as:
 *	void do_page_fault(struct pt_regs *regs,
 *				unsigned long address,
 *				unsigned long error_code)
 */
/* data and instruction trap - which one it is is resolved in fault.c */
C_ENTRY(page_fault_data_trap):
	SAVE_STATE			/* Save registers. */
	/* PC, before IRQ/trap - this is one instruction above */
	swi r17, r1, PTO+PT_PC;
	tovirt(r1,r1)
	/* where the trap should return: need -8 to adjust for rtsd r15, 8 */
	addik r15, r0, ret_from_exc-8
	mfs r6, rear			/* parameter unsigned long address */
	mfs r7, resr			/* parameter unsigned long error_code */
	rted r0, do_page_fault
	addik r5, r1, PTO		/* parameter struct pt_regs * regs */

C_ENTRY(page_fault_instr_trap):
	SAVE_STATE			/* Save registers. */
	/* PC, before IRQ/trap - this is one instruction above */
	swi r17, r1, PTO+PT_PC;
	tovirt(r1,r1)
	/* where the trap should return: need -8 to adjust for rtsd r15, 8 */
	addik r15, r0, ret_from_exc-8
	mfs r6, rear			/* parameter unsigned long address */
	ori r7, r0, 0			/* parameter unsigned long error_code */
	rted r0, do_page_fault
	addik r5, r1, PTO		/* parameter struct pt_regs * regs */
/* Entry point used to return from an exception. */
C_ENTRY(ret_from_exc):
	lwi r11, r1, PTO + PT_MODE;
	bnei r11, 2f;			/* See if returning to kernel mode, */
					/* ... if so, skip resched &c. */

	/* We're returning to user mode, so check for various conditions that
	 * trigger rescheduling. */
	lwi r11, CURRENT_TASK, TS_THREAD_INFO;	/* get thread info */
	lwi r11, r11, TI_FLAGS;		/* get flags in thread info */
	andi r11, r11, _TIF_NEED_RESCHED;
	beqi r11, 5f;

	/* Call the scheduler before returning from a syscall/trap. */
	bralid r15, schedule;		/* Call scheduler */
	nop;				/* delay slot */

	/* Maybe handle a signal */
5:	lwi r11, CURRENT_TASK, TS_THREAD_INFO;	/* get thread info */
	lwi r11, r11, TI_FLAGS;		/* get flags in thread info */
	andi r11, r11, _TIF_SIGPENDING;
	beqi r11, 1f;			/* Signals to handle, handle them */

	/*
	 * Handle a signal return; Pending signals should be in r18.
	 *
	 * Not all registers are saved by the normal trap/interrupt entry
	 * points (for instance, call-saved registers (because the normal
	 * C-compiler calling sequence in the kernel makes sure they're
	 * preserved), and call-clobbered registers in the case of
	 * traps), but signal handlers may want to examine or change the
	 * complete register state. Here we save anything not saved by
	 * the normal entry sequence, so that it may be safely restored
	 * (in a possibly modified form) after do_signal returns. */
	addik r5, r1, PTO;		/* Arg 1: struct pt_regs *regs */
	addi r7, r0, 0;			/* Arg 3: int in_syscall */
	bralid r15, do_signal;		/* Handle any signals */
	add r6, r0, r0;			/* Arg 2: sigset_t *oldset */

/* Finally, return to user state. */
1:	set_bip;			/* Ints masked for state restore */
	swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
	VM_OFF;
	tophys(r1,r1);
	RESTORE_REGS;
	addik r1, r1, STATE_SAVE_SIZE	/* Clean up stack space. */
	lwi r1, r1, PT_R1 - PT_SIZE;	/* Restore user stack pointer. */
	bri 6f;

/* Return to kernel state. */
2:	set_bip;			/* Ints masked for state restore */
	VM_OFF;
	tophys(r1,r1);
	RESTORE_REGS;
	addik r1, r1, STATE_SAVE_SIZE	/* Clean up stack space. */
	tovirt(r1,r1);
6:
EXC_return:	/* Make global symbol for debugging */
	rtbd r14, 0;	/* Instructions to return from an IRQ */
	nop;

/*
 * HW EXCEPTION routine end
 */
/*
 * Hardware maskable interrupts.
 *
 * The stack-pointer (r1) should have already been saved to the memory
 * location PER_CPU(ENTRY_SP).
 */
C_ENTRY(_interrupt):
/* MS: we are in physical address space */
/* Save registers, switch to proper stack, convert SP to virtual. */
	swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP))
	/* MS: See if already in kernel mode. */
	mfs r1, rmsr
	nop
	andi r1, r1, MSR_UMS
	bnei r1, 1f

/* Kernel-mode state save. */
	lwi r1, r0, TOPHYS(PER_CPU(ENTRY_SP))
	tophys(r1,r1);		/* MS: r1 now holds the physical address of the stack */
	/* save registers */
	/* MS: Make room on the stack -> activation record */
	addik r1, r1, -STATE_SAVE_SIZE;
	SAVE_REGS
	brid 2f;
	swi r1, r1, PTO + PT_MODE;	/* 0 - user mode, 1 - kernel mode */
1:
/* User-mode state save. */
	/* MS: get the saved current */
	lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
	tophys(r1,r1);
	lwi r1, r1, TS_THREAD_INFO;
	addik r1, r1, THREAD_SIZE;
	tophys(r1,r1);
	/* save registers */
	addik r1, r1, -STATE_SAVE_SIZE;
	SAVE_REGS
	/* calculate mode */
	swi r0, r1, PTO + PT_MODE;
	lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
	swi r11, r1, PTO+PT_R1;
	clear_ums;
2:
	lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
	tovirt(r1,r1)
	addik r15, r0, irq_call;
irq_call:	rtbd r0, do_IRQ;
	addik r5, r1, PTO;

/* MS: we are in virtual mode */
ret_from_irq:
	lwi r11, r1, PTO + PT_MODE;
	bnei r11, 2f;

	lwi r11, CURRENT_TASK, TS_THREAD_INFO;
	lwi r11, r11, TI_FLAGS;		/* MS: get flags from thread info */
	andi r11, r11, _TIF_NEED_RESCHED;
	beqi r11, 5f
	bralid r15, schedule;
	nop;				/* delay slot */

	/* Maybe handle a signal */
5:	lwi r11, CURRENT_TASK, TS_THREAD_INFO;	/* MS: get thread info */
	lwi r11, r11, TI_FLAGS;		/* get flags in thread info */
	andi r11, r11, _TIF_SIGPENDING;
	beqid r11, no_intr_resched

	/* Handle a signal return; Pending signals should be in r18. */
	addi r7, r0, 0;			/* Arg 3: int in_syscall */
	addik r5, r1, PTO;		/* Arg 1: struct pt_regs *regs */
	bralid r15, do_signal;		/* Handle any signals */
	add r6, r0, r0;			/* Arg 2: sigset_t *oldset */

/* Finally, return to user state. */
no_intr_resched:
	/* Disable interrupts, we are now committed to the state restore */
	disable_irq
	swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE);
	VM_OFF;
	tophys(r1,r1);
	RESTORE_REGS
	addik r1, r1, STATE_SAVE_SIZE	/* MS: Clean up stack space. */
	lwi r1, r1, PT_R1 - PT_SIZE;
	bri 6f;

/* MS: Return to kernel state. */
2:
#ifdef CONFIG_PREEMPT
	lwi r11, CURRENT_TASK, TS_THREAD_INFO;
	/* MS: get preempt_count from thread info */
	lwi r5, r11, TI_PREEMPT_COUNT;
	bgti r5, restore;

	lwi r5, r11, TI_FLAGS;		/* get flags in thread info */
	andi r5, r5, _TIF_NEED_RESCHED;
	beqi r5, restore		/* if zero jump over */
preempt:
	/* interrupts are off, that's why I am calling preempt_schedule_irq */
	bralid r15, preempt_schedule_irq
	nop
	lwi r11, CURRENT_TASK, TS_THREAD_INFO;	/* get thread info */
	lwi r5, r11, TI_FLAGS;		/* get flags in thread info */
	andi r5, r5, _TIF_NEED_RESCHED;
	bnei r5, preempt		/* if non-zero jump to resched */
restore:
#endif
	VM_OFF				/* MS: turn off MMU */
	tophys(r1,r1)
	RESTORE_REGS
	addik r1, r1, STATE_SAVE_SIZE	/* MS: Clean up stack space. */
	tovirt(r1,r1);
6:
IRQ_return:	/* MS: Make global symbol for debugging */
	rtid r14, 0
	nop
/*
 * Debug trap for KGDB. Enter _debug_exception via brki r16, 0x18
 * and call the handling function with the saved pt_regs.
 */
C_ENTRY(_debug_exception):
	/* BIP bit is set on entry, no interrupts can occur */
	swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP))

	mfs r1, rmsr
	nop
	andi r1, r1, MSR_UMS
	bnei r1, 1f

/* MS: Kernel-mode state save - kgdb */
	lwi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* Reload kernel stack-ptr */

	/* BIP bit is set on entry, no interrupts can occur */
	addik r1, r1, CONFIG_KERNEL_BASE_ADDR - CONFIG_KERNEL_START - STATE_SAVE_SIZE;
	SAVE_REGS;
	/* save all regs to pt_reg structure */
	swi r0, r1, PTO+PT_R0;		/* R0 must be saved too */
	swi r14, r1, PTO+PT_R14		/* rewrite saved R14 value */
	swi r16, r1, PTO+PT_R16
	swi r16, r1, PTO+PT_PC;		/* PC and r16 are the same */
	swi r17, r1, PTO+PT_R17
	/* save special purpose registers to pt_regs */
	mfs r11, rear;
	swi r11, r1, PTO+PT_EAR;
	mfs r11, resr;
	swi r11, r1, PTO+PT_ESR;
	mfs r11, rfsr;
	swi r11, r1, PTO+PT_FSR;

	/* the stack pointer is a physical address and has been decreased
	 * by STATE_SAVE_SIZE, but we need to store the correct R1 value */
	addik r11, r1, CONFIG_KERNEL_START - CONFIG_KERNEL_BASE_ADDR + STATE_SAVE_SIZE;
	swi r11, r1, PTO+PT_R1

	/* MS: r31 - current pointer isn't changed */
	tovirt(r1,r1)
#ifdef CONFIG_KGDB
	addi r5, r1, PTO		/* pass pt_reg address as the first arg */
	la r15, r0, dbtrap_call;	/* return address */
	rtbd r0, microblaze_kgdb_break
	nop;
#endif
	/* MS: Place handler for brki from kernel space if KGDB is OFF.
	 * It is very unlikely that another brki instruction is called. */
	bri 0

/* MS: User-mode state save - gdb */
1:	lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
	tophys(r1,r1);
	lwi r1, r1, TS_THREAD_INFO;	/* get the thread info */
	addik r1, r1, THREAD_SIZE;	/* calculate kernel stack pointer */
	tophys(r1,r1);

	addik r1, r1, -STATE_SAVE_SIZE;	/* Make room on the stack. */
	SAVE_REGS;
	swi r17, r1, PTO+PT_R17;
	swi r16, r1, PTO+PT_R16;
	swi r16, r1, PTO+PT_PC;		/* PC and r16 are the same */
	swi r0, r1, PTO + PT_MODE;	/* Was in user-mode. */
	lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
	swi r11, r1, PTO+PT_R1;		/* Store user SP. */
	lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
	tovirt(r1,r1)
	set_vms;
	addik r5, r1, PTO;
	addik r15, r0, dbtrap_call;
dbtrap_call:	/* Return point for kernel/user entry + 8 because of rtsd r15, 8 */
	rtbd r0, sw_exception
	nop

	/* MS: The first instruction for the second part of the gdb/kgdb */
	set_bip;			/* Ints masked for state restore */
	lwi r11, r1, PTO + PT_MODE;
	bnei r11, 2f;

/* MS: Return to user space - gdb */
	/* Get current task ptr into r11 */
	lwi r11, CURRENT_TASK, TS_THREAD_INFO;	/* get thread info */
	lwi r11, r11, TI_FLAGS;		/* get flags in thread info */
	andi r11, r11, _TIF_NEED_RESCHED;
	beqi r11, 5f;

	/* Call the scheduler before returning from a syscall/trap. */
	bralid r15, schedule;		/* Call scheduler */
	nop;				/* delay slot */

	/* Maybe handle a signal */
5:	lwi r11, CURRENT_TASK, TS_THREAD_INFO;	/* get thread info */
	lwi r11, r11, TI_FLAGS;		/* get flags in thread info */
	andi r11, r11, _TIF_SIGPENDING;
	beqi r11, 1f;			/* Signals to handle, handle them */

	addik r5, r1, PTO;		/* Arg 1: struct pt_regs *regs */
	addi r7, r0, 0;			/* Arg 3: int in_syscall */
	bralid r15, do_signal;		/* Handle any signals */
	add r6, r0, r0;			/* Arg 2: sigset_t *oldset */

/* Finally, return to user state. */
1:	swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
	VM_OFF;
	tophys(r1,r1);
	/* MS: Restore all regs */
	RESTORE_REGS
	lwi r17, r1, PTO+PT_R17;
	lwi r16, r1, PTO+PT_R16;
	addik r1, r1, STATE_SAVE_SIZE	/* Clean up stack space */
	lwi r1, r1, PT_R1 - PT_SIZE;	/* Restore user stack pointer */
DBTRAP_return_user:	/* MS: Make global symbol for debugging */
	rtbd r16, 0;	/* MS: Instructions to return from a debug trap */
	nop;

/* MS: Return to kernel state - kgdb */
2:	VM_OFF;
	tophys(r1,r1);
	/* MS: Restore all regs */
	RESTORE_REGS
	lwi r14, r1, PTO+PT_R14;
	lwi r16, r1, PTO+PT_PC;
	lwi r17, r1, PTO+PT_R17;
	addik r1, r1, STATE_SAVE_SIZE;	/* MS: Clean up stack space */
	tovirt(r1,r1);
DBTRAP_return_kernel:	/* MS: Make global symbol for debugging */
	rtbd r16, 0;	/* MS: Instructions to return from a debug trap */
	nop;
ENTRY(_switch_to)
	/* prepare return value */
	addk r3, r0, CURRENT_TASK

	/* save registers in cpu_context */
	/* use r11 and r12, volatile registers, as temp registers */
	/* get the start of cpu_context for the previous process */
	addik r11, r5, TI_CPU_CONTEXT
	swi r1, r11, CC_R1
	swi r2, r11, CC_R2
	/* skip volatile registers.
	 * they were saved on the stack when we jumped to _switch_to() */
	/* dedicated registers */
	swi r13, r11, CC_R13
	swi r14, r11, CC_R14
	swi r15, r11, CC_R15
	swi r16, r11, CC_R16
	swi r17, r11, CC_R17
	swi r18, r11, CC_R18
	/* save non-volatile registers */
	swi r19, r11, CC_R19
	swi r20, r11, CC_R20
	swi r21, r11, CC_R21
	swi r22, r11, CC_R22
	swi r23, r11, CC_R23
	swi r24, r11, CC_R24
	swi r25, r11, CC_R25
	swi r26, r11, CC_R26
	swi r27, r11, CC_R27
	swi r28, r11, CC_R28
	swi r29, r11, CC_R29
	swi r30, r11, CC_R30
	/* special purpose registers */
	mfs r12, rmsr
	swi r12, r11, CC_MSR
	mfs r12, rear
	swi r12, r11, CC_EAR
	mfs r12, resr
	swi r12, r11, CC_ESR
	mfs r12, rfsr
	swi r12, r11, CC_FSR

	/* update r31, the current task pointer, to the task which will run next */
	lwi CURRENT_TASK, r6, TI_TASK
	/* store it in CURRENT_SAVE too */
	swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE)

	/* get the new process' cpu context and restore it */
	/* get the start of the next task's cpu_context */
	addik r11, r6, TI_CPU_CONTEXT

	/* non-volatile registers */
	lwi r30, r11, CC_R30
	lwi r29, r11, CC_R29
	lwi r28, r11, CC_R28
	lwi r27, r11, CC_R27
	lwi r26, r11, CC_R26
	lwi r25, r11, CC_R25
	lwi r24, r11, CC_R24
	lwi r23, r11, CC_R23
	lwi r22, r11, CC_R22
	lwi r21, r11, CC_R21
	lwi r20, r11, CC_R20
	lwi r19, r11, CC_R19
	/* dedicated registers */
	lwi r18, r11, CC_R18
	lwi r17, r11, CC_R17
	lwi r16, r11, CC_R16
	lwi r15, r11, CC_R15
	lwi r14, r11, CC_R14
	lwi r13, r11, CC_R13
	/* skip volatile registers */
	lwi r2, r11, CC_R2
	lwi r1, r11, CC_R1
	/* special purpose registers */
	lwi r12, r11, CC_FSR
	mts rfsr, r12
	lwi r12, r11, CC_MSR
	mts rmsr, r12

	rtsd r15, 8
	nop

ENTRY(_reset)
	brai 0x70;	/* Jump back to FS-boot */
/* These are compiled and loaded into high memory, then
 * copied into place in mach_early_setup */
	.section	.init.ivt, "ax"
	.org	0x0
	/* this is very important - here is the reset vector */
	/* in the current MMU branch you don't care what is here - it is
	 * used from the bootloader side - but this is correct for FS-BOOT */
	brai	0x70
	nop
	brai	TOPHYS(_user_exception);	/* syscall handler */
	brai	TOPHYS(_interrupt);		/* Interrupt handler */
	brai	TOPHYS(_debug_exception);	/* debug trap handler */
	brai	TOPHYS(_hw_exception_handler);	/* HW exception handler */

	.section	.rodata,"a"
#include "syscall_table.S"

syscall_table_size=(.-sys_call_table)

type_SYSCALL:
	.ascii "SYSCALL\0"
type_IRQ:
	.ascii "IRQ\0"
type_IRQ_PREEMPT:
	.ascii "IRQ (PREEMPTED)\0"
type_SYSCALL_PREEMPT:
	.ascii " SYSCALL (PREEMPTED)\0"

/*
 * Trap decoding for stack unwinder
 * Tuples are (start addr, end addr, string)
 * If the return address lies in [start addr, end addr],
 * the unwinder displays 'string'
 */
	.align 4
.global microblaze_trap_handlers
microblaze_trap_handlers:
	/* Exact matches come first */
	.word ret_from_trap; .word ret_from_trap ; .word type_SYSCALL
	.word ret_from_irq ; .word ret_from_irq  ; .word type_IRQ
	/* Fuzzy matches go here */
	.word ret_from_irq ; .word no_intr_resched ; .word type_IRQ_PREEMPT
	.word ret_from_trap; .word TRAP_return    ; .word type_SYSCALL_PREEMPT
	/* End of table */
	.word 0 ; .word 0 ; .word 0