/*
 * Low-level system-call handling, trap handlers and context-switching
 *
 * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2008-2009 PetaLogix
 * Copyright (C) 2003 John Williams <jwilliams@itee.uq.edu.au>
 * Copyright (C) 2001,2002 NEC Corporation
 * Copyright (C) 2001,2002 Miles Bader <miles@gnu.org>
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License. See the file COPYING in the main directory of this
 * archive for more details.
 *
 * Written by Miles Bader <miles@gnu.org>
 * Heavily modified by John Williams for Microblaze
 */
#include <linux/sys.h>
#include <linux/linkage.h>

#include <asm/entry.h>
#include <asm/current.h>
#include <asm/processor.h>
#include <asm/exceptions.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>

#include <asm/page.h>
#include <asm/unistd.h>

#include <linux/errno.h>
#include <asm/signal.h>

#undef DEBUG

#ifdef DEBUG
/* Create space for syscalls counting. */
/* Slot 0 holds the total number of syscalls; slot N counts syscall N. */
.section .data
.global syscall_debug_table
.align 4
syscall_debug_table:
	.space	(__NR_syscalls * 4)
#endif /* DEBUG */

/* Declare a global, 4-byte-aligned entry point callable from C. */
#define C_ENTRY(name)	.globl name; .align 4; name
/*
 * Various ways of setting and clearing BIP in flags reg.
 * This is mucky, but necessary using microblaze version that
 * allows msr ops to write to BIP
 */
#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
	/* Hardware has msrset/msrclr: single-instruction MSR bit ops, no scratch reg. */
	.macro	clear_bip
	msrclr	r0, MSR_BIP
	.endm

	.macro	set_bip
	msrset	r0, MSR_BIP
	.endm

	.macro	clear_eip
	msrclr	r0, MSR_EIP
	.endm

	.macro	set_ee
	msrset	r0, MSR_EE
	.endm

	.macro	disable_irq
	msrclr	r0, MSR_IE
	.endm

	.macro	enable_irq
	msrset	r0, MSR_IE
	.endm

	.macro	set_ums
	msrset	r0, MSR_UMS
	msrclr	r0, MSR_VMS
	.endm

	.macro	set_vms
	msrclr	r0, MSR_UMS
	msrset	r0, MSR_VMS
	.endm

	.macro	clear_ums
	msrclr	r0, MSR_UMS
	.endm

	.macro	clear_vms_ums
	msrclr	r0, MSR_VMS | MSR_UMS
	.endm
#else
	/* No msrset/msrclr: emulate via mfs/mts read-modify-write, clobbering r11. */
	.macro	clear_bip
	mfs	r11, rmsr
	andi	r11, r11, ~MSR_BIP
	mts	rmsr, r11
	.endm

	.macro	set_bip
	mfs	r11, rmsr
	ori	r11, r11, MSR_BIP
	mts	rmsr, r11
	.endm

	.macro	clear_eip
	mfs	r11, rmsr
	andi	r11, r11, ~MSR_EIP
	mts	rmsr, r11
	.endm

	.macro	set_ee
	mfs	r11, rmsr
	ori	r11, r11, MSR_EE
	mts	rmsr, r11
	.endm

	.macro	disable_irq
	mfs	r11, rmsr
	andi	r11, r11, ~MSR_IE
	mts	rmsr, r11
	.endm

	.macro	enable_irq
	mfs	r11, rmsr
	ori	r11, r11, MSR_IE
	mts	rmsr, r11
	.endm

	/* NOTE(review): this variant sets VMS and clears UMS, which is the
	 * OPPOSITE of the msrset/msrclr set_ums above (set UMS, clear VMS)
	 * and is identical to set_vms below — confirm against upstream. */
	.macro	set_ums
	mfs	r11, rmsr
	ori	r11, r11, MSR_VMS
	andni	r11, r11, MSR_UMS
	mts	rmsr, r11
	.endm

	.macro	set_vms
	mfs	r11, rmsr
	ori	r11, r11, MSR_VMS
	andni	r11, r11, MSR_UMS
	mts	rmsr, r11
	.endm

	.macro	clear_ums
	mfs	r11, rmsr
	andni	r11, r11, MSR_UMS
	mts	rmsr,r11
	.endm

	.macro	clear_vms_ums
	mfs	r11, rmsr
	andni	r11, r11, (MSR_VMS|MSR_UMS)
	mts	rmsr,r11
	.endm
#endif
/* Define how to call high-level functions. With MMU, virtual mode must be
 * enabled when calling the high-level function. Clobbers R11.
 * VM_ON, VM_OFF, DO_JUMP_BIPCLR, DO_CALL
 */

/* turn on virtual protected mode save */
/* rted copies the UMS/VMS "save" bits into UM/VM on return to label 2. */
#define VM_ON		\
	set_ums;	\
	rted	r0, 2f;	\
	nop; \
2:

/* turn off virtual protected mode save and user mode save*/
/* Target address is physical (TOPHYS) since we land with VM disabled. */
#define VM_OFF		\
	clear_vms_ums;	\
	rted	r0, TOPHYS(1f);	\
	nop; \
1:
/* Dump r2-r31 plus MSR into the pt_regs frame at r1. r1 itself (PT_R1),
 * PT_PC source and PT_MODE are handled by the callers. */
#define SAVE_REGS \
	swi	r2, r1, PT_R2;	/* Save SDA */			\
	swi	r3, r1, PT_R3;					\
	swi	r4, r1, PT_R4;					\
	swi	r5, r1, PT_R5;					\
	swi	r6, r1, PT_R6;					\
	swi	r7, r1, PT_R7;					\
	swi	r8, r1, PT_R8;					\
	swi	r9, r1, PT_R9;					\
	swi	r10, r1, PT_R10;				\
	swi	r11, r1, PT_R11;	/* save clobbered regs after rval */\
	swi	r12, r1, PT_R12;				\
	swi	r13, r1, PT_R13;	/* Save SDA2 */		\
	swi	r14, r1, PT_PC;	/* PC, before IRQ/trap */	\
	swi	r15, r1, PT_R15;	/* Save LP */		\
	swi	r16, r1, PT_R16;				\
	swi	r17, r1, PT_R17;				\
	swi	r18, r1, PT_R18;	/* Save asm scratch reg */ \
	swi	r19, r1, PT_R19;				\
	swi	r20, r1, PT_R20;				\
	swi	r21, r1, PT_R21;				\
	swi	r22, r1, PT_R22;				\
	swi	r23, r1, PT_R23;				\
	swi	r24, r1, PT_R24;				\
	swi	r25, r1, PT_R25;				\
	swi	r26, r1, PT_R26;				\
	swi	r27, r1, PT_R27;				\
	swi	r28, r1, PT_R28;				\
	swi	r29, r1, PT_R29;				\
	swi	r30, r1, PT_R30;				\
	swi	r31, r1, PT_R31;	/* Save current task reg */ \
	mfs	r11, rmsr;		/* save MSR */		\
	swi	r11, r1, PT_MSR;
/* Inverse of SAVE_REGS: reload MSR then r2-r31 from the pt_regs frame at r1.
 * MSR is restored first so the rest of the sequence runs with the saved
 * machine state already in effect. */
#define RESTORE_REGS \
	lwi	r11, r1, PT_MSR;				\
	mts	rmsr , r11;					\
	lwi	r2, r1, PT_R2;	/* restore SDA */		\
	lwi	r3, r1, PT_R3;					\
	lwi	r4, r1, PT_R4;					\
	lwi	r5, r1, PT_R5;					\
	lwi	r6, r1, PT_R6;					\
	lwi	r7, r1, PT_R7;					\
	lwi	r8, r1, PT_R8;					\
	lwi	r9, r1, PT_R9;					\
	lwi	r10, r1, PT_R10;				\
	lwi	r11, r1, PT_R11; /* restore clobbered regs after rval */\
	lwi	r12, r1, PT_R12;				\
	lwi	r13, r1, PT_R13; /* restore SDA2 */		\
	lwi	r14, r1, PT_PC;	/* RESTORE_LINK PC, before IRQ/trap */\
	lwi	r15, r1, PT_R15; /* restore LP */		\
	lwi	r16, r1, PT_R16;				\
	lwi	r17, r1, PT_R17;				\
	lwi	r18, r1, PT_R18; /* restore asm scratch reg */	\
	lwi	r19, r1, PT_R19;				\
	lwi	r20, r1, PT_R20;				\
	lwi	r21, r1, PT_R21;				\
	lwi	r22, r1, PT_R22;				\
	lwi	r23, r1, PT_R23;				\
	lwi	r24, r1, PT_R24;				\
	lwi	r25, r1, PT_R25;				\
	lwi	r26, r1, PT_R26;				\
	lwi	r27, r1, PT_R27;				\
	lwi	r28, r1, PT_R28;				\
	lwi	r29, r1, PT_R29;				\
	lwi	r30, r1, PT_R30;				\
	lwi	r31, r1, PT_R31; /* Restore cur task reg */
/* Full exception-entry state save. Picks the kernel stack (either the
 * current one, if the exception hit kernel mode, or the task's kernel
 * stack from thread_info), makes room for a pt_regs frame, runs SAVE_REGS,
 * records PT_MODE (non-zero = was in kernel mode) and reloads the
 * CURRENT_TASK register. Runs with the MMU off: all memory accesses go
 * through TOPHYS/physical offsets. */
#define SAVE_STATE	\
	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* save stack */	\
	/* See if already in kernel mode.*/				\
	mfs	r1, rmsr;						\
	andi	r1, r1, MSR_UMS;					\
	bnei	r1, 1f;							\
	/* Kernel-mode state save. */					\
	/* Reload kernel stack-ptr. */					\
	lwi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP));			\
	/* FIXME: I can add these two lines to one */			\
	/* tophys(r1,r1); */						\
	/* addik	r1, r1, -PT_SIZE; */				\
	addik	r1, r1, CONFIG_KERNEL_BASE_ADDR - CONFIG_KERNEL_START - PT_SIZE; \
	SAVE_REGS							\
	brid	2f;							\
	swi	r1, r1, PT_MODE; /* non-zero SP marks kernel mode (delay slot) */ \
1:	/* User-mode state save.  */					\
	lwi	r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */\
	tophys(r1,r1);							\
	lwi	r1, r1, TS_THREAD_INFO;	/* get the thread info */	\
	/* MS these three instructions can be added to one */		\
	/* addik	r1, r1, THREAD_SIZE; */				\
	/* tophys(r1,r1); */						\
	/* addik	r1, r1, -PT_SIZE; */				\
	addik	r1, r1, THREAD_SIZE + CONFIG_KERNEL_BASE_ADDR - CONFIG_KERNEL_START - PT_SIZE; \
	SAVE_REGS							\
	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));			\
	swi	r11, r1, PT_R1; /* Store user SP. */			\
	swi	r0, r1, PT_MODE; /* Was in user-mode. */		\
	/* MS: I am clearing UMS even in case when I come from kernel space */ \
	clear_ums; 							\
2:	lwi	CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));

.text
/*
 * User trap.
 *
 * System calls are handled here.
 *
 * Syscall protocol:
 * Syscall number in r12, args in r5-r10
 * Return value in r3
 *
 * Trap entered via brki instruction, so BIP bit is set, and interrupts
 * are masked. This is nice, means we don't have to CLI before state save
 */
C_ENTRY(_user_exception):
	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
	addi	r14, r14, 4	/* return address is 4 byte after call */

	lwi	r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
	tophys(r1,r1);
	lwi	r1, r1, TS_THREAD_INFO;	/* get stack from task_struct */
	/* calculate kernel stack pointer from task struct 8k */
	addik	r1, r1, THREAD_SIZE;
	tophys(r1,r1);

	addik	r1, r1, -PT_SIZE; /* Make room on the stack. */
	SAVE_REGS
	/* Preset the syscall return slots to 0. */
	swi	r0, r1, PT_R3
	swi	r0, r1, PT_R4

	swi	r0, r1, PT_MODE;	/* Was in user-mode. */
	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
	swi	r11, r1, PT_R1;		/* Store user SP. */
	clear_ums;
2:	lwi	CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
	/* Save away the syscall number. */
	swi	r12, r1, PT_R0;
	tovirt(r1,r1)

	/* where the trap should return need -8 to adjust for rtsd r15, 8 */
	/* Jump to the appropriate function for the system call number in r12
	 * (r12 is not preserved), or return an error if r12 is not valid. The LP
	 * register should point to the location where
	 * the called function should return.  [note that MAKE_SYS_CALL uses label 1] */
	/* Step into virtual mode */
	rtbd	r0, 3f
	nop
3:
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO /* get thread info */
	lwi	r11, r11, TI_FLAGS	/* get flags in thread info */
	andi	r11, r11, _TIF_WORK_SYSCALL_MASK
	beqi	r11, 4f		/* no syscall-tracing work - fast path */

	addik	r3, r0, -ENOSYS
	swi	r3, r1, PT_R3	/* default return if tracer rejects the call */
	brlid	r15, do_syscall_trace_enter
	addik	r5, r1, PT_R0	/* arg: &regs->r0 (delay slot) */

	# do_syscall_trace_enter returns the new syscall nr.
	addk	r12, r0, r3
	/* Tracer may have rewritten the args - reload them from pt_regs. */
	lwi	r5, r1, PT_R5;
	lwi	r6, r1, PT_R6;
	lwi	r7, r1, PT_R7;
	lwi	r8, r1, PT_R8;
	lwi	r9, r1, PT_R9;
	lwi	r10, r1, PT_R10;
4:
/* Jump to the appropriate function for the system call number in r12
 * (r12 is not preserved), or return an error if r12 is not valid.
 * The LP register should point to the location where the called function
 * should return.  [note that MAKE_SYS_CALL uses label 1] */
	/* See if the system call number is valid */
	addi	r11, r12, -__NR_syscalls;
	bgei	r11, 5f;
	/* Figure out which function to use for this system call. */
	/* Note Microblaze barrel shift is optional, so don't rely on it */
	add	r12, r12, r12;		/* convert num -> ptr (x4 via two adds) */
	add	r12, r12, r12;

#ifdef DEBUG
	/* Trace syscalls and store them to syscall_debug_table */
	/* The first syscall location stores total syscall number */
	lwi	r3, r0, syscall_debug_table
	addi	r3, r3, 1
	swi	r3, r0, syscall_debug_table
	lwi	r3, r12, syscall_debug_table
	addi	r3, r3, 1
	swi	r3, r12, syscall_debug_table
#endif

	# Find and jump into the syscall handler.
	lwi	r12, r12, sys_call_table
	/* where the trap should return need -8 to adjust for rtsd r15, 8 */
	addi	r15, r0, ret_from_trap-8
	bra	r12

	/* The syscall number is invalid, return an error. */
5:
	rtsd	r15, 8;		/* looks like a normal subroutine return */
	addi	r3, r0, -ENOSYS;	/* error value (delay slot) */
/* Entry point used to return from a syscall/trap */
/* We re-enable BIP bit before state restore */
C_ENTRY(ret_from_trap):
	swi	r3, r1, PT_R3	/* store syscall return value */
	swi	r4, r1, PT_R4

	lwi	r11, r1, PT_MODE;
	/* See if returning to kernel mode, if so, skip resched &c. */
	bnei	r11, 2f;
	/* We're returning to user mode, so check for various conditions that
	 * trigger rescheduling. */
	/* FIXME: Restructure all these flag checks. */
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;	/* get thread info */
	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r11, r11, _TIF_WORK_SYSCALL_MASK
	beqi	r11, 1f		/* no syscall-exit tracing work - skip */

	brlid	r15, do_syscall_trace_leave
	addik	r5, r1, PT_R0	/* arg: &regs->r0 (delay slot) */
1:
	/* We're returning to user mode, so check for various conditions that
	 * trigger rescheduling. */
	/* get thread info from current task */
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;
	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r11, r11, _TIF_NEED_RESCHED;
	beqi	r11, 5f;

	bralid	r15, schedule;	/* Call scheduler */
	nop;			/* delay slot */

	/* Maybe handle a signal */
5:	/* get thread info from current task */
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;
	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r11, r11, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME;
	beqi	r11, 1f;	/* no signals pending - skip handling */

	addik	r5, r1, 0;		/* Arg 1: struct pt_regs *regs */
	bralid	r15, do_notify_resume;	/* Handle any signals */
	addi	r6, r0, 1;		/* Arg 2: int in_syscall (delay slot) */

/* Finally, return to user state. */
1:	set_bip;			/* Ints masked for state restore */
	swi	CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
	VM_OFF;
	tophys(r1,r1);
	RESTORE_REGS;
	addik	r1, r1, PT_SIZE		/* Clean up stack space. */
	lwi	r1, r1, PT_R1 - PT_SIZE; /* Restore user stack pointer. */
	bri	6f;

/* Return to kernel state. */
2:	set_bip;			/* Ints masked for state restore */
	VM_OFF;
	tophys(r1,r1);
	RESTORE_REGS;
	addik	r1, r1, PT_SIZE		/* Clean up stack space. */
	tovirt(r1,r1);
6:
TRAP_return:		/* Make global symbol for debugging */
	rtbd	r14, 0;	/* Instructions to return from an IRQ */
	nop;
/* This is the initial entry point for a new child thread, with an appropriate
   stack in place that makes it look like the child is in the middle of a
   syscall.  This function is actually `returned to' from switch_thread
   (copy_thread makes ret_from_fork the return address in each new thread's
   saved context). */
C_ENTRY(ret_from_fork):
	bralid	r15, schedule_tail; /* ...which is schedule_tail's arg */
	add	r5, r3, r0;	/* switch_thread returns the prev task */
				/* ( in the delay slot ) */
	brid	ret_from_trap;	/* Do normal trap return */
	add	r3, r0, r0;	/* Child's fork call should return 0. */
/* Entry point for a new kernel thread: finish the context switch, call the
 * thread function fn(arg) stashed in r20/r19 by copy_thread, then return
 * to userspace via the normal trap-return path. */
C_ENTRY(ret_from_kernel_thread):
	bralid	r15, schedule_tail; /* ...which is schedule_tail's arg */
	add	r5, r3, r0;	/* switch_thread returns the prev task */
				/* ( in the delay slot ) */
	brald	r15, r20	/* fn was left in r20 */
	addk	r5, r0, r19	/* ... and argument - in r19 (delay slot) */

	brid	ret_from_trap
	add	r3, r0, r0	/* return value 0 (delay slot) */
/* Wrapper so sys_rt_sigreturn receives struct pt_regs * as its first arg. */
C_ENTRY(sys_rt_sigreturn_wrapper):
	brid	sys_rt_sigreturn	/* Do real work */
	addik	r5, r1, 0;	/* add user context as 1st arg (delay slot) */
/*
 * HW EXCEPTION routine start
 */
C_ENTRY(full_exception_trap):
	/* adjust exception address for privileged instruction
	 * for finding where is it */
	addik	r17, r17, -4
	SAVE_STATE /* Save registers */
	/* PC, before IRQ/trap - this is one instruction above */
	swi	r17, r1, PT_PC;
	tovirt(r1,r1)
	/* FIXME this can be stored directly in PT_ESR reg.
	 * I tested it but there is a fault */
	/* where the trap should return need -8 to adjust for rtsd r15, 8 */
	addik	r15, r0, ret_from_exc - 8
	mfs	r6, resr	/* arg 2: exception status register */
	mfs	r7, rfsr;	/* save FSR (arg 3) */
	mts	rfsr, r0;	/* Clear sticky fsr */
	rted	r0, full_exception	/* enter C handler in virtual mode */
	addik	r5, r1, 0	/* parameter struct pt_regs * regs (delay slot) */
/*
 * Unaligned data trap.
 *
 * Unaligned data trap last on 4k page is handled here.
 *
 * Trap entered via exception, so EE bit is set, and interrupts
 * are masked. This is nice, means we don't have to CLI before state save
 *
 * The assembler routine is in "arch/microblaze/kernel/hw_exception_handler.S"
 */
C_ENTRY(unaligned_data_trap):
	/* MS: I have to save r11 value and then restore it because
	 * set_bip, clear_eip, set_ee use r11 as temp register if MSR
	 * instructions are not used. We don't need to do if MSR instructions
	 * are used and they use r0 instead of r11.
	 * I am using ENTRY_SP which should be primarily used only for stack
	 * pointer saving. */
	swi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
	set_bip;	/* equalize initial state for all possible entries */
	clear_eip;
	set_ee;
	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
	SAVE_STATE		/* Save registers. */
	/* PC, before IRQ/trap - this is one instruction above */
	swi	r17, r1, PT_PC;
	tovirt(r1,r1)
	/* where the trap should return need -8 to adjust for rtsd r15, 8 */
	addik	r15, r0, ret_from_exc-8
	mfs	r3, resr	/* ESR */
	mfs	r4, rear	/* EAR */
	rtbd	r0, _unaligned_data_exception
	addik	r7, r1, 0	/* parameter struct pt_regs * regs (delay slot) */
/*
 * Page fault traps.
 *
 * If the real exception handler (from hw_exception_handler.S) didn't find
 * the mapping for the process, then we're thrown here to handle such situation.
 *
 * Trap entered via exceptions, so EE bit is set, and interrupts
 * are masked. This is nice, means we don't have to CLI before state save
 *
 * Build a standard exception frame for TLB Access errors. All TLB exceptions
 * will bail out to this point if they can't resolve the lightweight TLB fault.
 *
 * The C function called is in "arch/microblaze/mm/fault.c", declared as:
 * void do_page_fault(struct pt_regs *regs,
 *				unsigned long address,
 *				unsigned long error_code)
 */
/* data and instruction trap - which one it is gets resolved in fault.c */
C_ENTRY(page_fault_data_trap):
	SAVE_STATE		/* Save registers. */
	/* PC, before IRQ/trap - this is one instruction above */
	swi	r17, r1, PT_PC;
	tovirt(r1,r1)
	/* where the trap should return need -8 to adjust for rtsd r15, 8 */
	addik	r15, r0, ret_from_exc-8
	mfs	r6, rear	/* parameter unsigned long address */
	mfs	r7, resr	/* parameter unsigned long error_code */
	rted	r0, do_page_fault
	addik	r5, r1, 0	/* parameter struct pt_regs * regs (delay slot) */
/* Instruction-side page fault: like page_fault_data_trap but with a zero
 * error_code, distinguishing it inside do_page_fault(). */
C_ENTRY(page_fault_instr_trap):
	SAVE_STATE		/* Save registers. */
	/* PC, before IRQ/trap - this is one instruction above */
	swi	r17, r1, PT_PC;
	tovirt(r1,r1)
	/* where the trap should return need -8 to adjust for rtsd r15, 8 */
	addik	r15, r0, ret_from_exc-8
	mfs	r6, rear	/* parameter unsigned long address */
	ori	r7, r0, 0	/* parameter unsigned long error_code */
	rted	r0, do_page_fault
	addik	r5, r1, 0	/* parameter struct pt_regs * regs (delay slot) */
/* Entry point used to return from an exception. */
C_ENTRY(ret_from_exc):
	lwi	r11, r1, PT_MODE;
	bnei	r11, 2f;	/* See if returning to kernel mode, */
				/* ... if so, skip resched &c. */

	/* We're returning to user mode, so check for various conditions that
	   trigger rescheduling. */
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;	/* get thread info */
	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r11, r11, _TIF_NEED_RESCHED;
	beqi	r11, 5f;

	/* Call the scheduler before returning from a syscall/trap. */
	bralid	r15, schedule;	/* Call scheduler */
	nop;			/* delay slot */

	/* Maybe handle a signal */
5:	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;	/* get thread info */
	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r11, r11, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME;
	beqi	r11, 1f;	/* no signals pending - skip handling */

	/*
	 * Handle a signal return; Pending signals should be in r18.
	 *
	 * Not all registers are saved by the normal trap/interrupt entry
	 * points (for instance, call-saved registers (because the normal
	 * C-compiler calling sequence in the kernel makes sure they're
	 * preserved), and call-clobbered registers in the case of
	 * traps), but signal handlers may want to examine or change the
	 * complete register state.  Here we save anything not saved by
	 * the normal entry sequence, so that it may be safely restored
	 * (in a possibly modified form) after do_notify_resume returns. */
	addik	r5, r1, 0;		/* Arg 1: struct pt_regs *regs */
	bralid	r15, do_notify_resume;	/* Handle any signals */
	addi	r6, r0, 0;		/* Arg 2: int in_syscall (delay slot) */

/* Finally, return to user state. */
1:	set_bip;			/* Ints masked for state restore */
	swi	CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
	VM_OFF;
	tophys(r1,r1);
	RESTORE_REGS;
	addik	r1, r1, PT_SIZE		/* Clean up stack space. */
	lwi	r1, r1, PT_R1 - PT_SIZE; /* Restore user stack pointer. */
	bri	6f;

/* Return to kernel state. */
2:	set_bip;			/* Ints masked for state restore */
	VM_OFF;
	tophys(r1,r1);
	RESTORE_REGS;
	addik	r1, r1, PT_SIZE		/* Clean up stack space. */
	tovirt(r1,r1);
6:
EXC_return:		/* Make global symbol for debugging */
	rtbd	r14, 0;	/* Instructions to return from an IRQ */
	nop;

/*
 * HW EXCEPTION routine end
 */
/*
 * Hardware maskable interrupts.
 *
 * The stack-pointer (r1) should have already been saved to the memory
 * location PER_CPU(ENTRY_SP).
 */
C_ENTRY(_interrupt):
	/* MS: we are in physical address */
	/* Save registers, switch to proper stack, convert SP to virtual. */
	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP))
	/* MS: See if already in kernel mode. */
	mfs	r1, rmsr
	nop
	andi	r1, r1, MSR_UMS
	bnei	r1, 1f

/* Kernel-mode state save. */
	lwi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP))
	tophys(r1,r1); /* MS: I have in r1 physical address where stack is */
	/* save registers */
	/* MS: Make room on the stack -> activation record */
	addik	r1, r1, -PT_SIZE;
	SAVE_REGS
	brid	2f;
	swi	r1, r1, PT_MODE; /* 0 - user mode, 1 - kernel mode (delay slot) */
1:
/* User-mode state save. */
	/* MS: get the saved current */
	lwi	r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
	tophys(r1,r1);
	lwi	r1, r1, TS_THREAD_INFO;
	addik	r1, r1, THREAD_SIZE;
	tophys(r1,r1);
	/* save registers */
	addik	r1, r1, -PT_SIZE;
	SAVE_REGS
	/* calculate mode */
	swi	r0, r1, PT_MODE;
	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
	swi	r11, r1, PT_R1;	/* store user SP */
	clear_ums;
2:
	lwi	CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
	tovirt(r1,r1)
	addik	r15, r0, irq_call;	/* return address for do_IRQ */
irq_call:rtbd	r0, do_IRQ;
	addik	r5, r1, 0;	/* arg: struct pt_regs * (delay slot) */

/* MS: we are in virtual mode */
ret_from_irq:
	lwi	r11, r1, PT_MODE;
	bnei	r11, 2f;	/* returning to kernel mode - skip user work */

	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;
	lwi	r11, r11, TI_FLAGS; /* MS: get flags from thread info */
	andi	r11, r11, _TIF_NEED_RESCHED;
	beqi	r11, 5f
	bralid	r15, schedule;
	nop; /* delay slot */

	/* Maybe handle a signal */
5:	lwi	r11, CURRENT_TASK, TS_THREAD_INFO; /* MS: get thread info */
	lwi	r11, r11, TI_FLAGS; /* get flags in thread info */
	andi	r11, r11, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME;
	beqid	r11, no_intr_resched
	/* Handle a signal return; Pending signals should be in r18. */
	addik	r5, r1, 0; /* Arg 1: struct pt_regs *regs (delay slot) */
	bralid	r15, do_notify_resume;	/* Handle any signals */
	addi	r6, r0, 0; /* Arg 2: int in_syscall (delay slot) */

/* Finally, return to user state. */
no_intr_resched:
	/* Disable interrupts, we are now committed to the state restore */
	disable_irq
	swi	CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE);
	VM_OFF;
	tophys(r1,r1);
	RESTORE_REGS
	addik	r1, r1, PT_SIZE /* MS: Clean up stack space. */
	lwi	r1, r1, PT_R1 - PT_SIZE;	/* restore user SP */
	bri	6f;
/* MS: Return to kernel state. */
2:
#ifdef CONFIG_PREEMPT
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;
	/* MS: get preempt_count from thread info */
	lwi	r5, r11, TI_PREEMPT_COUNT;
	bgti	r5, restore;	/* preemption disabled - just restore */

	lwi	r5, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r5, r5, _TIF_NEED_RESCHED;
	beqi	r5, restore /* if zero jump over */

preempt:
	/* interrupts are off, that's why I am calling preempt_schedule_irq */
	bralid	r15, preempt_schedule_irq
	nop
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;	/* get thread info */
	lwi	r5, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r5, r5, _TIF_NEED_RESCHED;
	bnei	r5, preempt /* if non zero jump to resched */
restore:
#endif
	VM_OFF /* MS: turn off MMU */
	tophys(r1,r1)
	RESTORE_REGS
	addik	r1, r1, PT_SIZE	/* MS: Clean up stack space. */
	tovirt(r1,r1);
6:
IRQ_return: /* MS: Make global symbol for debugging */
	rtid	r14, 0	/* return from interrupt (re-enables IE) */
	nop
/*
 * Debug trap for KGDB. Enter to _debug_exception by brki r16, 0x18
 * and call handling function with saved pt_regs
 */
C_ENTRY(_debug_exception):
	/* BIP bit is set on entry, no interrupts can occur */
	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP))

	mfs	r1, rmsr
	nop
	andi	r1, r1, MSR_UMS
	bnei	r1, 1f
/* MS: Kernel-mode state save - kgdb */
	lwi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* Reload kernel stack-ptr */

	/* BIP bit is set on entry, no interrupts can occur */
	addik	r1, r1, CONFIG_KERNEL_BASE_ADDR - CONFIG_KERNEL_START - PT_SIZE;
	SAVE_REGS;
	/* save all regs to pt_reg structure */
	swi	r0, r1, PT_R0;	/* R0 must be saved too */
	swi	r14, r1, PT_R14	/* rewrite saved R14 value */
	swi	r16, r1, PT_PC; /* PC and r16 are the same */
	/* save special purpose registers to pt_regs */
	mfs	r11, rear;
	swi	r11, r1, PT_EAR;
	mfs	r11, resr;
	swi	r11, r1, PT_ESR;
	mfs	r11, rfsr;
	swi	r11, r1, PT_FSR;

	/* stack pointer is in physical address and it was decreased
	 * by PT_SIZE, but we need to store the correct R1 value */
	addik	r11, r1, CONFIG_KERNEL_START - CONFIG_KERNEL_BASE_ADDR + PT_SIZE;
	swi	r11, r1, PT_R1
	/* MS: r31 - current pointer isn't changed */
	tovirt(r1,r1)
#ifdef CONFIG_KGDB
	addi	r5, r1, 0 /* pass pt_reg address as the first arg */
	addik	r15, r0, dbtrap_call; /* return address */
	rtbd	r0, microblaze_kgdb_break
	nop;
#endif
	/* MS: Place handler for brki from kernel space if KGDB is OFF.
	 * It is very unlikely that another brki instruction is called. */
	bri 0

/* MS: User-mode state save - gdb */
1:	lwi	r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
	tophys(r1,r1);
	lwi	r1, r1, TS_THREAD_INFO;	/* get the thread info */
	addik	r1, r1, THREAD_SIZE;	/* calculate kernel stack pointer */
	tophys(r1,r1);

	addik	r1, r1, -PT_SIZE; /* Make room on the stack. */
	SAVE_REGS;
	swi	r16, r1, PT_PC;	/* Save LP */
	swi	r0, r1, PT_MODE; /* Was in user-mode. */
	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
	swi	r11, r1, PT_R1; /* Store user SP. */
	lwi	CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
	tovirt(r1,r1)
	set_vms;
	addik	r5, r1, 0;	/* arg: struct pt_regs * */
	addik	r15, r0, dbtrap_call;
dbtrap_call: /* Return point for kernel/user entry + 8 because of rtsd r15, 8 */
	rtbd	r0, sw_exception
	nop

	/* MS: The first instruction for the second part of the gdb/kgdb */
	set_bip; /* Ints masked for state restore */
	lwi	r11, r1, PT_MODE;
	bnei	r11, 2f;
/* MS: Return to user space - gdb */
	/* Get current task ptr into r11 */
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;	/* get thread info */
	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r11, r11, _TIF_NEED_RESCHED;
	beqi	r11, 5f;

	/* Call the scheduler before returning from a syscall/trap. */
	bralid	r15, schedule;	/* Call scheduler */
	nop;			/* delay slot */

	/* Maybe handle a signal */
5:	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;	/* get thread info */
	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r11, r11, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME;
	beqi	r11, 1f;	/* no signals pending - skip handling */

	addik	r5, r1, 0;		/* Arg 1: struct pt_regs *regs */
	bralid	r15, do_notify_resume;	/* Handle any signals */
	addi	r6, r0, 0;		/* Arg 2: int in_syscall (delay slot) */

/* Finally, return to user state. */
1:	swi	CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
	VM_OFF;
	tophys(r1,r1);
	/* MS: Restore all regs */
	RESTORE_REGS
	addik	r1, r1, PT_SIZE	/* Clean up stack space */
	lwi	r1, r1, PT_R1 - PT_SIZE; /* Restore user stack pointer */
DBTRAP_return_user: /* MS: Make global symbol for debugging */
	rtbd	r16, 0; /* MS: Instructions to return from a debug trap */
	nop;

/* MS: Return to kernel state - kgdb */
2:	VM_OFF;
	tophys(r1,r1);
	/* MS: Restore all regs */
	RESTORE_REGS
	lwi	r14, r1, PT_R14;
	lwi	r16, r1, PT_PC;
	addik	r1, r1, PT_SIZE; /* MS: Clean up stack space */
	tovirt(r1,r1);
DBTRAP_return_kernel: /* MS: Make global symbol for debugging */
	rtbd	r16, 0; /* MS: Instructions to return from a debug trap */
	nop;
  /*
   * _switch_to(prev_thread_info, next_thread_info)
   * In:  r5 = thread_info of the outgoing task
   *      r6 = thread_info of the incoming task
   * Out: r3 = outgoing task pointer (CURRENT_TASK sampled before it is
   *           switched to the new task below)
   *
   * Saves the dedicated and non-volatile registers plus MSR/EAR/ESR/FSR
   * into prev's cpu_context, makes next's task the CURRENT_TASK (and
   * mirrors it into the per-CPU CURRENT_SAVE slot), then reloads the
   * same register set from next's cpu_context and returns via r15.
   */
  772. ENTRY(_switch_to)
  773. /* prepare return value */
  /* r3 = old current task, captured before CURRENT_TASK is updated */
  774. addk r3, r0, CURRENT_TASK
  775. /* save registers in cpu_context */
  776. /* use r11 and r12, volatile registers, as temp register */
  777. /* give start of cpu_context for previous process */
  778. addik r11, r5, TI_CPU_CONTEXT
  779. swi r1, r11, CC_R1
  780. swi r2, r11, CC_R2
  781. /* skip volatile registers.
  782. * they are saved on stack when we jumped to _switch_to() */
  783. /* dedicated registers */
  784. swi r13, r11, CC_R13
  785. swi r14, r11, CC_R14
  786. swi r15, r11, CC_R15
  787. swi r16, r11, CC_R16
  788. swi r17, r11, CC_R17
  789. swi r18, r11, CC_R18
  790. /* save non-volatile registers */
  791. swi r19, r11, CC_R19
  792. swi r20, r11, CC_R20
  793. swi r21, r11, CC_R21
  794. swi r22, r11, CC_R22
  795. swi r23, r11, CC_R23
  796. swi r24, r11, CC_R24
  797. swi r25, r11, CC_R25
  798. swi r26, r11, CC_R26
  799. swi r27, r11, CC_R27
  800. swi r28, r11, CC_R28
  801. swi r29, r11, CC_R29
  802. swi r30, r11, CC_R30
  803. /* special purpose registers */
  /* NOTE(review): EAR/ESR/FSR are saved here but only FSR (and MSR) are
   * reloaded on the restore side below -- EAR/ESR appear to be kept for
   * inspection only. */
  804. mfs r12, rmsr
  805. swi r12, r11, CC_MSR
  806. mfs r12, rear
  807. swi r12, r11, CC_EAR
  808. mfs r12, resr
  809. swi r12, r11, CC_ESR
  810. mfs r12, rfsr
  811. swi r12, r11, CC_FSR
  812. /* update r31, the current-give me pointer to task which will be next */
  813. lwi CURRENT_TASK, r6, TI_TASK
  814. /* stored it to current_save too */
  815. swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE)
  816. /* get new process' cpu context and restore */
  817. /* give me start where start context of next task */
  818. addik r11, r6, TI_CPU_CONTEXT
  819. /* non-volatile registers */
  820. lwi r30, r11, CC_R30
  821. lwi r29, r11, CC_R29
  822. lwi r28, r11, CC_R28
  823. lwi r27, r11, CC_R27
  824. lwi r26, r11, CC_R26
  825. lwi r25, r11, CC_R25
  826. lwi r24, r11, CC_R24
  827. lwi r23, r11, CC_R23
  828. lwi r22, r11, CC_R22
  829. lwi r21, r11, CC_R21
  830. lwi r20, r11, CC_R20
  831. lwi r19, r11, CC_R19
  832. /* dedicated registers */
  833. lwi r18, r11, CC_R18
  834. lwi r17, r11, CC_R17
  835. lwi r16, r11, CC_R16
  836. lwi r15, r11, CC_R15
  837. lwi r14, r11, CC_R14
  838. lwi r13, r11, CC_R13
  839. /* skip volatile registers */
  840. lwi r2, r11, CC_R2
  841. lwi r1, r11, CC_R1
  842. /* special purpose registers */
  843. lwi r12, r11, CC_FSR
  844. mts rfsr, r12
  /* MSR restored last: it may flip interrupt/VM state, so nothing after
   * this may depend on the old MSR. */
  845. lwi r12, r11, CC_MSR
  846. mts rmsr, r12
  /* Return to next task's saved r15 + 8 (standard MicroBlaze call return) */
  847. rtsd r15, 8
  848. nop
  /* _reset: soft reset -- branch to the CPU reset vector at address 0. */
  849. ENTRY(_reset)
  850. brai 0; /* Jump to reset vector */
  851. /* These are compiled and loaded into high memory, then
  852. * copied into place in mach_early_setup */
  853. .section .init.ivt, "ax"
  /* Interrupt vector table: one "brai" stub per hardware-defined vector
   * offset. The .org values are fixed by the MicroBlaze vector layout
   * and must not be reordered; targets are physical addresses (TOPHYS)
   * because the MMU is off when a vector is taken. */
  854. #if CONFIG_MANUAL_RESET_VECTOR
  855. .org 0x0
  /* Optional board-specific reset vector (0x0) supplied via Kconfig */
  856. brai CONFIG_MANUAL_RESET_VECTOR
  857. #endif
  858. .org 0x8
  859. brai TOPHYS(_user_exception); /* syscall handler */
  860. .org 0x10
  861. brai TOPHYS(_interrupt); /* Interrupt handler */
  862. .org 0x18
  863. brai TOPHYS(_debug_exception); /* debug trap handler */
  864. .org 0x20
  865. brai TOPHYS(_hw_exception_handler); /* HW exception handler */
  866. .section .rodata,"a"
  /* Pull in the sys_call_table entries; size recorded for bounds checks */
  867. #include "syscall_table.S"
  868. syscall_table_size=(.-sys_call_table)
  /* NUL-terminated label strings displayed by the stack unwinder for the
   * address ranges listed in microblaze_trap_handlers below. */
  869. type_SYSCALL:
  870. .ascii "SYSCALL\0"
  871. type_IRQ:
  872. .ascii "IRQ\0"
  873. type_IRQ_PREEMPT:
  874. .ascii "IRQ (PREEMPTED)\0"
  875. type_SYSCALL_PREEMPT:
  876. .ascii " SYSCALL (PREEMPTED)\0"
  877. /*
  878. * Trap decoding for stack unwinder
  879. * Tuples are (start addr, end addr, string)
  880. * If return address lies on [start addr, end addr],
  881. * unwinder displays 'string'
  882. */
  883. .align 4
  884. .global microblaze_trap_handlers
  885. microblaze_trap_handlers:
  886. /* Exact matches come first */
  /* start == end -> matches that single address only */
  887. .word ret_from_trap; .word ret_from_trap ; .word type_SYSCALL
  888. .word ret_from_irq ; .word ret_from_irq ; .word type_IRQ
  889. /* Fuzzy matches go here */
  890. .word ret_from_irq ; .word no_intr_resched ; .word type_IRQ_PREEMPT
  891. .word ret_from_trap; .word TRAP_return ; .word type_SYSCALL_PREEMPT
  892. /* End of table */
  /* All-zero sentinel row terminates the walk */
  893. .word 0 ; .word 0 ; .word 0