/* arch/microblaze/kernel/entry.S */
/*
 * Low-level system-call handling, trap handlers and context-switching
 *
 * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2008-2009 PetaLogix
 * Copyright (C) 2003		John Williams <jwilliams@itee.uq.edu.au>
 * Copyright (C) 2001,2002	NEC Corporation
 * Copyright (C) 2001,2002	Miles Bader <miles@gnu.org>
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License. See the file COPYING in the main directory of this
 * archive for more details.
 *
 * Written by Miles Bader <miles@gnu.org>
 * Heavily modified by John Williams for Microblaze
 */
  17. #include <linux/sys.h>
  18. #include <linux/linkage.h>
  19. #include <asm/entry.h>
  20. #include <asm/current.h>
  21. #include <asm/processor.h>
  22. #include <asm/exceptions.h>
  23. #include <asm/asm-offsets.h>
  24. #include <asm/thread_info.h>
  25. #include <asm/page.h>
  26. #include <asm/unistd.h>
  27. #include <linux/errno.h>
  28. #include <asm/signal.h>
  29. #undef DEBUG
  30. #ifdef DEBUG
  31. /* Create space for syscalls counting. */
  32. .section .data
  33. .global syscall_debug_table
  34. .align 4
  35. syscall_debug_table:
  36. .space (__NR_syscalls * 4)
  37. #endif /* DEBUG */
  38. #define C_ENTRY(name) .globl name; .align 4; name
  39. /*
  40. * Various ways of setting and clearing BIP in flags reg.
  41. * This is mucky, but necessary using microblaze version that
  42. * allows msr ops to write to BIP
  43. */
  44. #if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
  45. .macro clear_bip
  46. msrclr r0, MSR_BIP
  47. .endm
  48. .macro set_bip
  49. msrset r0, MSR_BIP
  50. .endm
  51. .macro clear_eip
  52. msrclr r0, MSR_EIP
  53. .endm
  54. .macro set_ee
  55. msrset r0, MSR_EE
  56. .endm
  57. .macro disable_irq
  58. msrclr r0, MSR_IE
  59. .endm
  60. .macro enable_irq
  61. msrset r0, MSR_IE
  62. .endm
  63. .macro set_ums
  64. msrset r0, MSR_UMS
  65. msrclr r0, MSR_VMS
  66. .endm
  67. .macro set_vms
  68. msrclr r0, MSR_UMS
  69. msrset r0, MSR_VMS
  70. .endm
  71. .macro clear_ums
  72. msrclr r0, MSR_UMS
  73. .endm
  74. .macro clear_vms_ums
  75. msrclr r0, MSR_VMS | MSR_UMS
  76. .endm
  77. #else
  78. .macro clear_bip
  79. mfs r11, rmsr
  80. andi r11, r11, ~MSR_BIP
  81. mts rmsr, r11
  82. .endm
  83. .macro set_bip
  84. mfs r11, rmsr
  85. ori r11, r11, MSR_BIP
  86. mts rmsr, r11
  87. .endm
  88. .macro clear_eip
  89. mfs r11, rmsr
  90. andi r11, r11, ~MSR_EIP
  91. mts rmsr, r11
  92. .endm
  93. .macro set_ee
  94. mfs r11, rmsr
  95. ori r11, r11, MSR_EE
  96. mts rmsr, r11
  97. .endm
  98. .macro disable_irq
  99. mfs r11, rmsr
  100. andi r11, r11, ~MSR_IE
  101. mts rmsr, r11
  102. .endm
  103. .macro enable_irq
  104. mfs r11, rmsr
  105. ori r11, r11, MSR_IE
  106. mts rmsr, r11
  107. .endm
  108. .macro set_ums
  109. mfs r11, rmsr
  110. ori r11, r11, MSR_VMS
  111. andni r11, r11, MSR_UMS
  112. mts rmsr, r11
  113. .endm
  114. .macro set_vms
  115. mfs r11, rmsr
  116. ori r11, r11, MSR_VMS
  117. andni r11, r11, MSR_UMS
  118. mts rmsr, r11
  119. .endm
  120. .macro clear_ums
  121. mfs r11, rmsr
  122. andni r11, r11, MSR_UMS
  123. mts rmsr,r11
  124. .endm
  125. .macro clear_vms_ums
  126. mfs r11, rmsr
  127. andni r11, r11, (MSR_VMS|MSR_UMS)
  128. mts rmsr,r11
  129. .endm
  130. #endif
  131. /* Define how to call high-level functions. With MMU, virtual mode must be
  132. * enabled when calling the high-level function. Clobbers R11.
  133. * VM_ON, VM_OFF, DO_JUMP_BIPCLR, DO_CALL
  134. */
  135. /* turn on virtual protected mode save */
  136. #define VM_ON \
  137. set_ums; \
  138. rted r0, 2f; \
  139. nop; \
  140. 2:
  141. /* turn off virtual protected mode save and user mode save*/
  142. #define VM_OFF \
  143. clear_vms_ums; \
  144. rted r0, TOPHYS(1f); \
  145. nop; \
  146. 1:
  147. #define SAVE_REGS \
  148. swi r2, r1, PT_R2; /* Save SDA */ \
  149. swi r3, r1, PT_R3; \
  150. swi r4, r1, PT_R4; \
  151. swi r5, r1, PT_R5; \
  152. swi r6, r1, PT_R6; \
  153. swi r7, r1, PT_R7; \
  154. swi r8, r1, PT_R8; \
  155. swi r9, r1, PT_R9; \
  156. swi r10, r1, PT_R10; \
  157. swi r11, r1, PT_R11; /* save clobbered regs after rval */\
  158. swi r12, r1, PT_R12; \
  159. swi r13, r1, PT_R13; /* Save SDA2 */ \
  160. swi r14, r1, PT_PC; /* PC, before IRQ/trap */ \
  161. swi r15, r1, PT_R15; /* Save LP */ \
  162. swi r16, r1, PT_R16; \
  163. swi r17, r1, PT_R17; \
  164. swi r18, r1, PT_R18; /* Save asm scratch reg */ \
  165. swi r19, r1, PT_R19; \
  166. swi r20, r1, PT_R20; \
  167. swi r21, r1, PT_R21; \
  168. swi r22, r1, PT_R22; \
  169. swi r23, r1, PT_R23; \
  170. swi r24, r1, PT_R24; \
  171. swi r25, r1, PT_R25; \
  172. swi r26, r1, PT_R26; \
  173. swi r27, r1, PT_R27; \
  174. swi r28, r1, PT_R28; \
  175. swi r29, r1, PT_R29; \
  176. swi r30, r1, PT_R30; \
  177. swi r31, r1, PT_R31; /* Save current task reg */ \
  178. mfs r11, rmsr; /* save MSR */ \
  179. swi r11, r1, PT_MSR;
  180. #define RESTORE_REGS \
  181. lwi r11, r1, PT_MSR; \
  182. mts rmsr , r11; \
  183. lwi r2, r1, PT_R2; /* restore SDA */ \
  184. lwi r3, r1, PT_R3; \
  185. lwi r4, r1, PT_R4; \
  186. lwi r5, r1, PT_R5; \
  187. lwi r6, r1, PT_R6; \
  188. lwi r7, r1, PT_R7; \
  189. lwi r8, r1, PT_R8; \
  190. lwi r9, r1, PT_R9; \
  191. lwi r10, r1, PT_R10; \
  192. lwi r11, r1, PT_R11; /* restore clobbered regs after rval */\
  193. lwi r12, r1, PT_R12; \
  194. lwi r13, r1, PT_R13; /* restore SDA2 */ \
  195. lwi r14, r1, PT_PC; /* RESTORE_LINK PC, before IRQ/trap */\
  196. lwi r15, r1, PT_R15; /* restore LP */ \
  197. lwi r16, r1, PT_R16; \
  198. lwi r17, r1, PT_R17; \
  199. lwi r18, r1, PT_R18; /* restore asm scratch reg */ \
  200. lwi r19, r1, PT_R19; \
  201. lwi r20, r1, PT_R20; \
  202. lwi r21, r1, PT_R21; \
  203. lwi r22, r1, PT_R22; \
  204. lwi r23, r1, PT_R23; \
  205. lwi r24, r1, PT_R24; \
  206. lwi r25, r1, PT_R25; \
  207. lwi r26, r1, PT_R26; \
  208. lwi r27, r1, PT_R27; \
  209. lwi r28, r1, PT_R28; \
  210. lwi r29, r1, PT_R29; \
  211. lwi r30, r1, PT_R30; \
  212. lwi r31, r1, PT_R31; /* Restore cur task reg */
  213. #define SAVE_STATE \
  214. swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* save stack */ \
  215. /* See if already in kernel mode.*/ \
  216. mfs r1, rmsr; \
  217. andi r1, r1, MSR_UMS; \
  218. bnei r1, 1f; \
  219. /* Kernel-mode state save. */ \
  220. /* Reload kernel stack-ptr. */ \
  221. lwi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); \
  222. /* FIXME: I can add these two lines to one */ \
  223. /* tophys(r1,r1); */ \
  224. /* addik r1, r1, -PT_SIZE; */ \
  225. addik r1, r1, CONFIG_KERNEL_BASE_ADDR - CONFIG_KERNEL_START - PT_SIZE; \
  226. SAVE_REGS \
  227. brid 2f; \
  228. swi r1, r1, PT_MODE; \
  229. 1: /* User-mode state save. */ \
  230. lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */\
  231. tophys(r1,r1); \
  232. lwi r1, r1, TS_THREAD_INFO; /* get the thread info */ \
  233. /* MS these three instructions can be added to one */ \
  234. /* addik r1, r1, THREAD_SIZE; */ \
  235. /* tophys(r1,r1); */ \
  236. /* addik r1, r1, -PT_SIZE; */ \
  237. addik r1, r1, THREAD_SIZE + CONFIG_KERNEL_BASE_ADDR - CONFIG_KERNEL_START - PT_SIZE; \
  238. SAVE_REGS \
  239. lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); \
  240. swi r11, r1, PT_R1; /* Store user SP. */ \
  241. swi r0, r1, PT_MODE; /* Was in user-mode. */ \
  242. /* MS: I am clearing UMS even in case when I come from kernel space */ \
  243. clear_ums; \
  244. 2: lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
  245. .text
  246. /*
  247. * User trap.
  248. *
  249. * System calls are handled here.
  250. *
  251. * Syscall protocol:
  252. * Syscall number in r12, args in r5-r10
  253. * Return value in r3
  254. *
  255. * Trap entered via brki instruction, so BIP bit is set, and interrupts
  256. * are masked. This is nice, means we don't have to CLI before state save
  257. */
  258. C_ENTRY(_user_exception):
  259. swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
  260. addi r14, r14, 4 /* return address is 4 byte after call */
  261. mfs r1, rmsr
  262. nop
  263. andi r1, r1, MSR_UMS
  264. bnei r1, 1f
  265. /* Kernel-mode state save - kernel execve */
  266. lwi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* Reload kernel stack-ptr*/
  267. tophys(r1,r1);
  268. addik r1, r1, -PT_SIZE; /* Make room on the stack. */
  269. SAVE_REGS
  270. swi r1, r1, PT_MODE; /* pt_regs -> kernel mode */
  271. brid 2f;
  272. nop; /* Fill delay slot */
  273. /* User-mode state save. */
  274. 1:
  275. lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
  276. tophys(r1,r1);
  277. lwi r1, r1, TS_THREAD_INFO; /* get stack from task_struct */
  278. /* calculate kernel stack pointer from task struct 8k */
  279. addik r1, r1, THREAD_SIZE;
  280. tophys(r1,r1);
  281. addik r1, r1, -PT_SIZE; /* Make room on the stack. */
  282. SAVE_REGS
  283. swi r0, r1, PT_R3
  284. swi r0, r1, PT_R4
  285. swi r0, r1, PT_MODE; /* Was in user-mode. */
  286. lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
  287. swi r11, r1, PT_R1; /* Store user SP. */
  288. clear_ums;
  289. 2: lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
  290. /* Save away the syscall number. */
  291. swi r12, r1, PT_R0;
  292. tovirt(r1,r1)
  293. /* where the trap should return need -8 to adjust for rtsd r15, 8*/
  294. /* Jump to the appropriate function for the system call number in r12
  295. * (r12 is not preserved), or return an error if r12 is not valid. The LP
  296. * register should point to the location where
  297. * the called function should return. [note that MAKE_SYS_CALL uses label 1] */
  298. /* Step into virtual mode */
  299. rtbd r0, 3f
  300. nop
  301. 3:
  302. lwi r11, CURRENT_TASK, TS_THREAD_INFO /* get thread info */
  303. lwi r11, r11, TI_FLAGS /* get flags in thread info */
  304. andi r11, r11, _TIF_WORK_SYSCALL_MASK
  305. beqi r11, 4f
  306. addik r3, r0, -ENOSYS
  307. swi r3, r1, PT_R3
  308. brlid r15, do_syscall_trace_enter
  309. addik r5, r1, PT_R0
  310. # do_syscall_trace_enter returns the new syscall nr.
  311. addk r12, r0, r3
  312. lwi r5, r1, PT_R5;
  313. lwi r6, r1, PT_R6;
  314. lwi r7, r1, PT_R7;
  315. lwi r8, r1, PT_R8;
  316. lwi r9, r1, PT_R9;
  317. lwi r10, r1, PT_R10;
  318. 4:
  319. /* Jump to the appropriate function for the system call number in r12
  320. * (r12 is not preserved), or return an error if r12 is not valid.
  321. * The LP register should point to the location where the called function
  322. * should return. [note that MAKE_SYS_CALL uses label 1] */
  323. /* See if the system call number is valid */
  324. addi r11, r12, -__NR_syscalls;
  325. bgei r11,5f;
  326. /* Figure out which function to use for this system call. */
  327. /* Note Microblaze barrel shift is optional, so don't rely on it */
  328. add r12, r12, r12; /* convert num -> ptr */
  329. add r12, r12, r12;
  330. #ifdef DEBUG
  331. /* Trac syscalls and stored them to syscall_debug_table */
  332. /* The first syscall location stores total syscall number */
  333. lwi r3, r0, syscall_debug_table
  334. addi r3, r3, 1
  335. swi r3, r0, syscall_debug_table
  336. lwi r3, r12, syscall_debug_table
  337. addi r3, r3, 1
  338. swi r3, r12, syscall_debug_table
  339. #endif
  340. # Find and jump into the syscall handler.
  341. lwi r12, r12, sys_call_table
  342. /* where the trap should return need -8 to adjust for rtsd r15, 8 */
  343. addi r15, r0, ret_from_trap-8
  344. bra r12
  345. /* The syscall number is invalid, return an error. */
  346. 5:
  347. rtsd r15, 8; /* looks like a normal subroutine return */
  348. addi r3, r0, -ENOSYS;
  349. /* Entry point used to return from a syscall/trap */
  350. /* We re-enable BIP bit before state restore */
  351. C_ENTRY(ret_from_trap):
  352. swi r3, r1, PT_R3
  353. swi r4, r1, PT_R4
  354. lwi r11, r1, PT_MODE;
  355. /* See if returning to kernel mode, if so, skip resched &c. */
  356. bnei r11, 2f;
  357. /* We're returning to user mode, so check for various conditions that
  358. * trigger rescheduling. */
  359. /* FIXME: Restructure all these flag checks. */
  360. lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
  361. lwi r11, r11, TI_FLAGS; /* get flags in thread info */
  362. andi r11, r11, _TIF_WORK_SYSCALL_MASK
  363. beqi r11, 1f
  364. brlid r15, do_syscall_trace_leave
  365. addik r5, r1, PT_R0
  366. 1:
  367. /* We're returning to user mode, so check for various conditions that
  368. * trigger rescheduling. */
  369. /* get thread info from current task */
  370. lwi r11, CURRENT_TASK, TS_THREAD_INFO;
  371. lwi r11, r11, TI_FLAGS; /* get flags in thread info */
  372. andi r11, r11, _TIF_NEED_RESCHED;
  373. beqi r11, 5f;
  374. bralid r15, schedule; /* Call scheduler */
  375. nop; /* delay slot */
  376. /* Maybe handle a signal */
  377. 5: /* get thread info from current task*/
  378. lwi r11, CURRENT_TASK, TS_THREAD_INFO;
  379. lwi r11, r11, TI_FLAGS; /* get flags in thread info */
  380. andi r11, r11, _TIF_SIGPENDING;
  381. beqi r11, 1f; /* Signals to handle, handle them */
  382. addik r5, r1, 0; /* Arg 1: struct pt_regs *regs */
  383. addi r7, r0, 1; /* Arg 3: int in_syscall */
  384. bralid r15, do_signal; /* Handle any signals */
  385. add r6, r0, r0; /* Arg 2: sigset_t *oldset */
  386. /* Finally, return to user state. */
  387. 1: set_bip; /* Ints masked for state restore */
  388. swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
  389. VM_OFF;
  390. tophys(r1,r1);
  391. RESTORE_REGS;
  392. addik r1, r1, PT_SIZE /* Clean up stack space. */
  393. lwi r1, r1, PT_R1 - PT_SIZE;/* Restore user stack pointer. */
  394. bri 6f;
  395. /* Return to kernel state. */
  396. 2: set_bip; /* Ints masked for state restore */
  397. VM_OFF;
  398. tophys(r1,r1);
  399. RESTORE_REGS;
  400. addik r1, r1, PT_SIZE /* Clean up stack space. */
  401. tovirt(r1,r1);
  402. 6:
  403. TRAP_return: /* Make global symbol for debugging */
  404. rtbd r14, 0; /* Instructions to return from an IRQ */
  405. nop;
  406. /* These syscalls need access to the struct pt_regs on the stack, so we
  407. implement them in assembly (they're basically all wrappers anyway). */
  408. C_ENTRY(sys_fork_wrapper):
  409. addi r5, r0, SIGCHLD /* Arg 0: flags */
  410. lwi r6, r1, PT_R1 /* Arg 1: child SP (use parent's) */
  411. addik r7, r1, 0 /* Arg 2: parent context */
  412. add r8, r0, r0 /* Arg 3: (unused) */
  413. add r9, r0, r0; /* Arg 4: (unused) */
  414. brid do_fork /* Do real work (tail-call) */
  415. add r10, r0, r0; /* Arg 5: (unused) */
  416. /* This the initial entry point for a new child thread, with an appropriate
  417. stack in place that makes it look the the child is in the middle of an
  418. syscall. This function is actually `returned to' from switch_thread
  419. (copy_thread makes ret_from_fork the return address in each new thread's
  420. saved context). */
  421. C_ENTRY(ret_from_fork):
  422. bralid r15, schedule_tail; /* ...which is schedule_tail's arg */
  423. add r3, r5, r0; /* switch_thread returns the prev task */
  424. /* ( in the delay slot ) */
  425. brid ret_from_trap; /* Do normal trap return */
  426. add r3, r0, r0; /* Child's fork call should return 0. */
  427. C_ENTRY(sys_vfork):
  428. brid microblaze_vfork /* Do real work (tail-call) */
  429. addik r5, r1, 0
  430. C_ENTRY(sys_clone):
  431. bnei r6, 1f; /* See if child SP arg (arg 1) is 0. */
  432. lwi r6, r1, PT_R1; /* If so, use paret's stack ptr */
  433. 1: addik r7, r1, 0; /* Arg 2: parent context */
  434. lwi r9, r1, PT_R8; /* parent tid. */
  435. lwi r10, r1, PT_R9; /* child tid. */
  436. /* do_fork will pick up TLS from regs->r10. */
  437. brid do_fork /* Do real work (tail-call) */
  438. add r8, r0, r0; /* Arg 3: (unused) */
  439. C_ENTRY(sys_execve):
  440. brid microblaze_execve; /* Do real work (tail-call).*/
  441. addik r8, r1, 0; /* add user context as 4th arg */
  442. C_ENTRY(sys_rt_sigreturn_wrapper):
  443. brid sys_rt_sigreturn /* Do real work */
  444. addik r5, r1, 0; /* add user context as 1st arg */
  445. /*
  446. * HW EXCEPTION rutine start
  447. */
  448. C_ENTRY(full_exception_trap):
  449. /* adjust exception address for privileged instruction
  450. * for finding where is it */
  451. addik r17, r17, -4
  452. SAVE_STATE /* Save registers */
  453. /* PC, before IRQ/trap - this is one instruction above */
  454. swi r17, r1, PT_PC;
  455. tovirt(r1,r1)
  456. /* FIXME this can be store directly in PT_ESR reg.
  457. * I tested it but there is a fault */
  458. /* where the trap should return need -8 to adjust for rtsd r15, 8 */
  459. addik r15, r0, ret_from_exc - 8
  460. mfs r6, resr
  461. mfs r7, rfsr; /* save FSR */
  462. mts rfsr, r0; /* Clear sticky fsr */
  463. rted r0, full_exception
  464. addik r5, r1, 0 /* parameter struct pt_regs * regs */
  465. /*
  466. * Unaligned data trap.
  467. *
  468. * Unaligned data trap last on 4k page is handled here.
  469. *
  470. * Trap entered via exception, so EE bit is set, and interrupts
  471. * are masked. This is nice, means we don't have to CLI before state save
  472. *
  473. * The assembler routine is in "arch/microblaze/kernel/hw_exception_handler.S"
  474. */
  475. C_ENTRY(unaligned_data_trap):
  476. /* MS: I have to save r11 value and then restore it because
  477. * set_bit, clear_eip, set_ee use r11 as temp register if MSR
  478. * instructions are not used. We don't need to do if MSR instructions
  479. * are used and they use r0 instead of r11.
  480. * I am using ENTRY_SP which should be primary used only for stack
  481. * pointer saving. */
  482. swi r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
  483. set_bip; /* equalize initial state for all possible entries */
  484. clear_eip;
  485. set_ee;
  486. lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
  487. SAVE_STATE /* Save registers.*/
  488. /* PC, before IRQ/trap - this is one instruction above */
  489. swi r17, r1, PT_PC;
  490. tovirt(r1,r1)
  491. /* where the trap should return need -8 to adjust for rtsd r15, 8 */
  492. addik r15, r0, ret_from_exc-8
  493. mfs r3, resr /* ESR */
  494. mfs r4, rear /* EAR */
  495. rtbd r0, _unaligned_data_exception
  496. addik r7, r1, 0 /* parameter struct pt_regs * regs */
  497. /*
  498. * Page fault traps.
  499. *
  500. * If the real exception handler (from hw_exception_handler.S) didn't find
  501. * the mapping for the process, then we're thrown here to handle such situation.
  502. *
  503. * Trap entered via exceptions, so EE bit is set, and interrupts
  504. * are masked. This is nice, means we don't have to CLI before state save
  505. *
  506. * Build a standard exception frame for TLB Access errors. All TLB exceptions
  507. * will bail out to this point if they can't resolve the lightweight TLB fault.
  508. *
  509. * The C function called is in "arch/microblaze/mm/fault.c", declared as:
  510. * void do_page_fault(struct pt_regs *regs,
  511. * unsigned long address,
  512. * unsigned long error_code)
  513. */
  514. /* data and intruction trap - which is choose is resolved int fault.c */
  515. C_ENTRY(page_fault_data_trap):
  516. SAVE_STATE /* Save registers.*/
  517. /* PC, before IRQ/trap - this is one instruction above */
  518. swi r17, r1, PT_PC;
  519. tovirt(r1,r1)
  520. /* where the trap should return need -8 to adjust for rtsd r15, 8 */
  521. addik r15, r0, ret_from_exc-8
  522. mfs r6, rear /* parameter unsigned long address */
  523. mfs r7, resr /* parameter unsigned long error_code */
  524. rted r0, do_page_fault
  525. addik r5, r1, 0 /* parameter struct pt_regs * regs */
  526. C_ENTRY(page_fault_instr_trap):
  527. SAVE_STATE /* Save registers.*/
  528. /* PC, before IRQ/trap - this is one instruction above */
  529. swi r17, r1, PT_PC;
  530. tovirt(r1,r1)
  531. /* where the trap should return need -8 to adjust for rtsd r15, 8 */
  532. addik r15, r0, ret_from_exc-8
  533. mfs r6, rear /* parameter unsigned long address */
  534. ori r7, r0, 0 /* parameter unsigned long error_code */
  535. rted r0, do_page_fault
  536. addik r5, r1, 0 /* parameter struct pt_regs * regs */
  537. /* Entry point used to return from an exception. */
  538. C_ENTRY(ret_from_exc):
  539. lwi r11, r1, PT_MODE;
  540. bnei r11, 2f; /* See if returning to kernel mode, */
  541. /* ... if so, skip resched &c. */
  542. /* We're returning to user mode, so check for various conditions that
  543. trigger rescheduling. */
  544. lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
  545. lwi r11, r11, TI_FLAGS; /* get flags in thread info */
  546. andi r11, r11, _TIF_NEED_RESCHED;
  547. beqi r11, 5f;
  548. /* Call the scheduler before returning from a syscall/trap. */
  549. bralid r15, schedule; /* Call scheduler */
  550. nop; /* delay slot */
  551. /* Maybe handle a signal */
  552. 5: lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
  553. lwi r11, r11, TI_FLAGS; /* get flags in thread info */
  554. andi r11, r11, _TIF_SIGPENDING;
  555. beqi r11, 1f; /* Signals to handle, handle them */
  556. /*
  557. * Handle a signal return; Pending signals should be in r18.
  558. *
  559. * Not all registers are saved by the normal trap/interrupt entry
  560. * points (for instance, call-saved registers (because the normal
  561. * C-compiler calling sequence in the kernel makes sure they're
  562. * preserved), and call-clobbered registers in the case of
  563. * traps), but signal handlers may want to examine or change the
  564. * complete register state. Here we save anything not saved by
  565. * the normal entry sequence, so that it may be safely restored
  566. * (in a possibly modified form) after do_signal returns. */
  567. addik r5, r1, 0; /* Arg 1: struct pt_regs *regs */
  568. addi r7, r0, 0; /* Arg 3: int in_syscall */
  569. bralid r15, do_signal; /* Handle any signals */
  570. add r6, r0, r0; /* Arg 2: sigset_t *oldset */
  571. /* Finally, return to user state. */
  572. 1: set_bip; /* Ints masked for state restore */
  573. swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
  574. VM_OFF;
  575. tophys(r1,r1);
  576. RESTORE_REGS;
  577. addik r1, r1, PT_SIZE /* Clean up stack space. */
  578. lwi r1, r1, PT_R1 - PT_SIZE; /* Restore user stack pointer. */
  579. bri 6f;
  580. /* Return to kernel state. */
  581. 2: set_bip; /* Ints masked for state restore */
  582. VM_OFF;
  583. tophys(r1,r1);
  584. RESTORE_REGS;
  585. addik r1, r1, PT_SIZE /* Clean up stack space. */
  586. tovirt(r1,r1);
  587. 6:
  588. EXC_return: /* Make global symbol for debugging */
  589. rtbd r14, 0; /* Instructions to return from an IRQ */
  590. nop;
  591. /*
  592. * HW EXCEPTION rutine end
  593. */
  594. /*
  595. * Hardware maskable interrupts.
  596. *
  597. * The stack-pointer (r1) should have already been saved to the memory
  598. * location PER_CPU(ENTRY_SP).
  599. */
  600. C_ENTRY(_interrupt):
  601. /* MS: we are in physical address */
  602. /* Save registers, switch to proper stack, convert SP to virtual.*/
  603. swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP))
  604. /* MS: See if already in kernel mode. */
  605. mfs r1, rmsr
  606. nop
  607. andi r1, r1, MSR_UMS
  608. bnei r1, 1f
  609. /* Kernel-mode state save. */
  610. lwi r1, r0, TOPHYS(PER_CPU(ENTRY_SP))
  611. tophys(r1,r1); /* MS: I have in r1 physical address where stack is */
  612. /* save registers */
  613. /* MS: Make room on the stack -> activation record */
  614. addik r1, r1, -PT_SIZE;
  615. SAVE_REGS
  616. brid 2f;
  617. swi r1, r1, PT_MODE; /* 0 - user mode, 1 - kernel mode */
  618. 1:
  619. /* User-mode state save. */
  620. /* MS: get the saved current */
  621. lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
  622. tophys(r1,r1);
  623. lwi r1, r1, TS_THREAD_INFO;
  624. addik r1, r1, THREAD_SIZE;
  625. tophys(r1,r1);
  626. /* save registers */
  627. addik r1, r1, -PT_SIZE;
  628. SAVE_REGS
  629. /* calculate mode */
  630. swi r0, r1, PT_MODE;
  631. lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
  632. swi r11, r1, PT_R1;
  633. clear_ums;
  634. 2:
  635. lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
  636. tovirt(r1,r1)
  637. addik r15, r0, irq_call;
  638. irq_call:rtbd r0, do_IRQ;
  639. addik r5, r1, 0;
  640. /* MS: we are in virtual mode */
  641. ret_from_irq:
  642. lwi r11, r1, PT_MODE;
  643. bnei r11, 2f;
  644. lwi r11, CURRENT_TASK, TS_THREAD_INFO;
  645. lwi r11, r11, TI_FLAGS; /* MS: get flags from thread info */
  646. andi r11, r11, _TIF_NEED_RESCHED;
  647. beqi r11, 5f
  648. bralid r15, schedule;
  649. nop; /* delay slot */
  650. /* Maybe handle a signal */
  651. 5: lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* MS: get thread info */
  652. lwi r11, r11, TI_FLAGS; /* get flags in thread info */
  653. andi r11, r11, _TIF_SIGPENDING;
  654. beqid r11, no_intr_resched
  655. /* Handle a signal return; Pending signals should be in r18. */
  656. addi r7, r0, 0; /* Arg 3: int in_syscall */
  657. addik r5, r1, 0; /* Arg 1: struct pt_regs *regs */
  658. bralid r15, do_signal; /* Handle any signals */
  659. add r6, r0, r0; /* Arg 2: sigset_t *oldset */
  660. /* Finally, return to user state. */
  661. no_intr_resched:
  662. /* Disable interrupts, we are now committed to the state restore */
  663. disable_irq
  664. swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE);
  665. VM_OFF;
  666. tophys(r1,r1);
  667. RESTORE_REGS
  668. addik r1, r1, PT_SIZE /* MS: Clean up stack space. */
  669. lwi r1, r1, PT_R1 - PT_SIZE;
  670. bri 6f;
  671. /* MS: Return to kernel state. */
  672. 2:
  673. #ifdef CONFIG_PREEMPT
  674. lwi r11, CURRENT_TASK, TS_THREAD_INFO;
  675. /* MS: get preempt_count from thread info */
  676. lwi r5, r11, TI_PREEMPT_COUNT;
  677. bgti r5, restore;
  678. lwi r5, r11, TI_FLAGS; /* get flags in thread info */
  679. andi r5, r5, _TIF_NEED_RESCHED;
  680. beqi r5, restore /* if zero jump over */
  681. preempt:
  682. /* interrupts are off that's why I am calling preempt_chedule_irq */
  683. bralid r15, preempt_schedule_irq
  684. nop
  685. lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
  686. lwi r5, r11, TI_FLAGS; /* get flags in thread info */
  687. andi r5, r5, _TIF_NEED_RESCHED;
  688. bnei r5, preempt /* if non zero jump to resched */
  689. restore:
  690. #endif
  691. VM_OFF /* MS: turn off MMU */
  692. tophys(r1,r1)
  693. RESTORE_REGS
  694. addik r1, r1, PT_SIZE /* MS: Clean up stack space. */
  695. tovirt(r1,r1);
  696. 6:
  697. IRQ_return: /* MS: Make global symbol for debugging */
  698. rtid r14, 0
  699. nop
  700. /*
  701. * Debug trap for KGDB. Enter to _debug_exception by brki r16, 0x18
  702. * and call handling function with saved pt_regs
  703. */
  704. C_ENTRY(_debug_exception):
  705. /* BIP bit is set on entry, no interrupts can occur */
/*
 * NOTE(review): this is the body of the debug-trap (brki) handler; the
 * ENTRY(_debug_exception) label itself is above this window — confirm
 * against the full file.  On entry r16 holds the trapped PC and the BIP
 * (break-in-progress) bit is set, so no interrupts can occur.
 */
	/* Stash the incoming stack pointer, then test MSR_UMS to see
	 * whether the trap came from user mode (-> 1f) or kernel mode. */
	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP))
	mfs	r1, rmsr
	nop
	andi	r1, r1, MSR_UMS
	bnei	r1, 1f
/* MS: Kernel-mode state save - kgdb */
	lwi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* Reload kernel stack-ptr*/
	/* BIP bit is set on entry, no interrupts can occur */
	/* Convert the kernel virtual SP to physical and open a pt_regs frame */
	addik	r1, r1, CONFIG_KERNEL_BASE_ADDR - CONFIG_KERNEL_START - PT_SIZE;
	SAVE_REGS;
	/* save all regs to pt_reg structure */
	swi	r0, r1, PT_R0;		/* R0 must be saved too */
	swi	r14, r1, PT_R14		/* rewrite saved R14 value */
	swi	r16, r1, PT_PC;		/* PC and r16 are the same */
	/* save special purpose registers to pt_regs */
	mfs	r11, rear;
	swi	r11, r1, PT_EAR;
	mfs	r11, resr;
	swi	r11, r1, PT_ESR;
	mfs	r11, rfsr;
	swi	r11, r1, PT_FSR;

	/* stack pointer is in physical address as it is decreased
	 * by PT_SIZE but we need to get correct R1 value */
	addik	r11, r1, CONFIG_KERNEL_START - CONFIG_KERNEL_BASE_ADDR + PT_SIZE;
	swi	r11, r1, PT_R1
	/* MS: r31 - current pointer isn't changed */
	tovirt(r1,r1)
#ifdef CONFIG_KGDB
	addi	r5, r1, 0		/* pass pt_reg address as the first arg */
	addik	r15, r0, dbtrap_call;	/* return address */
	rtbd	r0, microblaze_kgdb_break
	nop;
#endif
	/* MS: Place handler for brki from kernel space if KGDB is OFF.
	 * It is very unlikely that another brki instruction is called.
	 * Spin here forever (brai 0 jumps to address 0 = reset vector). */
	bri	0

/* MS: User-mode state save - gdb */
1:	lwi	r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
	tophys(r1,r1);
	lwi	r1, r1, TS_THREAD_INFO;	/* get the thread info */
	addik	r1, r1, THREAD_SIZE;	/* calculate kernel stack pointer */
	tophys(r1,r1);

	addik	r1, r1, -PT_SIZE;	/* Make room on the stack. */
	SAVE_REGS;
	swi	r16, r1, PT_PC;		/* Save LP */
	swi	r0, r1, PT_MODE;	/* Was in user-mode. */
	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
	swi	r11, r1, PT_R1;		/* Store user SP. */
	lwi	CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
	tovirt(r1,r1)
	set_vms;			/* back to virtual mode for the C handler */
	addik	r5, r1, 0;		/* arg 1: struct pt_regs * */
	addik	r15, r0, dbtrap_call;
dbtrap_call:	/* Return point for kernel/user entry + 8 because of rtsd r15, 8 */
	rtbd	r0, sw_exception
	nop

	/* MS: The first instruction for the second part of the gdb/kgdb */
	set_bip;			/* Ints masked for state restore */
	lwi	r11, r1, PT_MODE;	/* 0 = came from user mode */
	bnei	r11, 2f;
/* MS: Return to user space - gdb */
	/* Get current task ptr into r11 */
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;	/* get thread info */
	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r11, r11, _TIF_NEED_RESCHED;
	beqi	r11, 5f;

	/* Call the scheduler before returning from a syscall/trap. */
	bralid	r15, schedule;	/* Call scheduler */
	nop;				/* delay slot */

	/* Maybe handle a signal */
5:	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;	/* get thread info */
	lwi	r11, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r11, r11, _TIF_SIGPENDING;
	beqi	r11, 1f;		/* Signals to handle, handle them */

	addik	r5, r1, 0;		/* Arg 1: struct pt_regs *regs */
	addi	r7, r0, 0;		/* Arg 3: int in_syscall */
	bralid	r15, do_signal;	/* Handle any signals */
	add	r6, r0, r0;		/* Arg 2: sigset_t *oldset (delay slot) */

/* Finally, return to user state. */
1:	swi	CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
	VM_OFF;				/* physical mode for the final restore */
	tophys(r1,r1);
	/* MS: Restore all regs */
	RESTORE_REGS
	addik	r1, r1, PT_SIZE		/* Clean up stack space */
	lwi	r1, r1, PT_R1 - PT_SIZE; /* Restore user stack pointer */
DBTRAP_return_user:	/* MS: Make global symbol for debugging */
	rtbd	r16, 0;	/* MS: Instructions to return from a debug trap */
	nop;

/* MS: Return to kernel state - kgdb */
2:	VM_OFF;
	tophys(r1,r1);
	/* MS: Restore all regs */
	RESTORE_REGS
	lwi	r14, r1, PT_R14;
	lwi	r16, r1, PT_PC;
	addik	r1, r1, PT_SIZE;	/* MS: Clean up stack space */
	tovirt(r1,r1);
DBTRAP_return_kernel:	/* MS: Make global symbol for debugging */
	rtbd	r16, 0;	/* MS: Instructions to return from a debug trap */
	nop;
/*
 * struct task_struct *_switch_to(struct thread_info *prev,
 *				  struct thread_info *next)
 *
 * In:  r5 = thread_info of the outgoing task
 *      r6 = thread_info of the incoming task
 * Out: r3 = previous current task (CURRENT_TASK / r31 at entry)
 *
 * Saves the outgoing task's callee-visible state into its cpu_context,
 * switches CURRENT_TASK (r31) and the per-CPU CURRENT_SAVE slot to the
 * new task, then restores the new task's cpu_context and returns into
 * it via rtsd r15, 8.  Uses r11/r12 (volatile) as scratch.
 */
ENTRY(_switch_to)
	/* prepare return value */
	addk	r3, r0, CURRENT_TASK

	/* save registers in cpu_context */
	/* use r11 and r12, volatile registers, as temp register */
	/* give start of cpu_context for previous process */
	addik	r11, r5, TI_CPU_CONTEXT
	swi	r1, r11, CC_R1
	swi	r2, r11, CC_R2
	/* skip volatile registers.
	 * they are saved on stack when we jumped to _switch_to() */
	/* dedicated registers */
	swi	r13, r11, CC_R13
	swi	r14, r11, CC_R14
	swi	r15, r11, CC_R15
	swi	r16, r11, CC_R16
	swi	r17, r11, CC_R17
	swi	r18, r11, CC_R18
	/* save non-volatile registers */
	swi	r19, r11, CC_R19
	swi	r20, r11, CC_R20
	swi	r21, r11, CC_R21
	swi	r22, r11, CC_R22
	swi	r23, r11, CC_R23
	swi	r24, r11, CC_R24
	swi	r25, r11, CC_R25
	swi	r26, r11, CC_R26
	swi	r27, r11, CC_R27
	swi	r28, r11, CC_R28
	swi	r29, r11, CC_R29
	swi	r30, r11, CC_R30
	/* special purpose registers */
	mfs	r12, rmsr
	swi	r12, r11, CC_MSR
	mfs	r12, rear
	swi	r12, r11, CC_EAR
	mfs	r12, resr
	swi	r12, r11, CC_ESR
	mfs	r12, rfsr
	swi	r12, r11, CC_FSR

	/* update r31, the current - give me pointer to task which will be next */
	lwi	CURRENT_TASK, r6, TI_TASK
	/* stored it to current_save too */
	swi	CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE)

	/* get new process' cpu context and restore */
	/* give me start where start context of next task */
	addik	r11, r6, TI_CPU_CONTEXT

	/* non-volatile registers */
	lwi	r30, r11, CC_R30
	lwi	r29, r11, CC_R29
	lwi	r28, r11, CC_R28
	lwi	r27, r11, CC_R27
	lwi	r26, r11, CC_R26
	lwi	r25, r11, CC_R25
	lwi	r24, r11, CC_R24
	lwi	r23, r11, CC_R23
	lwi	r22, r11, CC_R22
	lwi	r21, r11, CC_R21
	lwi	r20, r11, CC_R20
	lwi	r19, r11, CC_R19
	/* dedicated registers */
	lwi	r18, r11, CC_R18
	lwi	r17, r11, CC_R17
	lwi	r16, r11, CC_R16
	lwi	r15, r11, CC_R15	/* r15 = new task's return address */
	lwi	r14, r11, CC_R14
	lwi	r13, r11, CC_R13
	/* skip volatile registers */
	lwi	r2, r11, CC_R2
	lwi	r1, r11, CC_R1		/* switch to new task's kernel stack */
	/* special purpose registers */
	/* NOTE: only FSR and MSR are restored; EAR/ESR are informational
	 * exception state and are not written back */
	lwi	r12, r11, CC_FSR
	mts	rfsr, r12
	lwi	r12, r11, CC_MSR
	mts	rmsr, r12		/* may re-enable interrupts for new task */

	rtsd	r15, 8			/* resume new task past its call site */
	nop				/* delay slot */
/*
 * _reset - software reset: branch to the hardware reset vector at
 * absolute address 0 and restart the CPU from the boot path.
 */
ENTRY(_reset)
	brai	0; /* Jump to reset vector */
/* These are compiled and loaded into high memory, then
 * copied into place in mach_early_setup */
/*
 * Interrupt vector table template.  Each slot is a single 'brai'
 * (branch absolute immediate) to the physical address of its handler;
 * TOPHYS() is needed because the MMU is off when a vector fires.
 * Slot layout (8 bytes each): 0x00 reset, 0x08 syscall/user exception,
 * 0x10 interrupt, 0x18 debug trap, 0x20 hardware exception.
 */
	.section .init.ivt, "ax"
#if CONFIG_MANUAL_RESET_VECTOR
	/* CONFIG_MANUAL_RESET_VECTOR doubles as the reset target address;
	 * a value of 0 leaves the reset slot untouched */
	.org	0x0
	brai	CONFIG_MANUAL_RESET_VECTOR
#endif
	.org	0x8
	brai	TOPHYS(_user_exception); /* syscall handler */
	.org	0x10
	brai	TOPHYS(_interrupt);	/* Interrupt handler */
	.org	0x18
	brai	TOPHYS(_debug_exception); /* debug trap handler */
	.org	0x20
	brai	TOPHYS(_hw_exception_handler); /* HW exception handler */
	.section .rodata,"a"
	/* Pull in the sys_call_table and measure its size in bytes */
#include "syscall_table.S"

syscall_table_size=(.-sys_call_table)

/* NUL-terminated tag strings displayed by the stack unwinder */
type_SYSCALL:
	.ascii "SYSCALL\0"
type_IRQ:
	.ascii "IRQ\0"
type_IRQ_PREEMPT:
	.ascii "IRQ (PREEMPTED)\0"
type_SYSCALL_PREEMPT:
	.ascii " SYSCALL (PREEMPTED)\0"

	/*
	 * Trap decoding for stack unwinder
	 * Tuples are (start addr, end addr, string)
	 * If return address lies on [start addr, end addr],
	 * unwinder displays 'string'
	 */
	.align 4
.global microblaze_trap_handlers
microblaze_trap_handlers:
	/* Exact matches come first */
	.word ret_from_trap; .word ret_from_trap ; .word type_SYSCALL
	.word ret_from_irq ; .word ret_from_irq  ; .word type_IRQ
	/* Fuzzy matches go here */
	.word ret_from_irq ; .word no_intr_resched ; .word type_IRQ_PREEMPT
	.word ret_from_trap; .word TRAP_return     ; .word type_SYSCALL_PREEMPT
	/* End of table: terminating all-zero tuple */
	.word 0 ; .word 0 ; .word 0