/* arch/microblaze/kernel/entry.S */
/*
 * Low-level system-call handling, trap handlers and context-switching
 *
 * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2008-2009 PetaLogix
 * Copyright (C) 2003 John Williams <jwilliams@itee.uq.edu.au>
 * Copyright (C) 2001,2002 NEC Corporation
 * Copyright (C) 2001,2002 Miles Bader <miles@gnu.org>
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License. See the file COPYING in the main directory of this
 * archive for more details.
 *
 * Written by Miles Bader <miles@gnu.org>
 * Heavily modified by John Williams for Microblaze
 */
  17. #include <linux/sys.h>
  18. #include <linux/linkage.h>
  19. #include <asm/entry.h>
  20. #include <asm/current.h>
  21. #include <asm/processor.h>
  22. #include <asm/exceptions.h>
  23. #include <asm/asm-offsets.h>
  24. #include <asm/thread_info.h>
  25. #include <asm/page.h>
  26. #include <asm/unistd.h>
  27. #include <linux/errno.h>
  28. #include <asm/signal.h>
  29. #undef DEBUG
  30. /* The size of a state save frame. */
  31. #define STATE_SAVE_SIZE (PT_SIZE + STATE_SAVE_ARG_SPACE)
  32. /* The offset of the struct pt_regs in a `state save frame' on the stack. */
  33. #define PTO STATE_SAVE_ARG_SPACE /* 24 the space for args */
  34. #define C_ENTRY(name) .globl name; .align 4; name
  35. /*
  36. * Various ways of setting and clearing BIP in flags reg.
  37. * This is mucky, but necessary using microblaze version that
  38. * allows msr ops to write to BIP
  39. */
  40. #if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
  41. .macro clear_bip
  42. msrclr r0, MSR_BIP
  43. .endm
  44. .macro set_bip
  45. msrset r0, MSR_BIP
  46. .endm
  47. .macro clear_eip
  48. msrclr r0, MSR_EIP
  49. .endm
  50. .macro set_ee
  51. msrset r0, MSR_EE
  52. .endm
  53. .macro disable_irq
  54. msrclr r0, MSR_IE
  55. .endm
  56. .macro enable_irq
  57. msrset r0, MSR_IE
  58. .endm
  59. .macro set_ums
  60. msrset r0, MSR_UMS
  61. msrclr r0, MSR_VMS
  62. .endm
  63. .macro set_vms
  64. msrclr r0, MSR_UMS
  65. msrset r0, MSR_VMS
  66. .endm
  67. .macro clear_ums
  68. msrclr r0, MSR_UMS
  69. .endm
  70. .macro clear_vms_ums
  71. msrclr r0, MSR_VMS | MSR_UMS
  72. .endm
  73. #else
  74. .macro clear_bip
  75. mfs r11, rmsr
  76. andi r11, r11, ~MSR_BIP
  77. mts rmsr, r11
  78. .endm
  79. .macro set_bip
  80. mfs r11, rmsr
  81. ori r11, r11, MSR_BIP
  82. mts rmsr, r11
  83. .endm
  84. .macro clear_eip
  85. mfs r11, rmsr
  86. andi r11, r11, ~MSR_EIP
  87. mts rmsr, r11
  88. .endm
  89. .macro set_ee
  90. mfs r11, rmsr
  91. ori r11, r11, MSR_EE
  92. mts rmsr, r11
  93. .endm
  94. .macro disable_irq
  95. mfs r11, rmsr
  96. andi r11, r11, ~MSR_IE
  97. mts rmsr, r11
  98. .endm
  99. .macro enable_irq
  100. mfs r11, rmsr
  101. ori r11, r11, MSR_IE
  102. mts rmsr, r11
  103. .endm
  104. .macro set_ums
  105. mfs r11, rmsr
  106. ori r11, r11, MSR_VMS
  107. andni r11, r11, MSR_UMS
  108. mts rmsr, r11
  109. .endm
  110. .macro set_vms
  111. mfs r11, rmsr
  112. ori r11, r11, MSR_VMS
  113. andni r11, r11, MSR_UMS
  114. mts rmsr, r11
  115. .endm
  116. .macro clear_ums
  117. mfs r11, rmsr
  118. andni r11, r11, MSR_UMS
  119. mts rmsr,r11
  120. .endm
  121. .macro clear_vms_ums
  122. mfs r11, rmsr
  123. andni r11, r11, (MSR_VMS|MSR_UMS)
  124. mts rmsr,r11
  125. .endm
  126. #endif
  127. /* Define how to call high-level functions. With MMU, virtual mode must be
  128. * enabled when calling the high-level function. Clobbers R11.
  129. * VM_ON, VM_OFF, DO_JUMP_BIPCLR, DO_CALL
  130. */
  131. /* turn on virtual protected mode save */
  132. #define VM_ON \
  133. set_ums; \
  134. rted r0, 2f; \
  135. nop; \
  136. 2:
  137. /* turn off virtual protected mode save and user mode save*/
  138. #define VM_OFF \
  139. clear_vms_ums; \
  140. rted r0, TOPHYS(1f); \
  141. nop; \
  142. 1:
  143. #define SAVE_REGS \
  144. swi r2, r1, PTO+PT_R2; /* Save SDA */ \
  145. swi r3, r1, PTO+PT_R3; \
  146. swi r4, r1, PTO+PT_R4; \
  147. swi r5, r1, PTO+PT_R5; \
  148. swi r6, r1, PTO+PT_R6; \
  149. swi r7, r1, PTO+PT_R7; \
  150. swi r8, r1, PTO+PT_R8; \
  151. swi r9, r1, PTO+PT_R9; \
  152. swi r10, r1, PTO+PT_R10; \
  153. swi r11, r1, PTO+PT_R11; /* save clobbered regs after rval */\
  154. swi r12, r1, PTO+PT_R12; \
  155. swi r13, r1, PTO+PT_R13; /* Save SDA2 */ \
  156. swi r14, r1, PTO+PT_PC; /* PC, before IRQ/trap */ \
  157. swi r15, r1, PTO+PT_R15; /* Save LP */ \
  158. swi r18, r1, PTO+PT_R18; /* Save asm scratch reg */ \
  159. swi r19, r1, PTO+PT_R19; \
  160. swi r20, r1, PTO+PT_R20; \
  161. swi r21, r1, PTO+PT_R21; \
  162. swi r22, r1, PTO+PT_R22; \
  163. swi r23, r1, PTO+PT_R23; \
  164. swi r24, r1, PTO+PT_R24; \
  165. swi r25, r1, PTO+PT_R25; \
  166. swi r26, r1, PTO+PT_R26; \
  167. swi r27, r1, PTO+PT_R27; \
  168. swi r28, r1, PTO+PT_R28; \
  169. swi r29, r1, PTO+PT_R29; \
  170. swi r30, r1, PTO+PT_R30; \
  171. swi r31, r1, PTO+PT_R31; /* Save current task reg */ \
  172. mfs r11, rmsr; /* save MSR */ \
  173. swi r11, r1, PTO+PT_MSR;
  174. #define RESTORE_REGS \
  175. lwi r11, r1, PTO+PT_MSR; \
  176. mts rmsr , r11; \
  177. lwi r2, r1, PTO+PT_R2; /* restore SDA */ \
  178. lwi r3, r1, PTO+PT_R3; \
  179. lwi r4, r1, PTO+PT_R4; \
  180. lwi r5, r1, PTO+PT_R5; \
  181. lwi r6, r1, PTO+PT_R6; \
  182. lwi r7, r1, PTO+PT_R7; \
  183. lwi r8, r1, PTO+PT_R8; \
  184. lwi r9, r1, PTO+PT_R9; \
  185. lwi r10, r1, PTO+PT_R10; \
  186. lwi r11, r1, PTO+PT_R11; /* restore clobbered regs after rval */\
  187. lwi r12, r1, PTO+PT_R12; \
  188. lwi r13, r1, PTO+PT_R13; /* restore SDA2 */ \
  189. lwi r14, r1, PTO+PT_PC; /* RESTORE_LINK PC, before IRQ/trap */\
  190. lwi r15, r1, PTO+PT_R15; /* restore LP */ \
  191. lwi r18, r1, PTO+PT_R18; /* restore asm scratch reg */ \
  192. lwi r19, r1, PTO+PT_R19; \
  193. lwi r20, r1, PTO+PT_R20; \
  194. lwi r21, r1, PTO+PT_R21; \
  195. lwi r22, r1, PTO+PT_R22; \
  196. lwi r23, r1, PTO+PT_R23; \
  197. lwi r24, r1, PTO+PT_R24; \
  198. lwi r25, r1, PTO+PT_R25; \
  199. lwi r26, r1, PTO+PT_R26; \
  200. lwi r27, r1, PTO+PT_R27; \
  201. lwi r28, r1, PTO+PT_R28; \
  202. lwi r29, r1, PTO+PT_R29; \
  203. lwi r30, r1, PTO+PT_R30; \
  204. lwi r31, r1, PTO+PT_R31; /* Restore cur task reg */
  205. #define SAVE_STATE \
  206. swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* save stack */ \
  207. /* See if already in kernel mode.*/ \
  208. mfs r1, rmsr; \
  209. andi r1, r1, MSR_UMS; \
  210. bnei r1, 1f; \
  211. /* Kernel-mode state save. */ \
  212. /* Reload kernel stack-ptr. */ \
  213. lwi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); \
  214. /* FIXME: I can add these two lines to one */ \
  215. /* tophys(r1,r1); */ \
  216. /* addik r1, r1, -STATE_SAVE_SIZE; */ \
  217. addik r1, r1, CONFIG_KERNEL_BASE_ADDR - CONFIG_KERNEL_START - STATE_SAVE_SIZE; \
  218. SAVE_REGS \
  219. brid 2f; \
  220. swi r1, r1, PTO+PT_MODE; \
  221. 1: /* User-mode state save. */ \
  222. lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */\
  223. tophys(r1,r1); \
  224. lwi r1, r1, TS_THREAD_INFO; /* get the thread info */ \
  225. /* MS these three instructions can be added to one */ \
  226. /* addik r1, r1, THREAD_SIZE; */ \
  227. /* tophys(r1,r1); */ \
  228. /* addik r1, r1, -STATE_SAVE_SIZE; */ \
  229. addik r1, r1, THREAD_SIZE + CONFIG_KERNEL_BASE_ADDR - CONFIG_KERNEL_START - STATE_SAVE_SIZE; \
  230. SAVE_REGS \
  231. lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); \
  232. swi r11, r1, PTO+PT_R1; /* Store user SP. */ \
  233. swi r0, r1, PTO + PT_MODE; /* Was in user-mode. */ \
  234. /* MS: I am clearing UMS even in case when I come from kernel space */ \
  235. clear_ums; \
  236. 2: lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
  237. .text
  238. /*
  239. * User trap.
  240. *
  241. * System calls are handled here.
  242. *
  243. * Syscall protocol:
  244. * Syscall number in r12, args in r5-r10
  245. * Return value in r3
  246. *
  247. * Trap entered via brki instruction, so BIP bit is set, and interrupts
  248. * are masked. This is nice, means we don't have to CLI before state save
  249. */
  250. C_ENTRY(_user_exception):
  251. addi r14, r14, 4 /* return address is 4 byte after call */
  252. swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
  253. lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
  254. tophys(r1,r1);
  255. lwi r1, r1, TS_THREAD_INFO; /* get stack from task_struct */
  256. /* MS these three instructions can be added to one */
  257. /* addik r1, r1, THREAD_SIZE; */
  258. /* tophys(r1,r1); */
  259. /* addik r1, r1, -STATE_SAVE_SIZE; */
  260. addik r1, r1, THREAD_SIZE + CONFIG_KERNEL_BASE_ADDR - CONFIG_KERNEL_START - STATE_SAVE_SIZE;
  261. SAVE_REGS
  262. lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
  263. swi r11, r1, PTO+PT_R1; /* Store user SP. */
  264. clear_ums;
  265. lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
  266. /* Save away the syscall number. */
  267. swi r12, r1, PTO+PT_R0;
  268. tovirt(r1,r1)
  269. /* where the trap should return need -8 to adjust for rtsd r15, 8*/
  270. /* Jump to the appropriate function for the system call number in r12
  271. * (r12 is not preserved), or return an error if r12 is not valid. The LP
  272. * register should point to the location where
  273. * the called function should return. [note that MAKE_SYS_CALL uses label 1] */
  274. /* Step into virtual mode */
  275. rtbd r0, 3f
  276. nop
  277. 3:
  278. lwi r11, CURRENT_TASK, TS_THREAD_INFO /* get thread info */
  279. lwi r11, r11, TI_FLAGS /* get flags in thread info */
  280. andi r11, r11, _TIF_WORK_SYSCALL_MASK
  281. beqi r11, 4f
  282. addik r3, r0, -ENOSYS
  283. swi r3, r1, PTO + PT_R3
  284. brlid r15, do_syscall_trace_enter
  285. addik r5, r1, PTO + PT_R0
  286. # do_syscall_trace_enter returns the new syscall nr.
  287. addk r12, r0, r3
  288. lwi r5, r1, PTO+PT_R5;
  289. lwi r6, r1, PTO+PT_R6;
  290. lwi r7, r1, PTO+PT_R7;
  291. lwi r8, r1, PTO+PT_R8;
  292. lwi r9, r1, PTO+PT_R9;
  293. lwi r10, r1, PTO+PT_R10;
  294. 4:
  295. /* Jump to the appropriate function for the system call number in r12
  296. * (r12 is not preserved), or return an error if r12 is not valid.
  297. * The LP register should point to the location where the called function
  298. * should return. [note that MAKE_SYS_CALL uses label 1] */
  299. /* See if the system call number is valid */
  300. addi r11, r12, -__NR_syscalls;
  301. bgei r11,5f;
  302. /* Figure out which function to use for this system call. */
  303. /* Note Microblaze barrel shift is optional, so don't rely on it */
  304. add r12, r12, r12; /* convert num -> ptr */
  305. add r12, r12, r12;
  306. #ifdef DEBUG
  307. /* Trac syscalls and stored them to r0_ram */
  308. lwi r3, r12, 0x400 + r0_ram
  309. addi r3, r3, 1
  310. swi r3, r12, 0x400 + r0_ram
  311. #endif
  312. # Find and jump into the syscall handler.
  313. lwi r12, r12, sys_call_table
  314. /* where the trap should return need -8 to adjust for rtsd r15, 8 */
  315. addi r15, r0, ret_from_trap-8
  316. bra r12
  317. /* The syscall number is invalid, return an error. */
  318. 5:
  319. rtsd r15, 8; /* looks like a normal subroutine return */
  320. addi r3, r0, -ENOSYS;
  321. /* Entry point used to return from a syscall/trap */
  322. /* We re-enable BIP bit before state restore */
  323. C_ENTRY(ret_from_trap):
  324. swi r3, r1, PTO + PT_R3
  325. swi r4, r1, PTO + PT_R4
  326. /* We're returning to user mode, so check for various conditions that
  327. * trigger rescheduling. */
  328. /* FIXME: Restructure all these flag checks. */
  329. lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
  330. lwi r11, r11, TI_FLAGS; /* get flags in thread info */
  331. andi r11, r11, _TIF_WORK_SYSCALL_MASK
  332. beqi r11, 1f
  333. brlid r15, do_syscall_trace_leave
  334. addik r5, r1, PTO + PT_R0
  335. 1:
  336. /* We're returning to user mode, so check for various conditions that
  337. * trigger rescheduling. */
  338. /* get thread info from current task */
  339. lwi r11, CURRENT_TASK, TS_THREAD_INFO;
  340. lwi r11, r11, TI_FLAGS; /* get flags in thread info */
  341. andi r11, r11, _TIF_NEED_RESCHED;
  342. beqi r11, 5f;
  343. bralid r15, schedule; /* Call scheduler */
  344. nop; /* delay slot */
  345. /* Maybe handle a signal */
  346. 5: /* get thread info from current task*/
  347. lwi r11, CURRENT_TASK, TS_THREAD_INFO;
  348. lwi r11, r11, TI_FLAGS; /* get flags in thread info */
  349. andi r11, r11, _TIF_SIGPENDING;
  350. beqi r11, 1f; /* Signals to handle, handle them */
  351. addik r5, r1, PTO; /* Arg 1: struct pt_regs *regs */
  352. addi r7, r0, 1; /* Arg 3: int in_syscall */
  353. bralid r15, do_signal; /* Handle any signals */
  354. add r6, r0, r0; /* Arg 2: sigset_t *oldset */
  355. /* Finally, return to user state. */
  356. 1: set_bip; /* Ints masked for state restore */
  357. swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
  358. VM_OFF;
  359. tophys(r1,r1);
  360. RESTORE_REGS;
  361. addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */
  362. lwi r1, r1, PT_R1 - PT_SIZE;/* Restore user stack pointer. */
  363. TRAP_return: /* Make global symbol for debugging */
  364. rtbd r14, 0; /* Instructions to return from an IRQ */
  365. nop;
  366. /* These syscalls need access to the struct pt_regs on the stack, so we
  367. implement them in assembly (they're basically all wrappers anyway). */
  368. C_ENTRY(sys_fork_wrapper):
  369. addi r5, r0, SIGCHLD /* Arg 0: flags */
  370. lwi r6, r1, PTO+PT_R1 /* Arg 1: child SP (use parent's) */
  371. addik r7, r1, PTO /* Arg 2: parent context */
  372. add r8. r0, r0 /* Arg 3: (unused) */
  373. add r9, r0, r0; /* Arg 4: (unused) */
  374. brid do_fork /* Do real work (tail-call) */
  375. add r10, r0, r0; /* Arg 5: (unused) */
  376. /* This the initial entry point for a new child thread, with an appropriate
  377. stack in place that makes it look the the child is in the middle of an
  378. syscall. This function is actually `returned to' from switch_thread
  379. (copy_thread makes ret_from_fork the return address in each new thread's
  380. saved context). */
  381. C_ENTRY(ret_from_fork):
  382. bralid r15, schedule_tail; /* ...which is schedule_tail's arg */
  383. add r3, r5, r0; /* switch_thread returns the prev task */
  384. /* ( in the delay slot ) */
  385. brid ret_from_trap; /* Do normal trap return */
  386. add r3, r0, r0; /* Child's fork call should return 0. */
  387. C_ENTRY(sys_vfork):
  388. brid microblaze_vfork /* Do real work (tail-call) */
  389. addik r5, r1, PTO
  390. C_ENTRY(sys_clone):
  391. bnei r6, 1f; /* See if child SP arg (arg 1) is 0. */
  392. lwi r6, r1, PTO + PT_R1; /* If so, use paret's stack ptr */
  393. 1: addik r7, r1, PTO; /* Arg 2: parent context */
  394. add r8, r0, r0; /* Arg 3: (unused) */
  395. add r9, r0, r0; /* Arg 4: (unused) */
  396. brid do_fork /* Do real work (tail-call) */
  397. add r10, r0, r0; /* Arg 5: (unused) */
  398. C_ENTRY(sys_execve):
  399. brid microblaze_execve; /* Do real work (tail-call).*/
  400. addik r8, r1, PTO; /* add user context as 4th arg */
  401. C_ENTRY(sys_rt_sigreturn_wrapper):
  402. swi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
  403. swi r4, r1, PTO+PT_R4;
  404. brlid r15, sys_rt_sigreturn /* Do real work */
  405. addik r5, r1, PTO; /* add user context as 1st arg */
  406. lwi r3, r1, PTO+PT_R3; /* restore saved r3, r4 registers */
  407. lwi r4, r1, PTO+PT_R4;
  408. bri ret_from_trap /* fall through will not work here due to align */
  409. nop;
  410. /*
  411. * HW EXCEPTION rutine start
  412. */
  413. C_ENTRY(full_exception_trap):
  414. /* adjust exception address for privileged instruction
  415. * for finding where is it */
  416. addik r17, r17, -4
  417. SAVE_STATE /* Save registers */
  418. /* PC, before IRQ/trap - this is one instruction above */
  419. swi r17, r1, PTO+PT_PC;
  420. tovirt(r1,r1)
  421. /* FIXME this can be store directly in PT_ESR reg.
  422. * I tested it but there is a fault */
  423. /* where the trap should return need -8 to adjust for rtsd r15, 8 */
  424. addik r15, r0, ret_from_exc - 8
  425. mfs r6, resr
  426. mfs r7, rfsr; /* save FSR */
  427. mts rfsr, r0; /* Clear sticky fsr */
  428. rted r0, full_exception
  429. addik r5, r1, PTO /* parameter struct pt_regs * regs */
  430. /*
  431. * Unaligned data trap.
  432. *
  433. * Unaligned data trap last on 4k page is handled here.
  434. *
  435. * Trap entered via exception, so EE bit is set, and interrupts
  436. * are masked. This is nice, means we don't have to CLI before state save
  437. *
  438. * The assembler routine is in "arch/microblaze/kernel/hw_exception_handler.S"
  439. */
  440. C_ENTRY(unaligned_data_trap):
  441. /* MS: I have to save r11 value and then restore it because
  442. * set_bit, clear_eip, set_ee use r11 as temp register if MSR
  443. * instructions are not used. We don't need to do if MSR instructions
  444. * are used and they use r0 instead of r11.
  445. * I am using ENTRY_SP which should be primary used only for stack
  446. * pointer saving. */
  447. swi r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
  448. set_bip; /* equalize initial state for all possible entries */
  449. clear_eip;
  450. set_ee;
  451. lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
  452. SAVE_STATE /* Save registers.*/
  453. /* PC, before IRQ/trap - this is one instruction above */
  454. swi r17, r1, PTO+PT_PC;
  455. tovirt(r1,r1)
  456. /* where the trap should return need -8 to adjust for rtsd r15, 8 */
  457. addik r15, r0, ret_from_exc-8
  458. mfs r3, resr /* ESR */
  459. mfs r4, rear /* EAR */
  460. rtbd r0, _unaligned_data_exception
  461. addik r7, r1, PTO /* parameter struct pt_regs * regs */
  462. /*
  463. * Page fault traps.
  464. *
  465. * If the real exception handler (from hw_exception_handler.S) didn't find
  466. * the mapping for the process, then we're thrown here to handle such situation.
  467. *
  468. * Trap entered via exceptions, so EE bit is set, and interrupts
  469. * are masked. This is nice, means we don't have to CLI before state save
  470. *
  471. * Build a standard exception frame for TLB Access errors. All TLB exceptions
  472. * will bail out to this point if they can't resolve the lightweight TLB fault.
  473. *
  474. * The C function called is in "arch/microblaze/mm/fault.c", declared as:
  475. * void do_page_fault(struct pt_regs *regs,
  476. * unsigned long address,
  477. * unsigned long error_code)
  478. */
  479. /* data and intruction trap - which is choose is resolved int fault.c */
  480. C_ENTRY(page_fault_data_trap):
  481. SAVE_STATE /* Save registers.*/
  482. /* PC, before IRQ/trap - this is one instruction above */
  483. swi r17, r1, PTO+PT_PC;
  484. tovirt(r1,r1)
  485. /* where the trap should return need -8 to adjust for rtsd r15, 8 */
  486. addik r15, r0, ret_from_exc-8
  487. mfs r6, rear /* parameter unsigned long address */
  488. mfs r7, resr /* parameter unsigned long error_code */
  489. rted r0, do_page_fault
  490. addik r5, r1, PTO /* parameter struct pt_regs * regs */
  491. C_ENTRY(page_fault_instr_trap):
  492. SAVE_STATE /* Save registers.*/
  493. /* PC, before IRQ/trap - this is one instruction above */
  494. swi r17, r1, PTO+PT_PC;
  495. tovirt(r1,r1)
  496. /* where the trap should return need -8 to adjust for rtsd r15, 8 */
  497. addik r15, r0, ret_from_exc-8
  498. mfs r6, rear /* parameter unsigned long address */
  499. ori r7, r0, 0 /* parameter unsigned long error_code */
  500. rted r0, do_page_fault
  501. addik r5, r1, PTO /* parameter struct pt_regs * regs */
  502. /* Entry point used to return from an exception. */
  503. C_ENTRY(ret_from_exc):
  504. lwi r11, r1, PTO + PT_MODE;
  505. bnei r11, 2f; /* See if returning to kernel mode, */
  506. /* ... if so, skip resched &c. */
  507. /* We're returning to user mode, so check for various conditions that
  508. trigger rescheduling. */
  509. lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
  510. lwi r11, r11, TI_FLAGS; /* get flags in thread info */
  511. andi r11, r11, _TIF_NEED_RESCHED;
  512. beqi r11, 5f;
  513. /* Call the scheduler before returning from a syscall/trap. */
  514. bralid r15, schedule; /* Call scheduler */
  515. nop; /* delay slot */
  516. /* Maybe handle a signal */
  517. 5: lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
  518. lwi r11, r11, TI_FLAGS; /* get flags in thread info */
  519. andi r11, r11, _TIF_SIGPENDING;
  520. beqi r11, 1f; /* Signals to handle, handle them */
  521. /*
  522. * Handle a signal return; Pending signals should be in r18.
  523. *
  524. * Not all registers are saved by the normal trap/interrupt entry
  525. * points (for instance, call-saved registers (because the normal
  526. * C-compiler calling sequence in the kernel makes sure they're
  527. * preserved), and call-clobbered registers in the case of
  528. * traps), but signal handlers may want to examine or change the
  529. * complete register state. Here we save anything not saved by
  530. * the normal entry sequence, so that it may be safely restored
  531. * (in a possibly modified form) after do_signal returns. */
  532. addik r5, r1, PTO; /* Arg 1: struct pt_regs *regs */
  533. addi r7, r0, 0; /* Arg 3: int in_syscall */
  534. bralid r15, do_signal; /* Handle any signals */
  535. add r6, r0, r0; /* Arg 2: sigset_t *oldset */
  536. /* Finally, return to user state. */
  537. 1: set_bip; /* Ints masked for state restore */
  538. swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
  539. VM_OFF;
  540. tophys(r1,r1);
  541. RESTORE_REGS;
  542. addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */
  543. lwi r1, r1, PT_R1 - PT_SIZE; /* Restore user stack pointer. */
  544. bri 6f;
  545. /* Return to kernel state. */
  546. 2: set_bip; /* Ints masked for state restore */
  547. VM_OFF;
  548. tophys(r1,r1);
  549. RESTORE_REGS;
  550. addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */
  551. tovirt(r1,r1);
  552. 6:
  553. EXC_return: /* Make global symbol for debugging */
  554. rtbd r14, 0; /* Instructions to return from an IRQ */
  555. nop;
  556. /*
  557. * HW EXCEPTION rutine end
  558. */
  559. /*
  560. * Hardware maskable interrupts.
  561. *
  562. * The stack-pointer (r1) should have already been saved to the memory
  563. * location PER_CPU(ENTRY_SP).
  564. */
  565. C_ENTRY(_interrupt):
  566. /* MS: we are in physical address */
  567. /* Save registers, switch to proper stack, convert SP to virtual.*/
  568. swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP))
  569. /* MS: See if already in kernel mode. */
  570. mfs r1, rmsr
  571. nop
  572. andi r1, r1, MSR_UMS
  573. bnei r1, 1f
  574. /* Kernel-mode state save. */
  575. lwi r1, r0, TOPHYS(PER_CPU(ENTRY_SP))
  576. tophys(r1,r1); /* MS: I have in r1 physical address where stack is */
  577. /* save registers */
  578. /* MS: Make room on the stack -> activation record */
  579. addik r1, r1, -STATE_SAVE_SIZE;
  580. SAVE_REGS
  581. brid 2f;
  582. swi r1, r1, PTO + PT_MODE; /* 0 - user mode, 1 - kernel mode */
  583. 1:
  584. /* User-mode state save. */
  585. /* MS: get the saved current */
  586. lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
  587. tophys(r1,r1);
  588. lwi r1, r1, TS_THREAD_INFO;
  589. addik r1, r1, THREAD_SIZE;
  590. tophys(r1,r1);
  591. /* save registers */
  592. addik r1, r1, -STATE_SAVE_SIZE;
  593. SAVE_REGS
  594. /* calculate mode */
  595. swi r0, r1, PTO + PT_MODE;
  596. lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
  597. swi r11, r1, PTO+PT_R1;
  598. clear_ums;
  599. 2:
  600. lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
  601. tovirt(r1,r1)
  602. addik r15, r0, irq_call;
  603. irq_call:rtbd r0, do_IRQ;
  604. addik r5, r1, PTO;
  605. /* MS: we are in virtual mode */
  606. ret_from_irq:
  607. lwi r11, r1, PTO + PT_MODE;
  608. bnei r11, 2f;
  609. lwi r11, CURRENT_TASK, TS_THREAD_INFO;
  610. lwi r11, r11, TI_FLAGS; /* MS: get flags from thread info */
  611. andi r11, r11, _TIF_NEED_RESCHED;
  612. beqi r11, 5f
  613. bralid r15, schedule;
  614. nop; /* delay slot */
  615. /* Maybe handle a signal */
  616. 5: lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* MS: get thread info */
  617. lwi r11, r11, TI_FLAGS; /* get flags in thread info */
  618. andi r11, r11, _TIF_SIGPENDING;
  619. beqid r11, no_intr_resched
  620. /* Handle a signal return; Pending signals should be in r18. */
  621. addi r7, r0, 0; /* Arg 3: int in_syscall */
  622. addik r5, r1, PTO; /* Arg 1: struct pt_regs *regs */
  623. bralid r15, do_signal; /* Handle any signals */
  624. add r6, r0, r0; /* Arg 2: sigset_t *oldset */
  625. /* Finally, return to user state. */
  626. no_intr_resched:
  627. /* Disable interrupts, we are now committed to the state restore */
  628. disable_irq
  629. swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE);
  630. VM_OFF;
  631. tophys(r1,r1);
  632. RESTORE_REGS
  633. addik r1, r1, STATE_SAVE_SIZE /* MS: Clean up stack space. */
  634. lwi r1, r1, PT_R1 - PT_SIZE;
  635. bri 6f;
  636. /* MS: Return to kernel state. */
  637. 2:
  638. #ifdef CONFIG_PREEMPT
  639. lwi r11, CURRENT_TASK, TS_THREAD_INFO;
  640. /* MS: get preempt_count from thread info */
  641. lwi r5, r11, TI_PREEMPT_COUNT;
  642. bgti r5, restore;
  643. lwi r5, r11, TI_FLAGS; /* get flags in thread info */
  644. andi r5, r5, _TIF_NEED_RESCHED;
  645. beqi r5, restore /* if zero jump over */
  646. preempt:
  647. /* interrupts are off that's why I am calling preempt_chedule_irq */
  648. bralid r15, preempt_schedule_irq
  649. nop
  650. lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
  651. lwi r5, r11, TI_FLAGS; /* get flags in thread info */
  652. andi r5, r5, _TIF_NEED_RESCHED;
  653. bnei r5, preempt /* if non zero jump to resched */
  654. restore:
  655. #endif
  656. VM_OFF /* MS: turn off MMU */
  657. tophys(r1,r1)
  658. RESTORE_REGS
  659. addik r1, r1, STATE_SAVE_SIZE /* MS: Clean up stack space. */
  660. tovirt(r1,r1);
  661. 6:
  662. IRQ_return: /* MS: Make global symbol for debugging */
  663. rtid r14, 0
  664. nop
  665. /*
  666. * Debug trap for KGDB. Enter to _debug_exception by brki r16, 0x18
  667. * and call handling function with saved pt_regs
  668. */
  669. C_ENTRY(_debug_exception):
  670. /* BIP bit is set on entry, no interrupts can occur */
  671. swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP))
  672. mfs r1, rmsr
  673. nop
  674. andi r1, r1, MSR_UMS
  675. bnei r1, 1f
  676. /* MS: Kernel-mode state save - kgdb */
  677. lwi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* Reload kernel stack-ptr*/
  678. /* BIP bit is set on entry, no interrupts can occur */
  679. addik r1, r1, CONFIG_KERNEL_BASE_ADDR - CONFIG_KERNEL_START - STATE_SAVE_SIZE;
  680. SAVE_REGS;
  681. /* save all regs to pt_reg structure */
  682. swi r0, r1, PTO+PT_R0; /* R0 must be saved too */
  683. swi r14, r1, PTO+PT_R14 /* rewrite saved R14 value */
  684. swi r16, r1, PTO+PT_R16
  685. swi r16, r1, PTO+PT_PC; /* PC and r16 are the same */
  686. swi r17, r1, PTO+PT_R17
  687. /* save special purpose registers to pt_regs */
  688. mfs r11, rear;
  689. swi r11, r1, PTO+PT_EAR;
  690. mfs r11, resr;
  691. swi r11, r1, PTO+PT_ESR;
  692. mfs r11, rfsr;
  693. swi r11, r1, PTO+PT_FSR;
  694. /* stack pointer is in physical address at it is decrease
  695. * by STATE_SAVE_SIZE but we need to get correct R1 value */
  696. addik r11, r1, CONFIG_KERNEL_START - CONFIG_KERNEL_BASE_ADDR + STATE_SAVE_SIZE;
  697. swi r11, r1, PTO+PT_R1
  698. /* MS: r31 - current pointer isn't changed */
  699. tovirt(r1,r1)
  700. #ifdef CONFIG_KGDB
  701. addi r5, r1, PTO /* pass pt_reg address as the first arg */
  702. la r15, r0, dbtrap_call; /* return address */
  703. rtbd r0, microblaze_kgdb_break
  704. nop;
  705. #endif
  706. /* MS: Place handler for brki from kernel space if KGDB is OFF.
  707. * It is very unlikely that another brki instruction is called. */
  708. bri 0
  709. /* MS: User-mode state save - gdb */
  710. 1: lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
  711. tophys(r1,r1);
  712. lwi r1, r1, TS_THREAD_INFO; /* get the thread info */
  713. addik r1, r1, THREAD_SIZE; /* calculate kernel stack pointer */
  714. tophys(r1,r1);
  715. addik r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */
  716. SAVE_REGS;
  717. swi r17, r1, PTO+PT_R17;
  718. swi r16, r1, PTO+PT_R16;
  719. swi r16, r1, PTO+PT_PC; /* Save LP */
  720. swi r0, r1, PTO + PT_MODE; /* Was in user-mode. */
  721. lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
  722. swi r11, r1, PTO+PT_R1; /* Store user SP. */
  723. lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
  724. tovirt(r1,r1)
  725. set_vms;
  726. addik r5, r1, PTO;
  727. addik r15, r0, dbtrap_call;
  728. dbtrap_call: /* Return point for kernel/user entry + 8 because of rtsd r15, 8 */
  729. rtbd r0, sw_exception
  730. nop
  731. /* MS: The first instruction for the second part of the gdb/kgdb */
  732. set_bip; /* Ints masked for state restore */
  733. lwi r11, r1, PTO + PT_MODE;
  734. bnei r11, 2f;
  735. /* MS: Return to user space - gdb */
  736. /* Get current task ptr into r11 */
  737. lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
  738. lwi r11, r11, TI_FLAGS; /* get flags in thread info */
  739. andi r11, r11, _TIF_NEED_RESCHED;
  740. beqi r11, 5f;
  741. /* Call the scheduler before returning from a syscall/trap. */
  742. bralid r15, schedule; /* Call scheduler */
  743. nop; /* delay slot */
  744. /* Maybe handle a signal */
  745. 5: lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
  746. lwi r11, r11, TI_FLAGS; /* get flags in thread info */
  747. andi r11, r11, _TIF_SIGPENDING;
  748. beqi r11, 1f; /* Signals to handle, handle them */
  749. addik r5, r1, PTO; /* Arg 1: struct pt_regs *regs */
  750. addi r7, r0, 0; /* Arg 3: int in_syscall */
  751. bralid r15, do_signal; /* Handle any signals */
  752. add r6, r0, r0; /* Arg 2: sigset_t *oldset */
  753. /* Finally, return to user state. */
  754. 1: swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
  755. VM_OFF;
  756. tophys(r1,r1);
  757. /* MS: Restore all regs */
  758. RESTORE_REGS
  759. lwi r17, r1, PTO+PT_R17;
  760. lwi r16, r1, PTO+PT_R16;
  761. addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space */
  762. lwi r1, r1, PT_R1 - PT_SIZE; /* Restore user stack pointer */
  763. DBTRAP_return_user: /* MS: Make global symbol for debugging */
  764. rtbd r16, 0; /* MS: Instructions to return from a debug trap */
  765. nop;
  766. /* MS: Return to kernel state - kgdb */
  767. 2: VM_OFF;
  768. tophys(r1,r1);
  769. /* MS: Restore all regs */
  770. RESTORE_REGS
  771. lwi r14, r1, PTO+PT_R14;
  772. lwi r16, r1, PTO+PT_PC;
  773. lwi r17, r1, PTO+PT_R17;
  774. addik r1, r1, STATE_SAVE_SIZE; /* MS: Clean up stack space */
  775. tovirt(r1,r1);
  776. DBTRAP_return_kernel: /* MS: Make global symbol for debugging */
  777. rtbd r16, 0; /* MS: Instructions to return from a debug trap */
  778. nop;
/*
 * _switch_to: kernel context switch between two tasks.
 *
 * In:   r5 = thread_info of the outgoing (previous) task
 *       r6 = thread_info of the incoming (next) task
 *       (TI_* offsets index struct thread_info; CC_* offsets index the
 *        embedded cpu_context — assumed, confirm against asm-offsets)
 * Out:  r3 = CURRENT_TASK (r31) as it was on entry, i.e. the previous
 *       task, handed back to the C caller as the "last" task pointer.
 *
 * Only dedicated and non-volatile registers are switched here; the
 * volatile (caller-saved) registers were already spilled to the stack
 * by the code that called _switch_to().
 */
ENTRY(_switch_to)
	/* prepare return value: return the outgoing task to the caller */
	addk	r3, r0, CURRENT_TASK
	/* save registers in cpu_context */
	/* use r11 and r12, volatile registers, as temp register */
	/* r11 = &prev_ti->cpu_context: base for all CC_* stores below */
	addik	r11, r5, TI_CPU_CONTEXT
	swi	r1, r11, CC_R1		/* kernel stack pointer */
	swi	r2, r11, CC_R2
	/* skip volatile registers.
	 * they are saved on stack when we jumped to _switch_to() */
	/* dedicated registers */
	swi	r13, r11, CC_R13
	swi	r14, r11, CC_R14
	swi	r15, r11, CC_R15	/* link register: resume point */
	swi	r16, r11, CC_R16
	swi	r17, r11, CC_R17
	swi	r18, r11, CC_R18
	/* save non-volatile registers */
	swi	r19, r11, CC_R19
	swi	r20, r11, CC_R20
	swi	r21, r11, CC_R21
	swi	r22, r11, CC_R22
	swi	r23, r11, CC_R23
	swi	r24, r11, CC_R24
	swi	r25, r11, CC_R25
	swi	r26, r11, CC_R26
	swi	r27, r11, CC_R27
	swi	r28, r11, CC_R28
	swi	r29, r11, CC_R29
	swi	r30, r11, CC_R30
	/* special purpose registers */
	mfs	r12, rmsr
	swi	r12, r11, CC_MSR
	mfs	r12, rear
	swi	r12, r11, CC_EAR
	mfs	r12, resr
	swi	r12, r11, CC_ESR
	mfs	r12, rfsr
	swi	r12, r11, CC_FSR
	/* update r31 (CURRENT_TASK): point it at the task which runs next */
	lwi	CURRENT_TASK, r6, TI_TASK
	/* store it to current_save too */
	swi	CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE)
	/* get new process' cpu context and restore */
	/* r11 = &next_ti->cpu_context: base for all CC_* loads below */
	addik	r11, r6, TI_CPU_CONTEXT
	/* non-volatile registers */
	lwi	r30, r11, CC_R30
	lwi	r29, r11, CC_R29
	lwi	r28, r11, CC_R28
	lwi	r27, r11, CC_R27
	lwi	r26, r11, CC_R26
	lwi	r25, r11, CC_R25
	lwi	r24, r11, CC_R24
	lwi	r23, r11, CC_R23
	lwi	r22, r11, CC_R22
	lwi	r21, r11, CC_R21
	lwi	r20, r11, CC_R20
	lwi	r19, r11, CC_R19
	/* dedicated registers */
	lwi	r18, r11, CC_R18
	lwi	r17, r11, CC_R17
	lwi	r16, r11, CC_R16
	lwi	r15, r11, CC_R15	/* link register: where rtsd returns */
	lwi	r14, r11, CC_R14
	lwi	r13, r11, CC_R13
	/* skip volatile registers */
	lwi	r2, r11, CC_R2
	lwi	r1, r11, CC_R1		/* switch to the next task's kernel stack */
	/* special purpose registers.
	 * NOTE: EAR and ESR are saved above but never restored here — only
	 * FSR and MSR are written back. */
	lwi	r12, r11, CC_FSR
	mts	rfsr, r12
	lwi	r12, r11, CC_MSR
	mts	rmsr, r12		/* restored last, after all state is in place */
	/* resume the next task at its saved r15 + 8 (skips the branch+delay
	 * slot pair at its original call site) */
	rtsd	r15, 8
	nop
/*
 * _reset: soft-reset entry — transfer control back to the first-stage
 * bootloader. 0x70 is the FS-BOOT entry address (the reset vector in
 * .init.ivt branches to the same location).
 */
ENTRY(_reset)
	brai	0x70; /* Jump back to FS-boot */
/*
 * Interrupt/exception vector table template.
 * These are compiled and loaded into high memory, then
 * copied into place in mach_early_setup.
 * TOPHYS() turns each handler symbol into its physical address —
 * presumably because the vectors run before the kernel's virtual
 * mapping is active; confirm against mach_early_setup.
 */
	.section	.init.ivt, "ax"
	.org	0x0
	/* this is very important - here is the reset vector */
	/* in the current MMU branch you don't care what is here - it is
	 * used from the bootloader side - but this is correct for FS-BOOT */
	brai	0x70
	nop
	brai	TOPHYS(_user_exception); /* syscall handler */
	brai	TOPHYS(_interrupt); /* Interrupt handler */
	brai	TOPHYS(_debug_exception); /* debug trap handler */
	brai	TOPHYS(_hw_exception_handler); /* HW exception handler */
	.section .rodata,"a"
/* sys_call_table is defined by the included file */
#include "syscall_table.S"

/* size of the syscall table in bytes ('.' minus the table's start) */
syscall_table_size=(.-sys_call_table)

/* NUL-terminated labels displayed by the stack unwinder (referenced by
 * the microblaze_trap_handlers table below) */
type_SYSCALL:
	.ascii "SYSCALL\0"
type_IRQ:
	.ascii "IRQ\0"
type_IRQ_PREEMPT:
	.ascii "IRQ (PREEMPTED)\0"
type_SYSCALL_PREEMPT:
	/* NOTE(review): the leading space is inconsistent with the other
	 * type strings — confirm whether it is intentional before changing
	 * (the bytes are runtime-visible unwinder output). */
	.ascii " SYSCALL (PREEMPTED)\0"

/*
 * Trap decoding for stack unwinder.
 * Tuples are (start addr, end addr, string): three .word entries each.
 * If the return address lies in [start addr, end addr], the unwinder
 * displays 'string'. An all-zero tuple terminates the table.
 */
	.align 4
	.global microblaze_trap_handlers
microblaze_trap_handlers:
	/* Exact matches come first (start == end) */
	.word ret_from_trap; .word ret_from_trap ; .word type_SYSCALL
	.word ret_from_irq ; .word ret_from_irq ; .word type_IRQ
	/* Fuzzy matches go here (start != end ranges) */
	.word ret_from_irq ; .word no_intr_resched ; .word type_IRQ_PREEMPT
	.word ret_from_trap; .word TRAP_return ; .word type_SYSCALL_PREEMPT
	/* End of table */
	.word 0 ; .word 0 ; .word 0