/* arch/microblaze/kernel/entry.S */
  1. /*
  2. * Low-level system-call handling, trap handlers and context-switching
  3. *
  4. * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
  5. * Copyright (C) 2008-2009 PetaLogix
  6. * Copyright (C) 2003 John Williams <jwilliams@itee.uq.edu.au>
  7. * Copyright (C) 2001,2002 NEC Corporation
  8. * Copyright (C) 2001,2002 Miles Bader <miles@gnu.org>
  9. *
  10. * This file is subject to the terms and conditions of the GNU General
  11. * Public License. See the file COPYING in the main directory of this
  12. * archive for more details.
  13. *
  14. * Written by Miles Bader <miles@gnu.org>
  15. * Heavily modified by John Williams for Microblaze
  16. */
  17. #include <linux/sys.h>
  18. #include <linux/linkage.h>
  19. #include <asm/entry.h>
  20. #include <asm/current.h>
  21. #include <asm/processor.h>
  22. #include <asm/exceptions.h>
  23. #include <asm/asm-offsets.h>
  24. #include <asm/thread_info.h>
  25. #include <asm/page.h>
  26. #include <asm/unistd.h>
  27. #include <linux/errno.h>
  28. #include <asm/signal.h>
  29. #undef DEBUG
  30. /* The size of a state save frame. */
  31. #define STATE_SAVE_SIZE (PT_SIZE + STATE_SAVE_ARG_SPACE)
  32. /* The offset of the struct pt_regs in a `state save frame' on the stack. */
  33. #define PTO STATE_SAVE_ARG_SPACE /* 24 the space for args */
  34. #define C_ENTRY(name) .globl name; .align 4; name
  35. /*
  36. * Various ways of setting and clearing BIP in flags reg.
  37. * This is mucky, but necessary using microblaze version that
  38. * allows msr ops to write to BIP
  39. */
  40. #if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
  41. .macro clear_bip
  42. msrclr r0, MSR_BIP
  43. .endm
  44. .macro set_bip
  45. msrset r0, MSR_BIP
  46. .endm
  47. .macro clear_eip
  48. msrclr r0, MSR_EIP
  49. .endm
  50. .macro set_ee
  51. msrset r0, MSR_EE
  52. .endm
  53. .macro disable_irq
  54. msrclr r0, MSR_IE
  55. .endm
  56. .macro enable_irq
  57. msrset r0, MSR_IE
  58. .endm
  59. .macro set_ums
  60. msrset r0, MSR_UMS
  61. msrclr r0, MSR_VMS
  62. .endm
  63. .macro set_vms
  64. msrclr r0, MSR_UMS
  65. msrset r0, MSR_VMS
  66. .endm
  67. .macro clear_ums
  68. msrclr r0, MSR_UMS
  69. .endm
  70. .macro clear_vms_ums
  71. msrclr r0, MSR_VMS | MSR_UMS
  72. .endm
  73. #else
  74. .macro clear_bip
  75. mfs r11, rmsr
  76. andi r11, r11, ~MSR_BIP
  77. mts rmsr, r11
  78. .endm
  79. .macro set_bip
  80. mfs r11, rmsr
  81. ori r11, r11, MSR_BIP
  82. mts rmsr, r11
  83. .endm
  84. .macro clear_eip
  85. mfs r11, rmsr
  86. andi r11, r11, ~MSR_EIP
  87. mts rmsr, r11
  88. .endm
  89. .macro set_ee
  90. mfs r11, rmsr
  91. ori r11, r11, MSR_EE
  92. mts rmsr, r11
  93. .endm
  94. .macro disable_irq
  95. mfs r11, rmsr
  96. andi r11, r11, ~MSR_IE
  97. mts rmsr, r11
  98. .endm
  99. .macro enable_irq
  100. mfs r11, rmsr
  101. ori r11, r11, MSR_IE
  102. mts rmsr, r11
  103. .endm
  104. .macro set_ums
  105. mfs r11, rmsr
  106. ori r11, r11, MSR_VMS
  107. andni r11, r11, MSR_UMS
  108. mts rmsr, r11
  109. .endm
  110. .macro set_vms
  111. mfs r11, rmsr
  112. ori r11, r11, MSR_VMS
  113. andni r11, r11, MSR_UMS
  114. mts rmsr, r11
  115. .endm
  116. .macro clear_ums
  117. mfs r11, rmsr
  118. andni r11, r11, MSR_UMS
  119. mts rmsr,r11
  120. .endm
  121. .macro clear_vms_ums
  122. mfs r11, rmsr
  123. andni r11, r11, (MSR_VMS|MSR_UMS)
  124. mts rmsr,r11
  125. .endm
  126. #endif
  127. /* Define how to call high-level functions. With MMU, virtual mode must be
  128. * enabled when calling the high-level function. Clobbers R11.
  129. * VM_ON, VM_OFF, DO_JUMP_BIPCLR, DO_CALL
  130. */
  131. /* turn on virtual protected mode save */
  132. #define VM_ON \
  133. set_ums; \
  134. rted r0, 2f; \
  135. nop; \
  136. 2:
  137. /* turn off virtual protected mode save and user mode save*/
  138. #define VM_OFF \
  139. clear_vms_ums; \
  140. rted r0, TOPHYS(1f); \
  141. nop; \
  142. 1:
  143. #define SAVE_REGS \
  144. swi r2, r1, PTO+PT_R2; /* Save SDA */ \
  145. swi r3, r1, PTO+PT_R3; \
  146. swi r4, r1, PTO+PT_R4; \
  147. swi r5, r1, PTO+PT_R5; \
  148. swi r6, r1, PTO+PT_R6; \
  149. swi r7, r1, PTO+PT_R7; \
  150. swi r8, r1, PTO+PT_R8; \
  151. swi r9, r1, PTO+PT_R9; \
  152. swi r10, r1, PTO+PT_R10; \
  153. swi r11, r1, PTO+PT_R11; /* save clobbered regs after rval */\
  154. swi r12, r1, PTO+PT_R12; \
  155. swi r13, r1, PTO+PT_R13; /* Save SDA2 */ \
  156. swi r14, r1, PTO+PT_PC; /* PC, before IRQ/trap */ \
  157. swi r15, r1, PTO+PT_R15; /* Save LP */ \
  158. swi r16, r1, PTO+PT_R16; \
  159. swi r17, r1, PTO+PT_R17; \
  160. swi r18, r1, PTO+PT_R18; /* Save asm scratch reg */ \
  161. swi r19, r1, PTO+PT_R19; \
  162. swi r20, r1, PTO+PT_R20; \
  163. swi r21, r1, PTO+PT_R21; \
  164. swi r22, r1, PTO+PT_R22; \
  165. swi r23, r1, PTO+PT_R23; \
  166. swi r24, r1, PTO+PT_R24; \
  167. swi r25, r1, PTO+PT_R25; \
  168. swi r26, r1, PTO+PT_R26; \
  169. swi r27, r1, PTO+PT_R27; \
  170. swi r28, r1, PTO+PT_R28; \
  171. swi r29, r1, PTO+PT_R29; \
  172. swi r30, r1, PTO+PT_R30; \
  173. swi r31, r1, PTO+PT_R31; /* Save current task reg */ \
  174. mfs r11, rmsr; /* save MSR */ \
  175. swi r11, r1, PTO+PT_MSR;
  176. #define RESTORE_REGS \
  177. lwi r11, r1, PTO+PT_MSR; \
  178. mts rmsr , r11; \
  179. lwi r2, r1, PTO+PT_R2; /* restore SDA */ \
  180. lwi r3, r1, PTO+PT_R3; \
  181. lwi r4, r1, PTO+PT_R4; \
  182. lwi r5, r1, PTO+PT_R5; \
  183. lwi r6, r1, PTO+PT_R6; \
  184. lwi r7, r1, PTO+PT_R7; \
  185. lwi r8, r1, PTO+PT_R8; \
  186. lwi r9, r1, PTO+PT_R9; \
  187. lwi r10, r1, PTO+PT_R10; \
  188. lwi r11, r1, PTO+PT_R11; /* restore clobbered regs after rval */\
  189. lwi r12, r1, PTO+PT_R12; \
  190. lwi r13, r1, PTO+PT_R13; /* restore SDA2 */ \
  191. lwi r14, r1, PTO+PT_PC; /* RESTORE_LINK PC, before IRQ/trap */\
  192. lwi r15, r1, PTO+PT_R15; /* restore LP */ \
  193. lwi r16, r1, PTO+PT_R16; \
  194. lwi r17, r1, PTO+PT_R17; \
  195. lwi r18, r1, PTO+PT_R18; /* restore asm scratch reg */ \
  196. lwi r19, r1, PTO+PT_R19; \
  197. lwi r20, r1, PTO+PT_R20; \
  198. lwi r21, r1, PTO+PT_R21; \
  199. lwi r22, r1, PTO+PT_R22; \
  200. lwi r23, r1, PTO+PT_R23; \
  201. lwi r24, r1, PTO+PT_R24; \
  202. lwi r25, r1, PTO+PT_R25; \
  203. lwi r26, r1, PTO+PT_R26; \
  204. lwi r27, r1, PTO+PT_R27; \
  205. lwi r28, r1, PTO+PT_R28; \
  206. lwi r29, r1, PTO+PT_R29; \
  207. lwi r30, r1, PTO+PT_R30; \
  208. lwi r31, r1, PTO+PT_R31; /* Restore cur task reg */
  209. #define SAVE_STATE \
  210. swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* save stack */ \
  211. /* See if already in kernel mode.*/ \
  212. mfs r1, rmsr; \
  213. andi r1, r1, MSR_UMS; \
  214. bnei r1, 1f; \
  215. /* Kernel-mode state save. */ \
  216. /* Reload kernel stack-ptr. */ \
  217. lwi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); \
  218. /* FIXME: I can add these two lines to one */ \
  219. /* tophys(r1,r1); */ \
  220. /* addik r1, r1, -STATE_SAVE_SIZE; */ \
  221. addik r1, r1, CONFIG_KERNEL_BASE_ADDR - CONFIG_KERNEL_START - STATE_SAVE_SIZE; \
  222. SAVE_REGS \
  223. brid 2f; \
  224. swi r1, r1, PTO+PT_MODE; \
  225. 1: /* User-mode state save. */ \
  226. lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */\
  227. tophys(r1,r1); \
  228. lwi r1, r1, TS_THREAD_INFO; /* get the thread info */ \
  229. /* MS these three instructions can be added to one */ \
  230. /* addik r1, r1, THREAD_SIZE; */ \
  231. /* tophys(r1,r1); */ \
  232. /* addik r1, r1, -STATE_SAVE_SIZE; */ \
  233. addik r1, r1, THREAD_SIZE + CONFIG_KERNEL_BASE_ADDR - CONFIG_KERNEL_START - STATE_SAVE_SIZE; \
  234. SAVE_REGS \
  235. lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); \
  236. swi r11, r1, PTO+PT_R1; /* Store user SP. */ \
  237. swi r0, r1, PTO + PT_MODE; /* Was in user-mode. */ \
  238. /* MS: I am clearing UMS even in case when I come from kernel space */ \
  239. clear_ums; \
  240. 2: lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
  241. .text
  242. /*
  243. * User trap.
  244. *
  245. * System calls are handled here.
  246. *
  247. * Syscall protocol:
  248. * Syscall number in r12, args in r5-r10
  249. * Return value in r3
  250. *
  251. * Trap entered via brki instruction, so BIP bit is set, and interrupts
  252. * are masked. This is nice, means we don't have to CLI before state save
  253. */
  254. C_ENTRY(_user_exception):
  255. swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
  256. addi r14, r14, 4 /* return address is 4 byte after call */
  257. mfs r1, rmsr
  258. nop
  259. andi r1, r1, MSR_UMS
  260. bnei r1, 1f
  261. /* Kernel-mode state save - kernel execve */
  262. lwi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* Reload kernel stack-ptr*/
  263. tophys(r1,r1);
  264. addik r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */
  265. SAVE_REGS
  266. swi r1, r1, PTO + PT_MODE; /* pt_regs -> kernel mode */
  267. brid 2f;
  268. nop; /* Fill delay slot */
  269. /* User-mode state save. */
  270. 1:
  271. lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
  272. tophys(r1,r1);
  273. lwi r1, r1, TS_THREAD_INFO; /* get stack from task_struct */
  274. /* calculate kernel stack pointer from task struct 8k */
  275. addik r1, r1, THREAD_SIZE;
  276. tophys(r1,r1);
  277. addik r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */
  278. SAVE_REGS
  279. swi r0, r1, PTO + PT_R3
  280. swi r0, r1, PTO + PT_R4
  281. swi r0, r1, PTO + PT_MODE; /* Was in user-mode. */
  282. lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
  283. swi r11, r1, PTO+PT_R1; /* Store user SP. */
  284. clear_ums;
  285. 2: lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
  286. /* Save away the syscall number. */
  287. swi r12, r1, PTO+PT_R0;
  288. tovirt(r1,r1)
  289. /* where the trap should return need -8 to adjust for rtsd r15, 8*/
  290. /* Jump to the appropriate function for the system call number in r12
  291. * (r12 is not preserved), or return an error if r12 is not valid. The LP
  292. * register should point to the location where
  293. * the called function should return. [note that MAKE_SYS_CALL uses label 1] */
  294. /* Step into virtual mode */
  295. rtbd r0, 3f
  296. nop
  297. 3:
  298. lwi r11, CURRENT_TASK, TS_THREAD_INFO /* get thread info */
  299. lwi r11, r11, TI_FLAGS /* get flags in thread info */
  300. andi r11, r11, _TIF_WORK_SYSCALL_MASK
  301. beqi r11, 4f
  302. addik r3, r0, -ENOSYS
  303. swi r3, r1, PTO + PT_R3
  304. brlid r15, do_syscall_trace_enter
  305. addik r5, r1, PTO + PT_R0
  306. # do_syscall_trace_enter returns the new syscall nr.
  307. addk r12, r0, r3
  308. lwi r5, r1, PTO+PT_R5;
  309. lwi r6, r1, PTO+PT_R6;
  310. lwi r7, r1, PTO+PT_R7;
  311. lwi r8, r1, PTO+PT_R8;
  312. lwi r9, r1, PTO+PT_R9;
  313. lwi r10, r1, PTO+PT_R10;
  314. 4:
  315. /* Jump to the appropriate function for the system call number in r12
  316. * (r12 is not preserved), or return an error if r12 is not valid.
  317. * The LP register should point to the location where the called function
  318. * should return. [note that MAKE_SYS_CALL uses label 1] */
  319. /* See if the system call number is valid */
  320. addi r11, r12, -__NR_syscalls;
  321. bgei r11,5f;
  322. /* Figure out which function to use for this system call. */
  323. /* Note Microblaze barrel shift is optional, so don't rely on it */
  324. add r12, r12, r12; /* convert num -> ptr */
  325. add r12, r12, r12;
  326. #ifdef DEBUG
  327. /* Trac syscalls and stored them to r0_ram */
  328. lwi r3, r12, 0x400 + r0_ram
  329. addi r3, r3, 1
  330. swi r3, r12, 0x400 + r0_ram
  331. #endif
  332. # Find and jump into the syscall handler.
  333. lwi r12, r12, sys_call_table
  334. /* where the trap should return need -8 to adjust for rtsd r15, 8 */
  335. addi r15, r0, ret_from_trap-8
  336. bra r12
  337. /* The syscall number is invalid, return an error. */
  338. 5:
  339. rtsd r15, 8; /* looks like a normal subroutine return */
  340. addi r3, r0, -ENOSYS;
  341. /* Entry point used to return from a syscall/trap */
  342. /* We re-enable BIP bit before state restore */
  343. C_ENTRY(ret_from_trap):
  344. swi r3, r1, PTO + PT_R3
  345. swi r4, r1, PTO + PT_R4
  346. lwi r11, r1, PTO + PT_MODE;
  347. /* See if returning to kernel mode, if so, skip resched &c. */
  348. bnei r11, 2f;
  349. /* We're returning to user mode, so check for various conditions that
  350. * trigger rescheduling. */
  351. /* FIXME: Restructure all these flag checks. */
  352. lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
  353. lwi r11, r11, TI_FLAGS; /* get flags in thread info */
  354. andi r11, r11, _TIF_WORK_SYSCALL_MASK
  355. beqi r11, 1f
  356. brlid r15, do_syscall_trace_leave
  357. addik r5, r1, PTO + PT_R0
  358. 1:
  359. /* We're returning to user mode, so check for various conditions that
  360. * trigger rescheduling. */
  361. /* get thread info from current task */
  362. lwi r11, CURRENT_TASK, TS_THREAD_INFO;
  363. lwi r11, r11, TI_FLAGS; /* get flags in thread info */
  364. andi r11, r11, _TIF_NEED_RESCHED;
  365. beqi r11, 5f;
  366. bralid r15, schedule; /* Call scheduler */
  367. nop; /* delay slot */
  368. /* Maybe handle a signal */
  369. 5: /* get thread info from current task*/
  370. lwi r11, CURRENT_TASK, TS_THREAD_INFO;
  371. lwi r11, r11, TI_FLAGS; /* get flags in thread info */
  372. andi r11, r11, _TIF_SIGPENDING;
  373. beqi r11, 1f; /* Signals to handle, handle them */
  374. addik r5, r1, PTO; /* Arg 1: struct pt_regs *regs */
  375. addi r7, r0, 1; /* Arg 3: int in_syscall */
  376. bralid r15, do_signal; /* Handle any signals */
  377. add r6, r0, r0; /* Arg 2: sigset_t *oldset */
  378. /* Finally, return to user state. */
  379. 1: set_bip; /* Ints masked for state restore */
  380. swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
  381. VM_OFF;
  382. tophys(r1,r1);
  383. RESTORE_REGS;
  384. addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */
  385. lwi r1, r1, PT_R1 - PT_SIZE;/* Restore user stack pointer. */
  386. bri 6f;
  387. /* Return to kernel state. */
  388. 2: set_bip; /* Ints masked for state restore */
  389. VM_OFF;
  390. tophys(r1,r1);
  391. RESTORE_REGS;
  392. addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */
  393. tovirt(r1,r1);
  394. 6:
  395. TRAP_return: /* Make global symbol for debugging */
  396. rtbd r14, 0; /* Instructions to return from an IRQ */
  397. nop;
  398. /* These syscalls need access to the struct pt_regs on the stack, so we
  399. implement them in assembly (they're basically all wrappers anyway). */
  400. C_ENTRY(sys_fork_wrapper):
  401. addi r5, r0, SIGCHLD /* Arg 0: flags */
  402. lwi r6, r1, PTO+PT_R1 /* Arg 1: child SP (use parent's) */
  403. addik r7, r1, PTO /* Arg 2: parent context */
  404. add r8. r0, r0 /* Arg 3: (unused) */
  405. add r9, r0, r0; /* Arg 4: (unused) */
  406. brid do_fork /* Do real work (tail-call) */
  407. add r10, r0, r0; /* Arg 5: (unused) */
  408. /* This the initial entry point for a new child thread, with an appropriate
  409. stack in place that makes it look the the child is in the middle of an
  410. syscall. This function is actually `returned to' from switch_thread
  411. (copy_thread makes ret_from_fork the return address in each new thread's
  412. saved context). */
  413. C_ENTRY(ret_from_fork):
  414. bralid r15, schedule_tail; /* ...which is schedule_tail's arg */
  415. add r3, r5, r0; /* switch_thread returns the prev task */
  416. /* ( in the delay slot ) */
  417. brid ret_from_trap; /* Do normal trap return */
  418. add r3, r0, r0; /* Child's fork call should return 0. */
  419. C_ENTRY(sys_vfork):
  420. brid microblaze_vfork /* Do real work (tail-call) */
  421. addik r5, r1, PTO
  422. C_ENTRY(sys_clone):
  423. bnei r6, 1f; /* See if child SP arg (arg 1) is 0. */
  424. lwi r6, r1, PTO + PT_R1; /* If so, use paret's stack ptr */
  425. 1: addik r7, r1, PTO; /* Arg 2: parent context */
  426. add r8, r0, r0; /* Arg 3: (unused) */
  427. add r9, r0, r0; /* Arg 4: (unused) */
  428. brid do_fork /* Do real work (tail-call) */
  429. add r10, r0, r0; /* Arg 5: (unused) */
  430. C_ENTRY(sys_execve):
  431. brid microblaze_execve; /* Do real work (tail-call).*/
  432. addik r8, r1, PTO; /* add user context as 4th arg */
  433. C_ENTRY(sys_rt_sigreturn_wrapper):
  434. brid sys_rt_sigreturn /* Do real work */
  435. addik r5, r1, PTO; /* add user context as 1st arg */
  436. /*
  437. * HW EXCEPTION rutine start
  438. */
  439. C_ENTRY(full_exception_trap):
  440. /* adjust exception address for privileged instruction
  441. * for finding where is it */
  442. addik r17, r17, -4
  443. SAVE_STATE /* Save registers */
  444. /* PC, before IRQ/trap - this is one instruction above */
  445. swi r17, r1, PTO+PT_PC;
  446. tovirt(r1,r1)
  447. /* FIXME this can be store directly in PT_ESR reg.
  448. * I tested it but there is a fault */
  449. /* where the trap should return need -8 to adjust for rtsd r15, 8 */
  450. addik r15, r0, ret_from_exc - 8
  451. mfs r6, resr
  452. mfs r7, rfsr; /* save FSR */
  453. mts rfsr, r0; /* Clear sticky fsr */
  454. rted r0, full_exception
  455. addik r5, r1, PTO /* parameter struct pt_regs * regs */
  456. /*
  457. * Unaligned data trap.
  458. *
  459. * Unaligned data trap last on 4k page is handled here.
  460. *
  461. * Trap entered via exception, so EE bit is set, and interrupts
  462. * are masked. This is nice, means we don't have to CLI before state save
  463. *
  464. * The assembler routine is in "arch/microblaze/kernel/hw_exception_handler.S"
  465. */
  466. C_ENTRY(unaligned_data_trap):
  467. /* MS: I have to save r11 value and then restore it because
  468. * set_bit, clear_eip, set_ee use r11 as temp register if MSR
  469. * instructions are not used. We don't need to do if MSR instructions
  470. * are used and they use r0 instead of r11.
  471. * I am using ENTRY_SP which should be primary used only for stack
  472. * pointer saving. */
  473. swi r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
  474. set_bip; /* equalize initial state for all possible entries */
  475. clear_eip;
  476. set_ee;
  477. lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
  478. SAVE_STATE /* Save registers.*/
  479. /* PC, before IRQ/trap - this is one instruction above */
  480. swi r17, r1, PTO+PT_PC;
  481. tovirt(r1,r1)
  482. /* where the trap should return need -8 to adjust for rtsd r15, 8 */
  483. addik r15, r0, ret_from_exc-8
  484. mfs r3, resr /* ESR */
  485. mfs r4, rear /* EAR */
  486. rtbd r0, _unaligned_data_exception
  487. addik r7, r1, PTO /* parameter struct pt_regs * regs */
  488. /*
  489. * Page fault traps.
  490. *
  491. * If the real exception handler (from hw_exception_handler.S) didn't find
  492. * the mapping for the process, then we're thrown here to handle such situation.
  493. *
  494. * Trap entered via exceptions, so EE bit is set, and interrupts
  495. * are masked. This is nice, means we don't have to CLI before state save
  496. *
  497. * Build a standard exception frame for TLB Access errors. All TLB exceptions
  498. * will bail out to this point if they can't resolve the lightweight TLB fault.
  499. *
  500. * The C function called is in "arch/microblaze/mm/fault.c", declared as:
  501. * void do_page_fault(struct pt_regs *regs,
  502. * unsigned long address,
  503. * unsigned long error_code)
  504. */
  505. /* data and intruction trap - which is choose is resolved int fault.c */
  506. C_ENTRY(page_fault_data_trap):
  507. SAVE_STATE /* Save registers.*/
  508. /* PC, before IRQ/trap - this is one instruction above */
  509. swi r17, r1, PTO+PT_PC;
  510. tovirt(r1,r1)
  511. /* where the trap should return need -8 to adjust for rtsd r15, 8 */
  512. addik r15, r0, ret_from_exc-8
  513. mfs r6, rear /* parameter unsigned long address */
  514. mfs r7, resr /* parameter unsigned long error_code */
  515. rted r0, do_page_fault
  516. addik r5, r1, PTO /* parameter struct pt_regs * regs */
  517. C_ENTRY(page_fault_instr_trap):
  518. SAVE_STATE /* Save registers.*/
  519. /* PC, before IRQ/trap - this is one instruction above */
  520. swi r17, r1, PTO+PT_PC;
  521. tovirt(r1,r1)
  522. /* where the trap should return need -8 to adjust for rtsd r15, 8 */
  523. addik r15, r0, ret_from_exc-8
  524. mfs r6, rear /* parameter unsigned long address */
  525. ori r7, r0, 0 /* parameter unsigned long error_code */
  526. rted r0, do_page_fault
  527. addik r5, r1, PTO /* parameter struct pt_regs * regs */
  528. /* Entry point used to return from an exception. */
  529. C_ENTRY(ret_from_exc):
  530. lwi r11, r1, PTO + PT_MODE;
  531. bnei r11, 2f; /* See if returning to kernel mode, */
  532. /* ... if so, skip resched &c. */
  533. /* We're returning to user mode, so check for various conditions that
  534. trigger rescheduling. */
  535. lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
  536. lwi r11, r11, TI_FLAGS; /* get flags in thread info */
  537. andi r11, r11, _TIF_NEED_RESCHED;
  538. beqi r11, 5f;
  539. /* Call the scheduler before returning from a syscall/trap. */
  540. bralid r15, schedule; /* Call scheduler */
  541. nop; /* delay slot */
  542. /* Maybe handle a signal */
  543. 5: lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
  544. lwi r11, r11, TI_FLAGS; /* get flags in thread info */
  545. andi r11, r11, _TIF_SIGPENDING;
  546. beqi r11, 1f; /* Signals to handle, handle them */
  547. /*
  548. * Handle a signal return; Pending signals should be in r18.
  549. *
  550. * Not all registers are saved by the normal trap/interrupt entry
  551. * points (for instance, call-saved registers (because the normal
  552. * C-compiler calling sequence in the kernel makes sure they're
  553. * preserved), and call-clobbered registers in the case of
  554. * traps), but signal handlers may want to examine or change the
  555. * complete register state. Here we save anything not saved by
  556. * the normal entry sequence, so that it may be safely restored
  557. * (in a possibly modified form) after do_signal returns. */
  558. addik r5, r1, PTO; /* Arg 1: struct pt_regs *regs */
  559. addi r7, r0, 0; /* Arg 3: int in_syscall */
  560. bralid r15, do_signal; /* Handle any signals */
  561. add r6, r0, r0; /* Arg 2: sigset_t *oldset */
  562. /* Finally, return to user state. */
  563. 1: set_bip; /* Ints masked for state restore */
  564. swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
  565. VM_OFF;
  566. tophys(r1,r1);
  567. RESTORE_REGS;
  568. addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */
  569. lwi r1, r1, PT_R1 - PT_SIZE; /* Restore user stack pointer. */
  570. bri 6f;
  571. /* Return to kernel state. */
  572. 2: set_bip; /* Ints masked for state restore */
  573. VM_OFF;
  574. tophys(r1,r1);
  575. RESTORE_REGS;
  576. addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space. */
  577. tovirt(r1,r1);
  578. 6:
  579. EXC_return: /* Make global symbol for debugging */
  580. rtbd r14, 0; /* Instructions to return from an IRQ */
  581. nop;
  582. /*
  583. * HW EXCEPTION rutine end
  584. */
  585. /*
  586. * Hardware maskable interrupts.
  587. *
  588. * The stack-pointer (r1) should have already been saved to the memory
  589. * location PER_CPU(ENTRY_SP).
  590. */
  591. C_ENTRY(_interrupt):
  592. /* MS: we are in physical address */
  593. /* Save registers, switch to proper stack, convert SP to virtual.*/
  594. swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP))
  595. /* MS: See if already in kernel mode. */
  596. mfs r1, rmsr
  597. nop
  598. andi r1, r1, MSR_UMS
  599. bnei r1, 1f
  600. /* Kernel-mode state save. */
  601. lwi r1, r0, TOPHYS(PER_CPU(ENTRY_SP))
  602. tophys(r1,r1); /* MS: I have in r1 physical address where stack is */
  603. /* save registers */
  604. /* MS: Make room on the stack -> activation record */
  605. addik r1, r1, -STATE_SAVE_SIZE;
  606. SAVE_REGS
  607. brid 2f;
  608. swi r1, r1, PTO + PT_MODE; /* 0 - user mode, 1 - kernel mode */
  609. 1:
  610. /* User-mode state save. */
  611. /* MS: get the saved current */
  612. lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
  613. tophys(r1,r1);
  614. lwi r1, r1, TS_THREAD_INFO;
  615. addik r1, r1, THREAD_SIZE;
  616. tophys(r1,r1);
  617. /* save registers */
  618. addik r1, r1, -STATE_SAVE_SIZE;
  619. SAVE_REGS
  620. /* calculate mode */
  621. swi r0, r1, PTO + PT_MODE;
  622. lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
  623. swi r11, r1, PTO+PT_R1;
  624. clear_ums;
  625. 2:
  626. lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
  627. tovirt(r1,r1)
  628. addik r15, r0, irq_call;
  629. irq_call:rtbd r0, do_IRQ;
  630. addik r5, r1, PTO;
  631. /* MS: we are in virtual mode */
  632. ret_from_irq:
  633. lwi r11, r1, PTO + PT_MODE;
  634. bnei r11, 2f;
  635. lwi r11, CURRENT_TASK, TS_THREAD_INFO;
  636. lwi r11, r11, TI_FLAGS; /* MS: get flags from thread info */
  637. andi r11, r11, _TIF_NEED_RESCHED;
  638. beqi r11, 5f
  639. bralid r15, schedule;
  640. nop; /* delay slot */
  641. /* Maybe handle a signal */
  642. 5: lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* MS: get thread info */
  643. lwi r11, r11, TI_FLAGS; /* get flags in thread info */
  644. andi r11, r11, _TIF_SIGPENDING;
  645. beqid r11, no_intr_resched
  646. /* Handle a signal return; Pending signals should be in r18. */
  647. addi r7, r0, 0; /* Arg 3: int in_syscall */
  648. addik r5, r1, PTO; /* Arg 1: struct pt_regs *regs */
  649. bralid r15, do_signal; /* Handle any signals */
  650. add r6, r0, r0; /* Arg 2: sigset_t *oldset */
  651. /* Finally, return to user state. */
  652. no_intr_resched:
  653. /* Disable interrupts, we are now committed to the state restore */
  654. disable_irq
  655. swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE);
  656. VM_OFF;
  657. tophys(r1,r1);
  658. RESTORE_REGS
  659. addik r1, r1, STATE_SAVE_SIZE /* MS: Clean up stack space. */
  660. lwi r1, r1, PT_R1 - PT_SIZE;
  661. bri 6f;
  662. /* MS: Return to kernel state. */
  663. 2:
  664. #ifdef CONFIG_PREEMPT
  665. lwi r11, CURRENT_TASK, TS_THREAD_INFO;
  666. /* MS: get preempt_count from thread info */
  667. lwi r5, r11, TI_PREEMPT_COUNT;
  668. bgti r5, restore;
  669. lwi r5, r11, TI_FLAGS; /* get flags in thread info */
  670. andi r5, r5, _TIF_NEED_RESCHED;
  671. beqi r5, restore /* if zero jump over */
  672. preempt:
  673. /* interrupts are off that's why I am calling preempt_chedule_irq */
  674. bralid r15, preempt_schedule_irq
  675. nop
  676. lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
  677. lwi r5, r11, TI_FLAGS; /* get flags in thread info */
  678. andi r5, r5, _TIF_NEED_RESCHED;
  679. bnei r5, preempt /* if non zero jump to resched */
  680. restore:
  681. #endif
  682. VM_OFF /* MS: turn off MMU */
  683. tophys(r1,r1)
  684. RESTORE_REGS
  685. addik r1, r1, STATE_SAVE_SIZE /* MS: Clean up stack space. */
  686. tovirt(r1,r1);
  687. 6:
  688. IRQ_return: /* MS: Make global symbol for debugging */
  689. rtid r14, 0
  690. nop
  691. /*
  692. * Debug trap for KGDB. Enter to _debug_exception by brki r16, 0x18
  693. * and call handling function with saved pt_regs
  694. */
  695. C_ENTRY(_debug_exception):
  696. /* BIP bit is set on entry, no interrupts can occur */
  697. swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP))
  698. mfs r1, rmsr
  699. nop
  700. andi r1, r1, MSR_UMS
  701. bnei r1, 1f
  702. /* MS: Kernel-mode state save - kgdb */
  703. lwi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* Reload kernel stack-ptr*/
  704. /* BIP bit is set on entry, no interrupts can occur */
  705. addik r1, r1, CONFIG_KERNEL_BASE_ADDR - CONFIG_KERNEL_START - STATE_SAVE_SIZE;
  706. SAVE_REGS;
  707. /* save all regs to pt_reg structure */
  708. swi r0, r1, PTO+PT_R0; /* R0 must be saved too */
  709. swi r14, r1, PTO+PT_R14 /* rewrite saved R14 value */
  710. swi r16, r1, PTO+PT_PC; /* PC and r16 are the same */
  711. /* save special purpose registers to pt_regs */
  712. mfs r11, rear;
  713. swi r11, r1, PTO+PT_EAR;
  714. mfs r11, resr;
  715. swi r11, r1, PTO+PT_ESR;
  716. mfs r11, rfsr;
  717. swi r11, r1, PTO+PT_FSR;
  718. /* stack pointer is in physical address at it is decrease
  719. * by STATE_SAVE_SIZE but we need to get correct R1 value */
  720. addik r11, r1, CONFIG_KERNEL_START - CONFIG_KERNEL_BASE_ADDR + STATE_SAVE_SIZE;
  721. swi r11, r1, PTO+PT_R1
  722. /* MS: r31 - current pointer isn't changed */
  723. tovirt(r1,r1)
  724. #ifdef CONFIG_KGDB
  725. addi r5, r1, PTO /* pass pt_reg address as the first arg */
  726. la r15, r0, dbtrap_call; /* return address */
  727. rtbd r0, microblaze_kgdb_break
  728. nop;
  729. #endif
  730. /* MS: Place handler for brki from kernel space if KGDB is OFF.
  731. * It is very unlikely that another brki instruction is called. */
  732. bri 0
  733. /* MS: User-mode state save - gdb */
  734. 1: lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
  735. tophys(r1,r1);
  736. lwi r1, r1, TS_THREAD_INFO; /* get the thread info */
  737. addik r1, r1, THREAD_SIZE; /* calculate kernel stack pointer */
  738. tophys(r1,r1);
  739. addik r1, r1, -STATE_SAVE_SIZE; /* Make room on the stack. */
  740. SAVE_REGS;
  741. swi r16, r1, PTO+PT_PC; /* Save LP */
  742. swi r0, r1, PTO + PT_MODE; /* Was in user-mode. */
  743. lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
  744. swi r11, r1, PTO+PT_R1; /* Store user SP. */
  745. lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
  746. tovirt(r1,r1)
  747. set_vms;
  748. addik r5, r1, PTO;
  749. addik r15, r0, dbtrap_call;
  750. dbtrap_call: /* Return point for kernel/user entry + 8 because of rtsd r15, 8 */
  751. rtbd r0, sw_exception
  752. nop
  753. /* MS: The first instruction for the second part of the gdb/kgdb */
  754. set_bip; /* Ints masked for state restore */
  755. lwi r11, r1, PTO + PT_MODE;
  756. bnei r11, 2f;
  757. /* MS: Return to user space - gdb */
  758. /* Get current task ptr into r11 */
  759. lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
  760. lwi r11, r11, TI_FLAGS; /* get flags in thread info */
  761. andi r11, r11, _TIF_NEED_RESCHED;
  762. beqi r11, 5f;
  763. /* Call the scheduler before returning from a syscall/trap. */
  764. bralid r15, schedule; /* Call scheduler */
  765. nop; /* delay slot */
  766. /* Maybe handle a signal */
  767. 5: lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
  768. lwi r11, r11, TI_FLAGS; /* get flags in thread info */
  769. andi r11, r11, _TIF_SIGPENDING;
  770. beqi r11, 1f; /* Signals to handle, handle them */
  771. addik r5, r1, PTO; /* Arg 1: struct pt_regs *regs */
  772. addi r7, r0, 0; /* Arg 3: int in_syscall */
  773. bralid r15, do_signal; /* Handle any signals */
  774. add r6, r0, r0; /* Arg 2: sigset_t *oldset */
  775. /* Finally, return to user state. */
  776. 1: swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
  777. VM_OFF;
  778. tophys(r1,r1);
  779. /* MS: Restore all regs */
  780. RESTORE_REGS
  781. addik r1, r1, STATE_SAVE_SIZE /* Clean up stack space */
  782. lwi r1, r1, PT_R1 - PT_SIZE; /* Restore user stack pointer */
  783. DBTRAP_return_user: /* MS: Make global symbol for debugging */
  784. rtbd r16, 0; /* MS: Instructions to return from a debug trap */
  785. nop;
  786. /* MS: Return to kernel state - kgdb */
  787. 2: VM_OFF;
  788. tophys(r1,r1);
  789. /* MS: Restore all regs */
  790. RESTORE_REGS
  791. lwi r14, r1, PTO+PT_R14;
  792. lwi r16, r1, PTO+PT_PC;
  793. addik r1, r1, STATE_SAVE_SIZE; /* MS: Clean up stack space */
  794. tovirt(r1,r1);
  795. DBTRAP_return_kernel: /* MS: Make global symbol for debugging */
  796. rtbd r16, 0; /* MS: Instructions to return from a debug trap */
  797. nop;
/*
 * _switch_to - hand the CPU from one task to the next.
 *
 * In:  r5 = previous task's thread_info (its cpu_context is saved),
 *      r6 = next task's thread_info (its cpu_context is restored).
 * Out: r3 = outgoing CURRENT_TASK pointer (captured before CURRENT_TASK
 *      is switched to the new task).
 *
 * Volatile registers (r3-r12) are deliberately not saved: the caller
 * already spilled them on its stack before branching here.
 */
ENTRY(_switch_to)
	/* prepare return value */
	addk	r3, r0, CURRENT_TASK

	/* save registers in cpu_context */
	/* use r11 and r12, volatile registers, as temp register */
	/* give start of cpu_context for previous process */
	addik	r11, r5, TI_CPU_CONTEXT
	swi	r1, r11, CC_R1
	swi	r2, r11, CC_R2
	/* skip volatile registers.
	 * they are saved on stack when we jumped to _switch_to() */
	/* dedicated registers */
	swi	r13, r11, CC_R13
	swi	r14, r11, CC_R14
	swi	r15, r11, CC_R15
	swi	r16, r11, CC_R16
	swi	r17, r11, CC_R17
	swi	r18, r11, CC_R18
	/* save non-volatile registers */
	swi	r19, r11, CC_R19
	swi	r20, r11, CC_R20
	swi	r21, r11, CC_R21
	swi	r22, r11, CC_R22
	swi	r23, r11, CC_R23
	swi	r24, r11, CC_R24
	swi	r25, r11, CC_R25
	swi	r26, r11, CC_R26
	swi	r27, r11, CC_R27
	swi	r28, r11, CC_R28
	swi	r29, r11, CC_R29
	swi	r30, r11, CC_R30
	/* special purpose registers */
	mfs	r12, rmsr
	swi	r12, r11, CC_MSR
	mfs	r12, rear
	swi	r12, r11, CC_EAR
	mfs	r12, resr
	swi	r12, r11, CC_ESR
	mfs	r12, rfsr
	swi	r12, r11, CC_FSR

	/* update r31, the current-give me pointer to task which will be next */
	lwi	CURRENT_TASK, r6, TI_TASK
	/* stored it to current_save too */
	swi	CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE)

	/* get new process' cpu context and restore */
	/* give me start where start context of next task */
	addik	r11, r6, TI_CPU_CONTEXT

	/* non-volatile registers */
	lwi	r30, r11, CC_R30
	lwi	r29, r11, CC_R29
	lwi	r28, r11, CC_R28
	lwi	r27, r11, CC_R27
	lwi	r26, r11, CC_R26
	lwi	r25, r11, CC_R25
	lwi	r24, r11, CC_R24
	lwi	r23, r11, CC_R23
	lwi	r22, r11, CC_R22
	lwi	r21, r11, CC_R21
	lwi	r20, r11, CC_R20
	lwi	r19, r11, CC_R19
	/* dedicated registers */
	lwi	r18, r11, CC_R18
	lwi	r17, r11, CC_R17
	lwi	r16, r11, CC_R16
	lwi	r15, r11, CC_R15
	lwi	r14, r11, CC_R14
	lwi	r13, r11, CC_R13
	/* skip volatile registers */
	lwi	r2, r11, CC_R2
	lwi	r1, r11, CC_R1

	/* special purpose registers */
	/* NOTE(review): CC_EAR and CC_ESR are saved above but never
	 * restored here - presumably intentional (they are snapshots of
	 * the last exception, not live state); confirm. */
	lwi	r12, r11, CC_FSR
	mts	rfsr, r12
	/* MSR is restored last so the mode/interrupt state of the new
	 * task takes effect right before returning into it */
	lwi	r12, r11, CC_MSR
	mts	rmsr, r12
	rtsd	r15, 8		/* return into the new task's saved r15 */
	nop			/* delay slot */
/* Soft reset: transfer control back to the FS-BOOT bootloader entry
 * point at physical address 0x70. */
ENTRY(_reset)
	brai	0x70; /* Jump back to FS-boot */
/* These are compiled and loaded into high memory, then
 * copied into place in mach_early_setup */
	.section	.init.ivt, "ax"
	.org	0x0
	/*
	 * MicroBlaze interrupt vector table.  NOTE(review): vector slots
	 * are presumably 8 bytes apart (0x00 reset, 0x08 user exception,
	 * 0x10 interrupt, 0x18 debug/break, 0x20 HW exception); the
	 * TOPHYS() targets make the assembler emit imm+brai pairs that
	 * fill each slot - confirm against the processor reference.
	 */
	/* this is very important - here is the reset vector */
	/* in current MMU branch you don't care what is here - it is
	 * used from bootloader site - but this is correct for FS-BOOT */
	brai	0x70
	nop
	brai	TOPHYS(_user_exception); /* syscall handler */
	brai	TOPHYS(_interrupt); /* Interrupt handler */
	brai	TOPHYS(_debug_exception); /* debug trap handler */
	brai	TOPHYS(_hw_exception_handler); /* HW exception handler */
	.section .rodata,"a"
/* Pull in the syscall dispatch table (defines sys_call_table). */
#include "syscall_table.S"

syscall_table_size=(.-sys_call_table)

/* Display strings for the stack-unwinder trap table below. */
type_SYSCALL:
	.ascii "SYSCALL\0"
type_IRQ:
	.ascii "IRQ\0"
type_IRQ_PREEMPT:
	.ascii "IRQ (PREEMPTED)\0"
type_SYSCALL_PREEMPT:
	/* NOTE(review): leading space looks unintentional but is runtime
	 * data consumed by the unwinder - left byte-identical; confirm. */
	.ascii " SYSCALL (PREEMPTED)\0"

	/*
	 * Trap decoding for stack unwinder
	 * Tuples are (start addr, end addr, string)
	 * If return address lies on [start addr, end addr],
	 * unwinder displays 'string'
	 */
	.align 4
.global microblaze_trap_handlers
microblaze_trap_handlers:
	/* Exact matches come first (start == end) */
	.word ret_from_trap; .word ret_from_trap ; .word type_SYSCALL
	.word ret_from_irq ; .word ret_from_irq ; .word type_IRQ
	/* Fuzzy matches go here (start < end ranges) */
	.word ret_from_irq ; .word no_intr_resched ; .word type_IRQ_PREEMPT
	.word ret_from_trap; .word TRAP_return ; .word type_SYSCALL_PREEMPT
	/* End of table: all-zero sentinel row */
	.word 0 ; .word 0 ; .word 0