/* arch/microblaze/kernel/entry.S */
  1. /*
  2. * Low-level system-call handling, trap handlers and context-switching
  3. *
  4. * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
  5. * Copyright (C) 2008-2009 PetaLogix
  6. * Copyright (C) 2003 John Williams <jwilliams@itee.uq.edu.au>
  7. * Copyright (C) 2001,2002 NEC Corporation
  8. * Copyright (C) 2001,2002 Miles Bader <miles@gnu.org>
  9. *
  10. * This file is subject to the terms and conditions of the GNU General
  11. * Public License. See the file COPYING in the main directory of this
  12. * archive for more details.
  13. *
  14. * Written by Miles Bader <miles@gnu.org>
  15. * Heavily modified by John Williams for Microblaze
  16. */
  17. #include <linux/sys.h>
  18. #include <linux/linkage.h>
  19. #include <asm/entry.h>
  20. #include <asm/current.h>
  21. #include <asm/processor.h>
  22. #include <asm/exceptions.h>
  23. #include <asm/asm-offsets.h>
  24. #include <asm/thread_info.h>
  25. #include <asm/page.h>
  26. #include <asm/unistd.h>
  27. #include <linux/errno.h>
  28. #include <asm/signal.h>
#undef DEBUG

#ifdef DEBUG
/* Create space for syscalls counting. */
/* One 32-bit counter slot per syscall; the code under #ifdef DEBUG in the
 * syscall entry path below also uses slot 0 as a running total of all
 * syscalls executed. */
.section .data
.global syscall_debug_table
.align 4
syscall_debug_table:
	.space	(__NR_syscalls * 4)
#endif /* DEBUG */
/* C_ENTRY: declare a global, 4-byte-aligned entry-point label. */
#define C_ENTRY(name)	.globl name; .align 4; name

/*
 * Various ways of setting and clearing BIP in flags reg.
 * This is mucky, but necessary using microblaze version that
 * allows msr ops to write to BIP
 */
#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
	/* CPU provides msrset/msrclr: each macro is a single MSR bit
	 * operation; the previous MSR value is discarded into r0, so no
	 * temporary register is clobbered. */
	.macro	clear_bip
	msrclr	r0, MSR_BIP
	.endm

	.macro	set_bip
	msrset	r0, MSR_BIP
	.endm

	.macro	clear_eip
	msrclr	r0, MSR_EIP
	.endm

	.macro	set_ee
	msrset	r0, MSR_EE
	.endm

	.macro	disable_irq
	msrclr	r0, MSR_IE
	.endm

	.macro	enable_irq
	msrset	r0, MSR_IE
	.endm

	.macro	set_ums
	msrset	r0, MSR_UMS
	msrclr	r0, MSR_VMS
	.endm

	.macro	set_vms
	msrclr	r0, MSR_UMS
	msrset	r0, MSR_VMS
	.endm

	.macro	clear_ums
	msrclr	r0, MSR_UMS
	.endm

	.macro	clear_vms_ums
	msrclr	r0, MSR_VMS | MSR_UMS
	.endm
#else
	/* No msrset/msrclr instructions: fall back to a read-modify-write
	 * of rMSR using r11 as scratch.  Callers must treat r11 as
	 * clobbered by every macro in this branch. */
	.macro	clear_bip
	mfs	r11, rmsr
	andi	r11, r11, ~MSR_BIP
	mts	rmsr, r11
	.endm

	.macro	set_bip
	mfs	r11, rmsr
	ori	r11, r11, MSR_BIP
	mts	rmsr, r11
	.endm

	.macro	clear_eip
	mfs	r11, rmsr
	andi	r11, r11, ~MSR_EIP
	mts	rmsr, r11
	.endm

	.macro	set_ee
	mfs	r11, rmsr
	ori	r11, r11, MSR_EE
	mts	rmsr, r11
	.endm

	.macro	disable_irq
	mfs	r11, rmsr
	andi	r11, r11, ~MSR_IE
	mts	rmsr, r11
	.endm

	.macro	enable_irq
	mfs	r11, rmsr
	ori	r11, r11, MSR_IE
	mts	rmsr, r11
	.endm

	/* NOTE(review): this fallback sets VMS and clears UMS — identical
	 * to set_vms below, and the opposite of the msrset/msrclr variant
	 * of set_ums above (which sets UMS and clears VMS).  TODO: confirm
	 * which behavior is intended on non-MSR-instruction hardware. */
	.macro	set_ums
	mfs	r11, rmsr
	ori	r11, r11, MSR_VMS
	andni	r11, r11, MSR_UMS
	mts	rmsr, r11
	.endm

	.macro	set_vms
	mfs	r11, rmsr
	ori	r11, r11, MSR_VMS
	andni	r11, r11, MSR_UMS
	mts	rmsr, r11
	.endm

	.macro	clear_ums
	mfs	r11, rmsr
	andni	r11, r11, MSR_UMS
	mts	rmsr,r11
	.endm

	.macro	clear_vms_ums
	mfs	r11, rmsr
	andni	r11, r11, (MSR_VMS|MSR_UMS)
	mts	rmsr,r11
	.endm
#endif
/* Define how to call high-level functions. With MMU, virtual mode must be
 * enabled when calling the high-level function. Clobbers R11.
 * VM_ON, VM_OFF, DO_JUMP_BIPCLR, DO_CALL
 */

/* Turn on virtual protected mode save: set UMS/clear VMS, then rted to
 * the very next instruction so the new mode takes effect (nop fills the
 * delay slot). */
#define VM_ON		\
	set_ums;	\
	rted	r0, 2f;	\
	nop; \
2:

/* Turn off virtual protected mode save and user mode save: clear
 * VMS|UMS and resume at the PHYSICAL address of the next instruction
 * (TOPHYS), since address translation is being switched off. */
#define VM_OFF		\
	clear_vms_ums;	\
	rted	r0, TOPHYS(1f);	\
	nop; \
1:
/*
 * SAVE_REGS: dump r2-r31 plus MSR into the pt_regs frame that r1 points
 * at.  r14 (the PC latched by hardware on trap/IRQ entry) is stored to
 * PT_PC.  r11 doubles as the MSR scratch, but only after its own value
 * has already been saved to PT_R11.
 */
#define SAVE_REGS \
	swi	r2, r1, PT_R2;	/* Save SDA */			\
	swi	r3, r1, PT_R3;					\
	swi	r4, r1, PT_R4;					\
	swi	r5, r1, PT_R5;					\
	swi	r6, r1, PT_R6;					\
	swi	r7, r1, PT_R7;					\
	swi	r8, r1, PT_R8;					\
	swi	r9, r1, PT_R9;					\
	swi	r10, r1, PT_R10;				\
	swi	r11, r1, PT_R11;	/* save clobbered regs after rval */\
	swi	r12, r1, PT_R12;				\
	swi	r13, r1, PT_R13;	/* Save SDA2 */		\
	swi	r14, r1, PT_PC;		/* PC, before IRQ/trap */	\
	swi	r15, r1, PT_R15;	/* Save LP */		\
	swi	r16, r1, PT_R16;				\
	swi	r17, r1, PT_R17;				\
	swi	r18, r1, PT_R18;	/* Save asm scratch reg */	\
	swi	r19, r1, PT_R19;				\
	swi	r20, r1, PT_R20;				\
	swi	r21, r1, PT_R21;				\
	swi	r22, r1, PT_R22;				\
	swi	r23, r1, PT_R23;				\
	swi	r24, r1, PT_R24;				\
	swi	r25, r1, PT_R25;				\
	swi	r26, r1, PT_R26;				\
	swi	r27, r1, PT_R27;				\
	swi	r28, r1, PT_R28;				\
	swi	r29, r1, PT_R29;				\
	swi	r30, r1, PT_R30;				\
	swi	r31, r1, PT_R31;	/* Save current task reg */	\
	mfs	r11, rmsr;		/* save MSR */		\
	swi	r11, r1, PT_MSR;

/*
 * RESTORE_REGS: inverse of SAVE_REGS — reload MSR first (through r11),
 * then r2-r31, from the pt_regs frame at r1.  r1 itself is NOT restored
 * here; each exit path fixes up the stack pointer afterwards.
 */
#define RESTORE_REGS \
	lwi	r11, r1, PT_MSR;				\
	mts	rmsr , r11;					\
	lwi	r2, r1, PT_R2;	/* restore SDA */		\
	lwi	r3, r1, PT_R3;					\
	lwi	r4, r1, PT_R4;					\
	lwi	r5, r1, PT_R5;					\
	lwi	r6, r1, PT_R6;					\
	lwi	r7, r1, PT_R7;					\
	lwi	r8, r1, PT_R8;					\
	lwi	r9, r1, PT_R9;					\
	lwi	r10, r1, PT_R10;				\
	lwi	r11, r1, PT_R11;	/* restore clobbered regs after rval */\
	lwi	r12, r1, PT_R12;				\
	lwi	r13, r1, PT_R13;	/* restore SDA2 */	\
	lwi	r14, r1, PT_PC;	/* RESTORE_LINK PC, before IRQ/trap */\
	lwi	r15, r1, PT_R15;	/* restore LP */	\
	lwi	r16, r1, PT_R16;				\
	lwi	r17, r1, PT_R17;				\
	lwi	r18, r1, PT_R18;	/* restore asm scratch reg */	\
	lwi	r19, r1, PT_R19;				\
	lwi	r20, r1, PT_R20;				\
	lwi	r21, r1, PT_R21;				\
	lwi	r22, r1, PT_R22;				\
	lwi	r23, r1, PT_R23;				\
	lwi	r24, r1, PT_R24;				\
	lwi	r25, r1, PT_R25;				\
	lwi	r26, r1, PT_R26;				\
	lwi	r27, r1, PT_R27;				\
	lwi	r28, r1, PT_R28;				\
	lwi	r29, r1, PT_R29;				\
	lwi	r30, r1, PT_R30;				\
	lwi	r31, r1, PT_R31;	/* Restore cur task reg */
/*
 * SAVE_STATE: common exception-entry state save, run while address
 * translation is off (all memory references go through TOPHYS).
 * Chooses the stack based on the saved MSR's UMS bit: if the exception
 * came from kernel mode, reuse the saved kernel SP; if from user mode,
 * switch to the task's kernel stack.  Builds a pt_regs frame, stores
 * PT_MODE (0 = from user, non-zero = from kernel; the kernel-path store
 * sits in the brid delay slot) and finally loads CURRENT_TASK.
 * Clobbers r11.
 */
#define SAVE_STATE	   \
	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* save stack */	\
	/* See if already in kernel mode.*/				\
	mfs	r1, rmsr;						\
	andi	r1, r1, MSR_UMS;					\
	bnei	r1, 1f;						\
	/* Kernel-mode state save.  */					\
	/* Reload kernel stack-ptr. */					\
	lwi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP));			\
	/* FIXME: I can add these two lines to one */			\
	/* tophys(r1,r1); */						\
	/* addik	r1, r1, -PT_SIZE; */				\
	addik	r1, r1, CONFIG_KERNEL_BASE_ADDR - CONFIG_KERNEL_START - PT_SIZE; \
	SAVE_REGS							\
	brid	2f;							\
	swi	r1, r1, PT_MODE;	/* non-zero => was in kernel mode */ \
1:	/* User-mode state save.  */					\
	lwi	r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */\
	tophys(r1,r1);							\
	lwi	r1, r1, TS_THREAD_INFO;	/* get the thread info */	\
	/* MS these three instructions can be added to one */		\
	/* addik	r1, r1, THREAD_SIZE; */				\
	/* tophys(r1,r1); */						\
	/* addik	r1, r1, -PT_SIZE; */				\
	addik	r1, r1, THREAD_SIZE + CONFIG_KERNEL_BASE_ADDR - CONFIG_KERNEL_START - PT_SIZE; \
	SAVE_REGS							\
	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));			\
	swi	r11, r1, PT_R1; /* Store user SP.  */			\
	swi	r0, r1, PT_MODE; /* Was in user-mode.  */		\
	/* MS: I am clearing UMS even in case when I come from kernel space */ \
	clear_ums;							\
2:	lwi	CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
.text

/*
 * User trap.
 *
 * System calls are handled here.
 *
 * Syscall protocol:
 * Syscall number in r12, args in r5-r10
 * Return value in r3
 *
 * Trap entered via brki instruction, so BIP bit is set, and interrupts
 * are masked. This is nice, means we don't have to CLI before state save
 */
C_ENTRY(_user_exception):
	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
	addi	r14, r14, 4	/* return address is 4 byte after call */

	/* Switch to the task's kernel stack (physical addressing). */
	lwi	r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
	tophys(r1,r1);
	lwi	r1, r1, TS_THREAD_INFO;	/* get stack from task_struct */
	/* calculate kernel stack pointer from task struct 8k */
	addik	r1, r1, THREAD_SIZE;
	tophys(r1,r1);

	addik	r1, r1, -PT_SIZE; /* Make room on the stack. */
	SAVE_REGS
	/* Pre-clear the return-value slots in pt_regs. */
	swi	r0, r1, PT_R3
	swi	r0, r1, PT_R4

	swi	r0, r1, PT_MODE;		/* Was in user-mode. */
	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
	swi	r11, r1, PT_R1;			/* Store user SP. */
	clear_ums;
2:	lwi	CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
	/* Save away the syscall number. */
	swi	r12, r1, PT_R0;
	tovirt(r1,r1)

	/* Jump to the appropriate function for the system call number in r12
	 * (r12 is not preserved), or return an error if r12 is not valid. The LP
	 * register should point to the location where
	 * the called function should return. [note that MAKE_SYS_CALL uses label 1] */
	/* Step into virtual mode */
	rtbd	r0, 3f
	nop
3:
	/* If any syscall-tracing work is flagged, call the tracer first. */
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO /* get thread info */
	lwi	r11, r11, TI_FLAGS	/* get flags in thread info */
	andi	r11, r11, _TIF_WORK_SYSCALL_MASK
	beqi	r11, 4f

	addik	r3, r0, -ENOSYS
	swi	r3, r1, PT_R3
	brlid	r15, do_syscall_trace_enter
	addik	r5, r1, PT_R0		/* arg: &regs->r0 (delay slot) */

	# do_syscall_trace_enter returns the new syscall nr.
	addk	r12, r0, r3
	/* Reload the syscall arguments the tracer may have modified. */
	lwi	r5, r1, PT_R5;
	lwi	r6, r1, PT_R6;
	lwi	r7, r1, PT_R7;
	lwi	r8, r1, PT_R8;
	lwi	r9, r1, PT_R9;
	lwi	r10, r1, PT_R10;
4:
	/* Jump to the appropriate function for the system call number in r12
	 * (r12 is not preserved), or return an error if r12 is not valid.
	 * The LP register should point to the location where the called function
	 * should return. [note that MAKE_SYS_CALL uses label 1] */
	/* See if the system call number is valid */
	addi	r11, r12, -__NR_syscalls;
	bgei	r11, 5f;
	/* Figure out which function to use for this system call. */
	/* Note Microblaze barrel shift is optional, so don't rely on it */
	add	r12, r12, r12;		/* convert num -> ptr (nr * 4) */
	add	r12, r12, r12;
	addi	r30, r0, 1		/* restarts allowed */

#ifdef DEBUG
	/* Trace syscalls and store counts in syscall_debug_table */
	/* The first syscall location stores total syscall number */
	lwi	r3, r0, syscall_debug_table
	addi	r3, r3, 1
	swi	r3, r0, syscall_debug_table
	lwi	r3, r12, syscall_debug_table
	addi	r3, r3, 1
	swi	r3, r12, syscall_debug_table
#endif

	# Find and jump into the syscall handler.
	lwi	r12, r12, sys_call_table
	/* where the trap should return need -8 to adjust for rtsd r15, 8 */
	addi	r15, r0, ret_from_trap-8
	bra	r12

	/* The syscall number is invalid, return an error. */
5:
	rtsd	r15, 8;		/* looks like a normal subroutine return */
	addi	r3, r0, -ENOSYS;
/* Entry point used to return from a syscall/trap. */
/* We re-enable BIP bit before state restore.
 * On entry: r3/r4 hold the syscall return value, r30 non-zero means
 * syscall restart is allowed (passed to do_notify_resume). */
C_ENTRY(ret_from_trap):
	swi	r3, r1, PT_R3
	swi	r4, r1, PT_R4

	lwi	r11, r1, PT_MODE;
	/* See if returning to kernel mode, if so, skip resched &c. */
	bnei	r11, 2f;
	/* We're returning to user mode, so check for various conditions that
	 * trigger rescheduling. */
	/* FIXME: Restructure all these flag checks. */
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;	/* get thread info */
	lwi	r11, r11, TI_FLAGS;		/* get flags in thread info */
	andi	r11, r11, _TIF_WORK_SYSCALL_MASK
	beqi	r11, 1f

	brlid	r15, do_syscall_trace_leave
	addik	r5, r1, PT_R0		/* arg: &regs->r0 (delay slot) */
1:
	/* We're returning to user mode, so check for various conditions that
	 * trigger rescheduling. */
	/* get thread info from current task */
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;
	lwi	r19, r11, TI_FLAGS;		/* get flags in thread info */
	andi	r11, r19, _TIF_NEED_RESCHED;
	beqi	r11, 5f;

	bralid	r15, schedule;		/* Call scheduler */
	nop;				/* delay slot */
	bri	1b			/* re-check all work flags */

	/* Maybe handle a signal */
5:
	andi	r11, r19, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME;
	beqi	r11, 4f;		/* no signal work pending - restore state */

	addik	r5, r1, 0;		/* Arg 1: struct pt_regs *regs */
	bralid	r15, do_notify_resume;	/* Handle any signals */
	add	r6, r30, r0;		/* Arg 2: int in_syscall (delay slot) */
	add	r30, r0, r0		/* no more restarts */
	bri	1b			/* re-check all work flags */

/* Finally, return to user state. */
4:	set_bip;			/* Ints masked for state restore */
	swi	CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
	VM_OFF;
	tophys(r1,r1);
	RESTORE_REGS;
	addik	r1, r1, PT_SIZE		/* Clean up stack space. */
	lwi	r1, r1, PT_R1 - PT_SIZE; /* Restore user stack pointer. */
	bri	6f;

/* Return to kernel state. */
2:	set_bip;			/* Ints masked for state restore */
	VM_OFF;
	tophys(r1,r1);
	RESTORE_REGS;
	addik	r1, r1, PT_SIZE		/* Clean up stack space. */
	tovirt(r1,r1);
6:
TRAP_return:		/* Make global symbol for debugging */
	rtbd	r14, 0;	/* Instructions to return from an IRQ */
	nop;
/* This is the initial entry point for a new child thread, with an appropriate
   stack in place that makes it look like the child is in the middle of a
   syscall.  This function is actually `returned to' from switch_thread
   (copy_thread makes ret_from_fork the return address in each new thread's
   saved context). */
C_ENTRY(ret_from_fork):
	bralid	r15, schedule_tail; /* ...which is schedule_tail's arg */
	add	r5, r3, r0;	/* switch_thread returns the prev task */
				/* ( in the delay slot ) */
	brid	ret_from_trap;	/* Do normal trap return */
	add	r3, r0, r0;	/* Child's fork call should return 0. */
/* First entry for a new kernel thread: run schedule_tail for the previous
 * task, then call the thread function left in r20 with the argument left
 * in r19 (set up by copy_thread), and finally exit through the normal
 * trap-return path with a 0 "return value". */
C_ENTRY(ret_from_kernel_thread):
	bralid	r15, schedule_tail; /* ...which is schedule_tail's arg */
	add	r5, r3, r0;	/* switch_thread returns the prev task */
				/* ( in the delay slot ) */
	brald	r15, r20	/* fn was left in r20 */
	addk	r5, r0, r19	/* ... and argument - in r19 (delay slot) */
	brid	ret_from_trap
	add	r3, r0, r0
/* Wrapper for sys_rt_sigreturn: pass the pt_regs frame (r1) as the first
 * argument and clear r30 so the signal path does not attempt a syscall
 * restart after sigreturn. */
C_ENTRY(sys_rt_sigreturn_wrapper):
	addik	r30, r0, 0		/* no restarts */
	brid	sys_rt_sigreturn	/* Do real work */
	addik	r5, r1, 0;	/* add user context as 1st arg (delay slot) */
/*
 * HW EXCEPTION routine start
 */
/* Generic hardware exception: save state, then call full_exception(regs,
 * ESR, FSR) in virtual mode; returns through ret_from_exc. */
C_ENTRY(full_exception_trap):
	/* adjust exception address for privileged instruction
	 * for finding where is it */
	addik	r17, r17, -4
	SAVE_STATE /* Save registers */
	/* PC, before IRQ/trap - this is one instruction above */
	swi	r17, r1, PT_PC;
	tovirt(r1,r1)
	/* FIXME this can be store directly in PT_ESR reg.
	 * I tested it but there is a fault */
	/* where the trap should return need -8 to adjust for rtsd r15, 8 */
	addik	r15, r0, ret_from_exc - 8
	mfs	r6, resr		/* arg 2: ESR */
	mfs	r7, rfsr;		/* save FSR (arg 3) */
	mts	rfsr, r0;		/* Clear sticky fsr */
	rted	r0, full_exception
	addik	r5, r1, 0 /* parameter struct pt_regs * regs (delay slot) */
/*
 * Unaligned data trap.
 *
 * Unaligned data trap last on 4k page is handled here.
 *
 * Trap entered via exception, so EE bit is set, and interrupts
 * are masked. This is nice, means we don't have to CLI before state save
 *
 * The assembler routine is in "arch/microblaze/kernel/hw_exception_handler.S"
 */
C_ENTRY(unaligned_data_trap):
	/* MS: I have to save r11 value and then restore it because
	 * set_bip, clear_eip, set_ee use r11 as temp register if MSR
	 * instructions are not used. We don't need to do if MSR instructions
	 * are used and they use r0 instead of r11.
	 * I am using ENTRY_SP which should be primary used only for stack
	 * pointer saving. */
	swi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
	set_bip;	/* equalize initial state for all possible entries */
	clear_eip;
	set_ee;
	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
	SAVE_STATE		/* Save registers.*/
	/* PC, before IRQ/trap - this is one instruction above */
	swi	r17, r1, PT_PC;
	tovirt(r1,r1)
	/* where the trap should return need -8 to adjust for rtsd r15, 8 */
	addik	r15, r0, ret_from_exc-8
	mfs	r3, resr		/* ESR */
	mfs	r4, rear		/* EAR */
	rtbd	r0, _unaligned_data_exception
	addik	r7, r1, 0 /* parameter struct pt_regs * regs (delay slot) */
/*
 * Page fault traps.
 *
 * If the real exception handler (from hw_exception_handler.S) didn't find
 * the mapping for the process, then we're thrown here to handle such situation.
 *
 * Trap entered via exceptions, so EE bit is set, and interrupts
 * are masked. This is nice, means we don't have to CLI before state save
 *
 * Build a standard exception frame for TLB Access errors. All TLB exceptions
 * will bail out to this point if they can't resolve the lightweight TLB fault.
 *
 * The C function called is in "arch/microblaze/mm/fault.c", declared as:
 * void do_page_fault(struct pt_regs *regs,
 * unsigned long address,
 * unsigned long error_code)
 */
/* Data and instruction faults both land in do_page_fault, which tells
 * them apart via the error_code argument (ESR here, 0 for instruction). */
C_ENTRY(page_fault_data_trap):
	SAVE_STATE		/* Save registers.*/
	/* PC, before IRQ/trap - this is one instruction above */
	swi	r17, r1, PT_PC;
	tovirt(r1,r1)
	/* where the trap should return need -8 to adjust for rtsd r15, 8 */
	addik	r15, r0, ret_from_exc-8
	mfs	r6, rear		/* parameter unsigned long address */
	mfs	r7, resr		/* parameter unsigned long error_code */
	rted	r0, do_page_fault
	addik	r5, r1, 0 /* parameter struct pt_regs * regs (delay slot) */
/* Instruction-side page fault: same as page_fault_data_trap but the
 * error_code argument is forced to 0 to mark an instruction fault. */
C_ENTRY(page_fault_instr_trap):
	SAVE_STATE		/* Save registers.*/
	/* PC, before IRQ/trap - this is one instruction above */
	swi	r17, r1, PT_PC;
	tovirt(r1,r1)
	/* where the trap should return need -8 to adjust for rtsd r15, 8 */
	addik	r15, r0, ret_from_exc-8
	mfs	r6, rear		/* parameter unsigned long address */
	ori	r7, r0, 0		/* parameter unsigned long error_code */
	rted	r0, do_page_fault
	addik	r5, r1, 0 /* parameter struct pt_regs * regs (delay slot) */
/* Entry point used to return from an exception. */
C_ENTRY(ret_from_exc):
	lwi	r11, r1, PT_MODE;
	bnei	r11, 2f;	/* See if returning to kernel mode, */
				/* ... if so, skip resched &c. */

	/* We're returning to user mode, so check for various conditions that
	   trigger rescheduling. */
1:
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;	/* get thread info */
	lwi	r19, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r11, r19, _TIF_NEED_RESCHED;
	beqi	r11, 5f;

	/* Call the scheduler before returning from a syscall/trap. */
	bralid	r15, schedule;	/* Call scheduler */
	nop;			/* delay slot */
	bri	1b		/* re-check all work flags */

	/* Maybe handle a signal */
5:	andi	r11, r19, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME;
	beqi	r11, 4f;	/* no signal work pending - restore state */

	/*
	 * Handle a signal return; Pending signals should be in r18.
	 *
	 * Not all registers are saved by the normal trap/interrupt entry
	 * points (for instance, call-saved registers (because the normal
	 * C-compiler calling sequence in the kernel makes sure they're
	 * preserved), and call-clobbered registers in the case of
	 * traps), but signal handlers may want to examine or change the
	 * complete register state. Here we save anything not saved by
	 * the normal entry sequence, so that it may be safely restored
	 * (in a possibly modified form) after do_notify_resume returns. */
	addik	r5, r1, 0;		/* Arg 1: struct pt_regs *regs */
	bralid	r15, do_notify_resume;	/* Handle any signals */
	addi	r6, r0, 0;	/* Arg 2: int in_syscall (delay slot) */
	bri	1b		/* re-check all work flags */

/* Finally, return to user state. */
4:	set_bip;			/* Ints masked for state restore */
	swi	CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
	VM_OFF;
	tophys(r1,r1);
	RESTORE_REGS;
	addik	r1, r1, PT_SIZE		/* Clean up stack space. */
	lwi	r1, r1, PT_R1 - PT_SIZE; /* Restore user stack pointer. */
	bri	6f;

/* Return to kernel state. */
2:	set_bip;			/* Ints masked for state restore */
	VM_OFF;
	tophys(r1,r1);
	RESTORE_REGS;
	addik	r1, r1, PT_SIZE		/* Clean up stack space. */
	tovirt(r1,r1);
6:
EXC_return:		/* Make global symbol for debugging */
	rtbd	r14, 0;	/* Instructions to return from an IRQ */
	nop;

/*
 * HW EXCEPTION routine end
 */
/*
 * Hardware maskable interrupts.
 *
 * The stack-pointer (r1) should have already been saved to the memory
 * location PER_CPU(ENTRY_SP).
 */
C_ENTRY(_interrupt):
/* MS: we are in physical address */
/* Save registers, switch to proper stack, convert SP to virtual.*/
	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP))
	/* MS: See if already in kernel mode. */
	mfs	r1, rmsr
	nop
	andi	r1, r1, MSR_UMS
	bnei	r1, 1f

/* Kernel-mode state save. */
	lwi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP))
	tophys(r1,r1); /* MS: I have in r1 physical address where stack is */
	/* save registers */
/* MS: Make room on the stack -> activation record */
	addik	r1, r1, -PT_SIZE;
	SAVE_REGS
	brid	2f;
	swi	r1, r1, PT_MODE; /* 0 - user mode, 1 - kernel mode (delay slot) */
1:
/* User-mode state save. */
	/* MS: get the saved current */
	lwi	r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
	tophys(r1,r1);
	lwi	r1, r1, TS_THREAD_INFO;
	addik	r1, r1, THREAD_SIZE;
	tophys(r1,r1);
	/* save registers */
	addik	r1, r1, -PT_SIZE;
	SAVE_REGS
	/* calculate mode */
	swi	r0, r1, PT_MODE;
	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
	swi	r11, r1, PT_R1;
	clear_ums;
2:
	lwi	CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
	tovirt(r1,r1)
	/* Call do_IRQ(regs); rtbd also switches back to virtual mode.
	 * r15 is set to irq_call so do_IRQ "returns" to ret_from_irq
	 * via the usual rtsd r15, 8 convention. */
	addik	r15, r0, irq_call;
irq_call:rtbd	r0, do_IRQ;
	addik	r5, r1, 0;	/* arg: struct pt_regs * (delay slot) */

/* MS: we are in virtual mode */
ret_from_irq:
	lwi	r11, r1, PT_MODE;
	bnei	r11, 2f;	/* kernel-mode return path */

1:
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;
	lwi	r19, r11, TI_FLAGS; /* MS: get flags from thread info */
	andi	r11, r19, _TIF_NEED_RESCHED;
	beqi	r11, 5f
	bralid	r15, schedule;
	nop;			/* delay slot */
	bri	1b		/* re-check all work flags */

	/* Maybe handle a signal */
5:	andi	r11, r19, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME;
	beqid	r11, no_intr_resched
/* Handle a signal return; Pending signals should be in r18. */
	addik	r5, r1, 0;	/* Arg 1: struct pt_regs *regs (delay slot) */
	bralid	r15, do_notify_resume;	/* Handle any signals */
	addi	r6, r0, 0;	/* Arg 2: int in_syscall (delay slot) */
	bri	1b		/* re-check all work flags */

/* Finally, return to user state. */
no_intr_resched:
	/* Disable interrupts, we are now committed to the state restore */
	disable_irq
	swi	CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE);
	VM_OFF;
	tophys(r1,r1);
	RESTORE_REGS
	addik	r1, r1, PT_SIZE	/* MS: Clean up stack space. */
	lwi	r1, r1, PT_R1 - PT_SIZE; /* restore user stack pointer */
	bri	6f;
/* MS: Return to kernel state. */
2:
#ifdef CONFIG_PREEMPT
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;
	/* MS: get preempt_count from thread info */
	lwi	r5, r11, TI_PREEMPT_COUNT;
	bgti	r5, restore;	/* preemption disabled - skip resched */

	lwi	r5, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r5, r5, _TIF_NEED_RESCHED;
	beqi	r5, restore	/* if zero jump over */

preempt:
	/* interrupts are off, that's why I am calling preempt_schedule_irq */
	bralid	r15, preempt_schedule_irq
	nop
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;	/* get thread info */
	lwi	r5, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r5, r5, _TIF_NEED_RESCHED;
	bnei	r5, preempt	/* if non zero jump to resched */
restore:
#endif
	VM_OFF /* MS: turn off MMU */
	tophys(r1,r1)
	RESTORE_REGS
	addik	r1, r1, PT_SIZE	/* MS: Clean up stack space. */
	tovirt(r1,r1);
6:
IRQ_return: /* MS: Make global symbol for debugging */
	rtid	r14, 0
	nop
/*
 * Debug trap for KGDB. Enter to _debug_exception by brki r16, 0x18
 * and call handling function with saved pt_regs
 */
C_ENTRY(_debug_exception):
	/* BIP bit is set on entry, no interrupts can occur */
	swi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP))

	mfs	r1, rmsr
	nop
	andi	r1, r1, MSR_UMS
	bnei	r1, 1f
/* MS: Kernel-mode state save - kgdb */
	lwi	r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* Reload kernel stack-ptr*/

	/* BIP bit is set on entry, no interrupts can occur */
	addik	r1, r1, CONFIG_KERNEL_BASE_ADDR - CONFIG_KERNEL_START - PT_SIZE;
	SAVE_REGS;
	/* save all regs to pt_reg structure */
	swi	r0, r1, PT_R0;	/* R0 must be saved too */
	swi	r14, r1, PT_R14	/* rewrite saved R14 value */
	swi	r16, r1, PT_PC; /* PC and r16 are the same */
	/* save special purpose registers to pt_regs */
	mfs	r11, rear;
	swi	r11, r1, PT_EAR;
	mfs	r11, resr;
	swi	r11, r1, PT_ESR;
	mfs	r11, rfsr;
	swi	r11, r1, PT_FSR;

	/* stack pointer is in physical address as it was decreased
	 * by PT_SIZE but we need to get correct R1 value */
	addik	r11, r1, CONFIG_KERNEL_START - CONFIG_KERNEL_BASE_ADDR + PT_SIZE;
	swi	r11, r1, PT_R1
	/* MS: r31 - current pointer isn't changed */
	tovirt(r1,r1)
#ifdef CONFIG_KGDB
	addi	r5, r1, 0 /* pass pt_reg address as the first arg */
	addik	r15, r0, dbtrap_call;	/* return address */
	rtbd	r0, microblaze_kgdb_break
	nop;
#endif
	/* MS: Place handler for brki from kernel space if KGDB is OFF.
	 * It is very unlikely that another brki instruction is called. */
	bri 0

/* MS: User-mode state save - gdb */
1:
	lwi	r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
	tophys(r1,r1);
	lwi	r1, r1, TS_THREAD_INFO;	/* get the thread info */
	addik	r1, r1, THREAD_SIZE;	/* calculate kernel stack pointer */
	tophys(r1,r1);

	addik	r1, r1, -PT_SIZE; /* Make room on the stack. */
	SAVE_REGS;
	swi	r16, r1, PT_PC;	/* Save LP */
	swi	r0, r1, PT_MODE; /* Was in user-mode. */
	lwi	r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
	swi	r11, r1, PT_R1; /* Store user SP. */
	lwi	CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
	tovirt(r1,r1)
	set_vms;
	addik	r5, r1, 0;	/* arg: struct pt_regs * */
	addik	r15, r0, dbtrap_call;
dbtrap_call: /* Return point for kernel/user entry + 8 because of rtsd r15, 8 */
	rtbd	r0, sw_exception
	nop

	/* MS: The first instruction for the second part of the gdb/kgdb */
	set_bip; /* Ints masked for state restore */
	lwi	r11, r1, PT_MODE;
	bnei	r11, 2f;
/* MS: Return to user space - gdb */
1:
	/* Get current task ptr into r11 */
	lwi	r11, CURRENT_TASK, TS_THREAD_INFO;	/* get thread info */
	lwi	r19, r11, TI_FLAGS;	/* get flags in thread info */
	andi	r11, r19, _TIF_NEED_RESCHED;
	beqi	r11, 5f;

	/* Call the scheduler before returning from a syscall/trap. */
	bralid	r15, schedule;	/* Call scheduler */
	nop;			/* delay slot */
	bri	1b		/* re-check all work flags */

	/* Maybe handle a signal */
5:	andi	r11, r19, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME;
	beqi	r11, 4f;	/* no signal work pending - restore state */

	addik	r5, r1, 0;		/* Arg 1: struct pt_regs *regs */
	bralid	r15, do_notify_resume;	/* Handle any signals */
	addi	r6, r0, 0;	/* Arg 2: int in_syscall (delay slot) */
	bri	1b		/* re-check all work flags */

/* Finally, return to user state. */
4:	swi	CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
	VM_OFF;
	tophys(r1,r1);
	/* MS: Restore all regs */
	RESTORE_REGS
	addik	r1, r1, PT_SIZE	/* Clean up stack space */
	lwi	r1, r1, PT_R1 - PT_SIZE; /* Restore user stack pointer */
DBTRAP_return_user: /* MS: Make global symbol for debugging */
	rtbd	r16, 0; /* MS: Instructions to return from a debug trap */
	nop;

/* MS: Return to kernel state - kgdb */
2:	VM_OFF;
	tophys(r1,r1);
	/* MS: Restore all regs */
	RESTORE_REGS
	lwi	r14, r1, PT_R14;
	lwi	r16, r1, PT_PC;
	addik	r1, r1, PT_SIZE; /* MS: Clean up stack space */
	tovirt(r1,r1);
DBTRAP_return_kernel: /* MS: Make global symbol for debugging */
	rtbd	r16, 0; /* MS: Instructions to return from a debug trap */
	nop;
/*
 * Context switch.
 *
 * C-equivalent:
 *   struct task_struct *_switch_to(struct thread_info *prev,  r5
 *                                  struct thread_info *next)  r6
 *
 * Saves the non-volatile / dedicated registers and MSR/EAR/ESR/FSR of
 * the outgoing task into prev->cpu_context, makes `next` the current
 * task (r31 / CURRENT_TASK and the per-CPU CURRENT_SAVE slot), then
 * reloads the incoming task's context.  Returns the previous current
 * task in r3.  Volatile registers are not touched here: the caller
 * already saved them on its own stack before calling _switch_to().
 */
ENTRY(_switch_to)
	/* prepare return value */
	addk	r3, r0, CURRENT_TASK	/* r3 = outgoing task (return value) */
	/* save registers in cpu_context */
	/* use r11 and r12, volatile registers, as temp register */
	/* give start of cpu_context for previous process */
	addik	r11, r5, TI_CPU_CONTEXT
	swi	r1, r11, CC_R1		/* kernel stack pointer */
	swi	r2, r11, CC_R2
	/* skip volatile registers.
	 * they are saved on stack when we jumped to _switch_to() */
	/* dedicated registers */
	swi	r13, r11, CC_R13
	swi	r14, r11, CC_R14
	swi	r15, r11, CC_R15	/* link register: resume point for prev */
	swi	r16, r11, CC_R16
	swi	r17, r11, CC_R17
	swi	r18, r11, CC_R18
	/* save non-volatile registers */
	swi	r19, r11, CC_R19
	swi	r20, r11, CC_R20
	swi	r21, r11, CC_R21
	swi	r22, r11, CC_R22
	swi	r23, r11, CC_R23
	swi	r24, r11, CC_R24
	swi	r25, r11, CC_R25
	swi	r26, r11, CC_R26
	swi	r27, r11, CC_R27
	swi	r28, r11, CC_R28
	swi	r29, r11, CC_R29
	swi	r30, r11, CC_R30
	/* special purpose registers */
	mfs	r12, rmsr
	swi	r12, r11, CC_MSR
	mfs	r12, rear
	swi	r12, r11, CC_EAR
	mfs	r12, resr
	swi	r12, r11, CC_ESR
	mfs	r12, rfsr
	swi	r12, r11, CC_FSR
	/* update r31, the current-give me pointer to task which will be next */
	lwi	CURRENT_TASK, r6, TI_TASK
	/* stored it to current_save too */
	swi	CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE)
	/* get new process' cpu context and restore */
	/* give me start where start context of next task */
	addik	r11, r6, TI_CPU_CONTEXT
	/* non-volatile registers */
	lwi	r30, r11, CC_R30
	lwi	r29, r11, CC_R29
	lwi	r28, r11, CC_R28
	lwi	r27, r11, CC_R27
	lwi	r26, r11, CC_R26
	lwi	r25, r11, CC_R25
	lwi	r24, r11, CC_R24
	lwi	r23, r11, CC_R23
	lwi	r22, r11, CC_R22
	lwi	r21, r11, CC_R21
	lwi	r20, r11, CC_R20
	lwi	r19, r11, CC_R19
	/* dedicated registers */
	lwi	r18, r11, CC_R18
	lwi	r17, r11, CC_R17
	lwi	r16, r11, CC_R16
	lwi	r15, r11, CC_R15	/* link register: next task resumes via this */
	lwi	r14, r11, CC_R14
	lwi	r13, r11, CC_R13
	/* skip volatile registers */
	lwi	r2, r11, CC_R2
	lwi	r1, r11, CC_R1		/* switch kernel stacks */
	/* special purpose registers */
	/* NOTE(review): EAR/ESR are saved above but only FSR and MSR are
	 * restored here — presumably EAR/ESR are informational (fault state)
	 * and need not survive a switch; confirm against exception paths. */
	lwi	r12, r11, CC_FSR
	mts	rfsr, r12
	lwi	r12, r11, CC_MSR
	mts	rmsr, r12		/* restore MSR last: may re-enable interrupts */
	/* Return to the incoming task's saved r15 + 8. */
	rtsd	r15, 8
	nop				/* delay slot */
/*
 * Soft reset: branch to the absolute reset vector at address 0.
 * Never returns.
 */
ENTRY(_reset)
	brai	0; /* Jump to reset vector */
/*
 * Interrupt vector table stubs.  Each entry sits at its hardware-defined
 * offset (enforced by .org) and branches, with physical addressing, to
 * the corresponding handler.
 *
 * These are compiled and loaded into high memory, then
 * copied into place in mach_early_setup.
 */
	.section	.init.ivt, "ax"
#if CONFIG_MANUAL_RESET_VECTOR
	/* Optional board-specific reset target at vector 0x0. */
	.org	0x0
	brai	CONFIG_MANUAL_RESET_VECTOR
#endif
	.org	0x8
	brai	TOPHYS(_user_exception); /* syscall handler */
	.org	0x10
	brai	TOPHYS(_interrupt);	/* Interrupt handler */
	.org	0x18
	brai	TOPHYS(_debug_exception);	/* debug trap handler */
	.org	0x20
	brai	TOPHYS(_hw_exception_handler);	/* HW exception handler */
/* Read-only data: syscall table and stack-unwinder trap descriptions. */
	.section	.rodata,"a"
#include "syscall_table.S"

/* Number of bytes in sys_call_table (table itself is in the include above). */
syscall_table_size=(.-sys_call_table)

/* NUL-terminated names displayed by the stack unwinder for each trap kind. */
type_SYSCALL:
	.ascii "SYSCALL\0"
type_IRQ:
	.ascii "IRQ\0"
type_IRQ_PREEMPT:
	.ascii "IRQ (PREEMPTED)\0"
type_SYSCALL_PREEMPT:
	.ascii " SYSCALL (PREEMPTED)\0"

/*
 * Trap decoding for stack unwinder
 * Tuples are (start addr, end addr, string)
 * If return address lies on [start addr, end addr],
 * unwinder displays 'string'
 * The table is scanned in order, so exact (single-address) matches must
 * precede the fuzzy ranges, and the all-zero tuple terminates the scan.
 */
	.align 4
	.global microblaze_trap_handlers
microblaze_trap_handlers:
	/* Exact matches come first */
	.word ret_from_trap; .word ret_from_trap ; .word type_SYSCALL
	.word ret_from_irq ; .word ret_from_irq ; .word type_IRQ
	/* Fuzzy matches go here */
	.word ret_from_irq ; .word no_intr_resched ; .word type_IRQ_PREEMPT
	.word ret_from_trap; .word TRAP_return ; .word type_SYSCALL_PREEMPT
	/* End of table */
	.word 0 ; .word 0 ; .word 0