/*
 * arch/v850/kernel/entry.S -- Low-level system-call handling, trap handlers,
 * and context-switching
 *
 * Copyright (C) 2001,02,03 NEC Electronics Corporation
 * Copyright (C) 2001,02,03 Miles Bader <miles@gnu.org>
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License. See the file COPYING in the main directory of this
 * archive for more details.
 *
 * Written by Miles Bader <miles@gnu.org>
 */

#include <linux/sys.h>

#include <asm/entry.h>
#include <asm/current.h>
#include <asm/thread_info.h>
#include <asm/clinkage.h>
#include <asm/processor.h>
#include <asm/irq.h>
#include <asm/errno.h>
#include <asm/asm-consts.h>

/* Make a slightly more convenient alias for C_SYMBOL_NAME. */
#define CSYM C_SYMBOL_NAME

/* The offset of the struct pt_regs in a state-save-frame on the stack. */
#define PTO STATE_SAVE_PT_OFFSET
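
/* For orientation: the state-save-frame manipulated throughout this file is
   essentially a `struct pt_regs' located PTO bytes into the frame. A rough
   C sketch of the fields the macros below touch (the real layout and the
   PT_* offsets come from the architecture headers; the ordering here is
   illustrative only):

        struct pt_regs_sketch {
                unsigned long gpr[32];                  // PT_GPR(n)
                unsigned long pc, psw;                  // PT_PC, PT_PSW
                unsigned long ctpc, ctpsw, ctbp;        // PT_CTPC/PT_CTPSW/PT_CTBP
                unsigned long kernel_mode;              // PT_KERNEL_MODE
                unsigned long cur_syscall;              // PT_CUR_SYSCALL
        };
*/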
/* Save argument registers to the state-save-frame pointed to by EP. */
#define SAVE_ARG_REGS \
        sst.w r6, PTO+PT_GPR(6)[ep]; \
        sst.w r7, PTO+PT_GPR(7)[ep]; \
        sst.w r8, PTO+PT_GPR(8)[ep]; \
        sst.w r9, PTO+PT_GPR(9)[ep]

/* Restore argument registers from the state-save-frame pointed to by EP. */
#define RESTORE_ARG_REGS \
        sld.w PTO+PT_GPR(6)[ep], r6; \
        sld.w PTO+PT_GPR(7)[ep], r7; \
        sld.w PTO+PT_GPR(8)[ep], r8; \
        sld.w PTO+PT_GPR(9)[ep], r9

/* Save value return registers to the state-save-frame pointed to by EP. */
#define SAVE_RVAL_REGS \
        sst.w r10, PTO+PT_GPR(10)[ep]; \
        sst.w r11, PTO+PT_GPR(11)[ep]

/* Restore value return registers from the state-save-frame pointed to
   by EP. */
#define RESTORE_RVAL_REGS \
        sld.w PTO+PT_GPR(10)[ep], r10; \
        sld.w PTO+PT_GPR(11)[ep], r11

#define SAVE_CALL_CLOBBERED_REGS_BEFORE_ARGS \
        sst.w r1, PTO+PT_GPR(1)[ep]; \
        sst.w r5, PTO+PT_GPR(5)[ep]
#define SAVE_CALL_CLOBBERED_REGS_AFTER_RVAL \
        sst.w r12, PTO+PT_GPR(12)[ep]; \
        sst.w r13, PTO+PT_GPR(13)[ep]; \
        sst.w r14, PTO+PT_GPR(14)[ep]; \
        sst.w r15, PTO+PT_GPR(15)[ep]; \
        sst.w r16, PTO+PT_GPR(16)[ep]; \
        sst.w r17, PTO+PT_GPR(17)[ep]; \
        sst.w r18, PTO+PT_GPR(18)[ep]; \
        sst.w r19, PTO+PT_GPR(19)[ep]
#define RESTORE_CALL_CLOBBERED_REGS_BEFORE_ARGS \
        sld.w PTO+PT_GPR(1)[ep], r1; \
        sld.w PTO+PT_GPR(5)[ep], r5
#define RESTORE_CALL_CLOBBERED_REGS_AFTER_RVAL \
        sld.w PTO+PT_GPR(12)[ep], r12; \
        sld.w PTO+PT_GPR(13)[ep], r13; \
        sld.w PTO+PT_GPR(14)[ep], r14; \
        sld.w PTO+PT_GPR(15)[ep], r15; \
        sld.w PTO+PT_GPR(16)[ep], r16; \
        sld.w PTO+PT_GPR(17)[ep], r17; \
        sld.w PTO+PT_GPR(18)[ep], r18; \
        sld.w PTO+PT_GPR(19)[ep], r19

/* Save `call clobbered' registers to the state-save-frame pointed to by EP. */
#define SAVE_CALL_CLOBBERED_REGS \
        SAVE_CALL_CLOBBERED_REGS_BEFORE_ARGS; \
        SAVE_ARG_REGS; \
        SAVE_RVAL_REGS; \
        SAVE_CALL_CLOBBERED_REGS_AFTER_RVAL

/* Restore `call clobbered' registers from the state-save-frame pointed to
   by EP. */
#define RESTORE_CALL_CLOBBERED_REGS \
        RESTORE_CALL_CLOBBERED_REGS_BEFORE_ARGS; \
        RESTORE_ARG_REGS; \
        RESTORE_RVAL_REGS; \
        RESTORE_CALL_CLOBBERED_REGS_AFTER_RVAL

/* Save `call clobbered' registers except for the return-value registers
   to the state-save-frame pointed to by EP. */
#define SAVE_CALL_CLOBBERED_REGS_NO_RVAL \
        SAVE_CALL_CLOBBERED_REGS_BEFORE_ARGS; \
        SAVE_ARG_REGS; \
        SAVE_CALL_CLOBBERED_REGS_AFTER_RVAL

/* Restore `call clobbered' registers except for the return-value registers
   from the state-save-frame pointed to by EP. */
#define RESTORE_CALL_CLOBBERED_REGS_NO_RVAL \
        RESTORE_CALL_CLOBBERED_REGS_BEFORE_ARGS; \
        RESTORE_ARG_REGS; \
        RESTORE_CALL_CLOBBERED_REGS_AFTER_RVAL

/* Save `call saved' registers to the state-save-frame pointed to by EP. */
#define SAVE_CALL_SAVED_REGS \
        sst.w r2, PTO+PT_GPR(2)[ep]; \
        sst.w r20, PTO+PT_GPR(20)[ep]; \
        sst.w r21, PTO+PT_GPR(21)[ep]; \
        sst.w r22, PTO+PT_GPR(22)[ep]; \
        sst.w r23, PTO+PT_GPR(23)[ep]; \
        sst.w r24, PTO+PT_GPR(24)[ep]; \
        sst.w r25, PTO+PT_GPR(25)[ep]; \
        sst.w r26, PTO+PT_GPR(26)[ep]; \
        sst.w r27, PTO+PT_GPR(27)[ep]; \
        sst.w r28, PTO+PT_GPR(28)[ep]; \
        sst.w r29, PTO+PT_GPR(29)[ep]

/* Restore `call saved' registers from the state-save-frame pointed to
   by EP. */
#define RESTORE_CALL_SAVED_REGS \
        sld.w PTO+PT_GPR(2)[ep], r2; \
        sld.w PTO+PT_GPR(20)[ep], r20; \
        sld.w PTO+PT_GPR(21)[ep], r21; \
        sld.w PTO+PT_GPR(22)[ep], r22; \
        sld.w PTO+PT_GPR(23)[ep], r23; \
        sld.w PTO+PT_GPR(24)[ep], r24; \
        sld.w PTO+PT_GPR(25)[ep], r25; \
        sld.w PTO+PT_GPR(26)[ep], r26; \
        sld.w PTO+PT_GPR(27)[ep], r27; \
        sld.w PTO+PT_GPR(28)[ep], r28; \
        sld.w PTO+PT_GPR(29)[ep], r29
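
/* Note on the register partition assumed by the groups above (this follows
   the standard V850 calling convention used throughout this port): r6-r9
   carry arguments, r10-r11 carry return values, r1 and r5-r19 are otherwise
   call-clobbered, and r2 plus r20-r29 are call-saved; sp (r3), gp (r4),
   ep (r30), and lp (r31) are handled separately by PUSH_STATE/POP_STATE
   below. */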
/* Save the PC stored in the special register SAVEREG to the state-save-frame
   pointed to by EP. r19 is clobbered. */
#define SAVE_PC(savereg) \
        stsr SR_ ## savereg, r19; \
        sst.w r19, PTO+PT_PC[ep]

/* Restore the PC from the state-save-frame pointed to by EP, to the special
   register SAVEREG. LP is clobbered (it is used as a scratch register
   because the POP_STATE macro restores it, and this macro is usually used
   inside POP_STATE). */
#define RESTORE_PC(savereg) \
        sld.w PTO+PT_PC[ep], lp; \
        ldsr lp, SR_ ## savereg

/* Save the PSW register stored in the special register SAVEREG to the
   state-save-frame pointed to by EP. r19 is clobbered. */
#define SAVE_PSW(savereg) \
        stsr SR_ ## savereg, r19; \
        sst.w r19, PTO+PT_PSW[ep]

/* Restore the PSW register from the state-save-frame pointed to by EP, to
   the special register SAVEREG. LP is clobbered (it is used as a scratch
   register because the POP_STATE macro restores it, and this macro is
   usually used inside POP_STATE). */
#define RESTORE_PSW(savereg) \
        sld.w PTO+PT_PSW[ep], lp; \
        ldsr lp, SR_ ## savereg
/* Save the CTPC/CTPSW/CTBP registers to the state-save-frame pointed to
   by EP. r19 is clobbered. */
#define SAVE_CT_REGS \
        stsr SR_CTPC, r19; \
        sst.w r19, PTO+PT_CTPC[ep]; \
        stsr SR_CTPSW, r19; \
        sst.w r19, PTO+PT_CTPSW[ep]; \
        stsr SR_CTBP, r19; \
        sst.w r19, PTO+PT_CTBP[ep]

/* Restore the CTPC/CTPSW/CTBP registers from the state-save-frame pointed
   to by EP. LP is clobbered (it is used as a scratch register because the
   POP_STATE macro restores it, and this macro is usually used inside
   POP_STATE). */
#define RESTORE_CT_REGS \
        sld.w PTO+PT_CTPC[ep], lp; \
        ldsr lp, SR_CTPC; \
        sld.w PTO+PT_CTPSW[ep], lp; \
        ldsr lp, SR_CTPSW; \
        sld.w PTO+PT_CTBP[ep], lp; \
        ldsr lp, SR_CTBP
/* Push register state, except for the stack pointer, on the stack in the
   form of a state-save-frame (plus some extra padding), in preparation for
   a system call. This macro makes sure that the EP, GP, and LP registers
   are saved, and TYPE identifies the set of extra registers to be saved as
   well. Also copies (the new value of) SP to EP. */
#define PUSH_STATE(type) \
        addi -STATE_SAVE_SIZE, sp, sp; /* Make room on the stack. */ \
        st.w ep, PTO+PT_GPR(GPR_EP)[sp]; \
        mov sp, ep; \
        sst.w gp, PTO+PT_GPR(GPR_GP)[ep]; \
        sst.w lp, PTO+PT_GPR(GPR_LP)[ep]; \
        type ## _STATE_SAVER

/* Pop a register state pushed by PUSH_STATE, except for the stack pointer,
   from the stack. */
#define POP_STATE(type) \
        mov sp, ep; \
        type ## _STATE_RESTORER; \
        sld.w PTO+PT_GPR(GPR_GP)[ep], gp; \
        sld.w PTO+PT_GPR(GPR_LP)[ep], lp; \
        sld.w PTO+PT_GPR(GPR_EP)[ep], ep; \
        addi STATE_SAVE_SIZE, sp, sp /* Clean up our stack space. */
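
/* The reason these macros go out of their way to keep a copy of SP in EP is
   that sst.w/sld.w are the short-format V850 store/load instructions, which
   can only address memory relative to the element pointer (EP); they make
   these save/restore sequences considerably more compact than the
   equivalent st.w/ld.w forms. */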
/* Switch to the kernel stack if necessary, and push register state on the
   stack in the form of a state-save-frame. Also load the current task
   pointer if switching from user mode. The stack-pointer (r3) should have
   already been saved to the memory location SP_SAVE_LOC (the reason for
   this is that the interrupt vectors may be beyond a 22-bit signed offset
   jump from the actual interrupt handler, and this allows them to save the
   stack-pointer and use that register to do an indirect jump). This macro
   makes sure that `special' registers, system registers, and the stack
   pointer are saved; TYPE identifies the set of extra registers to be
   saved as well. SYSCALL_NUM is the register holding the number of the
   system call this state is for (r0 if this isn't a system call).
   Interrupts should already be disabled when calling this. */
#define SAVE_STATE(type, syscall_num, sp_save_loc) \
        tst1 0, KM; /* See if already in kernel mode. */ \
        bz 1f; \
        ld.w sp_save_loc, sp; /* ... yes, use saved SP. */ \
        br 2f; \
1:      ld.w KSP, sp; /* ... no, switch to kernel stack. */ \
2:      PUSH_STATE(type); \
        ld.b KM, r19; /* Remember old kernel-mode. */ \
        sst.w r19, PTO+PT_KERNEL_MODE[ep]; \
        ld.w sp_save_loc, r19; /* Remember old SP. */ \
        sst.w r19, PTO+PT_GPR(GPR_SP)[ep]; \
        mov 1, r19; /* Now definitely in kernel-mode. */ \
        st.b r19, KM; \
        GET_CURRENT_TASK(CURRENT_TASK); /* Fetch the current task pointer. */ \
        /* Save away the syscall number. */ \
        sst.w syscall_num, PTO+PT_CUR_SYSCALL[ep]
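
/* The control flow of SAVE_STATE, in rough C (a sketch of the assembly
   above, not real kernel code; names are illustrative):

        if (kernel_mode)                // the KM flag
                sp = *sp_save_loc;      // already on the kernel stack
        else
                sp = ksp;               // switch to the kernel stack
        push_state(type);
        regs->kernel_mode = old_kernel_mode;
        regs->gpr[GPR_SP] = *sp_save_loc;   // the interrupted SP
        kernel_mode = 1;
        current = get_current_task();
        regs->cur_syscall = syscall_num;
*/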
/* Save register state not normally saved by PUSH_STATE for TYPE, to the
   state-save-frame on the stack; also copies SP to EP. r19 may be trashed. */
#define SAVE_EXTRA_STATE(type) \
        mov sp, ep; \
        type ## _EXTRA_STATE_SAVER

/* Restore register state not normally restored by POP_STATE for TYPE,
   from the state-save-frame on the stack; also copies SP to EP.
   r19 may be trashed. */
#define RESTORE_EXTRA_STATE(type) \
        mov sp, ep; \
        type ## _EXTRA_STATE_RESTORER

/* Save any call-clobbered registers not normally saved by PUSH_STATE for
   TYPE, to the state-save-frame on the stack.
   EP may be trashed, but is not guaranteed to contain a copy of SP
   (unlike after most SAVE_... macros). r19 may be trashed. */
#define SAVE_EXTRA_STATE_FOR_SCHEDULE(type) \
        type ## _SCHEDULE_EXTRA_STATE_SAVER

/* Restore any call-clobbered registers not normally restored by
   POP_STATE for TYPE, from the state-save-frame on the stack.
   EP may be trashed, but is not guaranteed to contain a copy of SP
   (unlike after most RESTORE_... macros). r19 may be trashed. */
#define RESTORE_EXTRA_STATE_FOR_SCHEDULE(type) \
        type ## _SCHEDULE_EXTRA_STATE_RESTORER
/* These are the extra_state_saver/restorer values for a user trap. Note
   that we save the argument registers so that restarted syscalls will
   function properly (otherwise it wouldn't be necessary), and we must
   _not_ restore the return-value registers (so that traps can return a
   value!), but call-clobbered registers are not saved at all, as the
   caller of the syscall function should have saved them. */

#define TRAP_RET reti

/* Traps don't save call-clobbered registers (but do still save arg regs).
   We preserve the PSW to keep long-term state, namely interrupt status (for
   traps from kernel-mode), and the single-step flag (for user traps). */
#define TRAP_STATE_SAVER \
        SAVE_ARG_REGS; \
        SAVE_PC(EIPC); \
        SAVE_PSW(EIPSW)

/* When traps return, they just leave call-clobbered registers (except for
   arg regs) with whatever value they have from the kernel. The saved PSW is
   restored to EIPSW, so the interrupt status and the single-step flag
   survive the trap. */
#define TRAP_STATE_RESTORER \
        RESTORE_ARG_REGS; \
        RESTORE_PC(EIPC); \
        RESTORE_PSW(EIPSW)
/* Save registers not normally saved by traps. We need to save r12, even
   though it's nominally call-clobbered, because it's used when restarting
   a system call (the signal-handling path uses SAVE_EXTRA_STATE, and
   expects r12 to be restored when the trap returns). */
#define TRAP_EXTRA_STATE_SAVER \
        SAVE_RVAL_REGS; \
        sst.w r12, PTO+PT_GPR(12)[ep]; \
        SAVE_CALL_SAVED_REGS; \
        SAVE_CT_REGS
#define TRAP_EXTRA_STATE_RESTORER \
        RESTORE_RVAL_REGS; \
        sld.w PTO+PT_GPR(12)[ep], r12; \
        RESTORE_CALL_SAVED_REGS; \
        RESTORE_CT_REGS

/* Save registers prior to calling scheduler (just before trap returns).
   We have to save the return-value registers to preserve the trap's return
   value. Note that ..._SCHEDULE_EXTRA_STATE_SAVER, unlike most ..._SAVER
   macros, is required to setup EP itself if EP is needed (this is because
   in many cases, the macro is empty). */
#define TRAP_SCHEDULE_EXTRA_STATE_SAVER \
        mov sp, ep; \
        SAVE_RVAL_REGS

/* Note that ..._SCHEDULE_EXTRA_STATE_RESTORER, unlike most ..._RESTORER
   macros, is required to setup EP itself if EP is needed (this is because
   in many cases, the macro is empty). */
#define TRAP_SCHEDULE_EXTRA_STATE_RESTORER \
        mov sp, ep; \
        RESTORE_RVAL_REGS
/* Register saving/restoring for maskable interrupts. */
#define IRQ_RET reti
#define IRQ_STATE_SAVER \
        SAVE_CALL_CLOBBERED_REGS; \
        SAVE_PC(EIPC); \
        SAVE_PSW(EIPSW)
#define IRQ_STATE_RESTORER \
        RESTORE_CALL_CLOBBERED_REGS; \
        RESTORE_PC(EIPC); \
        RESTORE_PSW(EIPSW)
#define IRQ_EXTRA_STATE_SAVER \
        SAVE_CALL_SAVED_REGS; \
        SAVE_CT_REGS
#define IRQ_EXTRA_STATE_RESTORER \
        RESTORE_CALL_SAVED_REGS; \
        RESTORE_CT_REGS
#define IRQ_SCHEDULE_EXTRA_STATE_SAVER /* nothing */
#define IRQ_SCHEDULE_EXTRA_STATE_RESTORER /* nothing */

/* Register saving/restoring for non-maskable interrupts. */
#define NMI_RET reti
#define NMI_STATE_SAVER \
        SAVE_CALL_CLOBBERED_REGS; \
        SAVE_PC(FEPC); \
        SAVE_PSW(FEPSW)
#define NMI_STATE_RESTORER \
        RESTORE_CALL_CLOBBERED_REGS; \
        RESTORE_PC(FEPC); \
        RESTORE_PSW(FEPSW)
#define NMI_EXTRA_STATE_SAVER \
        SAVE_CALL_SAVED_REGS; \
        SAVE_CT_REGS
#define NMI_EXTRA_STATE_RESTORER \
        RESTORE_CALL_SAVED_REGS; \
        RESTORE_CT_REGS
#define NMI_SCHEDULE_EXTRA_STATE_SAVER /* nothing */
#define NMI_SCHEDULE_EXTRA_STATE_RESTORER /* nothing */

/* Register saving/restoring for debug traps. */
#define DBTRAP_RET .long 0x014607E0 /* `dbret', but gas doesn't support it. */
#define DBTRAP_STATE_SAVER \
        SAVE_CALL_CLOBBERED_REGS; \
        SAVE_PC(DBPC); \
        SAVE_PSW(DBPSW)
#define DBTRAP_STATE_RESTORER \
        RESTORE_CALL_CLOBBERED_REGS; \
        RESTORE_PC(DBPC); \
        RESTORE_PSW(DBPSW)
#define DBTRAP_EXTRA_STATE_SAVER \
        SAVE_CALL_SAVED_REGS; \
        SAVE_CT_REGS
#define DBTRAP_EXTRA_STATE_RESTORER \
        RESTORE_CALL_SAVED_REGS; \
        RESTORE_CT_REGS
#define DBTRAP_SCHEDULE_EXTRA_STATE_SAVER /* nothing */
#define DBTRAP_SCHEDULE_EXTRA_STATE_RESTORER /* nothing */
/* Register saving/restoring for a context switch. We don't need to save
   too many registers, because context-switching looks like a function call
   (via the function `switch_thread'), so callers will save any
   call-clobbered registers themselves. We do need to save the CT regs, as
   they're normally not saved during kernel entry (the kernel doesn't use
   them). We save PSW so that interrupt-status state will correctly follow
   each thread (mostly NMI vs. normal-IRQ/trap), though for the most part
   it doesn't matter since threads are always in almost exactly the same
   processor state during a context switch. The stack pointer and return
   value are handled by switch_thread itself. */
#define SWITCH_STATE_SAVER \
        SAVE_CALL_SAVED_REGS; \
        SAVE_PSW(PSW); \
        SAVE_CT_REGS
#define SWITCH_STATE_RESTORER \
        RESTORE_CALL_SAVED_REGS; \
        RESTORE_PSW(PSW); \
        RESTORE_CT_REGS
/* Restore register state from the state-save-frame on the stack, switch back
   to the user stack if necessary, and return from the trap/interrupt.
   EXTRA_STATE_RESTORER is a sequence of assembly language statements to
   restore anything not restored by this macro. Only registers not saved by
   the C compiler are restored (that is, R3(sp), R4(gp), R31(lp), and
   anything restored by EXTRA_STATE_RESTORER). */
#define RETURN(type) \
        ld.b PTO+PT_KERNEL_MODE[sp], r19; \
        di; /* Disable interrupts. */ \
        cmp r19, r0; /* See if returning to kernel mode, */ \
        bne 2f; /* ... if so, skip resched &c. */ \
\
        /* We're returning to user mode, so check for various conditions \
           that trigger rescheduling. */ \
        GET_CURRENT_THREAD(r18); \
        ld.w TI_FLAGS[r18], r19; \
        andi _TIF_NEED_RESCHED, r19, r0; \
        bnz 3f; /* Call the scheduler. */ \
5:      andi _TIF_SIGPENDING, r19, r18; \
        ld.w TASK_PTRACE[CURRENT_TASK], r19; /* ptrace flags */ \
        or r18, r19; /* See if either is non-zero. */ \
        bnz 4f; /* ... if so, handle them. */ \
\
        /* Return to user state. */ \
1:      st.b r0, KM; /* Now officially in user state. */ \
\
        /* Final return. The stack-pointer fiddling is not needed when \
           returning to kernel-mode, but it doesn't hurt, and this way we \
           can share the (sometimes rather lengthy) POP_STATE macro. */ \
2:      POP_STATE(type); \
        st.w sp, KSP; /* Save the kernel stack pointer. */ \
        ld.w PT_GPR(GPR_SP)-PT_SIZE[sp], sp; /* Restore stack pointer. */ \
        type ## _RET; /* Return from the trap/interrupt. */ \
\
        /* Call the scheduler before returning from a syscall/trap. */ \
3:      SAVE_EXTRA_STATE_FOR_SCHEDULE(type); /* Prepare to call scheduler. */ \
        jarl call_scheduler, lp; /* Call scheduler. */ \
        di; /* The scheduler enables interrupts. */ \
        RESTORE_EXTRA_STATE_FOR_SCHEDULE(type); \
        GET_CURRENT_THREAD(r18); \
        ld.w TI_FLAGS[r18], r19; \
        br 5b; /* Continue with return path. */ \
\
        /* Handle a signal or ptraced process return. \
           r18 should be non-zero if there are pending signals. */ \
4:      /* Not all registers are saved by the normal trap/interrupt entry \
           points (for instance, call-saved registers, because the normal \
           C-compiler calling sequence in the kernel makes sure they're \
           preserved, and call-clobbered registers in the case of traps), \
           but signal handlers may want to examine or change the complete \
           register state. Here we save anything not saved by the normal \
           entry sequence, so that it may be safely restored (in a possibly \
           modified form) after do_signal returns. */ \
        SAVE_EXTRA_STATE(type); /* Save state not saved by entry. */ \
        jarl handle_signal_or_ptrace_return, lp; \
        RESTORE_EXTRA_STATE(type); /* Restore extra regs. */ \
        br 1b
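
/* For reference, the logic of RETURN(type) reads roughly like this in C (a
   sketch of the assembly above; names are illustrative):

        irq_disable();
        if (!regs->kernel_mode) {       // returning to user mode
                flags = current_thread_info()->flags;
                if (flags & _TIF_NEED_RESCHED) {
                        call_scheduler();
                        flags = current_thread_info()->flags;
                }
                if ((flags & _TIF_SIGPENDING) || current->ptrace)
                        handle_signal_or_ptrace_return();
                kernel_mode = 0;        // the KM flag
        }
        pop_state(type);                // also switches back to the saved SP
        return_from_exception();        // type ## _RET
*/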
/* Jump to the appropriate function for the system call number in r12
   (r12 is not preserved), or return an error if r12 is not valid. The
   LP register should point to the location where the called function
   should return. [note that MAKE_SYS_CALL uses label 1] */
#define MAKE_SYS_CALL \
        /* Figure out which function to use for this system call. */ \
        shl 2, r12; \
        /* See if the system call number is valid. */ \
        addi lo(CSYM(sys_call_table) - sys_call_table_end), r12, r0; \
        bnh 1f; \
        mov hilo(CSYM(sys_call_table)), r19; \
        add r19, r12; \
        ld.w 0[r12], r12; \
        /* Make the system call. */ \
        jmp [r12]; \
        /* The syscall number is invalid, return an error. */ \
1:      addi -ENOSYS, r0, r10; \
        jmp [lp]
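
/* MAKE_SYS_CALL amounts to the following C, give or take the return through
   LP (a sketch only; `nr' stands for the value arriving in r12, and
   NUM_SYSCALLS for the table size the addi/bnh pair tests against):

        if (nr >= NUM_SYSCALLS)
                return -ENOSYS;         // in r10
        return sys_call_table[nr](...); // args still in r6-r9 (and stack)
*/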
        .text

/*
 * User trap.
 *
 * Trap 0 system calls are also handled here.
 *
 * The stack-pointer (r3) should have already been saved to the memory
 * location ENTRY_SP (the reason for this is that the interrupt vectors may be
 * beyond a 22-bit signed offset jump from the actual interrupt handler, and
 * this allows them to save the stack-pointer and use that register to do an
 * indirect jump).
 *
 * Syscall protocol:
 *   Syscall number in r12, args in r6-r9
 *   Return value in r10
 */
G_ENTRY(trap):
        SAVE_STATE (TRAP, r12, ENTRY_SP) // Save registers.
        stsr SR_ECR, r19 // Find out which trap it was.
        ei // Enable interrupts.
        mov hilo(ret_from_trap), lp // where the trap should return

        // The following two shifts (1) clear out extraneous NMI data in the
        // upper 16-bits, (2) convert the 0x40 - 0x5f range of trap ECR
        // numbers into the (0-31) << 2 range we want, (3) set the flags.
        shl 27, r19 // chop off all high bits
        shr 25, r19 // scale back down and then << 2
        bnz 2f // See if not trap 0.

        // Trap 0 is a `short' system call, skip general trap table.
        MAKE_SYS_CALL // Jump to the syscall function.

2:      // For other traps, use a table lookup.
        mov hilo(CSYM(trap_table)), r18
        add r19, r18
        ld.w 0[r18], r18
        jmp [r18] // Jump to the trap handler.
END(trap)
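
// Worked example of the ECR scaling above: for trap 1 the exception code is
// 0x41, and (0x41 << 27) in a 32-bit register is 0x08000000, which shifted
// right by 25 gives 1 << 2 = 4 -- the byte offset of entry 1 in trap_table.
// The << 27 discards both the NMI bits in the upper half and the constant
// 0x40 bias; the >> 25 leaves the index pre-multiplied by 4 for the
// word-sized table lookup.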
/* This is just like ret_from_trap, but first restores extra registers
   saved by some wrappers. */
L_ENTRY(restore_extra_regs_and_ret_from_trap):
        RESTORE_EXTRA_STATE(TRAP)
        // fall through
END(restore_extra_regs_and_ret_from_trap)

/* Entry point used to return from a syscall/trap. */
L_ENTRY(ret_from_trap):
        RETURN(TRAP)
END(ret_from_trap)

/* This is the initial entry point for a new child thread, with an
   appropriate stack in place that makes it look like the child is in the
   middle of a syscall. This function is actually `returned to' from
   switch_thread (copy_thread makes ret_from_fork the return address in
   each new thread's saved context). */
C_ENTRY(ret_from_fork):
        mov r10, r6 // switch_thread returns the prev task.
        jarl CSYM(schedule_tail), lp // ...which is schedule_tail's arg
        mov r0, r10 // Child's fork call should return 0.
        br ret_from_trap // Do normal trap return.
C_END(ret_from_fork)
/*
 * Trap 1: `long' system calls
 *
 * `Long' syscall protocol:
 *   Syscall number in r12, args in r6-r9, r13-r14
 *   Return value in r10
 */
L_ENTRY(syscall_long):
        // Push extra arguments on the stack. Note that by default, the trap
        // handler reserves enough stack space for 6 arguments, so we don't
        // have to make any additional room.
        st.w r13, 16[sp] // arg 5
        st.w r14, 20[sp] // arg 6

        // Make sure r13 and r14 are preserved, in case we have to restart a
        // system call because of a signal (ep has already been set by caller).
        st.w r13, PTO+PT_GPR(13)[sp]
        st.w r14, PTO+PT_GPR(14)[sp]
        mov hilo(ret_from_long_syscall), lp

        MAKE_SYS_CALL // Jump to the syscall function.
END(syscall_long)

/* Entry point used to return from a long syscall. Only needed to restore
   r13/r14 if the general trap mechanism doesn't do so. */
L_ENTRY(ret_from_long_syscall):
        ld.w PTO+PT_GPR(13)[sp], r13 // Restore the extra registers
        ld.w PTO+PT_GPR(14)[sp], r14
        br ret_from_trap // The rest is the same as other traps.
END(ret_from_long_syscall)
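
/* In C terms, a `long' syscall is simply one with more than four arguments
   (a sketch; sys_foo is hypothetical):

        long sys_foo(long a1, long a2, long a3, long a4, long a5, long a6);

   a1-a4 travel in r6-r9 as usual, while a5/a6 are picked up from 16[sp] and
   20[sp] -- presumably the stack slots where the calling convention expects
   the fifth and sixth arguments, within the six-argument area the trap
   handler is said above to reserve. */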
/* These syscalls need access to the struct pt_regs on the stack, so we
   implement them in assembly (they're basically all wrappers anyway). */

L_ENTRY(sys_fork_wrapper):
#ifdef CONFIG_MMU
        addi SIGCHLD, r0, r6 // Arg 0: flags
        ld.w PTO+PT_GPR(GPR_SP)[sp], r7 // Arg 1: child SP (use parent's)
        movea PTO, sp, r8 // Arg 2: parent context
        mov r0, r9 // Arg 3/4/5: 0
        st.w r0, 16[sp]
        st.w r0, 20[sp]
        mov hilo(CSYM(do_fork)), r18 // Where the real work gets done
        br save_extra_state_tramp // Save state and go there
#else
        // fork almost works, enough to trick you into looking elsewhere :-(
        addi -EINVAL, r0, r10
        jmp [lp]
#endif
END(sys_fork_wrapper)

L_ENTRY(sys_vfork_wrapper):
        addi CLONE_VFORK | CLONE_VM | SIGCHLD, r0, r6 // Arg 0: flags
        ld.w PTO+PT_GPR(GPR_SP)[sp], r7 // Arg 1: child SP (use parent's)
        movea PTO, sp, r8 // Arg 2: parent context
        mov r0, r9 // Arg 3/4/5: 0
        st.w r0, 16[sp]
        st.w r0, 20[sp]
        mov hilo(CSYM(do_fork)), r18 // Where the real work gets done
        br save_extra_state_tramp // Save state and go there
END(sys_vfork_wrapper)

L_ENTRY(sys_clone_wrapper):
        ld.w PTO+PT_GPR(GPR_SP)[sp], r19 // parent's stack pointer
        cmp r7, r0 // See if child SP arg (arg 1) is 0.
        cmov z, r19, r7, r7 // ... and use the parent's if so.
        movea PTO, sp, r8 // Arg 2: parent context
        mov r0, r9 // Arg 3/4/5: 0
        st.w r0, 16[sp]
        st.w r0, 20[sp]
        mov hilo(CSYM(do_fork)), r18 // Where the real work gets done
        br save_extra_state_tramp // Save state and go there
END(sys_clone_wrapper)

L_ENTRY(sys_execve_wrapper):
        movea PTO, sp, r9 // add user context as 4th arg
        jr CSYM(sys_execve) // Do real work (tail-call).
END(sys_execve_wrapper)

L_ENTRY(sys_sigsuspend_wrapper):
        movea PTO, sp, r7 // add user context as 2nd arg
        mov hilo(CSYM(sys_sigsuspend)), r18 // syscall function
        jarl save_extra_state_tramp, lp // Save state and do it
        br restore_extra_regs_and_ret_from_trap
END(sys_sigsuspend_wrapper)

L_ENTRY(sys_rt_sigsuspend_wrapper):
        movea PTO, sp, r8 // add user context as 3rd arg
        mov hilo(CSYM(sys_rt_sigsuspend)), r18 // syscall function
        jarl save_extra_state_tramp, lp // Save state and do it
        br restore_extra_regs_and_ret_from_trap
END(sys_rt_sigsuspend_wrapper)

L_ENTRY(sys_sigreturn_wrapper):
        movea PTO, sp, r6 // add user context as 1st arg
        mov hilo(CSYM(sys_sigreturn)), r18 // syscall function
        jarl save_extra_state_tramp, lp // Save state and do it
        br restore_extra_regs_and_ret_from_trap
END(sys_sigreturn_wrapper)

L_ENTRY(sys_rt_sigreturn_wrapper):
        movea PTO, sp, r6 // add user context as 1st arg
        mov hilo(CSYM(sys_rt_sigreturn)), r18 // syscall function
        jarl save_extra_state_tramp, lp // Save state and do it
        br restore_extra_regs_and_ret_from_trap
END(sys_rt_sigreturn_wrapper)

/* Save any state not saved by SAVE_STATE(TRAP), and jump to r18. Its main
   purpose is to share the rather lengthy code sequence that
   SAVE_EXTRA_STATE expands into among the above wrapper functions. */
L_ENTRY(save_extra_state_tramp):
        SAVE_EXTRA_STATE(TRAP) // Save state not saved by entry.
        jmp [r18] // Do the work the caller wants
END(save_extra_state_tramp)
/*
 * Hardware maskable interrupts.
 *
 * The stack-pointer (r3) should have already been saved to the memory
 * location ENTRY_SP (the reason for this is that the interrupt vectors may be
 * beyond a 22-bit signed offset jump from the actual interrupt handler, and
 * this allows them to save the stack-pointer and use that register to do an
 * indirect jump).
 */
G_ENTRY(irq):
        SAVE_STATE (IRQ, r0, ENTRY_SP) // Save registers.

        stsr SR_ECR, r6 // Find out which interrupt it was.
        movea PTO, sp, r7 // User regs are arg2

        // All v850 implementations I know about encode their interrupts as
        // multiples of 0x10, starting at 0x80 (after NMIs and software
        // interrupts). Convert this number into a simple IRQ index for the
        // rest of the kernel. We also clear the upper 16 bits, which hold
        // NMI info, and don't appear to be cleared when a NMI returns.
        shl 16, r6 // clear upper 16 bits
        shr 20, r6 // shift back, and remove lower nibble
        add -8, r6 // remove bias for irqs

        // Call the high-level interrupt handling code.
        jarl CSYM(handle_irq), lp

        RETURN(IRQ)
END(irq)
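
// Worked example of the interrupt-number conversion above: the first
// maskable interrupt has ECR code 0x80, and (0x80 << 16) >> 20 = 8, minus
// the bias of 8 = IRQ 0; code 0x90 likewise becomes IRQ 1, and so on in
// steps of 0x10.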
/*
 * Debug trap / illegal-instruction exception
 *
 * The stack-pointer (r3) should have already been saved to the memory
 * location ENTRY_SP (the reason for this is that the interrupt vectors may be
 * beyond a 22-bit signed offset jump from the actual interrupt handler, and
 * this allows them to save the stack-pointer and use that register to do an
 * indirect jump).
 */
G_ENTRY(dbtrap):
        SAVE_STATE (DBTRAP, r0, ENTRY_SP) // Save registers.

        /* First see if we came from kernel mode; if so, the dbtrap
           instruction has a special meaning, to set the DIR (`debug
           information register') register. This is because the DIR register
           can _only_ be manipulated/read while in `debug mode,' and debug
           mode is only active while we're inside the dbtrap handler. The
           exact functionality is: { DIR = (DIR | r6) & ~r7; return DIR; }. */
        ld.b PTO+PT_KERNEL_MODE[sp], r19
        cmp r19, r0
        bz 1f

        stsr SR_DIR, r10
        or r6, r10
        not r7, r7
        and r7, r10
        ldsr r10, SR_DIR
        stsr SR_DIR, r10 // Confirm the value we set
        st.w r10, PTO+PT_GPR(10)[sp] // return it
        br 3f

1:      ei // Enable interrupts.

        /* The default signal type we raise. */
        mov SIGTRAP, r6

        /* See if it's a single-step trap. */
        stsr SR_DBPSW, r19
        andi 0x0800, r19, r19
        bnz 2f

        /* Look to see if the preceding instruction was a dbtrap or not, to
           decide which signal we should use. */
        stsr SR_DBPC, r19 // PC following trapping insn
        ld.hu -2[r19], r19
        ori 0xf840, r0, r20 // DBTRAP insn
        cmp r19, r20 // Was this trap caused by DBTRAP?
        cmov ne, SIGILL, r6, r6 // Choose signal appropriately

        /* Raise the desired signal. */
2:      mov CURRENT_TASK, r7 // Arg 1: task
        jarl CSYM(send_sig), lp // Call the C signal-raising code.

3:      RETURN(DBTRAP)
END(dbtrap)
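
// The kernel-mode dbtrap convention implemented above, as C (a sketch;
// set_bits/clear_bits stand for the values passed in r6/r7, and the
// get_dir/set_dir helpers are illustrative stand-ins for the
// stsr/ldsr SR_DIR pair):
//
//      unsigned long dir = get_dir();
//      dir = (dir | set_bits) & ~clear_bits;
//      set_dir(dir);
//      return get_dir();       // read back; returned in r10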
/*
 * Hardware non-maskable interrupts.
 *
 * The stack-pointer (r3) should have already been saved to the memory
 * location NMI_ENTRY_SP (the reason for this is that the interrupt vectors
 * may be beyond a 22-bit signed offset jump from the actual interrupt
 * handler, and this allows them to save the stack-pointer and use that
 * register to do an indirect jump).
 */
G_ENTRY(nmi):
        SAVE_STATE (NMI, r0, NMI_ENTRY_SP); /* Save registers. */

        stsr SR_ECR, r6; /* Find out which nmi it was. */
        shr 20, r6; /* Extract NMI code in bits 20-24. */
        movea PTO, sp, r7; /* User regs are arg2. */

        /* Non-maskable interrupts always lie right after maskable interrupts.
           Call the generic IRQ handler, with two arguments, the IRQ number,
           and a pointer to the user registers, to handle the specifics.
           (we subtract one because the first NMI has code 1). */
        addi FIRST_NMI - 1, r6, r6
        jarl CSYM(handle_irq), lp

        RETURN(NMI)
END(nmi)
/*
 * Trap with no handler
 */
L_ENTRY(bad_trap_wrapper):
        mov r19, r6 // Arg 0: trap number
        movea PTO, sp, r7 // Arg 1: user regs
        jr CSYM(bad_trap) // tail call handler
END(bad_trap_wrapper)

/*
 * Invoke the scheduler, called from the trap/irq kernel exit path.
 *
 * This basically just calls `schedule', but also arranges for extra
 * registers to be saved for ptrace'd processes, so ptrace can modify them.
 */
L_ENTRY(call_scheduler):
        ld.w TASK_PTRACE[CURRENT_TASK], r19 // See if task is ptrace'd
        cmp r19, r0
        bnz 1f // ... yes, do special stuff
        jr CSYM(schedule) // ... no, just tail-call scheduler

        // Save extra regs for ptrace'd task. We want to save anything
        // that would otherwise only be `implicitly' saved by the normal
        // compiler calling-convention.
1:      mov sp, ep // Setup EP for SAVE_CALL_SAVED_REGS
        SAVE_CALL_SAVED_REGS // Save call-saved registers to stack
        mov lp, r20 // Save LP in a callee-saved register

        jarl CSYM(schedule), lp // Call scheduler

        mov r20, lp
        mov sp, ep // We can't rely on EP after return
        RESTORE_CALL_SAVED_REGS // Restore (possibly modified) regs
        jmp [lp] // Return to the return path
END(call_scheduler)
/*
 * This is an out-of-line handler for two special cases during the kernel
 * trap/irq exit sequence:
 *
 *  (1) If r18 is non-zero then a signal needs to be handled, which is
 *      done, and then the caller returned to.
 *
 *  (2) If we're returning to a ptraced process, there are several special
 *      cases -- single-stepping and trap tracing, both of which require
 *      using the `dbret' instruction to exit the kernel instead of the
 *      normal `reti' (this is because the CPU does not correctly
 *      single-step after a reti). In this case, of course, this handler
 *      never returns to the caller.
 *
 * In either case, all registers should have been saved to the current
 * state-save-frame on the stack, except for callee-saved registers.
 *
 * [These two different cases are combined merely to avoid bloating the
 * macro-inlined code, not because they really make much sense together!]
 */
L_ENTRY(handle_signal_or_ptrace_return):
        cmp r18, r0 // See if handling a signal
        bz 1f // ... nope, go do ptrace return

        // Handle a signal
        mov lp, r20 // Save link-pointer
        mov r10, r21 // Save return-values (for trap)
        mov r11, r22
        movea PTO, sp, r6 // Arg 1: struct pt_regs *regs
        mov r0, r7 // Arg 2: sigset_t *oldset
        jarl CSYM(do_signal), lp // Handle the signal
        di // sig handling enables interrupts
        mov r20, lp // Restore link-pointer
        mov r21, r10 // Restore return-values (for trap)
        mov r22, r11

        ld.w TASK_PTRACE[CURRENT_TASK], r19 // check ptrace flags too
        cmp r19, r0
        bnz 1f // ... some set, so look more
2:      jmp [lp] // ... none set, so return normally

        // ptrace return
1:      ld.w PTO+PT_PSW[sp], r19 // Look at user-process's flags
        andi 0x0800, r19, r19 // See if single-step flag is set
        bz 2b // ... nope, return normally

        // Return as if from a dbtrap insn
        st.b r0, KM // Now officially in user state.
        POP_STATE(DBTRAP) // Restore regs
        st.w sp, KSP // Save the kernel stack pointer.
        ld.w PT_GPR(GPR_SP)-PT_SIZE[sp], sp // Restore user stack pointer.
        DBTRAP_RET // Return from the trap/interrupt.
END(handle_signal_or_ptrace_return)
/*
 * This is where we switch between two threads. The arguments are:
 *   r6 -- pointer to the struct thread for the `current' process
 *   r7 -- pointer to the struct thread for the `new' process.
 * When this function returns, it will return to the new thread.
 */
C_ENTRY(switch_thread):
        // Return the previous task (r10 is not clobbered by restore below)
        mov CURRENT_TASK, r10
        // First, push the current processor state on the stack
        PUSH_STATE(SWITCH)
        // Now save the location of the kernel stack pointer for this thread;
        // since we've pushed all other state on the stack, this is enough to
        // restore it all later.
        st.w sp, THREAD_KSP[r6]
        // Now restore the stack pointer from the new process
        ld.w THREAD_KSP[r7], sp
        // ... and restore all state from that
        POP_STATE(SWITCH)
        // Update the current task pointer
        GET_CURRENT_TASK(CURRENT_TASK)
        // Now return into the new thread
        jmp [lp]
C_END(switch_thread)
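
/* In C terms, switch_thread boils down to the following (a sketch of the
   assembly above, using the thread fields it references):

        prev_task = current;            // returned in r10
        push_state(SWITCH);
        prev_thread->ksp = sp;          // THREAD_KSP[r6]
        sp = next_thread->ksp;          // THREAD_KSP[r7]
        pop_state(SWITCH);              // now running on the new stack
        current = get_current_task();
        return prev_task;               // execution resumes at [lp]
*/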
        .data

        .align 4
C_DATA(trap_table):
        .long bad_trap_wrapper // trap 0, doesn't use trap table.
        .long syscall_long // trap 1, `long' syscall.
        .long bad_trap_wrapper
        .long bad_trap_wrapper
        .long bad_trap_wrapper
        .long bad_trap_wrapper
        .long bad_trap_wrapper
        .long bad_trap_wrapper
        .long bad_trap_wrapper
        .long bad_trap_wrapper
        .long bad_trap_wrapper
        .long bad_trap_wrapper
        .long bad_trap_wrapper
        .long bad_trap_wrapper
        .long bad_trap_wrapper
        .long bad_trap_wrapper
C_END(trap_table)
        .section .rodata

        .align 4
C_DATA(sys_call_table):
        .long CSYM(sys_restart_syscall) // 0
        .long CSYM(sys_exit)
        .long sys_fork_wrapper
        .long CSYM(sys_read)
        .long CSYM(sys_write)
        .long CSYM(sys_open) // 5
        .long CSYM(sys_close)
        .long CSYM(sys_waitpid)
        .long CSYM(sys_creat)
        .long CSYM(sys_link)
        .long CSYM(sys_unlink) // 10
        .long sys_execve_wrapper
        .long CSYM(sys_chdir)
        .long CSYM(sys_time)
        .long CSYM(sys_mknod)
        .long CSYM(sys_chmod) // 15
        .long CSYM(sys_chown)
        .long CSYM(sys_ni_syscall) // was: break
        .long CSYM(sys_ni_syscall) // was: oldstat (aka stat)
        .long CSYM(sys_lseek)
        .long CSYM(sys_getpid) // 20
        .long CSYM(sys_mount)
        .long CSYM(sys_oldumount)
        .long CSYM(sys_setuid)
        .long CSYM(sys_getuid)
        .long CSYM(sys_stime) // 25
        .long CSYM(sys_ptrace)
        .long CSYM(sys_alarm)
        .long CSYM(sys_ni_syscall) // was: oldfstat (aka fstat)
        .long CSYM(sys_pause)
        .long CSYM(sys_utime) // 30
        .long CSYM(sys_ni_syscall) // was: stty
        .long CSYM(sys_ni_syscall) // was: gtty
        .long CSYM(sys_access)
        .long CSYM(sys_nice)
        .long CSYM(sys_ni_syscall) // 35, was: ftime
        .long CSYM(sys_sync)
        .long CSYM(sys_kill)
        .long CSYM(sys_rename)
        .long CSYM(sys_mkdir)
        .long CSYM(sys_rmdir) // 40
        .long CSYM(sys_dup)
        .long CSYM(sys_pipe)
        .long CSYM(sys_times)
        .long CSYM(sys_ni_syscall) // was: prof
        .long CSYM(sys_brk) // 45
        .long CSYM(sys_setgid)
        .long CSYM(sys_getgid)
        .long CSYM(sys_signal)
        .long CSYM(sys_geteuid)
        .long CSYM(sys_getegid) // 50
        .long CSYM(sys_acct)
        .long CSYM(sys_umount) // recycled never used phys()
        .long CSYM(sys_ni_syscall) // was: lock
        .long CSYM(sys_ioctl)
        .long CSYM(sys_fcntl) // 55
        .long CSYM(sys_ni_syscall) // was: mpx
        .long CSYM(sys_setpgid)
        .long CSYM(sys_ni_syscall) // was: ulimit
        .long CSYM(sys_ni_syscall)
        .long CSYM(sys_umask) // 60
        .long CSYM(sys_chroot)
        .long CSYM(sys_ustat)
        .long CSYM(sys_dup2)
        .long CSYM(sys_getppid)
        .long CSYM(sys_getpgrp) // 65
        .long CSYM(sys_setsid)
        .long CSYM(sys_sigaction)
        .long CSYM(sys_sgetmask)
        .long CSYM(sys_ssetmask)
        .long CSYM(sys_setreuid) // 70
        .long CSYM(sys_setregid)
        .long sys_sigsuspend_wrapper
        .long CSYM(sys_sigpending)
        .long CSYM(sys_sethostname)
        .long CSYM(sys_setrlimit) // 75
        .long CSYM(sys_getrlimit)
        .long CSYM(sys_getrusage)
        .long CSYM(sys_gettimeofday)
        .long CSYM(sys_settimeofday)
        .long CSYM(sys_getgroups) // 80
        .long CSYM(sys_setgroups)
        .long CSYM(sys_select)
        .long CSYM(sys_symlink)
        .long CSYM(sys_ni_syscall) // was: oldlstat (aka lstat)
        .long CSYM(sys_readlink) // 85
        .long CSYM(sys_uselib)
        .long CSYM(sys_swapon)
        .long CSYM(sys_reboot)
        .long CSYM(old_readdir)
        .long CSYM(sys_mmap) // 90
        .long CSYM(sys_munmap)
        .long CSYM(sys_truncate)
        .long CSYM(sys_ftruncate)
        .long CSYM(sys_fchmod)
        .long CSYM(sys_fchown) // 95
        .long CSYM(sys_getpriority)
        .long CSYM(sys_setpriority)
        .long CSYM(sys_ni_syscall) // was: profil
        .long CSYM(sys_statfs)
        .long CSYM(sys_fstatfs) // 100
        .long CSYM(sys_ni_syscall) // i386: ioperm
        .long CSYM(sys_socketcall)
        .long CSYM(sys_syslog)
        .long CSYM(sys_setitimer)
        .long CSYM(sys_getitimer) // 105
        .long CSYM(sys_newstat)
        .long CSYM(sys_newlstat)
        .long CSYM(sys_newfstat)
        .long CSYM(sys_ni_syscall) // was: olduname (aka uname)
        .long CSYM(sys_ni_syscall) // 110, i386: iopl
        .long CSYM(sys_vhangup)
        .long CSYM(sys_ni_syscall) // was: idle
        .long CSYM(sys_ni_syscall) // i386: vm86old
        .long CSYM(sys_wait4)
        .long CSYM(sys_swapoff) // 115
        .long CSYM(sys_sysinfo)
        .long CSYM(sys_ipc)
        .long CSYM(sys_fsync)
        .long sys_sigreturn_wrapper
        .long sys_clone_wrapper // 120
        .long CSYM(sys_setdomainname)
        .long CSYM(sys_newuname)
        .long CSYM(sys_ni_syscall) // i386: modify_ldt, m68k: cacheflush
        .long CSYM(sys_adjtimex)
        .long CSYM(sys_ni_syscall) // 125 - sys_mprotect
        .long CSYM(sys_sigprocmask)
        .long CSYM(sys_ni_syscall) // sys_create_module
        .long CSYM(sys_init_module)
        .long CSYM(sys_delete_module)
        .long CSYM(sys_ni_syscall) // 130 - sys_get_kernel_syms
        .long CSYM(sys_quotactl)
        .long CSYM(sys_getpgid)
        .long CSYM(sys_fchdir)
        .long CSYM(sys_bdflush)
        .long CSYM(sys_sysfs) // 135
        .long CSYM(sys_personality)
        .long CSYM(sys_ni_syscall) // for afs_syscall
        .long CSYM(sys_setfsuid)
        .long CSYM(sys_setfsgid)
        .long CSYM(sys_llseek) // 140
        .long CSYM(sys_getdents)
        .long CSYM(sys_select) // for backward compat; remove someday
        .long CSYM(sys_flock)
        .long CSYM(sys_ni_syscall) // sys_msync
        .long CSYM(sys_readv) // 145
        .long CSYM(sys_writev)
        .long CSYM(sys_getsid)
        .long CSYM(sys_fdatasync)
        .long CSYM(sys_sysctl)
        .long CSYM(sys_ni_syscall) // 150 - sys_mlock
        .long CSYM(sys_ni_syscall) // sys_munlock
        .long CSYM(sys_ni_syscall) // sys_mlockall
        .long CSYM(sys_ni_syscall) // sys_munlockall
        .long CSYM(sys_sched_setparam)
        .long CSYM(sys_sched_getparam) // 155
        .long CSYM(sys_sched_setscheduler)
        .long CSYM(sys_sched_getscheduler)
        .long CSYM(sys_sched_yield)
        .long CSYM(sys_sched_get_priority_max)
        .long CSYM(sys_sched_get_priority_min) // 160
        .long CSYM(sys_sched_rr_get_interval)
        .long CSYM(sys_nanosleep)
        .long CSYM(sys_ni_syscall) // sys_mremap
        .long CSYM(sys_setresuid)
        .long CSYM(sys_getresuid) // 165
        .long CSYM(sys_ni_syscall) // for vm86
        .long CSYM(sys_ni_syscall) // sys_query_module
        .long CSYM(sys_poll)
        .long CSYM(sys_nfsservctl)
        .long CSYM(sys_setresgid) // 170
        .long CSYM(sys_getresgid)
        .long CSYM(sys_prctl)
        .long sys_rt_sigreturn_wrapper
        .long CSYM(sys_rt_sigaction)
        .long CSYM(sys_rt_sigprocmask) // 175
        .long CSYM(sys_rt_sigpending)
        .long CSYM(sys_rt_sigtimedwait)
        .long CSYM(sys_rt_sigqueueinfo)
        .long sys_rt_sigsuspend_wrapper
        .long CSYM(sys_pread64) // 180
        .long CSYM(sys_pwrite64)
        .long CSYM(sys_lchown)
        .long CSYM(sys_getcwd)
        .long CSYM(sys_capget)
        .long CSYM(sys_capset) // 185
        .long CSYM(sys_sigaltstack)
        .long CSYM(sys_sendfile)
        .long CSYM(sys_ni_syscall) // streams1
        .long CSYM(sys_ni_syscall) // streams2
        .long sys_vfork_wrapper // 190
        .long CSYM(sys_ni_syscall)
        .long CSYM(sys_mmap2)
        .long CSYM(sys_truncate64)
        .long CSYM(sys_ftruncate64)
        .long CSYM(sys_stat64) // 195
        .long CSYM(sys_lstat64)
        .long CSYM(sys_fstat64)
        .long CSYM(sys_fcntl64)
        .long CSYM(sys_getdents64)
        .long CSYM(sys_pivot_root) // 200
        .long CSYM(sys_gettid)
        .long CSYM(sys_tkill)
sys_call_table_end:
C_END(sys_call_table)