  1. /* entry.S: FR-V entry
  2. *
  3. * Copyright (C) 2003 Red Hat, Inc. All Rights Reserved.
  4. * Written by David Howells (dhowells@redhat.com)
  5. *
  6. * This program is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU General Public License
  8. * as published by the Free Software Foundation; either version
  9. * 2 of the License, or (at your option) any later version.
  10. *
  11. *
  12. * Entry to the kernel is "interesting":
  13. * (1) There are no stack pointers, not even for the kernel
  14. * (2) General Registers should not be clobbered
  15. * (3) There are no kernel-only data registers
  16. * (4) Since all addressing modes are wrt to a General Register, no global
  17. * variables can be reached
  18. *
  19. * We deal with this by declaring that we shall kill GR28 on entering the
  20. * kernel from userspace
  21. *
  22. * However, since break interrupts can interrupt the CPU even when PSR.ET==0,
  23. * they can't rely on GR28 to be anything useful, and so need to clobber a
  24. * separate register (GR31). Break interrupts are managed in break.S
  25. *
  26. * GR29 _is_ saved, and holds the current task pointer globally
  27. *
  28. */
  29. #include <linux/linkage.h>
  30. #include <asm/thread_info.h>
  31. #include <asm/setup.h>
  32. #include <asm/segment.h>
  33. #include <asm/ptrace.h>
  34. #include <asm/errno.h>
  35. #include <asm/cache.h>
  36. #include <asm/spr-regs.h>
  37. #define nr_syscalls ((syscall_table_size)/4)
	.text
	.balign		4

# Debug macro: write the inverted \val to the memory-mapped port at
# 0xe1200004 (board LEDs, per the macro name) and strobe 0xffc00100.
# Every instruction is commented out for normal builds; uncomment to
# trace entry-path progress on hardware.  Clobbers gr30/gr31 when live.
.macro LEDS val
#	sethi.p		%hi(0xe1200004),gr30
#	setlo		%lo(0xe1200004),gr30
#	setlos		#~\val,gr31
#	st		gr31,@(gr30,gr0)
#	sethi.p		%hi(0xffc00100),gr30
#	setlo		%lo(0xffc00100),gr30
#	sth		gr0,@(gr30,gr0)
#	membar
.endm
# Debug macro: display the 32-bit value already in gr31 (inverted) on the
# same two debug ports used by LEDS.  Commented out for normal builds.
# Clobbers gr30/gr31 when live.
.macro LEDS32
#	not		gr31,gr31
#	sethi.p		%hi(0xe1200004),gr30
#	setlo		%lo(0xe1200004),gr30
#	st.p		gr31,@(gr30,gr0)
#	srli		gr31,#16,gr31
#	sethi.p		%hi(0xffc00100),gr30
#	setlo		%lo(0xffc00100),gr30
#	sth		gr31,@(gr30,gr0)
#	membar
.endm
###############################################################################
#
# entry point for External interrupts received whilst executing userspace code
# - GR28 may be clobbered freely here (see the header comment: it is declared
#   killed on any userspace->kernel transition); it is pointed at the kernel
#   exception frame and doubles as the frame base for all the saves below
#
###############################################################################
	.globl		__entry_uspace_external_interrupt
	.type		__entry_uspace_external_interrupt,@function
__entry_uspace_external_interrupt:
	LEDS		0x6200
	sethi.p		%hi(__kernel_frame0_ptr),gr28
	setlo		%lo(__kernel_frame0_ptr),gr28
	ldi		@(gr28,#0),gr28

	# handle h/w single-step through exceptions
	sti		gr0,@(gr28,#REG__STATUS)

	.globl		__entry_uspace_external_interrupt_reentry
__entry_uspace_external_interrupt_reentry:
	LEDS		0x6201

	# preload the frame's cachelines before the burst of stores
	setlos		#REG__END,gr30
	dcpl		gr28,gr30,#0

	# finish building the exception frame
	sti		sp,  @(gr28,#REG_SP)
	stdi		gr2, @(gr28,#REG_GR(2))
	stdi		gr4, @(gr28,#REG_GR(4))
	stdi		gr6, @(gr28,#REG_GR(6))
	stdi		gr8, @(gr28,#REG_GR(8))
	stdi		gr10,@(gr28,#REG_GR(10))
	stdi		gr12,@(gr28,#REG_GR(12))
	stdi		gr14,@(gr28,#REG_GR(14))
	stdi		gr16,@(gr28,#REG_GR(16))
	stdi		gr18,@(gr28,#REG_GR(18))
	stdi		gr20,@(gr28,#REG_GR(20))
	stdi		gr22,@(gr28,#REG_GR(22))
	stdi		gr24,@(gr28,#REG_GR(24))
	stdi		gr26,@(gr28,#REG_GR(26))
	sti		gr0, @(gr28,#REG_GR(28))	; GR28 was clobbered on entry - save 0
	sti		gr29,@(gr28,#REG_GR(29))
	stdi.p		gr30,@(gr28,#REG_GR(30))

	# set up the kernel stack pointer
	ori		gr28,0,sp

	# capture the SPRs that describe the interrupted context
	movsg		tbr ,gr20
	movsg		psr ,gr22
	movsg		pcsr,gr21
	movsg		isr ,gr23
	movsg		ccr ,gr24
	movsg		cccr,gr25
	movsg		lr ,gr26
	movsg		lcr ,gr27

	setlos.p	#-1,gr4
	andi		gr22,#PSR_PS,gr5	/* try to rebuild original PSR value */
	andi.p		gr22,#~(PSR_PS|PSR_S),gr6
	slli		gr5,#1,gr5		/* shift PS back into the S position */
	or		gr6,gr5,gr5
	andi		gr5,#~PSR_ET,gr5

	sti		gr20,@(gr28,#REG_TBR)
	sti		gr21,@(gr28,#REG_PC)
	sti		gr5 ,@(gr28,#REG_PSR)
	sti		gr23,@(gr28,#REG_ISR)
	stdi		gr24,@(gr28,#REG_CCR)
	stdi		gr26,@(gr28,#REG_LR)
	sti		gr4 ,@(gr28,#REG_SYSCALLNO)	; -1 = not a syscall

	movsg		iacc0h,gr4
	movsg		iacc0l,gr5
	stdi		gr4,@(gr28,#REG_IACC0)

	movsg		gner0,gr4
	movsg		gner1,gr5
	stdi.p		gr4,@(gr28,#REG_GNER0)

	# interrupts start off fully disabled in the interrupt handler
	subcc		gr0,gr0,gr0,icc2		/* set Z and clear C */

	# set up kernel global registers
	sethi.p		%hi(__kernel_current_task),gr5
	setlo		%lo(__kernel_current_task),gr5
	sethi.p		%hi(_gp),gr16
	setlo		%lo(_gp),gr16
	ldi		@(gr5,#0),gr29
	ldi.p		@(gr29,#4),gr15		; __current_thread_info = current->thread_info

	# make sure we (the kernel) get div-zero and misalignment exceptions
	setlos		#ISR_EDE|ISR_DTT_DIVBYZERO|ISR_EMAM_EXCEPTION,gr5
	movgs		gr5,isr

	# switch to the kernel trap table
	sethi.p		%hi(__entry_kerneltrap_table),gr6
	setlo		%lo(__entry_kerneltrap_table),gr6
	movgs		gr6,tbr

	# set the return address
	sethi.p		%hi(__entry_return_from_user_interrupt),gr4
	setlo		%lo(__entry_return_from_user_interrupt),gr4
	movgs		gr4,lr

	# raise the minimum interrupt priority to 15 (NMI only) and enable exceptions
	movsg		psr,gr4
	ori		gr4,#PSR_PIL_14,gr4
	movgs		gr4,psr
	ori		gr4,#PSR_PIL_14|PSR_ET,gr4	; PIL_14 already set; this adds ET
	movgs		gr4,psr

	LEDS		0x6202
	bra		do_IRQ

	.size		__entry_uspace_external_interrupt,.-__entry_uspace_external_interrupt
###############################################################################
#
# entry point for External interrupts received whilst executing kernel code
# - on arriving here, the following registers should already be set up:
#	GR15	- current thread_info struct pointer
#	GR16	- kernel GP-REL pointer
#	GR29	- current task struct pointer
#	TBR	- kernel trap vector table
#	ISR	- kernel's preferred integer controls
# - builds the exception frame directly on the kernel stack (no frame0 swap
#   needed since we were already in the kernel)
#
###############################################################################
	.globl		__entry_kernel_external_interrupt
	.type		__entry_kernel_external_interrupt,@function
__entry_kernel_external_interrupt:
	LEDS		0x6210
//	sub		sp,gr15,gr31
//	LEDS32

	# set up the stack pointer
	or.p		sp,gr0,gr30		; remember the old SP
	subi		sp,#REG__END,sp
	sti		gr30,@(sp,#REG_SP)

	# handle h/w single-step through exceptions
	sti		gr0,@(sp,#REG__STATUS)

	.globl		__entry_kernel_external_interrupt_reentry
__entry_kernel_external_interrupt_reentry:
	LEDS		0x6211

	# set up the exception frame
	setlos		#REG__END,gr30
	dcpl		sp,gr30,#0		; preload the frame's cachelines

	sti.p		gr28,@(sp,#REG_GR(28))
	ori		sp,0,gr28		; gr28 = frame base from here on

	# finish building the exception frame
	stdi		gr2,@(gr28,#REG_GR(2))
	stdi		gr4,@(gr28,#REG_GR(4))
	stdi		gr6,@(gr28,#REG_GR(6))
	stdi		gr8,@(gr28,#REG_GR(8))
	stdi		gr10,@(gr28,#REG_GR(10))
	stdi		gr12,@(gr28,#REG_GR(12))
	stdi		gr14,@(gr28,#REG_GR(14))
	stdi		gr16,@(gr28,#REG_GR(16))
	stdi		gr18,@(gr28,#REG_GR(18))
	stdi		gr20,@(gr28,#REG_GR(20))
	stdi		gr22,@(gr28,#REG_GR(22))
	stdi		gr24,@(gr28,#REG_GR(24))
	stdi		gr26,@(gr28,#REG_GR(26))
	sti		gr29,@(gr28,#REG_GR(29))
	stdi.p		gr30,@(gr28,#REG_GR(30))

	# note virtual interrupts will be fully enabled upon return
	subicc		gr0,#1,gr0,icc2		/* clear Z, set C */

	# capture the SPRs that describe the interrupted context
	movsg		tbr ,gr20
	movsg		psr ,gr22
	movsg		pcsr,gr21
	movsg		isr ,gr23
	movsg		ccr ,gr24
	movsg		cccr,gr25
	movsg		lr ,gr26
	movsg		lcr ,gr27

	setlos.p	#-1,gr4
	andi		gr22,#PSR_PS,gr5	/* try to rebuild original PSR value */
	andi.p		gr22,#~(PSR_PS|PSR_S),gr6
	slli		gr5,#1,gr5		/* shift PS back into the S position */
	or		gr6,gr5,gr5
	andi.p		gr5,#~PSR_ET,gr5

	# set CCCR.CC3 to Undefined to abort atomic-modify completion inside the kernel
	# - for an explanation of how it works, see: Documentation/fujitsu/frv/atomic-ops.txt
	andi		gr25,#~0xc0,gr25

	sti		gr20,@(gr28,#REG_TBR)
	sti		gr21,@(gr28,#REG_PC)
	sti		gr5 ,@(gr28,#REG_PSR)
	sti		gr23,@(gr28,#REG_ISR)
	stdi		gr24,@(gr28,#REG_CCR)
	stdi		gr26,@(gr28,#REG_LR)
	sti		gr4 ,@(gr28,#REG_SYSCALLNO)	; -1 = not a syscall

	movsg		iacc0h,gr4
	movsg		iacc0l,gr5
	stdi		gr4,@(gr28,#REG_IACC0)

	movsg		gner0,gr4
	movsg		gner1,gr5
	stdi.p		gr4,@(gr28,#REG_GNER0)

	# interrupts start off fully disabled in the interrupt handler
	subcc		gr0,gr0,gr0,icc2		/* set Z and clear C */

	# set the return address
	sethi.p		%hi(__entry_return_from_kernel_interrupt),gr4
	setlo		%lo(__entry_return_from_kernel_interrupt),gr4
	movgs		gr4,lr

	# clear power-saving mode flags
	movsg		hsr0,gr4
	andi		gr4,#~HSR0_PDM,gr4
	movgs		gr4,hsr0

	# raise the minimum interrupt priority to 15 (NMI only) and enable exceptions
	movsg		psr,gr4
	ori		gr4,#PSR_PIL_14,gr4
	movgs		gr4,psr
	ori		gr4,#PSR_ET,gr4
	movgs		gr4,psr

	LEDS		0x6212
	bra		do_IRQ

	.size		__entry_kernel_external_interrupt,.-__entry_kernel_external_interrupt
###############################################################################
#
# deal with interrupts that were actually virtually disabled
# - we need to really disable them, flag the fact and return immediately
# - if you change this, you must alter break.S also
# - ICC2.Z/C encode the virtual-interrupt state (see the subcc/subicc pairs
#   in the entry paths above)
#
###############################################################################
	.balign		L1_CACHE_BYTES
	.globl		__entry_kernel_external_interrupt_virtually_disabled
	.type		__entry_kernel_external_interrupt_virtually_disabled,@function
__entry_kernel_external_interrupt_virtually_disabled:
	movsg		psr,gr30
	andi		gr30,#~PSR_PIL,gr30
	ori		gr30,#PSR_PIL_14,gr30	; debugging interrupts only
	movgs		gr30,psr
	subcc		gr0,gr0,gr0,icc2	; leave Z set, clear C
	rett		#0
	.size		__entry_kernel_external_interrupt_virtually_disabled,.-__entry_kernel_external_interrupt_virtually_disabled
###############################################################################
#
# deal with re-enablement of interrupts that were pending when virtually re-enabled
# - set ICC2.C, re-enable the real interrupts and return
# - we can clear ICC2.Z because we shouldn't be here if it's not 0 [due to TIHI]
# - if you change this, you must alter break.S also
#
###############################################################################
	.balign		L1_CACHE_BYTES
	.globl		__entry_kernel_external_interrupt_virtual_reenable
	.type		__entry_kernel_external_interrupt_virtual_reenable,@function
__entry_kernel_external_interrupt_virtual_reenable:
	movsg		psr,gr30
	andi		gr30,#~PSR_PIL,gr30	; re-enable interrupts
	movgs		gr30,psr
	subicc		gr0,#1,gr0,icc2		; clear Z, set C
	rett		#0
	.size		__entry_kernel_external_interrupt_virtual_reenable,.-__entry_kernel_external_interrupt_virtual_reenable
###############################################################################
#
# entry point for Software and Progam interrupts generated whilst executing userspace code
# - saves EAR0 into SCR2 before GR-based addressing is possible (MMU builds),
#   partially builds the frame, then hands off to __entry_common with
#   GR20=TBR, GR21=PC(PCSR), GR22=PSR, GR23=return handler
#
###############################################################################
	.globl		__entry_uspace_softprog_interrupt
	.type		__entry_uspace_softprog_interrupt,@function
	.globl		__entry_uspace_handle_mmu_fault
__entry_uspace_softprog_interrupt:
	LEDS		0x6000
#ifdef CONFIG_MMU
	movsg		ear0,gr28
__entry_uspace_handle_mmu_fault:
	movgs		gr28,scr2	; stash faulting address for the handlers
#endif
	sethi.p		%hi(__kernel_frame0_ptr),gr28
	setlo		%lo(__kernel_frame0_ptr),gr28
	ldi		@(gr28,#0),gr28

	# handle h/w single-step through exceptions
	sti		gr0,@(gr28,#REG__STATUS)

	.globl		__entry_uspace_softprog_interrupt_reentry
__entry_uspace_softprog_interrupt_reentry:
	LEDS		0x6001

	setlos		#REG__END,gr30
	dcpl		gr28,gr30,#0	; preload the frame's cachelines

	# set up the kernel stack pointer
	sti.p		sp,@(gr28,#REG_SP)
	ori		gr28,0,sp
	sti		gr0,@(gr28,#REG_GR(28))	; GR28 was clobbered on entry - save 0

	stdi		gr20,@(gr28,#REG_GR(20))
	stdi		gr22,@(gr28,#REG_GR(22))

	movsg		tbr,gr20
	movsg		pcsr,gr21
	movsg		psr,gr22

	sethi.p		%hi(__entry_return_from_user_exception),gr23
	setlo		%lo(__entry_return_from_user_exception),gr23
	bra		__entry_common

	.size		__entry_uspace_softprog_interrupt,.-__entry_uspace_softprog_interrupt
  328. # single-stepping was disabled on entry to a TLB handler that then faulted
  329. #ifdef CONFIG_MMU
  330. .globl __entry_uspace_handle_mmu_fault_sstep
  331. __entry_uspace_handle_mmu_fault_sstep:
  332. movgs gr28,scr2
  333. sethi.p %hi(__kernel_frame0_ptr),gr28
  334. setlo %lo(__kernel_frame0_ptr),gr28
  335. ldi @(gr28,#0),gr28
  336. # flag single-step re-enablement
  337. sti gr0,@(gr28,#REG__STATUS)
  338. bra __entry_uspace_softprog_interrupt_reentry
  339. #endif
###############################################################################
#
# entry point for Software and Progam interrupts generated whilst executing kernel code
# - builds the frame on the current kernel stack, then hands off to
#   __entry_common with GR20=TBR, GR21=PC(PCSR), GR22=PSR, GR23=return handler
#
###############################################################################
	.globl		__entry_kernel_softprog_interrupt
	.type		__entry_kernel_softprog_interrupt,@function
__entry_kernel_softprog_interrupt:
	LEDS		0x6004
#ifdef CONFIG_MMU
	movsg		ear0,gr30
	movgs		gr30,scr2	; stash faulting address for the handlers
#endif

	.globl		__entry_kernel_handle_mmu_fault
__entry_kernel_handle_mmu_fault:
	# set up the stack pointer
	subi		sp,#REG__END,sp
	sti		sp,@(sp,#REG_SP)	; record pre-alignment SP in both
	sti		sp,@(sp,#REG_SP-4)	;   possible slots, then 8-align
	andi		sp,#~7,sp

	# handle h/w single-step through exceptions
	sti		gr0,@(sp,#REG__STATUS)

	.globl		__entry_kernel_softprog_interrupt_reentry
__entry_kernel_softprog_interrupt_reentry:
	LEDS		0x6005

	setlos		#REG__END,gr30
	dcpl		sp,gr30,#0	; preload the frame's cachelines

	# set up the exception frame
	sti.p		gr28,@(sp,#REG_GR(28))
	ori		sp,0,gr28	; gr28 = frame base from here on

	stdi		gr20,@(gr28,#REG_GR(20))
	stdi		gr22,@(gr28,#REG_GR(22))

	ldi		@(sp,#REG_SP),gr22 /* reconstruct the old SP */
	addi		gr22,#REG__END,gr22
	sti		gr22,@(sp,#REG_SP)

	# set CCCR.CC3 to Undefined to abort atomic-modify completion inside the kernel
	# - for an explanation of how it works, see: Documentation/fujitsu/frv/atomic-ops.txt
	movsg		cccr,gr20
	andi		gr20,#~0xc0,gr20
	movgs		gr20,cccr

	movsg		tbr,gr20
	movsg		pcsr,gr21
	movsg		psr,gr22

	sethi.p		%hi(__entry_return_from_kernel_exception),gr23
	setlo		%lo(__entry_return_from_kernel_exception),gr23
	bra		__entry_common

	.size		__entry_kernel_softprog_interrupt,.-__entry_kernel_softprog_interrupt
# single-stepping was disabled on entry to a TLB handler that then faulted
# - record that fact in the frame status word so it gets re-enabled on return
#ifdef CONFIG_MMU
	.globl		__entry_kernel_handle_mmu_fault_sstep
__entry_kernel_handle_mmu_fault_sstep:
	# set up the stack pointer
	subi		sp,#REG__END,sp
	sti		sp,@(sp,#REG_SP)	; record pre-alignment SP in both
	sti		sp,@(sp,#REG_SP-4)	;   possible slots, then 8-align
	andi		sp,#~7,sp

	# flag single-step re-enablement
	sethi		#REG__STATUS_STEP,gr30
	sti		gr30,@(sp,#REG__STATUS)
	bra		__entry_kernel_softprog_interrupt_reentry
#endif
###############################################################################
#
# the rest of the kernel entry point code
# - on arriving here, the following registers should be set up:
#	GR1	- kernel stack pointer
#	GR7	- syscall number (trap 0 only)
#	GR8-13	- syscall args (trap 0 only)
#	GR20	- saved TBR
#	GR21	- saved PC
#	GR22	- saved PSR
#	GR23	- return handler address
#	GR28	- exception frame on stack
#	SCR2	- saved EAR0 where applicable (clobbered by ICI & ICEF insns on FR451)
#	PSR	- PSR.S 1, PSR.ET 0
# - completes the frame (GR20-23 were already saved by the softprog entry
#   paths), installs kernel globals/trap table, then dispatches through
#   __entry_vector_table indexed by the old TBR.TT field
#
###############################################################################
	.globl		__entry_common
	.type		__entry_common,@function
__entry_common:
	LEDS		0x6008

	# finish building the exception frame
	stdi		gr2,@(gr28,#REG_GR(2))
	stdi		gr4,@(gr28,#REG_GR(4))
	stdi		gr6,@(gr28,#REG_GR(6))
	stdi		gr8,@(gr28,#REG_GR(8))
	stdi		gr10,@(gr28,#REG_GR(10))
	stdi		gr12,@(gr28,#REG_GR(12))
	stdi		gr14,@(gr28,#REG_GR(14))
	stdi		gr16,@(gr28,#REG_GR(16))
	stdi		gr18,@(gr28,#REG_GR(18))
	stdi		gr24,@(gr28,#REG_GR(24))
	stdi		gr26,@(gr28,#REG_GR(26))
	sti		gr29,@(gr28,#REG_GR(29))
	stdi		gr30,@(gr28,#REG_GR(30))

	movsg		lcr ,gr27
	movsg		lr ,gr26
	movgs		gr23,lr			; install the return handler
	movsg		cccr,gr25
	movsg		ccr ,gr24
	movsg		isr ,gr23

	setlos.p	#-1,gr4
	andi		gr22,#PSR_PS,gr5	/* try to rebuild original PSR value */
	andi.p		gr22,#~(PSR_PS|PSR_S),gr6
	slli		gr5,#1,gr5		/* shift PS back into the S position */
	or		gr6,gr5,gr5
	andi		gr5,#~PSR_ET,gr5

	sti		gr20,@(gr28,#REG_TBR)
	sti		gr21,@(gr28,#REG_PC)
	sti		gr5 ,@(gr28,#REG_PSR)
	sti		gr23,@(gr28,#REG_ISR)
	stdi		gr24,@(gr28,#REG_CCR)
	stdi		gr26,@(gr28,#REG_LR)
	sti		gr4 ,@(gr28,#REG_SYSCALLNO)	; -1 = not (yet) a syscall

	movsg		iacc0h,gr4
	movsg		iacc0l,gr5
	stdi		gr4,@(gr28,#REG_IACC0)

	movsg		gner0,gr4
	movsg		gner1,gr5
	stdi.p		gr4,@(gr28,#REG_GNER0)

	# set up virtual interrupt disablement
	subicc		gr0,#1,gr0,icc2		/* clear Z flag, set C flag */

	# set up kernel global registers
	sethi.p		%hi(__kernel_current_task),gr5
	setlo		%lo(__kernel_current_task),gr5
	sethi.p		%hi(_gp),gr16
	setlo		%lo(_gp),gr16
	ldi		@(gr5,#0),gr29
	ldi		@(gr29,#4),gr15		; __current_thread_info = current->thread_info

	# switch to the kernel trap table
	sethi.p		%hi(__entry_kerneltrap_table),gr6
	setlo		%lo(__entry_kerneltrap_table),gr6
	movgs		gr6,tbr

	# make sure we (the kernel) get div-zero and misalignment exceptions
	setlos		#ISR_EDE|ISR_DTT_DIVBYZERO|ISR_EMAM_EXCEPTION,gr5
	movgs		gr5,isr

	# clear power-saving mode flags
	movsg		hsr0,gr4
	andi		gr4,#~HSR0_PDM,gr4
	movgs		gr4,hsr0

	# multiplex again using old TBR as a guide
	setlos.p	#TBR_TT,gr3
	sethi		%hi(__entry_vector_table),gr6
	and.p		gr20,gr3,gr5
	setlo		%lo(__entry_vector_table),gr6
	srli		gr5,#2,gr5		; TT field -> word index
	ld		@(gr5,gr6),gr5

	LEDS		0x6009
	jmpl		@(gr5,gr0)

	.size		__entry_common,.-__entry_common
###############################################################################
#
# handle instruction MMU fault
# - tail-calls do_page_fault(0, esr0, ear0); arg0=0 marks an insn fault
#   (cf. __entry_data_mmu_fault which passes 1)
#
###############################################################################
#ifdef CONFIG_MMU
	.globl		__entry_insn_mmu_fault
__entry_insn_mmu_fault:
	LEDS		0x6010
	setlos		#0,gr8
	movsg		esr0,gr9
	movsg		scr2,gr10	; saved EAR0

	# now that we've accessed the exception regs, we can enable exceptions
	movsg		psr,gr4
	ori		gr4,#PSR_ET,gr4
	movgs		gr4,psr

	sethi.p		%hi(do_page_fault),gr5
	setlo		%lo(do_page_fault),gr5
	jmpl		@(gr5,gr0)	; call do_page_fault(0,esr0,ear0)
#endif
###############################################################################
#
# handle instruction access error
# - tail-calls insn_access_error(esfr1, epcr0, esr0)
#
###############################################################################
	.globl		__entry_insn_access_error
__entry_insn_access_error:
	LEDS		0x6011
	sethi.p		%hi(insn_access_error),gr5
	setlo		%lo(insn_access_error),gr5
	movsg		esfr1,gr8
	movsg		epcr0,gr9
	movsg		esr0,gr10

	# now that we've accessed the exception regs, we can enable exceptions
	movsg		psr,gr4
	ori		gr4,#PSR_ET,gr4
	movgs		gr4,psr
	jmpl		@(gr5,gr0)	; call insn_access_error(esfr1,epcr0,esr0)
###############################################################################
#
# handle various instructions of dubious legality
# - all four entry points funnel into illegal_instruction(esfr1,epcr0,esr0);
#   the unsupported-trap case first rewinds the saved PC by one instruction
#
###############################################################################
	.globl		__entry_unsupported_trap
	.globl		__entry_illegal_instruction
	.globl		__entry_privileged_instruction
	.globl		__entry_debug_exception
__entry_unsupported_trap:
	subi		gr21,#4,gr21	; back the saved PC up over the trap insn
	sti		gr21,@(gr28,#REG_PC)
__entry_illegal_instruction:
__entry_privileged_instruction:
__entry_debug_exception:
	LEDS		0x6012
	sethi.p		%hi(illegal_instruction),gr5
	setlo		%lo(illegal_instruction),gr5
	movsg		esfr1,gr8
	movsg		epcr0,gr9
	movsg		esr0,gr10

	# now that we've accessed the exception regs, we can enable exceptions
	movsg		psr,gr4
	ori		gr4,#PSR_ET,gr4
	movgs		gr4,psr
	jmpl		@(gr5,gr0)	; call ill_insn(esfr1,epcr0,esr0)
###############################################################################
#
# handle media exception
# - tail-calls media_exception(msr0, msr1)
#
###############################################################################
	.globl		__entry_media_exception
__entry_media_exception:
	LEDS		0x6013
	sethi.p		%hi(media_exception),gr5
	setlo		%lo(media_exception),gr5
	movsg		msr0,gr8
	movsg		msr1,gr9

	# now that we've accessed the exception regs, we can enable exceptions
	movsg		psr,gr4
	ori		gr4,#PSR_ET,gr4
	movgs		gr4,psr
	jmpl		@(gr5,gr0)	; call media_excep(msr0,msr1)
###############################################################################
#
# handle data MMU fault
# handle data DAT fault (write-protect exception)
# - tail-calls do_page_fault(1, esr0, ear0); arg0=1 marks a data fault
#   (cf. __entry_insn_mmu_fault which passes 0)
#
###############################################################################
#ifdef CONFIG_MMU
	.globl		__entry_data_mmu_fault
__entry_data_mmu_fault:
	.globl		__entry_data_dat_fault
__entry_data_dat_fault:
	LEDS		0x6014
	setlos		#1,gr8
	movsg		esr0,gr9
	movsg		scr2,gr10	; saved EAR0

	# now that we've accessed the exception regs, we can enable exceptions
	movsg		psr,gr4
	ori		gr4,#PSR_ET,gr4
	movgs		gr4,psr

	sethi.p		%hi(do_page_fault),gr5
	setlo		%lo(do_page_fault),gr5
	jmpl		@(gr5,gr0)	; call do_page_fault(1,esr0,ear0)
#endif
###############################################################################
#
# handle data and instruction access exceptions
# - both tail-call memory_access_exception(esr0, ear0, epcr0)
#
###############################################################################
	.globl		__entry_insn_access_exception
	.globl		__entry_data_access_exception
__entry_insn_access_exception:
__entry_data_access_exception:
	LEDS		0x6016
	sethi.p		%hi(memory_access_exception),gr5
	setlo		%lo(memory_access_exception),gr5
	movsg		esr0,gr8
	movsg		scr2,gr9	; saved EAR0
	movsg		epcr0,gr10

	# now that we've accessed the exception regs, we can enable exceptions
	movsg		psr,gr4
	ori		gr4,#PSR_ET,gr4
	movgs		gr4,psr
	jmpl		@(gr5,gr0)	; call memory_access_error(esr0,ear0,epcr0)
###############################################################################
#
# handle data access error
# - tail-calls data_access_error(esfr1, esr15, ear15)
# - NOTE(review): LEDS code 0x6016 duplicates the access-exception path above;
#   harmless (LEDS is debug-only) but possibly unintended — confirm
#
###############################################################################
	.globl		__entry_data_access_error
__entry_data_access_error:
	LEDS		0x6016
	sethi.p		%hi(data_access_error),gr5
	setlo		%lo(data_access_error),gr5
	movsg		esfr1,gr8
	movsg		esr15,gr9
	movsg		ear15,gr10

	# now that we've accessed the exception regs, we can enable exceptions
	movsg		psr,gr4
	ori		gr4,#PSR_ET,gr4
	movgs		gr4,psr
	jmpl		@(gr5,gr0)	; call data_access_error(esfr1,esr15,ear15)
###############################################################################
#
# handle data store error
# - tail-calls data_store_error(esfr1, esr14)
#
###############################################################################
	.globl		__entry_data_store_error
__entry_data_store_error:
	LEDS		0x6017
	sethi.p		%hi(data_store_error),gr5
	setlo		%lo(data_store_error),gr5
	movsg		esfr1,gr8
	movsg		esr14,gr9

	# now that we've accessed the exception regs, we can enable exceptions
	movsg		psr,gr4
	ori		gr4,#PSR_ET,gr4
	movgs		gr4,psr
	jmpl		@(gr5,gr0)	; call data_store_error(esfr1,esr14)
###############################################################################
#
# handle division exception
# - tail-calls division_exception(esfr1, esr0, isr)
#
###############################################################################
	.globl		__entry_division_exception
__entry_division_exception:
	LEDS		0x6018
	sethi.p		%hi(division_exception),gr5
	setlo		%lo(division_exception),gr5
	movsg		esfr1,gr8
	movsg		esr0,gr9
	movsg		isr,gr10

	# now that we've accessed the exception regs, we can enable exceptions
	movsg		psr,gr4
	ori		gr4,#PSR_ET,gr4
	movgs		gr4,psr
	jmpl		@(gr5,gr0)	; call div_excep(esfr1,esr0,isr)
###############################################################################
#
# handle compound exception
# - tail-calls compound_exception(esfr1, esr0, esr14, esr15, msr0, msr1)
#
###############################################################################
	.globl		__entry_compound_exception
__entry_compound_exception:
	LEDS		0x6019
	sethi.p		%hi(compound_exception),gr5
	setlo		%lo(compound_exception),gr5
	movsg		esfr1,gr8
	movsg		esr0,gr9
	movsg		esr14,gr10
	movsg		esr15,gr11
	movsg		msr0,gr12
	movsg		msr1,gr13

	# now that we've accessed the exception regs, we can enable exceptions
	movsg		psr,gr4
	ori		gr4,#PSR_ET,gr4
	movgs		gr4,psr
	jmpl		@(gr5,gr0)	; call comp_excep(esfr1,esr0,esr14,esr15,msr0,msr1)
###############################################################################
#
# handle interrupts and NMIs
# - re-enables exceptions then tail-calls into the C handler
#
###############################################################################
	.globl		__entry_do_IRQ
__entry_do_IRQ:
	LEDS		0x6020

	# we can enable exceptions
	movsg		psr,gr4
	ori		gr4,#PSR_ET,gr4
	movgs		gr4,psr
	bra		do_IRQ
	# NMI variant: same shape as __entry_do_IRQ but dispatches to do_NMI
	.globl		__entry_do_NMI
__entry_do_NMI:
	LEDS		0x6021

	# we can enable exceptions
	movsg		psr,gr4
	ori		gr4,#PSR_ET,gr4
	movgs		gr4,psr
	bra		do_NMI
###############################################################################
#
# the return path for a newly forked child process
# - __switch_to() saved the old current pointer in GR8 for us
# - calls schedule_tail() then joins the normal syscall exit path with a
#   zero return value in GR8
#
###############################################################################
	.globl		ret_from_fork
ret_from_fork:
	LEDS		0x6100
	call		schedule_tail

	# fork & co. return 0 to child
	setlos.p	#0,gr8
	bra		__syscall_exit
###################################################################################################
#
# Return to user mode is not as complex as all this looks,
# but we want the default path for a system call return to
# go as quickly as possible which is why some of this is
# less clear than it otherwise should be.
#
###################################################################################################
	.balign		L1_CACHE_BYTES
	.globl		system_call
system_call:
	LEDS		0x6101
	movsg		psr,gr4			; enable exceptions
	ori		gr4,#PSR_ET,gr4
	movgs		gr4,psr

	# stash the syscall number and the caller's original gr8 in the
	# exception frame
	sti		gr7,@(gr28,#REG_SYSCALLNO)
	sti.p		gr8,@(gr28,#REG_ORIG_GR8)

	# unsigned range check on the syscall number: subtract clears carry
	# when gr7 >= nr_syscalls
	subicc		gr7,#nr_syscalls,gr0,icc0
	bnc		icc0,#0,__syscall_badsys

	# check for syscall tracing
	# NOTE(review): the ORI below unconditionally sets _TIF_SYSCALL_TRACE in
	# the scratch copy of the flags, so the ANDICC/BNE pair always diverts
	# through __syscall_trace_entry - looks like a debugging leftover;
	# confirm before relying on the fast path
	ldi		@(gr15,#TI_FLAGS),gr4
	ori		gr4,#_TIF_SYSCALL_TRACE,gr4
	andicc		gr4,#_TIF_SYSCALL_TRACE,gr0,icc0
	bne		icc0,#0,__syscall_trace_entry

__syscall_call:
	# index the syscall vector table and invoke the service routine
	slli.p		gr7,#2,gr7		# gr7 = syscall number * sizeof(.long)
	sethi		%hi(sys_call_table),gr5
	setlo		%lo(sys_call_table),gr5
	ld		@(gr5,gr7),gr4
	calll		@(gr4,gr0)		# call sys_xxxx()
###############################################################################
#
# return to interrupted process
#
###############################################################################
__syscall_exit:
	LEDS		0x6300

	sti		gr8,@(gr28,#REG_GR(8))	; save return value

	# rebuild saved psr - execve will change it for init/main.c
	# (the saved S bit - one bit above PS - is shifted down and substituted
	# into the PS position, then S itself is forced back on)
	ldi		@(gr28,#REG_PSR),gr22
	srli		gr22,#1,gr5		# bring the bit above PS down one place
	andi.p		gr22,#~PSR_PS,gr22	# clear the old PS...
	andi		gr5,#PSR_PS,gr5		# ...and isolate the shifted-down bit
	or		gr5,gr22,gr22		# PS <- saved S
	ori		gr22,#PSR_S,gr22	# keep supervisor mode set

	# keep current PSR in GR23
	movsg		psr,gr23

	# make sure we don't miss an interrupt setting need_resched or sigpending between
	# sampling and the RETT
	ori		gr23,#PSR_PIL_14,gr23	# raise the interrupt priority level
	movgs		gr23,psr

	# anything outstanding (signal, resched, tracing...) before we go back?
	ldi		@(gr15,#TI_FLAGS),gr4
	sethi.p		%hi(_TIF_ALLWORK_MASK),gr5
	setlo		%lo(_TIF_ALLWORK_MASK),gr5
	andcc		gr4,gr5,gr0,icc0
	bne		icc0,#0,__syscall_exit_work
	# restore all registers and return
__entry_return_direct:
	LEDS		0x6301

	# exceptions must stay off across the register-restore / RETT sequence
	andi		gr22,#~PSR_ET,gr22
	movgs		gr22,psr

	# NB: LDDI loads an even/odd register *pair*, so the REG_CCR and REG_LR
	# loads below also fill gr25 (-> cccr) and gr27 (-> lcr)
	ldi		@(gr28,#REG_ISR),gr23
	lddi		@(gr28,#REG_CCR),gr24
	lddi		@(gr28,#REG_LR) ,gr26
	ldi		@(gr28,#REG_PC) ,gr21
	ldi		@(gr28,#REG_TBR),gr20

	movgs		gr20,tbr
	movgs		gr21,pcsr		# RETT resumes execution from PCSR
	movgs		gr23,isr
	movgs		gr24,ccr
	movgs		gr25,cccr
	movgs		gr26,lr
	movgs		gr27,lcr

	# restore the GR-extension and integer-accumulator special registers
	lddi		@(gr28,#REG_GNER0),gr4
	movgs		gr4,gner0
	movgs		gr5,gner1

	lddi		@(gr28,#REG_IACC0),gr4
	movgs		gr4,iacc0h
	movgs		gr5,iacc0l

	# restore the general registers in pairs; sp, gr2/gr3 and gr28 are
	# deferred until after the single-step check below
	lddi		@(gr28,#REG_GR(4)) ,gr4
	lddi		@(gr28,#REG_GR(6)) ,gr6
	lddi		@(gr28,#REG_GR(8)) ,gr8
	lddi		@(gr28,#REG_GR(10)),gr10
	lddi		@(gr28,#REG_GR(12)),gr12
	lddi		@(gr28,#REG_GR(14)),gr14
	lddi		@(gr28,#REG_GR(16)),gr16
	lddi		@(gr28,#REG_GR(18)),gr18
	lddi		@(gr28,#REG_GR(20)),gr20
	lddi		@(gr28,#REG_GR(22)),gr22
	lddi		@(gr28,#REG_GR(24)),gr24
	lddi		@(gr28,#REG_GR(26)),gr26
	ldi		@(gr28,#REG_GR(29)),gr29
	lddi		@(gr28,#REG_GR(30)),gr30

	# check to see if a debugging return is required
	LEDS		0x67f0
	movsg		ccr,gr2			# ANDICC below clobbers the flags...
	ldi		@(gr28,#REG__STATUS),gr3
	andicc		gr3,#REG__STATUS_STEP,gr0,icc0
	bne		icc0,#0,__entry_return_singlestep
	movgs		gr2,ccr			# ...so put them back afterwards

	# finish the restore: stack pointer, gr2/gr3 pair, and finally the
	# frame pointer register itself
	ldi		@(gr28,#REG_SP) ,sp
	lddi		@(gr28,#REG_GR(2)) ,gr2
	ldi		@(gr28,#REG_GR(28)),gr28

	LEDS		0x67fe
//	movsg		pcsr,gr31
//	LEDS32

#if 0
	# store the current frame in the workram on the FR451
	movgs		gr28,scr2
	sethi.p		%hi(0xfe800000),gr28
	setlo		%lo(0xfe800000),gr28

	stdi		gr2,@(gr28,#REG_GR(2))
	stdi		gr4,@(gr28,#REG_GR(4))
	stdi		gr6,@(gr28,#REG_GR(6))
	stdi		gr8,@(gr28,#REG_GR(8))
	stdi		gr10,@(gr28,#REG_GR(10))
	stdi		gr12,@(gr28,#REG_GR(12))
	stdi		gr14,@(gr28,#REG_GR(14))
	stdi		gr16,@(gr28,#REG_GR(16))
	stdi		gr18,@(gr28,#REG_GR(18))
	stdi		gr24,@(gr28,#REG_GR(24))
	stdi		gr26,@(gr28,#REG_GR(26))
	sti		gr29,@(gr28,#REG_GR(29))
	stdi		gr30,@(gr28,#REG_GR(30))

	movsg		tbr ,gr30
	sti		gr30,@(gr28,#REG_TBR)
	movsg		pcsr,gr30
	sti		gr30,@(gr28,#REG_PC)
	movsg		psr ,gr30
	sti		gr30,@(gr28,#REG_PSR)
	movsg		isr ,gr30
	sti		gr30,@(gr28,#REG_ISR)
	movsg		ccr ,gr30
	movsg		cccr,gr31
	stdi		gr30,@(gr28,#REG_CCR)
	movsg		lr ,gr30
	movsg		lcr ,gr31
	stdi		gr30,@(gr28,#REG_LR)
	sti		gr0 ,@(gr28,#REG_SYSCALLNO)
	movsg		scr2,gr28
#endif

	rett		#0		; resume at PCSR, re-enabling exceptions
	# return via break.S
	# - the task is being single-stepped: finish the register restore, then
	#   trap into the debug handler with BREAK instead of RETT
__entry_return_singlestep:
	movgs		gr2,ccr			# restore flags saved before the STEP test
	lddi		@(gr28,#REG_GR(2)) ,gr2
	ldi		@(gr28,#REG_SP) ,sp
	ldi		@(gr28,#REG_GR(28)),gr28

	LEDS		0x67ff
	break
	.globl		__entry_return_singlestep_breaks_here
__entry_return_singlestep_breaks_here:
	nop
###############################################################################
#
# return to a process interrupted in kernel space
# - we need to consider preemption if that is enabled
#
###############################################################################
	.balign		L1_CACHE_BYTES
__entry_return_from_kernel_exception:
	LEDS		0x6302

	# mask interrupts over the register restore
	movsg		psr,gr23
	ori		gr23,#PSR_PIL_14,gr23
	movgs		gr23,psr
	bra		__entry_return_direct

	.balign		L1_CACHE_BYTES
__entry_return_from_kernel_interrupt:
	LEDS		0x6303

	# mask interrupts while we decide whether to preempt
	movsg		psr,gr23
	ori		gr23,#PSR_PIL_14,gr23
	movgs		gr23,psr

#ifdef CONFIG_PREEMPT
	# NOTE(review): this branches straight to the direct return when
	# preempt_count == 0, i.e. it skips preemption exactly when the task is
	# preemptible; a BNE (skip when count is non-zero) would be expected
	# here - confirm against the FRV branch semantics
	ldi		@(gr15,#TI_PRE_COUNT),gr5
	subicc		gr5,#0,gr0,icc0
	beq		icc0,#0,__entry_return_direct

__entry_preempt_need_resched:
	# loop until need_resched is clear
	ldi		@(gr15,#TI_FLAGS),gr4
	andicc		gr4,#_TIF_NEED_RESCHED,gr0,icc0
	beq		icc0,#1,__entry_return_direct

	# NOTE(review): PREEMPT_ACTIVE is stored into TI_FLAGS, overwriting the
	# whole flag word, yet it is TI_PRE_COUNT that gets cleared after
	# schedule() below - this looks like it should target TI_PRE_COUNT;
	# confirm
	setlos		#PREEMPT_ACTIVE,gr5
	sti		gr5,@(gr15,#TI_FLAGS)

	# drop the interrupt mask while rescheduling
	andi		gr23,#~PSR_PIL,gr23
	movgs		gr23,psr

	call		schedule
	sti		gr0,@(gr15,#TI_PRE_COUNT)

	# re-mask interrupts and re-test need_resched
	movsg		psr,gr23
	ori		gr23,#PSR_PIL_14,gr23
	movgs		gr23,psr
	bra		__entry_preempt_need_resched
#else
	bra		__entry_return_direct
#endif
###############################################################################
#
# perform work that needs to be done immediately before resumption
#
###############################################################################
	.globl		__entry_return_from_user_exception
	.balign		L1_CACHE_BYTES
__entry_return_from_user_exception:
	LEDS		0x6501

__entry_resume_userspace:
	# make sure we don't miss an interrupt setting need_resched or sigpending between
	# sampling and the RETT
	movsg		psr,gr23
	ori		gr23,#PSR_PIL_14,gr23
	movgs		gr23,psr

__entry_return_from_user_interrupt:
	LEDS		0x6402

	# any work pending before we go back to userspace?
	ldi		@(gr15,#TI_FLAGS),gr4
	sethi.p		%hi(_TIF_WORK_MASK),gr5
	setlo		%lo(_TIF_WORK_MASK),gr5
	andcc		gr4,gr5,gr0,icc0
	beq		icc0,#1,__entry_return_direct

__entry_work_pending:
	LEDS		0x6404

	# rescheduling is handled before signal delivery
	andicc		gr4,#_TIF_NEED_RESCHED,gr0,icc0
	beq		icc0,#1,__entry_work_notifysig

__entry_work_resched:
	LEDS		0x6408

	# run the scheduler with the interrupt mask dropped
	movsg		psr,gr23
	andi		gr23,#~PSR_PIL,gr23
	movgs		gr23,psr
	call		schedule

	# re-mask interrupts before re-sampling the work flags
	movsg		psr,gr23
	ori		gr23,#PSR_PIL_14,gr23
	movgs		gr23,psr

	LEDS		0x6401

	# loop back through the resched path while need_resched remains set
	ldi		@(gr15,#TI_FLAGS),gr4
	sethi.p		%hi(_TIF_WORK_MASK),gr5
	setlo		%lo(_TIF_WORK_MASK),gr5
	andcc		gr4,gr5,gr0,icc0
	beq		icc0,#1,__entry_return_direct
	andicc		gr4,#_TIF_NEED_RESCHED,gr0,icc0
	bne		icc0,#1,__entry_work_resched

__entry_work_notifysig:
	LEDS		0x6410
	ori.p		gr4,#0,gr8		# gr8 = thread flags (handler argument)
	call		do_notify_resume
	bra		__entry_resume_userspace
	# perform syscall entry tracing
__syscall_trace_entry:
	LEDS		0x6320
	setlos.p	#0,gr8
	call		do_syscall_trace	; do_syscall_trace(0) - entry hook

	# reload the syscall number and argument registers from the frame (the
	# call clobbered them, and the tracer may have rewritten them - hence
	# the fresh range check below)
	ldi		@(gr28,#REG_SYSCALLNO),gr7
	lddi		@(gr28,#REG_GR(8)) ,gr8
	lddi		@(gr28,#REG_GR(10)),gr10
	lddi.p		@(gr28,#REG_GR(12)),gr12

	subicc		gr7,#nr_syscalls,gr0,icc0
	bnc		icc0,#0,__syscall_badsys
	bra		__syscall_call
	# perform syscall exit tracing
__syscall_exit_work:
	LEDS		0x6340

	# anything other than the trace flag is handled by the generic
	# work-pending path
	andicc		gr4,#_TIF_SYSCALL_TRACE,gr0,icc0
	beq		icc0,#1,__entry_work_pending

	movsg		psr,gr23
	andi		gr23,#~PSR_PIL,gr23	; could let do_syscall_trace() call schedule()
	movgs		gr23,psr

	setlos.p	#1,gr8
	call		do_syscall_trace	; do_syscall_trace(1) - exit hook
	bra		__entry_resume_userspace
	# syscall number out of range: fail the call with -ENOSYS
__syscall_badsys:
	LEDS		0x6380
	setlos		#-ENOSYS,gr8
	sti		gr8,@(gr28,#REG_GR(8))	; save return value
	bra		__entry_resume_userspace
###############################################################################
#
# syscall vector table
# - indexed by __syscall_call above as (syscall number * 4), so slot order is
#   ABI: retired calls keep a sys_ni_syscall placeholder rather than being
#   removed
#
###############################################################################
	.section .rodata
ALIGN
	.globl		sys_call_table
sys_call_table:
	.long sys_restart_syscall	/* 0 - old "setup()" system call, used for restarting */
	.long sys_exit
	.long sys_fork
	.long sys_read
	.long sys_write
	.long sys_open		/* 5 */
	.long sys_close
	.long sys_waitpid
	.long sys_creat
	.long sys_link
	.long sys_unlink	/* 10 */
	.long sys_execve
	.long sys_chdir
	.long sys_time
	.long sys_mknod
	.long sys_chmod		/* 15 */
	.long sys_lchown16
	.long sys_ni_syscall	/* old break syscall holder */
	.long sys_stat
	.long sys_lseek
	.long sys_getpid	/* 20 */
	.long sys_mount
	.long sys_oldumount
	.long sys_setuid16
	.long sys_getuid16
	.long sys_ni_syscall	// sys_stime		/* 25 */
	.long sys_ptrace
	.long sys_alarm
	.long sys_fstat
	.long sys_pause
	.long sys_utime		/* 30 */
	.long sys_ni_syscall	/* old stty syscall holder */
	.long sys_ni_syscall	/* old gtty syscall holder */
	.long sys_access
	.long sys_nice
	.long sys_ni_syscall	/* 35 */	/* old ftime syscall holder */
	.long sys_sync
	.long sys_kill
	.long sys_rename
	.long sys_mkdir
	.long sys_rmdir		/* 40 */
	.long sys_dup
	.long sys_pipe
	.long sys_times
	.long sys_ni_syscall	/* old prof syscall holder */
	.long sys_brk		/* 45 */
	.long sys_setgid16
	.long sys_getgid16
	.long sys_ni_syscall	// sys_signal
	.long sys_geteuid16
	.long sys_getegid16	/* 50 */
	.long sys_acct
	.long sys_umount	/* recycled never used phys( */
	.long sys_ni_syscall	/* old lock syscall holder */
	.long sys_ioctl
	.long sys_fcntl		/* 55 */
	.long sys_ni_syscall	/* old mpx syscall holder */
	.long sys_setpgid
	.long sys_ni_syscall	/* old ulimit syscall holder */
	.long sys_ni_syscall	/* old old uname syscall */
	.long sys_umask		/* 60 */
	.long sys_chroot
	.long sys_ustat
	.long sys_dup2
	.long sys_getppid
	.long sys_getpgrp	/* 65 */
	.long sys_setsid
	.long sys_sigaction
	.long sys_ni_syscall	// sys_sgetmask
	.long sys_ni_syscall	// sys_ssetmask
	.long sys_setreuid16	/* 70 */
	.long sys_setregid16
	.long sys_sigsuspend
	.long sys_ni_syscall	// sys_sigpending
	.long sys_sethostname
	.long sys_setrlimit	/* 75 */
	.long sys_ni_syscall	// sys_old_getrlimit
	.long sys_getrusage
	.long sys_gettimeofday
	.long sys_settimeofday
	.long sys_getgroups16	/* 80 */
	.long sys_setgroups16
	.long sys_ni_syscall	/* old_select slot */
	.long sys_symlink
	.long sys_lstat
	.long sys_readlink	/* 85 */
	.long sys_uselib
	.long sys_swapon
	.long sys_reboot
	.long sys_ni_syscall	// old_readdir
	.long sys_ni_syscall	/* 90 */	/* old_mmap slot */
	.long sys_munmap
	.long sys_truncate
	.long sys_ftruncate
	.long sys_fchmod
	.long sys_fchown16	/* 95 */
	.long sys_getpriority
	.long sys_setpriority
	.long sys_ni_syscall	/* old profil syscall holder */
	.long sys_statfs
	.long sys_fstatfs	/* 100 */
	.long sys_ni_syscall	/* ioperm for i386 */
	.long sys_socketcall
	.long sys_syslog
	.long sys_setitimer
	.long sys_getitimer	/* 105 */
	.long sys_newstat
	.long sys_newlstat
	.long sys_newfstat
	.long sys_ni_syscall	/* obsolete olduname( syscall */
	.long sys_ni_syscall	/* iopl for i386 */ /* 110 */
	.long sys_vhangup
	.long sys_ni_syscall	/* obsolete idle( syscall */
	.long sys_ni_syscall	/* vm86old for i386 */
	.long sys_wait4
	.long sys_swapoff	/* 115 */
	.long sys_sysinfo
	.long sys_ipc
	.long sys_fsync
	.long sys_sigreturn
	.long sys_clone		/* 120 */
	.long sys_setdomainname
	.long sys_newuname
	.long sys_ni_syscall	/* old "cacheflush" */
	.long sys_adjtimex
	.long sys_mprotect	/* 125 */
	.long sys_sigprocmask
	.long sys_ni_syscall	/* old "create_module" */
	.long sys_init_module
	.long sys_delete_module
	.long sys_ni_syscall	/* old "get_kernel_syms" */
	.long sys_quotactl
	.long sys_getpgid
	.long sys_fchdir
	.long sys_bdflush
	.long sys_sysfs		/* 135 */
	.long sys_personality
	.long sys_ni_syscall	/* for afs_syscall */
	.long sys_setfsuid16
	.long sys_setfsgid16
	.long sys_llseek	/* 140 */
	.long sys_getdents
	.long sys_select
	.long sys_flock
	.long sys_msync
	.long sys_readv		/* 145 */
	.long sys_writev
	.long sys_getsid
	.long sys_fdatasync
	.long sys_sysctl
	.long sys_mlock		/* 150 */
	.long sys_munlock
	.long sys_mlockall
	.long sys_munlockall
	.long sys_sched_setparam
	.long sys_sched_getparam	/* 155 */
	.long sys_sched_setscheduler
	.long sys_sched_getscheduler
	.long sys_sched_yield
	.long sys_sched_get_priority_max
	.long sys_sched_get_priority_min	/* 160 */
	.long sys_sched_rr_get_interval
	.long sys_nanosleep
	.long sys_mremap
	.long sys_setresuid16
	.long sys_getresuid16	/* 165 */
	.long sys_ni_syscall	/* for vm86 */
	.long sys_ni_syscall	/* Old sys_query_module */
	.long sys_poll
	.long sys_nfsservctl
	.long sys_setresgid16	/* 170 */
	.long sys_getresgid16
	.long sys_prctl
	.long sys_rt_sigreturn
	.long sys_rt_sigaction
	.long sys_rt_sigprocmask	/* 175 */
	.long sys_rt_sigpending
	.long sys_rt_sigtimedwait
	.long sys_rt_sigqueueinfo
	.long sys_rt_sigsuspend
	.long sys_pread64	/* 180 */
	.long sys_pwrite64
	.long sys_chown16
	.long sys_getcwd
	.long sys_capget
	.long sys_capset	/* 185 */
	.long sys_sigaltstack
	.long sys_sendfile
	.long sys_ni_syscall	/* streams1 */
	.long sys_ni_syscall	/* streams2 */
	.long sys_vfork		/* 190 */
	.long sys_getrlimit
	.long sys_mmap2
	.long sys_truncate64
	.long sys_ftruncate64
	.long sys_stat64	/* 195 */
	.long sys_lstat64
	.long sys_fstat64
	.long sys_lchown
	.long sys_getuid
	.long sys_getgid	/* 200 */
	.long sys_geteuid
	.long sys_getegid
	.long sys_setreuid
	.long sys_setregid
	.long sys_getgroups	/* 205 */
	.long sys_setgroups
	.long sys_fchown
	.long sys_setresuid
	.long sys_getresuid
	.long sys_setresgid	/* 210 */
	.long sys_getresgid
	.long sys_chown
	.long sys_setuid
	.long sys_setgid
	.long sys_setfsuid	/* 215 */
	.long sys_setfsgid
	.long sys_pivot_root
	.long sys_mincore
	.long sys_madvise
	.long sys_getdents64	/* 220 */
	.long sys_fcntl64
	.long sys_ni_syscall	/* reserved for TUX */
	.long sys_ni_syscall	/* Reserved for Security */
	.long sys_gettid
	.long sys_readahead	/* 225 */
	.long sys_setxattr
	.long sys_lsetxattr
	.long sys_fsetxattr
	.long sys_getxattr
	.long sys_lgetxattr	/* 230 */
	.long sys_fgetxattr
	.long sys_listxattr
	.long sys_llistxattr
	.long sys_flistxattr
	.long sys_removexattr	/* 235 */
	.long sys_lremovexattr
	.long sys_fremovexattr
	.long sys_tkill
	.long sys_sendfile64
	.long sys_futex		/* 240 */
	.long sys_sched_setaffinity
	.long sys_sched_getaffinity
	.long sys_ni_syscall	//sys_set_thread_area
	.long sys_ni_syscall	//sys_get_thread_area
	.long sys_io_setup	/* 245 */
	.long sys_io_destroy
	.long sys_io_getevents
	.long sys_io_submit
	.long sys_io_cancel
	.long sys_fadvise64	/* 250 */
	.long sys_ni_syscall
	.long sys_exit_group
	.long sys_lookup_dcookie
	.long sys_epoll_create
	.long sys_epoll_ctl	/* 255 */
	.long sys_epoll_wait
	.long sys_remap_file_pages
	.long sys_set_tid_address
	.long sys_timer_create
	.long sys_timer_settime		/* 260 */
	.long sys_timer_gettime
	.long sys_timer_getoverrun
	.long sys_timer_delete
	.long sys_clock_settime
	.long sys_clock_gettime		/* 265 */
	.long sys_clock_getres
	.long sys_clock_nanosleep
	.long sys_statfs64
	.long sys_fstatfs64
	.long sys_tgkill	/* 270 */
	.long sys_utimes
	.long sys_fadvise64_64
	.long sys_ni_syscall	/* sys_vserver */
	.long sys_mbind
	.long sys_get_mempolicy
	.long sys_set_mempolicy
	.long sys_mq_open
	.long sys_mq_unlink
	.long sys_mq_timedsend
	.long sys_mq_timedreceive	/* 280 */
	.long sys_mq_notify
	.long sys_mq_getsetattr
	.long sys_ni_syscall		/* reserved for kexec */
	.long sys_waitid
	.long sys_ni_syscall		/* 285 */ /* available */
	.long sys_add_key
	.long sys_request_key
	.long sys_keyctl
	.long sys_ioprio_set
	.long sys_ioprio_get		/* 290 */
	.long sys_inotify_init
	.long sys_inotify_add_watch
	.long sys_inotify_rm_watch
	.long sys_migrate_pages
	.long sys_openat		/* 295 */
	.long sys_mkdirat
	.long sys_mknodat
	.long sys_fchownat
	.long sys_futimesat
	.long sys_fstatat64		/* 300 */
	.long sys_unlinkat
	.long sys_renameat
	.long sys_linkat
	.long sys_symlinkat
	.long sys_readlinkat		/* 305 */
	.long sys_fchmodat
	.long sys_faccessat
	.long sys_pselect6
	.long sys_ppoll

syscall_table_size = (. - sys_call_table)