/* entry.S — FR-V kernel entry path (text-extraction artifacts removed here) */
  1. /* entry.S: FR-V entry
  2. *
  3. * Copyright (C) 2003 Red Hat, Inc. All Rights Reserved.
  4. * Written by David Howells (dhowells@redhat.com)
  5. *
  6. * This program is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU General Public License
  8. * as published by the Free Software Foundation; either version
  9. * 2 of the License, or (at your option) any later version.
  10. *
  11. *
  12. * Entry to the kernel is "interesting":
  13. * (1) There are no stack pointers, not even for the kernel
  14. * (2) General Registers should not be clobbered
  15. * (3) There are no kernel-only data registers
  16. * (4) Since all addressing modes are wrt to a General Register, no global
  17. * variables can be reached
  18. *
  19. * We deal with this by declaring that we shall kill GR28 on entering the
  20. * kernel from userspace
  21. *
  22. * However, since break interrupts can interrupt the CPU even when PSR.ET==0,
  23. * they can't rely on GR28 to be anything useful, and so need to clobber a
  24. * separate register (GR31). Break interrupts are managed in break.S
  25. *
  26. * GR29 _is_ saved, and holds the current task pointer globally
  27. *
  28. */
  29. #include <linux/sys.h>
  30. #include <linux/linkage.h>
  31. #include <asm/thread_info.h>
  32. #include <asm/setup.h>
  33. #include <asm/segment.h>
  34. #include <asm/ptrace.h>
  35. #include <asm/errno.h>
  36. #include <asm/cache.h>
  37. #include <asm/spr-regs.h>
  38. #define nr_syscalls ((syscall_table_size)/4)
	.text
	.balign		4

# Debug macro: show \val on the board LED block (presumably the debug LEDs at
# 0xe1200004/0xffc00100 — confirm against the board docs).  The body is left
# commented out so production kernels carry no overhead; uncomment to trace
# progress through the entry paths.  Clobbers GR30/GR31 when enabled.
.macro LEDS val
#	sethi.p		%hi(0xe1200004),gr30
#	setlo		%lo(0xe1200004),gr30
#	setlos		#~\val,gr31
#	st		gr31,@(gr30,gr0)
#	sethi.p		%hi(0xffc00100),gr30
#	setlo		%lo(0xffc00100),gr30
#	sth		gr0,@(gr30,gr0)
#	membar
.endm

# Debug macro: show the 32-bit value in GR31 across both LED banks.
# Also commented out; clobbers GR30/GR31 when enabled.
.macro LEDS32
#	not		gr31,gr31
#	sethi.p		%hi(0xe1200004),gr30
#	setlo		%lo(0xe1200004),gr30
#	st.p		gr31,@(gr30,gr0)
#	srli		gr31,#16,gr31
#	sethi.p		%hi(0xffc00100),gr30
#	setlo		%lo(0xffc00100),gr30
#	sth		gr31,@(gr30,gr0)
#	membar
.endm
###############################################################################
#
# entry point for External interrupts received whilst executing userspace code
#
###############################################################################
	.globl		__entry_uspace_external_interrupt
	.type		__entry_uspace_external_interrupt,@function
__entry_uspace_external_interrupt:
	LEDS		0x6200

	# we are allowed to clobber GR28 on entry from userspace (see the notes
	# at the top of this file); point it at the fixed kernel exception frame
	sethi.p		%hi(__kernel_frame0_ptr),gr28
	setlo		%lo(__kernel_frame0_ptr),gr28
	ldi		@(gr28,#0),gr28

	# handle h/w single-step through exceptions
	sti		gr0,@(gr28,#REG__STATUS)

	.globl		__entry_uspace_external_interrupt_reentry
__entry_uspace_external_interrupt_reentry:
	LEDS		0x6201

	# touch the frame's cachelines ahead of the burst of stores below
	# (dcpl: presumably a data-cache preload of REG__END bytes at GR28 —
	#  confirm against the FR-V ISA manual)
	setlos		#REG__END,gr30
	dcpl		gr28,gr30,#0

	# finish building the exception frame
	sti		sp,  @(gr28,#REG_SP)
	stdi		gr2, @(gr28,#REG_GR(2))
	stdi		gr4, @(gr28,#REG_GR(4))
	stdi		gr6, @(gr28,#REG_GR(6))
	stdi		gr8, @(gr28,#REG_GR(8))
	stdi		gr10,@(gr28,#REG_GR(10))
	stdi		gr12,@(gr28,#REG_GR(12))
	stdi		gr14,@(gr28,#REG_GR(14))
	stdi		gr16,@(gr28,#REG_GR(16))
	stdi		gr18,@(gr28,#REG_GR(18))
	stdi		gr20,@(gr28,#REG_GR(20))
	stdi		gr22,@(gr28,#REG_GR(22))
	stdi		gr24,@(gr28,#REG_GR(24))
	stdi		gr26,@(gr28,#REG_GR(26))
	sti		gr0, @(gr28,#REG_GR(28))	; GR28 was clobbered on entry: save as 0
	sti		gr29,@(gr28,#REG_GR(29))
	stdi.p		gr30,@(gr28,#REG_GR(30))

	# set up the kernel stack pointer
	ori		gr28,0,sp

	# capture the control state; PCSR holds the interrupted PC here
	movsg		tbr ,gr20
	movsg		psr ,gr22
	movsg		pcsr,gr21
	movsg		isr ,gr23
	movsg		ccr ,gr24
	movsg		cccr,gr25
	movsg		lr  ,gr26
	movsg		lcr ,gr27

	# REG_SYSCALLNO = -1 marks a non-syscall frame
	setlos.p	#-1,gr4
	andi		gr22,#PSR_PS,gr5	/* try to rebuild original PSR value */
	andi.p		gr22,#~(PSR_PS|PSR_S),gr6
	slli		gr5,#1,gr5		/* PSR.PS -> PSR.S position */
	or		gr6,gr5,gr5
	andi		gr5,#~PSR_ET,gr5

	sti		gr20,@(gr28,#REG_TBR)
	sti		gr21,@(gr28,#REG_PC)
	sti		gr5 ,@(gr28,#REG_PSR)
	sti		gr23,@(gr28,#REG_ISR)
	stdi		gr24,@(gr28,#REG_CCR)
	stdi		gr26,@(gr28,#REG_LR)
	sti		gr4 ,@(gr28,#REG_SYSCALLNO)

	movsg		iacc0h,gr4
	movsg		iacc0l,gr5
	stdi		gr4,@(gr28,#REG_IACC0)

	movsg		gner0,gr4
	movsg		gner1,gr5
	stdi.p		gr4,@(gr28,#REG_GNER0)

	# interrupts start off fully disabled in the interrupt handler
	subcc		gr0,gr0,gr0,icc2	/* set Z and clear C */

	# set up kernel global registers
	sethi.p		%hi(__kernel_current_task),gr5
	setlo		%lo(__kernel_current_task),gr5
	sethi.p		%hi(_gp),gr16
	setlo		%lo(_gp),gr16
	ldi		@(gr5,#0),gr29
	ldi.p		@(gr29,#4),gr15		; __current_thread_info = current->thread_info

	# make sure we (the kernel) get div-zero and misalignment exceptions
	setlos		#ISR_EDE|ISR_DTT_DIVBYZERO|ISR_EMAM_EXCEPTION,gr5
	movgs		gr5,isr

	# switch to the kernel trap table
	sethi.p		%hi(__entry_kerneltrap_table),gr6
	setlo		%lo(__entry_kerneltrap_table),gr6
	movgs		gr6,tbr

	# set the return address
	sethi.p		%hi(__entry_return_from_user_interrupt),gr4
	setlo		%lo(__entry_return_from_user_interrupt),gr4
	movgs		gr4,lr

	# raise the minimum interrupt priority to 15 (NMI only) and enable exceptions
	movsg		psr,gr4
	ori		gr4,#PSR_PIL_14,gr4
	movgs		gr4,psr
	ori		gr4,#PSR_PIL_14|PSR_ET,gr4
	movgs		gr4,psr

	LEDS		0x6202
	bra		do_IRQ

	.size		__entry_uspace_external_interrupt,.-__entry_uspace_external_interrupt
###############################################################################
#
# entry point for External interrupts received whilst executing kernel code
# - on arriving here, the following registers should already be set up:
#	GR15	- current thread_info struct pointer
#	GR16	- kernel GP-REL pointer
#	GR29	- current task struct pointer
#	TBR	- kernel trap vector table
#	ISR	- kernel's preferred integer controls
#
###############################################################################
	.globl		__entry_kernel_external_interrupt
	.type		__entry_kernel_external_interrupt,@function
__entry_kernel_external_interrupt:
	LEDS		0x6210
//	sub		sp,gr15,gr31
//	LEDS32

	# set up the stack pointer: push an exception frame on the current
	# kernel stack, recording the old SP inside it
	or.p		sp,gr0,gr30
	subi		sp,#REG__END,sp
	sti		gr30,@(sp,#REG_SP)

	# handle h/w single-step through exceptions
	sti		gr0,@(sp,#REG__STATUS)

	.globl		__entry_kernel_external_interrupt_reentry
__entry_kernel_external_interrupt_reentry:
	LEDS		0x6211

	# set up the exception frame (dcpl touches its cachelines first)
	setlos		#REG__END,gr30
	dcpl		sp,gr30,#0

	sti.p		gr28,@(sp,#REG_GR(28))
	ori		sp,0,gr28		; GR28 = exception frame pointer

	# finish building the exception frame
	stdi		gr2,@(gr28,#REG_GR(2))
	stdi		gr4,@(gr28,#REG_GR(4))
	stdi		gr6,@(gr28,#REG_GR(6))
	stdi		gr8,@(gr28,#REG_GR(8))
	stdi		gr10,@(gr28,#REG_GR(10))
	stdi		gr12,@(gr28,#REG_GR(12))
	stdi		gr14,@(gr28,#REG_GR(14))
	stdi		gr16,@(gr28,#REG_GR(16))
	stdi		gr18,@(gr28,#REG_GR(18))
	stdi		gr20,@(gr28,#REG_GR(20))
	stdi		gr22,@(gr28,#REG_GR(22))
	stdi		gr24,@(gr28,#REG_GR(24))
	stdi		gr26,@(gr28,#REG_GR(26))
	sti		gr29,@(gr28,#REG_GR(29))
	stdi.p		gr30,@(gr28,#REG_GR(30))

	# note virtual interrupts will be fully enabled upon return
	subicc		gr0,#1,gr0,icc2		/* clear Z, set C */

	# capture the control state; PCSR holds the interrupted PC here
	movsg		tbr ,gr20
	movsg		psr ,gr22
	movsg		pcsr,gr21
	movsg		isr ,gr23
	movsg		ccr ,gr24
	movsg		cccr,gr25
	movsg		lr  ,gr26
	movsg		lcr ,gr27

	# REG_SYSCALLNO = -1 marks a non-syscall frame
	setlos.p	#-1,gr4
	andi		gr22,#PSR_PS,gr5	/* try to rebuild original PSR value */
	andi.p		gr22,#~(PSR_PS|PSR_S),gr6
	slli		gr5,#1,gr5
	or		gr6,gr5,gr5
	andi.p		gr5,#~PSR_ET,gr5

	# set CCCR.CC3 to Undefined to abort atomic-modify completion inside the kernel
	# - for an explanation of how it works, see: Documentation/fujitsu/frv/atomic-ops.txt
	andi		gr25,#~0xc0,gr25

	sti		gr20,@(gr28,#REG_TBR)
	sti		gr21,@(gr28,#REG_PC)
	sti		gr5 ,@(gr28,#REG_PSR)
	sti		gr23,@(gr28,#REG_ISR)
	stdi		gr24,@(gr28,#REG_CCR)
	stdi		gr26,@(gr28,#REG_LR)
	sti		gr4 ,@(gr28,#REG_SYSCALLNO)

	movsg		iacc0h,gr4
	movsg		iacc0l,gr5
	stdi		gr4,@(gr28,#REG_IACC0)

	movsg		gner0,gr4
	movsg		gner1,gr5
	stdi.p		gr4,@(gr28,#REG_GNER0)

	# interrupts start off fully disabled in the interrupt handler
	subcc		gr0,gr0,gr0,icc2	/* set Z and clear C */

	# set the return address
	sethi.p		%hi(__entry_return_from_kernel_interrupt),gr4
	setlo		%lo(__entry_return_from_kernel_interrupt),gr4
	movgs		gr4,lr

	# clear power-saving mode flags
	movsg		hsr0,gr4
	andi		gr4,#~HSR0_PDM,gr4
	movgs		gr4,hsr0

	# raise the minimum interrupt priority to 15 (NMI only) and enable exceptions
	movsg		psr,gr4
	ori		gr4,#PSR_PIL_14,gr4
	movgs		gr4,psr
	ori		gr4,#PSR_ET,gr4
	movgs		gr4,psr

	LEDS		0x6212
	bra		do_IRQ

	.size		__entry_kernel_external_interrupt,.-__entry_kernel_external_interrupt
###############################################################################
#
# deal with interrupts that were actually virtually disabled
# - we need to really disable them, flag the fact and return immediately
# - if you change this, you must alter break.S also
#
###############################################################################
	.balign		L1_CACHE_BYTES
	.globl		__entry_kernel_external_interrupt_virtually_disabled
	.type		__entry_kernel_external_interrupt_virtually_disabled,@function
__entry_kernel_external_interrupt_virtually_disabled:
	# really mask interrupts in hardware (PIL=14: debugging/NMI only),
	# leave the virtual-disable state in ICC2, and resume the interruptee
	movsg		psr,gr30
	andi		gr30,#~PSR_PIL,gr30
	ori		gr30,#PSR_PIL_14,gr30	; debugging interrupts only
	movgs		gr30,psr
	subcc		gr0,gr0,gr0,icc2	; leave Z set, clear C
	rett		#0

	.size		__entry_kernel_external_interrupt_virtually_disabled,.-__entry_kernel_external_interrupt_virtually_disabled
###############################################################################
#
# deal with re-enablement of interrupts that were pending when virtually re-enabled
# - set ICC2.C, re-enable the real interrupts and return
# - we can clear ICC2.Z because we shouldn't be here if it's not 0 [due to TIHI]
# - if you change this, you must alter break.S also
#
###############################################################################
	.balign		L1_CACHE_BYTES
	.globl		__entry_kernel_external_interrupt_virtual_reenable
	.type		__entry_kernel_external_interrupt_virtual_reenable,@function
__entry_kernel_external_interrupt_virtual_reenable:
	# drop the PIL mask back to 0 so the pending interrupt can be delivered
	movsg		psr,gr30
	andi		gr30,#~PSR_PIL,gr30	; re-enable interrupts
	movgs		gr30,psr
	subicc		gr0,#1,gr0,icc2		; clear Z, set C
	rett		#0

	.size		__entry_kernel_external_interrupt_virtual_reenable,.-__entry_kernel_external_interrupt_virtual_reenable
###############################################################################
#
# entry point for Software and Progam interrupts generated whilst executing userspace code
#
###############################################################################
	.globl		__entry_uspace_softprog_interrupt
	.type		__entry_uspace_softprog_interrupt,@function
	.globl		__entry_uspace_handle_mmu_fault
__entry_uspace_softprog_interrupt:
	LEDS		0x6000
#ifdef CONFIG_MMU
	# stash the fault address in SCR2 before it can be lost
	# (EAR0 is clobbered by ICI & ICEF insns on the FR451 — see the
	#  __entry_common register notes below)
	movsg		ear0,gr28

__entry_uspace_handle_mmu_fault:
	# NOTE: TLB-miss handlers enter here with GR28 already holding the
	# fault address — presumably EAR0; confirm against tlb-miss.S
	movgs		gr28,scr2
#endif
	sethi.p		%hi(__kernel_frame0_ptr),gr28
	setlo		%lo(__kernel_frame0_ptr),gr28
	ldi		@(gr28,#0),gr28

	# handle h/w single-step through exceptions
	sti		gr0,@(gr28,#REG__STATUS)

	.globl		__entry_uspace_softprog_interrupt_reentry
__entry_uspace_softprog_interrupt_reentry:
	LEDS		0x6001

	# touch the frame's cachelines ahead of the stores
	setlos		#REG__END,gr30
	dcpl		gr28,gr30,#0

	# set up the kernel stack pointer
	sti.p		sp,@(gr28,#REG_SP)
	ori		gr28,0,sp

	sti		gr0,@(gr28,#REG_GR(28))		; GR28 was clobbered: save as 0

	# save GR20-GR23 here so __entry_common may use them as scratch
	stdi		gr20,@(gr28,#REG_GR(20))
	stdi		gr22,@(gr28,#REG_GR(22))

	movsg		tbr,gr20
	movsg		pcsr,gr21
	movsg		psr,gr22

	# hand off to the common tail with the user-exception return handler
	sethi.p		%hi(__entry_return_from_user_exception),gr23
	setlo		%lo(__entry_return_from_user_exception),gr23

	bra		__entry_common

	.size		__entry_uspace_softprog_interrupt,.-__entry_uspace_softprog_interrupt
  329. # single-stepping was disabled on entry to a TLB handler that then faulted
  330. #ifdef CONFIG_MMU
  331. .globl __entry_uspace_handle_mmu_fault_sstep
  332. __entry_uspace_handle_mmu_fault_sstep:
  333. movgs gr28,scr2
  334. sethi.p %hi(__kernel_frame0_ptr),gr28
  335. setlo %lo(__kernel_frame0_ptr),gr28
  336. ldi @(gr28,#0),gr28
  337. # flag single-step re-enablement
  338. sti gr0,@(gr28,#REG__STATUS)
  339. bra __entry_uspace_softprog_interrupt_reentry
  340. #endif
###############################################################################
#
# entry point for Software and Progam interrupts generated whilst executing kernel code
#
###############################################################################
	.globl		__entry_kernel_softprog_interrupt
	.type		__entry_kernel_softprog_interrupt,@function
__entry_kernel_softprog_interrupt:
	LEDS		0x6004
#ifdef CONFIG_MMU
	# stash the fault address in SCR2 before it can be lost
	# (EAR0 is clobbered by ICI & ICEF insns on the FR451)
	movsg		ear0,gr30
	movgs		gr30,scr2
#endif

	.globl		__entry_kernel_handle_mmu_fault
__entry_kernel_handle_mmu_fault:
	# set up the stack pointer
	# - SP is stored at both REG_SP and REG_SP-4 so that whichever slot the
	#   8-byte realignment below makes current still holds the old value
	subi		sp,#REG__END,sp
	sti		sp,@(sp,#REG_SP)
	sti		sp,@(sp,#REG_SP-4)
	andi		sp,#~7,sp

	# handle h/w single-step through exceptions
	sti		gr0,@(sp,#REG__STATUS)

	.globl		__entry_kernel_softprog_interrupt_reentry
__entry_kernel_softprog_interrupt_reentry:
	LEDS		0x6005

	# touch the frame's cachelines ahead of the stores
	setlos		#REG__END,gr30
	dcpl		sp,gr30,#0

	# set up the exception frame
	sti.p		gr28,@(sp,#REG_GR(28))
	ori		sp,0,gr28		; GR28 = exception frame pointer

	# save GR20-GR23 here so __entry_common may use them as scratch
	stdi		gr20,@(gr28,#REG_GR(20))
	stdi		gr22,@(gr28,#REG_GR(22))

	ldi		@(sp,#REG_SP),gr22	/* reconstruct the old SP */
	addi		gr22,#REG__END,gr22
	sti		gr22,@(sp,#REG_SP)

	# set CCCR.CC3 to Undefined to abort atomic-modify completion inside the kernel
	# - for an explanation of how it works, see: Documentation/fujitsu/frv/atomic-ops.txt
	movsg		cccr,gr20
	andi		gr20,#~0xc0,gr20
	movgs		gr20,cccr

	movsg		tbr,gr20
	movsg		pcsr,gr21
	movsg		psr,gr22

	# hand off to the common tail with the kernel-exception return handler
	sethi.p		%hi(__entry_return_from_kernel_exception),gr23
	setlo		%lo(__entry_return_from_kernel_exception),gr23

	bra		__entry_common

	.size		__entry_kernel_softprog_interrupt,.-__entry_kernel_softprog_interrupt
# single-stepping was disabled on entry to a TLB handler that then faulted
#ifdef CONFIG_MMU
	.globl		__entry_kernel_handle_mmu_fault_sstep
__entry_kernel_handle_mmu_fault_sstep:
	# set up the stack pointer (same dual-store/realign trick as
	# __entry_kernel_handle_mmu_fault)
	subi		sp,#REG__END,sp
	sti		sp,@(sp,#REG_SP)
	sti		sp,@(sp,#REG_SP-4)
	andi		sp,#~7,sp

	# flag single-step re-enablement so the return path turns stepping back on
	sethi		#REG__STATUS_STEP,gr30
	sti		gr30,@(sp,#REG__STATUS)
	bra		__entry_kernel_softprog_interrupt_reentry
#endif
###############################################################################
#
# the rest of the kernel entry point code
# - on arriving here, the following registers should be set up:
#	GR1	- kernel stack pointer
#	GR7	- syscall number (trap 0 only)
#	GR8-13	- syscall args (trap 0 only)
#	GR20	- saved TBR
#	GR21	- saved PC
#	GR22	- saved PSR
#	GR23	- return handler address
#	GR28	- exception frame on stack
#	SCR2	- saved EAR0 where applicable (clobbered by ICI & ICEF insns on FR451)
#	PSR	- PSR.S 1, PSR.ET 0
#
###############################################################################
	.globl		__entry_common
	.type		__entry_common,@function
__entry_common:
	LEDS		0x6008

	# finish building the exception frame
	# (GR20-GR23 were already saved by the specific entry stub that jumped here)
	stdi		gr2,@(gr28,#REG_GR(2))
	stdi		gr4,@(gr28,#REG_GR(4))
	stdi		gr6,@(gr28,#REG_GR(6))
	stdi		gr8,@(gr28,#REG_GR(8))
	stdi		gr10,@(gr28,#REG_GR(10))
	stdi		gr12,@(gr28,#REG_GR(12))
	stdi		gr14,@(gr28,#REG_GR(14))
	stdi		gr16,@(gr28,#REG_GR(16))
	stdi		gr18,@(gr28,#REG_GR(18))
	stdi		gr24,@(gr28,#REG_GR(24))
	stdi		gr26,@(gr28,#REG_GR(26))
	sti		gr29,@(gr28,#REG_GR(29))
	stdi		gr30,@(gr28,#REG_GR(30))

	movsg		lcr ,gr27
	movsg		lr  ,gr26
	movgs		gr23,lr			; LR = return handler passed in by the stub
	movsg		cccr,gr25
	movsg		ccr ,gr24
	movsg		isr ,gr23		; GR23 now reused to hold ISR

	# REG_SYSCALLNO = -1 marks a non-syscall frame
	setlos.p	#-1,gr4
	andi		gr22,#PSR_PS,gr5	/* try to rebuild original PSR value */
	andi.p		gr22,#~(PSR_PS|PSR_S),gr6
	slli		gr5,#1,gr5		/* PSR.PS -> PSR.S position */
	or		gr6,gr5,gr5
	andi		gr5,#~PSR_ET,gr5

	sti		gr20,@(gr28,#REG_TBR)
	sti		gr21,@(gr28,#REG_PC)
	sti		gr5 ,@(gr28,#REG_PSR)
	sti		gr23,@(gr28,#REG_ISR)
	stdi		gr24,@(gr28,#REG_CCR)
	stdi		gr26,@(gr28,#REG_LR)
	sti		gr4 ,@(gr28,#REG_SYSCALLNO)

	movsg		iacc0h,gr4
	movsg		iacc0l,gr5
	stdi		gr4,@(gr28,#REG_IACC0)

	movsg		gner0,gr4
	movsg		gner1,gr5
	stdi.p		gr4,@(gr28,#REG_GNER0)

	# set up virtual interrupt disablement
	subicc		gr0,#1,gr0,icc2		/* clear Z flag, set C flag */

	# set up kernel global registers
	sethi.p		%hi(__kernel_current_task),gr5
	setlo		%lo(__kernel_current_task),gr5
	sethi.p		%hi(_gp),gr16
	setlo		%lo(_gp),gr16
	ldi		@(gr5,#0),gr29
	ldi		@(gr29,#4),gr15		; __current_thread_info = current->thread_info

	# switch to the kernel trap table
	sethi.p		%hi(__entry_kerneltrap_table),gr6
	setlo		%lo(__entry_kerneltrap_table),gr6
	movgs		gr6,tbr

	# make sure we (the kernel) get div-zero and misalignment exceptions
	setlos		#ISR_EDE|ISR_DTT_DIVBYZERO|ISR_EMAM_EXCEPTION,gr5
	movgs		gr5,isr

	# clear power-saving mode flags
	movsg		hsr0,gr4
	andi		gr4,#~HSR0_PDM,gr4
	movgs		gr4,hsr0

	# multiplex again using old TBR as a guide: extract the trap type (TBR.TT)
	# from the saved TBR and dispatch through __entry_vector_table[]
	setlos.p	#TBR_TT,gr3
	sethi		%hi(__entry_vector_table),gr6
	and.p		gr20,gr3,gr5
	setlo		%lo(__entry_vector_table),gr6
	srli		gr5,#2,gr5
	ld		@(gr5,gr6),gr5

	LEDS		0x6009
	jmpl		@(gr5,gr0)

	.size		__entry_common,.-__entry_common
###############################################################################
#
# handle instruction MMU fault
#
###############################################################################
#ifdef CONFIG_MMU
	.globl		__entry_insn_mmu_fault
__entry_insn_mmu_fault:
	LEDS		0x6010
	# marshal the C arguments: datammu=0, ESR0, saved EAR0 (from SCR2)
	setlos		#0,gr8
	movsg		esr0,gr9
	movsg		scr2,gr10

	# now that we've accessed the exception regs, we can enable exceptions
	movsg		psr,gr4
	ori		gr4,#PSR_ET,gr4
	movgs		gr4,psr

	sethi.p		%hi(do_page_fault),gr5
	setlo		%lo(do_page_fault),gr5
	jmpl		@(gr5,gr0)	; call do_page_fault(0,esr0,ear0)
#endif
###############################################################################
#
# handle instruction access error
#
###############################################################################
	.globl		__entry_insn_access_error
__entry_insn_access_error:
	LEDS		0x6011
	sethi.p		%hi(insn_access_error),gr5
	setlo		%lo(insn_access_error),gr5
	# marshal the exception status registers into the C argument regs GR8-GR10
	movsg		esfr1,gr8
	movsg		epcr0,gr9
	movsg		esr0,gr10

	# now that we've accessed the exception regs, we can enable exceptions
	movsg		psr,gr4
	ori		gr4,#PSR_ET,gr4
	movgs		gr4,psr
	jmpl		@(gr5,gr0)	; call insn_access_error(esfr1,epcr0,esr0)
###############################################################################
#
# handle various instructions of dubious legality
#
###############################################################################
	.globl		__entry_unsupported_trap
	.globl		__entry_illegal_instruction
	.globl		__entry_privileged_instruction
	.globl		__entry_debug_exception
__entry_unsupported_trap:
	# back the saved PC up over the offending trap insn so the frame points
	# at it, then fall through to the common illegal-instruction path
	subi		gr21,#4,gr21
	sti		gr21,@(gr28,#REG_PC)
__entry_illegal_instruction:
__entry_privileged_instruction:
__entry_debug_exception:
	LEDS		0x6012
	sethi.p		%hi(illegal_instruction),gr5
	setlo		%lo(illegal_instruction),gr5
	# marshal the exception status registers into the C argument regs GR8-GR10
	movsg		esfr1,gr8
	movsg		epcr0,gr9
	movsg		esr0,gr10

	# now that we've accessed the exception regs, we can enable exceptions
	movsg		psr,gr4
	ori		gr4,#PSR_ET,gr4
	movgs		gr4,psr
	jmpl		@(gr5,gr0)	; call ill_insn(esfr1,epcr0,esr0)
###############################################################################
#
# handle media exception
#
###############################################################################
	.globl		__entry_media_exception
__entry_media_exception:
	LEDS		0x6013
	sethi.p		%hi(media_exception),gr5
	setlo		%lo(media_exception),gr5
	# marshal the media status registers into the C argument regs GR8-GR9
	movsg		msr0,gr8
	movsg		msr1,gr9

	# now that we've accessed the exception regs, we can enable exceptions
	movsg		psr,gr4
	ori		gr4,#PSR_ET,gr4
	movgs		gr4,psr
	jmpl		@(gr5,gr0)	; call media_excep(msr0,msr1)
###############################################################################
#
# handle data MMU fault
# handle data DAT fault (write-protect exception)
#
###############################################################################
#ifdef CONFIG_MMU
	.globl		__entry_data_mmu_fault
__entry_data_mmu_fault:
	.globl		__entry_data_dat_fault
__entry_data_dat_fault:
	LEDS		0x6014
	# marshal the C arguments: datammu=1, ESR0, saved EAR0 (from SCR2)
	setlos		#1,gr8
	movsg		esr0,gr9
	movsg		scr2,gr10	; saved EAR0

	# now that we've accessed the exception regs, we can enable exceptions
	movsg		psr,gr4
	ori		gr4,#PSR_ET,gr4
	movgs		gr4,psr

	sethi.p		%hi(do_page_fault),gr5
	setlo		%lo(do_page_fault),gr5
	jmpl		@(gr5,gr0)	; call do_page_fault(1,esr0,ear0)
#endif
###############################################################################
#
# handle data and instruction access exceptions
#
###############################################################################
	.globl		__entry_insn_access_exception
	.globl		__entry_data_access_exception
__entry_insn_access_exception:
__entry_data_access_exception:
	LEDS		0x6016
	sethi.p		%hi(memory_access_exception),gr5
	setlo		%lo(memory_access_exception),gr5
	# marshal ESR0, saved EAR0 and EPCR0 into the C argument regs GR8-GR10
	movsg		esr0,gr8
	movsg		scr2,gr9	; saved EAR0
	movsg		epcr0,gr10

	# now that we've accessed the exception regs, we can enable exceptions
	movsg		psr,gr4
	ori		gr4,#PSR_ET,gr4
	movgs		gr4,psr
	jmpl		@(gr5,gr0)	; call memory_access_error(esr0,ear0,epcr0)
###############################################################################
#
# handle data access error
#
###############################################################################
	.globl		__entry_data_access_error
__entry_data_access_error:
	LEDS		0x6016
	sethi.p		%hi(data_access_error),gr5
	setlo		%lo(data_access_error),gr5
	# marshal ESFR1, ESR15 and EAR15 into the C argument regs GR8-GR10
	movsg		esfr1,gr8
	movsg		esr15,gr9
	movsg		ear15,gr10

	# now that we've accessed the exception regs, we can enable exceptions
	movsg		psr,gr4
	ori		gr4,#PSR_ET,gr4
	movgs		gr4,psr
	jmpl		@(gr5,gr0)	; call data_access_error(esfr1,esr15,ear15)
###############################################################################
#
# handle data store error
#
###############################################################################
	.globl		__entry_data_store_error
__entry_data_store_error:
	LEDS		0x6017
	sethi.p		%hi(data_store_error),gr5
	setlo		%lo(data_store_error),gr5
	# marshal ESFR1 and ESR14 into the C argument regs GR8-GR9
	movsg		esfr1,gr8
	movsg		esr14,gr9

	# now that we've accessed the exception regs, we can enable exceptions
	movsg		psr,gr4
	ori		gr4,#PSR_ET,gr4
	movgs		gr4,psr
	jmpl		@(gr5,gr0)	; call data_store_error(esfr1,esr14)
###############################################################################
#
# handle division exception
#
###############################################################################
	.globl		__entry_division_exception
__entry_division_exception:
	LEDS		0x6018
	sethi.p		%hi(division_exception),gr5
	setlo		%lo(division_exception),gr5
	# marshal ESFR1, ESR0 and ISR into the C argument regs GR8-GR10
	movsg		esfr1,gr8
	movsg		esr0,gr9
	movsg		isr,gr10

	# now that we've accessed the exception regs, we can enable exceptions
	movsg		psr,gr4
	ori		gr4,#PSR_ET,gr4
	movgs		gr4,psr
	jmpl		@(gr5,gr0)	; call div_excep(esfr1,esr0,isr)
###############################################################################
#
# handle compound exception
#
###############################################################################
	.globl		__entry_compound_exception
__entry_compound_exception:
	LEDS		0x6019
	sethi.p		%hi(compound_exception),gr5
	setlo		%lo(compound_exception),gr5
	# marshal all six status registers into the C argument regs GR8-GR13
	movsg		esfr1,gr8
	movsg		esr0,gr9
	movsg		esr14,gr10
	movsg		esr15,gr11
	movsg		msr0,gr12
	movsg		msr1,gr13

	# now that we've accessed the exception regs, we can enable exceptions
	movsg		psr,gr4
	ori		gr4,#PSR_ET,gr4
	movgs		gr4,psr
	jmpl		@(gr5,gr0)	; call comp_excep(esfr1,esr0,esr14,esr15,msr0,msr1)
###############################################################################
#
# handle interrupts and NMIs
#
###############################################################################
	.globl		__entry_do_IRQ
__entry_do_IRQ:
	LEDS		0x6020

	# we can enable exceptions
	movsg		psr,gr4
	ori		gr4,#PSR_ET,gr4
	movgs		gr4,psr
	bra		do_IRQ

	.globl		__entry_do_NMI
__entry_do_NMI:
	LEDS		0x6021

	# we can enable exceptions
	movsg		psr,gr4
	ori		gr4,#PSR_ET,gr4
	movgs		gr4,psr
	bra		do_NMI
  710. ###############################################################################
  711. #
  712. # the return path for a newly forked child process
  713. # - __switch_to() saved the old current pointer in GR8 for us
  714. #
  715. ###############################################################################
.globl		ret_from_fork
ret_from_fork:
	LEDS		0x6100			; debug checkpoint

	# gr8 = old current pointer, left there by __switch_to()
	call		schedule_tail

	# fork & co. return 0 to child
	setlos.p	#0,gr8
	bra		__syscall_exit
  723. ###################################################################################################
  724. #
  725. # Return to user mode is not as complex as all this looks,
  726. # but we want the default path for a system call return to
  727. # go as quickly as possible which is why some of this is
  728. # less clear than it otherwise should be.
  729. #
  730. ###################################################################################################
  731. .balign L1_CACHE_BYTES
  732. .globl system_call
  733. system_call:
  734. LEDS 0x6101
  735. movsg psr,gr4 ; enable exceptions
  736. ori gr4,#PSR_ET,gr4
  737. movgs gr4,psr
  738. sti gr7,@(gr28,#REG_SYSCALLNO)
  739. sti.p gr8,@(gr28,#REG_ORIG_GR8)
  740. subicc gr7,#nr_syscalls,gr0,icc0
  741. bnc icc0,#0,__syscall_badsys
  742. ldi @(gr15,#TI_FLAGS),gr4
  743. ori gr4,#_TIF_SYSCALL_TRACE,gr4
  744. andicc gr4,#_TIF_SYSCALL_TRACE,gr0,icc0
  745. bne icc0,#0,__syscall_trace_entry
  746. __syscall_call:
  747. slli.p gr7,#2,gr7
  748. sethi %hi(sys_call_table),gr5
  749. setlo %lo(sys_call_table),gr5
  750. ld @(gr5,gr7),gr4
  751. calll @(gr4,gr0)
  752. ###############################################################################
  753. #
  754. # return to interrupted process
  755. #
  756. ###############################################################################
__syscall_exit:
	LEDS		0x6300			; debug checkpoint

	sti		gr8,@(gr28,#REG_GR(8))	; save return value

	# rebuild saved psr - execve will change it for init/main.c
	# (shift the saved PSR right one bit and keep only the PS position —
	#  presumably migrating the saved S bit into PS; confirm against the
	#  FR-V PSR bit layout — then force supervisor mode with PSR_S)
	ldi		@(gr28,#REG_PSR),gr22
	srli		gr22,#1,gr5
	andi.p		gr22,#~PSR_PS,gr22
	andi		gr5,#PSR_PS,gr5
	or		gr5,gr22,gr22
	ori		gr22,#PSR_S,gr22

	# keep current PSR in GR23
	movsg		psr,gr23

	# make sure we don't miss an interrupt setting need_resched or sigpending between
	# sampling and the RETT (raise the interrupt priority level to 14)
	ori		gr23,#PSR_PIL_14,gr23
	movgs		gr23,psr

	# any pending work (signals, reschedule, tracing) diverts to the slow path
	ldi		@(gr15,#TI_FLAGS),gr4
	sethi.p		%hi(_TIF_ALLWORK_MASK),gr5
	setlo		%lo(_TIF_ALLWORK_MASK),gr5
	andcc		gr4,gr5,gr0,icc0
	bne		icc0,#0,__syscall_exit_work
  778. # restore all registers and return
	# restore all registers and return
__entry_return_direct:
	LEDS		0x6301			; debug checkpoint

	# clear ET in the PSR image about to be installed so that no exception can
	# be taken whilst we unstack the register file below
	andi		gr22,#~PSR_ET,gr22
	movgs		gr22,psr

	# reload the special registers from the exception frame
	ldi		@(gr28,#REG_ISR),gr23
	lddi		@(gr28,#REG_CCR),gr24		; pair load: gr24=CCR image, gr25=CCCR image
	lddi		@(gr28,#REG_LR) ,gr26		; pair load: gr26=LR image, gr27=LCR image
	ldi		@(gr28,#REG_PC) ,gr21
	ldi		@(gr28,#REG_TBR),gr20
	movgs		gr20,tbr
	movgs		gr21,pcsr			; the rett below resumes at this address
	movgs		gr23,isr
	movgs		gr24,ccr
	movgs		gr25,cccr
	movgs		gr26,lr
	movgs		gr27,lcr

	# restore the GNER and IACC0 register pairs
	lddi		@(gr28,#REG_GNER0),gr4
	movgs		gr4,gner0
	movgs		gr5,gner1

	lddi		@(gr28,#REG_IACC0),gr4
	movgs		gr4,iacc0h
	movgs		gr5,iacc0l

	# restore the bulk of the general register file in pairs
	# (gr2/gr3, sp and gr28 itself are deliberately left until after the
	#  single-step check below)
	lddi		@(gr28,#REG_GR(4)) ,gr4
	lddi		@(gr28,#REG_GR(6)) ,gr6
	lddi		@(gr28,#REG_GR(8)) ,gr8
	lddi		@(gr28,#REG_GR(10)),gr10
	lddi		@(gr28,#REG_GR(12)),gr12
	lddi		@(gr28,#REG_GR(14)),gr14
	lddi		@(gr28,#REG_GR(16)),gr16
	lddi		@(gr28,#REG_GR(18)),gr18
	lddi		@(gr28,#REG_GR(20)),gr20
	lddi		@(gr28,#REG_GR(22)),gr22
	lddi		@(gr28,#REG_GR(24)),gr24
	lddi		@(gr28,#REG_GR(26)),gr26
	ldi		@(gr28,#REG_GR(29)),gr29
	lddi		@(gr28,#REG_GR(30)),gr30	; pair load: also restores gr31

	# check to see if a debugging return is required
	LEDS		0x67f0
	movsg		ccr,gr2				; preserve CCR over the andicc below
	ldi		@(gr28,#REG__STATUS),gr3
	andicc		gr3,#REG__STATUS_STEP,gr0,icc0
	bne		icc0,#0,__entry_return_singlestep
	movgs		gr2,ccr				; put back the CCR clobbered by the test

	# restore the last few registers, including the frame pointer itself
	ldi		@(gr28,#REG_SP)	,sp
	lddi		@(gr28,#REG_GR(2)) ,gr2
	ldi		@(gr28,#REG_GR(28)),gr28

	LEDS		0x67fe
	// movsg		pcsr,gr31
	// LEDS32

#if 0
	# store the current frame in the workram on the FR451 (debug aid, disabled)
	movgs		gr28,scr2
	sethi.p		%hi(0xfe800000),gr28
	setlo		%lo(0xfe800000),gr28

	stdi		gr2,@(gr28,#REG_GR(2))
	stdi		gr4,@(gr28,#REG_GR(4))
	stdi		gr6,@(gr28,#REG_GR(6))
	stdi		gr8,@(gr28,#REG_GR(8))
	stdi		gr10,@(gr28,#REG_GR(10))
	stdi		gr12,@(gr28,#REG_GR(12))
	stdi		gr14,@(gr28,#REG_GR(14))
	stdi		gr16,@(gr28,#REG_GR(16))
	stdi		gr18,@(gr28,#REG_GR(18))
	stdi		gr24,@(gr28,#REG_GR(24))
	stdi		gr26,@(gr28,#REG_GR(26))
	sti		gr29,@(gr28,#REG_GR(29))
	stdi		gr30,@(gr28,#REG_GR(30))

	movsg		tbr ,gr30
	sti		gr30,@(gr28,#REG_TBR)
	movsg		pcsr,gr30
	sti		gr30,@(gr28,#REG_PC)
	movsg		psr ,gr30
	sti		gr30,@(gr28,#REG_PSR)
	movsg		isr ,gr30
	sti		gr30,@(gr28,#REG_ISR)
	movsg		ccr ,gr30
	movsg		cccr,gr31
	stdi		gr30,@(gr28,#REG_CCR)
	movsg		lr  ,gr30
	movsg		lcr ,gr31
	stdi		gr30,@(gr28,#REG_LR)
	sti		gr0 ,@(gr28,#REG_SYSCALLNO)
	movsg		scr2,gr28
#endif

	# return from exception: resumes at the address loaded into PCSR above
	rett		#0
	# the task is being single-stepped: return via break.S so the debugger
	# regains control at a known location
__entry_return_singlestep:
	movgs		gr2,ccr				; restore the CCR preserved before the step test
	lddi		@(gr28,#REG_GR(2)) ,gr2
	ldi		@(gr28,#REG_SP)	,sp
	ldi		@(gr28,#REG_GR(28)),gr28
	LEDS		0x67ff
	break
	.globl		__entry_return_singlestep_breaks_here
__entry_return_singlestep_breaks_here:
	nop
  875. ###############################################################################
  876. #
  877. # return to a process interrupted in kernel space
  878. # - we need to consider preemption if that is enabled
  879. #
  880. ###############################################################################
.balign		L1_CACHE_BYTES
__entry_return_from_kernel_exception:
	LEDS		0x6302			; debug checkpoint

	# mask interrupts (raise PIL to 14) and restore directly — no user-mode
	# work or preemption to consider when returning within the kernel here
	movsg		psr,gr23
	ori		gr23,#PSR_PIL_14,gr23
	movgs		gr23,psr
	bra		__entry_return_direct
  888. .balign L1_CACHE_BYTES
  889. __entry_return_from_kernel_interrupt:
  890. LEDS 0x6303
  891. movsg psr,gr23
  892. ori gr23,#PSR_PIL_14,gr23
  893. movgs gr23,psr
  894. #ifdef CONFIG_PREEMPT
  895. ldi @(gr15,#TI_PRE_COUNT),gr5
  896. subicc gr5,#0,gr0,icc0
  897. beq icc0,#0,__entry_return_direct
  898. __entry_preempt_need_resched:
  899. ldi @(gr15,#TI_FLAGS),gr4
  900. andicc gr4,#_TIF_NEED_RESCHED,gr0,icc0
  901. beq icc0,#1,__entry_return_direct
  902. setlos #PREEMPT_ACTIVE,gr5
  903. sti gr5,@(gr15,#TI_FLAGS)
  904. andi gr23,#~PSR_PIL,gr23
  905. movgs gr23,psr
  906. call schedule
  907. sti gr0,@(gr15,#TI_PRE_COUNT)
  908. movsg psr,gr23
  909. ori gr23,#PSR_PIL_14,gr23
  910. movgs gr23,psr
  911. bra __entry_preempt_need_resched
  912. #else
  913. bra __entry_return_direct
  914. #endif
  915. ###############################################################################
  916. #
  917. # perform work that needs to be done immediately before resumption
  918. #
  919. ###############################################################################
.globl		__entry_return_from_user_exception
.balign		L1_CACHE_BYTES
__entry_return_from_user_exception:
	LEDS		0x6501			; debug checkpoint

__entry_resume_userspace:
	# make sure we don't miss an interrupt setting need_resched or sigpending between
	# sampling and the RETT (raise the interrupt priority level to 14)
	movsg		psr,gr23
	ori		gr23,#PSR_PIL_14,gr23
	movgs		gr23,psr

__entry_return_from_user_interrupt:
	LEDS		0x6402			; debug checkpoint

	# if no work flags are pending, restore the registers and return directly
	ldi		@(gr15,#TI_FLAGS),gr4
	sethi.p		%hi(_TIF_WORK_MASK),gr5
	setlo		%lo(_TIF_WORK_MASK),gr5
	andcc		gr4,gr5,gr0,icc0
	beq		icc0,#1,__entry_return_direct

__entry_work_pending:
	LEDS		0x6404			; debug checkpoint

	# deliver signals unless a reschedule is needed first
	andicc		gr4,#_TIF_NEED_RESCHED,gr0,icc0
	beq		icc0,#1,__entry_work_notifysig

__entry_work_resched:
	LEDS		0x6408			; debug checkpoint

	# let interrupts in while the scheduler runs
	movsg		psr,gr23
	andi		gr23,#~PSR_PIL,gr23
	movgs		gr23,psr

	call		schedule

	# mask interrupts again before re-sampling the work flags
	movsg		psr,gr23
	ori		gr23,#PSR_PIL_14,gr23
	movgs		gr23,psr

	LEDS		0x6401
	# re-test: schedule() may have left new work; loop until nothing remains
	ldi		@(gr15,#TI_FLAGS),gr4
	sethi.p		%hi(_TIF_WORK_MASK),gr5
	setlo		%lo(_TIF_WORK_MASK),gr5
	andcc		gr4,gr5,gr0,icc0
	beq		icc0,#1,__entry_return_direct
	andicc		gr4,#_TIF_NEED_RESCHED,gr0,icc0
	bne		icc0,#1,__entry_work_resched

__entry_work_notifysig:
	LEDS		0x6410			; debug checkpoint
	ori.p		gr4,#0,gr8		; gr8 = thread flags, passed to the C handler
	call		do_notify_resume
	bra		__entry_resume_userspace
	# perform syscall entry tracing
__syscall_trace_entry:
	LEDS		0x6320			; debug checkpoint
	setlos.p	#0,gr8			; 0 = entry hook (the exit path passes 1)
	call		do_syscall_trace

	# the tracer may have modified the registers in the frame: reload the
	# syscall number and arguments, then re-validate before dispatching
	ldi		@(gr28,#REG_SYSCALLNO),gr7
	lddi		@(gr28,#REG_GR(8)) ,gr8
	lddi		@(gr28,#REG_GR(10)),gr10
	lddi.p		@(gr28,#REG_GR(12)),gr12
	subicc		gr7,#nr_syscalls,gr0,icc0
	bnc		icc0,#0,__syscall_badsys
	bra		__syscall_call
	# perform syscall exit tracing
__syscall_exit_work:
	LEDS		0x6340			; debug checkpoint

	# only the trace flag is handled here; all other work goes the common route
	andicc		gr4,#_TIF_SYSCALL_TRACE,gr0,icc0
	beq		icc0,#1,__entry_work_pending

	movsg		psr,gr23
	andi		gr23,#~PSR_PIL,gr23	; could let do_syscall_trace() call schedule()
	movgs		gr23,psr

	setlos.p	#1,gr8			; 1 = exit hook
	call		do_syscall_trace
	bra		__entry_resume_userspace
	# the syscall number was out of range: fail the call with -ENOSYS
__syscall_badsys:
	LEDS		0x6380			; debug checkpoint
	setlos		#-ENOSYS,gr8
	sti		gr8,@(gr28,#REG_GR(8))	; save return value
	bra		__entry_resume_userspace
  991. ###############################################################################
  992. #
  993. # syscall vector table
  994. #
  995. ###############################################################################
	# the syscall vector table, indexed by syscall number in system_call above;
	# unimplemented/reserved slots point at sys_ni_syscall (returns -ENOSYS);
	# the numbering follows the historical i386 layout
	.section .rodata
ALIGN
	.globl		sys_call_table
sys_call_table:
	.long sys_restart_syscall	/* 0 - old "setup()" system call, used for restarting */
	.long sys_exit
	.long sys_fork
	.long sys_read
	.long sys_write
	.long sys_open		/* 5 */
	.long sys_close
	.long sys_waitpid
	.long sys_creat
	.long sys_link
	.long sys_unlink	/* 10 */
	.long sys_execve
	.long sys_chdir
	.long sys_time
	.long sys_mknod
	.long sys_chmod		/* 15 */
	.long sys_lchown16
	.long sys_ni_syscall	/* old break syscall holder */
	.long sys_stat
	.long sys_lseek
	.long sys_getpid	/* 20 */
	.long sys_mount
	.long sys_oldumount
	.long sys_setuid16
	.long sys_getuid16
	.long sys_ni_syscall	// sys_stime		/* 25 */
	.long sys_ptrace
	.long sys_alarm
	.long sys_fstat
	.long sys_pause
	.long sys_utime		/* 30 */
	.long sys_ni_syscall	/* old stty syscall holder */
	.long sys_ni_syscall	/* old gtty syscall holder */
	.long sys_access
	.long sys_nice
	.long sys_ni_syscall	/* 35 */	/* old ftime syscall holder */
	.long sys_sync
	.long sys_kill
	.long sys_rename
	.long sys_mkdir
	.long sys_rmdir		/* 40 */
	.long sys_dup
	.long sys_pipe
	.long sys_times
	.long sys_ni_syscall	/* old prof syscall holder */
	.long sys_brk		/* 45 */
	.long sys_setgid16
	.long sys_getgid16
	.long sys_ni_syscall	// sys_signal
	.long sys_geteuid16
	.long sys_getegid16	/* 50 */
	.long sys_acct
	.long sys_umount	/* recycled never used phys() */
	.long sys_ni_syscall	/* old lock syscall holder */
	.long sys_ioctl
	.long sys_fcntl		/* 55 */
	.long sys_ni_syscall	/* old mpx syscall holder */
	.long sys_setpgid
	.long sys_ni_syscall	/* old ulimit syscall holder */
	.long sys_ni_syscall	/* old old uname syscall */
	.long sys_umask		/* 60 */
	.long sys_chroot
	.long sys_ustat
	.long sys_dup2
	.long sys_getppid
	.long sys_getpgrp	/* 65 */
	.long sys_setsid
	.long sys_sigaction
	.long sys_ni_syscall	// sys_sgetmask
	.long sys_ni_syscall	// sys_ssetmask
	.long sys_setreuid16	/* 70 */
	.long sys_setregid16
	.long sys_sigsuspend
	.long sys_ni_syscall	// sys_sigpending
	.long sys_sethostname
	.long sys_setrlimit	/* 75 */
	.long sys_ni_syscall	// sys_old_getrlimit
	.long sys_getrusage
	.long sys_gettimeofday
	.long sys_settimeofday
	.long sys_getgroups16	/* 80 */
	.long sys_setgroups16
	.long sys_ni_syscall	/* old_select slot */
	.long sys_symlink
	.long sys_lstat
	.long sys_readlink	/* 85 */
	.long sys_uselib
	.long sys_swapon
	.long sys_reboot
	.long sys_ni_syscall	// old_readdir
	.long sys_ni_syscall	/* 90 */	/* old_mmap slot */
	.long sys_munmap
	.long sys_truncate
	.long sys_ftruncate
	.long sys_fchmod
	.long sys_fchown16	/* 95 */
	.long sys_getpriority
	.long sys_setpriority
	.long sys_ni_syscall	/* old profil syscall holder */
	.long sys_statfs
	.long sys_fstatfs	/* 100 */
	.long sys_ni_syscall	/* ioperm for i386 */
	.long sys_socketcall
	.long sys_syslog
	.long sys_setitimer
	.long sys_getitimer	/* 105 */
	.long sys_newstat
	.long sys_newlstat
	.long sys_newfstat
	.long sys_ni_syscall	/* obsolete olduname() syscall */
	.long sys_ni_syscall	/* iopl for i386 */	/* 110 */
	.long sys_vhangup
	.long sys_ni_syscall	/* obsolete idle() syscall */
	.long sys_ni_syscall	/* vm86old for i386 */
	.long sys_wait4
	.long sys_swapoff	/* 115 */
	.long sys_sysinfo
	.long sys_ipc
	.long sys_fsync
	.long sys_sigreturn
	.long sys_clone		/* 120 */
	.long sys_setdomainname
	.long sys_newuname
	.long sys_ni_syscall	/* old "cacheflush" */
	.long sys_adjtimex
	.long sys_mprotect	/* 125 */
	.long sys_sigprocmask
	.long sys_ni_syscall	/* old "create_module" */
	.long sys_init_module
	.long sys_delete_module
	.long sys_ni_syscall	/* old "get_kernel_syms" */
	.long sys_quotactl
	.long sys_getpgid
	.long sys_fchdir
	.long sys_bdflush
	.long sys_sysfs		/* 135 */
	.long sys_personality
	.long sys_ni_syscall	/* for afs_syscall */
	.long sys_setfsuid16
	.long sys_setfsgid16
	.long sys_llseek	/* 140 */
	.long sys_getdents
	.long sys_select
	.long sys_flock
	.long sys_msync
	.long sys_readv		/* 145 */
	.long sys_writev
	.long sys_getsid
	.long sys_fdatasync
	.long sys_sysctl
	.long sys_mlock		/* 150 */
	.long sys_munlock
	.long sys_mlockall
	.long sys_munlockall
	.long sys_sched_setparam
	.long sys_sched_getparam	/* 155 */
	.long sys_sched_setscheduler
	.long sys_sched_getscheduler
	.long sys_sched_yield
	.long sys_sched_get_priority_max
	.long sys_sched_get_priority_min	/* 160 */
	.long sys_sched_rr_get_interval
	.long sys_nanosleep
	.long sys_mremap
	.long sys_setresuid16
	.long sys_getresuid16	/* 165 */
	.long sys_ni_syscall	/* for vm86 */
	.long sys_ni_syscall	/* Old sys_query_module */
	.long sys_poll
	.long sys_nfsservctl
	.long sys_setresgid16	/* 170 */
	.long sys_getresgid16
	.long sys_prctl
	.long sys_rt_sigreturn
	.long sys_rt_sigaction
	.long sys_rt_sigprocmask	/* 175 */
	.long sys_rt_sigpending
	.long sys_rt_sigtimedwait
	.long sys_rt_sigqueueinfo
	.long sys_rt_sigsuspend
	.long sys_pread64	/* 180 */
	.long sys_pwrite64
	.long sys_chown16
	.long sys_getcwd
	.long sys_capget
	.long sys_capset	/* 185 */
	.long sys_sigaltstack
	.long sys_sendfile
	.long sys_ni_syscall	/* streams1 */
	.long sys_ni_syscall	/* streams2 */
	.long sys_vfork		/* 190 */
	.long sys_getrlimit
	.long sys_mmap2
	.long sys_truncate64
	.long sys_ftruncate64
	.long sys_stat64	/* 195 */
	.long sys_lstat64
	.long sys_fstat64
	.long sys_lchown
	.long sys_getuid
	.long sys_getgid	/* 200 */
	.long sys_geteuid
	.long sys_getegid
	.long sys_setreuid
	.long sys_setregid
	.long sys_getgroups	/* 205 */
	.long sys_setgroups
	.long sys_fchown
	.long sys_setresuid
	.long sys_getresuid
	.long sys_setresgid	/* 210 */
	.long sys_getresgid
	.long sys_chown
	.long sys_setuid
	.long sys_setgid
	.long sys_setfsuid	/* 215 */
	.long sys_setfsgid
	.long sys_pivot_root
	.long sys_mincore
	.long sys_madvise
	.long sys_getdents64	/* 220 */
	.long sys_fcntl64
	.long sys_ni_syscall	/* reserved for TUX */
	.long sys_ni_syscall	/* Reserved for Security */
	.long sys_gettid
	.long sys_readahead	/* 225 */
	.long sys_setxattr
	.long sys_lsetxattr
	.long sys_fsetxattr
	.long sys_getxattr
	.long sys_lgetxattr	/* 230 */
	.long sys_fgetxattr
	.long sys_listxattr
	.long sys_llistxattr
	.long sys_flistxattr
	.long sys_removexattr	/* 235 */
	.long sys_lremovexattr
	.long sys_fremovexattr
	.long sys_tkill
	.long sys_sendfile64
	.long sys_futex		/* 240 */
	.long sys_sched_setaffinity
	.long sys_sched_getaffinity
	.long sys_ni_syscall	//sys_set_thread_area
	.long sys_ni_syscall	//sys_get_thread_area
	.long sys_io_setup	/* 245 */
	.long sys_io_destroy
	.long sys_io_getevents
	.long sys_io_submit
	.long sys_io_cancel
	.long sys_fadvise64	/* 250 */
	.long sys_ni_syscall
	.long sys_exit_group
	.long sys_lookup_dcookie
	.long sys_epoll_create
	.long sys_epoll_ctl	/* 255 */
	.long sys_epoll_wait
	.long sys_remap_file_pages
	.long sys_set_tid_address
	.long sys_timer_create
	.long sys_timer_settime		/* 260 */
	.long sys_timer_gettime
	.long sys_timer_getoverrun
	.long sys_timer_delete
	.long sys_clock_settime
	.long sys_clock_gettime		/* 265 */
	.long sys_clock_getres
	.long sys_clock_nanosleep
	.long sys_statfs64
	.long sys_fstatfs64
	.long sys_tgkill	/* 270 */
	.long sys_utimes
	.long sys_fadvise64_64
	.long sys_ni_syscall	/* sys_vserver */
	.long sys_mbind
	.long sys_get_mempolicy
	.long sys_set_mempolicy
	.long sys_mq_open
	.long sys_mq_unlink
	.long sys_mq_timedsend
	.long sys_mq_timedreceive	/* 280 */
	.long sys_mq_notify
	.long sys_mq_getsetattr
	.long sys_ni_syscall	/* reserved for kexec */
	.long sys_waitid
	.long sys_ni_syscall	/* 285 */	/* available */
	.long sys_add_key
	.long sys_request_key
	.long sys_keyctl
	.long sys_ioprio_set
	.long sys_ioprio_get	/* 290 */
	.long sys_inotify_init
	.long sys_inotify_add_watch
	.long sys_inotify_rm_watch
	.long sys_migrate_pages
	.long sys_openat	/* 295 */
	.long sys_mkdirat
	.long sys_mknodat
	.long sys_fchownat
	.long sys_futimesat
	.long sys_fstatat64	/* 300 */
	.long sys_unlinkat
	.long sys_renameat
	.long sys_linkat
	.long sys_symlinkat
	.long sys_readlinkat	/* 305 */
	.long sys_fchmodat
	.long sys_faccessat
	.long sys_pselect6
	.long sys_ppoll

	# number of bytes in the table (used to size-check at build time)
syscall_table_size = (. - sys_call_table)