/* entry-nommu.S */

/*
 * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2007-2009 PetaLogix
 * Copyright (C) 2006 Atmark Techno, Inc.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/linkage.h>
#include <asm/thread_info.h>
#include <linux/errno.h>
#include <asm/entry.h>
#include <asm/asm-offsets.h>
#include <asm/registers.h>
#include <asm/unistd.h>
#include <asm/percpu.h>
#include <asm/signal.h>
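
/*
 * MSR helpers: mask/unmask interrupts and clear the break-in-progress bit.
 * Cores built with CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR can use the
 * dedicated msrset/msrclr instructions; otherwise fall back to a
 * read-modify-write of rmsr through r11.
 */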
#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
	.macro	disable_irq
	msrclr	r0, MSR_IE
	.endm

	.macro	enable_irq
	msrset	r0, MSR_IE
	.endm

	.macro	clear_bip
	msrclr	r0, MSR_BIP
	.endm
#else
	.macro	disable_irq
	mfs	r11, rmsr
	andi	r11, r11, ~MSR_IE
	mts	rmsr, r11
	.endm

	.macro	enable_irq
	mfs	r11, rmsr
	ori	r11, r11, MSR_IE
	mts	rmsr, r11
	.endm

	.macro	clear_bip
	mfs	r11, rmsr
	andi	r11, r11, ~MSR_BIP
	mts	rmsr, r11
	.endm
#endif
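
/*
 * Hardware interrupt entry. Build a pt_regs frame on the kernel stack
 * (switching stacks first if the interrupt arrived from user mode, as
 * indicated by the per-CPU KM flag), mark the CPU as being in kernel mode
 * and call do_IRQ() with the frame pointer, returning via ret_from_intr.
 */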
ENTRY(_interrupt)
	swi	r1, r0, PER_CPU(ENTRY_SP)	/* save the current sp */
	swi	r11, r0, PER_CPU(R11_SAVE)	/* temporarily save r11 */
	lwi	r11, r0, PER_CPU(KM)		/* load mode indicator */
	beqid	r11, 1f
	nop
	brid	2f				/* jump over */
	addik	r1, r1, (-PT_SIZE)		/* room for pt_regs (delay slot) */

1:	/* switch to kernel stack */
	lwi	r1, r0, PER_CPU(CURRENT_SAVE)	/* get the saved current */
	lwi	r1, r1, TS_THREAD_INFO		/* get the thread info */
	/* calculate kernel stack pointer */
	addik	r1, r1, THREAD_SIZE - PT_SIZE

2:
	swi	r11, r1, PT_MODE		/* store the mode */
	lwi	r11, r0, PER_CPU(R11_SAVE)	/* reload r11 */
	swi	r2, r1, PT_R2
	swi	r3, r1, PT_R3
	swi	r4, r1, PT_R4
	swi	r5, r1, PT_R5
	swi	r6, r1, PT_R6
	swi	r7, r1, PT_R7
	swi	r8, r1, PT_R8
	swi	r9, r1, PT_R9
	swi	r10, r1, PT_R10
	swi	r11, r1, PT_R11
	swi	r12, r1, PT_R12
	swi	r13, r1, PT_R13
	swi	r14, r1, PT_R14
	swi	r14, r1, PT_PC
	swi	r15, r1, PT_R15
	swi	r16, r1, PT_R16
	swi	r17, r1, PT_R17
	swi	r18, r1, PT_R18
	swi	r19, r1, PT_R19
	swi	r20, r1, PT_R20
	swi	r21, r1, PT_R21
	swi	r22, r1, PT_R22
	swi	r23, r1, PT_R23
	swi	r24, r1, PT_R24
	swi	r25, r1, PT_R25
	swi	r26, r1, PT_R26
	swi	r27, r1, PT_R27
	swi	r28, r1, PT_R28
	swi	r29, r1, PT_R29
	swi	r30, r1, PT_R30
	swi	r31, r1, PT_R31
	/* special purpose registers */
	mfs	r11, rmsr
	swi	r11, r1, PT_MSR
	mfs	r11, rear
	swi	r11, r1, PT_EAR
	mfs	r11, resr
	swi	r11, r1, PT_ESR
	mfs	r11, rfsr
	swi	r11, r1, PT_FSR
	/* reload original stack pointer and save it */
	lwi	r11, r0, PER_CPU(ENTRY_SP)
	swi	r11, r1, PT_R1
	/* update mode indicator we are in kernel mode */
	addik	r11, r0, 1
	swi	r11, r0, PER_CPU(KM)
	/* restore r31 */
	lwi	r31, r0, PER_CPU(CURRENT_SAVE)
	/* prepare the link register, the argument and jump */
	la	r15, r0, ret_from_intr - 8
	addk	r6, r0, r15
	braid	do_IRQ
	add	r5, r0, r1

ret_from_intr:
	lwi	r11, r1, PT_MODE
	bneid	r11, 3f

	lwi	r6, r31, TS_THREAD_INFO		/* get thread info */
	lwi	r19, r6, TI_FLAGS		/* get flags in thread info */
	/* do an extra work if any bits are set */
	andi	r11, r19, _TIF_NEED_RESCHED
	beqi	r11, 1f
	bralid	r15, schedule
	nop
1:	andi	r11, r19, _TIF_SIGPENDING
	beqid	r11, no_intr_resched
	addk	r5, r1, r0
	addk	r7, r0, r0
	bralid	r15, do_signal
	addk	r6, r0, r0

no_intr_resched:
	/* save mode indicator */
	lwi	r11, r1, PT_MODE
3:
	swi	r11, r0, PER_CPU(KM)
	/* save r31 */
	swi	r31, r0, PER_CPU(CURRENT_SAVE)
restore_context:
	/* special purpose registers */
	lwi	r11, r1, PT_FSR
	mts	rfsr, r11
	lwi	r11, r1, PT_ESR
	mts	resr, r11
	lwi	r11, r1, PT_EAR
	mts	rear, r11
	lwi	r11, r1, PT_MSR
	mts	rmsr, r11

	lwi	r31, r1, PT_R31
	lwi	r30, r1, PT_R30
	lwi	r29, r1, PT_R29
	lwi	r28, r1, PT_R28
	lwi	r27, r1, PT_R27
	lwi	r26, r1, PT_R26
	lwi	r25, r1, PT_R25
	lwi	r24, r1, PT_R24
	lwi	r23, r1, PT_R23
	lwi	r22, r1, PT_R22
	lwi	r21, r1, PT_R21
	lwi	r20, r1, PT_R20
	lwi	r19, r1, PT_R19
	lwi	r18, r1, PT_R18
	lwi	r17, r1, PT_R17
	lwi	r16, r1, PT_R16
	lwi	r15, r1, PT_R15
	lwi	r14, r1, PT_PC
	lwi	r13, r1, PT_R13
	lwi	r12, r1, PT_R12
	lwi	r11, r1, PT_R11
	lwi	r10, r1, PT_R10
	lwi	r9, r1, PT_R9
	lwi	r8, r1, PT_R8
	lwi	r7, r1, PT_R7
	lwi	r6, r1, PT_R6
	lwi	r5, r1, PT_R5
	lwi	r4, r1, PT_R4
	lwi	r3, r1, PT_R3
	lwi	r2, r1, PT_R2
	lwi	r1, r1, PT_R1
	rtid	r14, 0
	nop

ENTRY(_reset)
	brai	0;
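
/*
 * System call entry (user exception vector). Build a pt_regs frame on the
 * kernel stack, advance the saved PC past the trapping instruction, then
 * validate the system call number in r12 and dispatch through
 * sys_call_table; the handler returns through ret_to_user. An invalid
 * number returns -ENOSYS.
 */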
ENTRY(_user_exception)
	swi	r1, r0, PER_CPU(ENTRY_SP)	/* save the current sp */
	swi	r11, r0, PER_CPU(R11_SAVE)	/* temporarily save r11 */
	lwi	r11, r0, PER_CPU(KM)		/* load mode indicator */
	beqid	r11, 1f				/* Already in kernel mode? */
	nop
	brid	2f				/* jump over */
	addik	r1, r1, (-PT_SIZE)		/* Room for pt_regs (delay slot) */

1:	/* Switch to kernel stack */
	lwi	r1, r0, PER_CPU(CURRENT_SAVE)	/* get the saved current */
	lwi	r1, r1, TS_THREAD_INFO		/* get the thread info */
	/* calculate kernel stack pointer */
	addik	r1, r1, THREAD_SIZE - PT_SIZE
	swi	r11, r0, PER_CPU(R11_SAVE)	/* temporarily save r11 */
	lwi	r11, r0, PER_CPU(KM)		/* load mode indicator */

2:
	swi	r11, r1, PT_MODE		/* store the mode */
	lwi	r11, r0, PER_CPU(R11_SAVE)	/* reload r11 */
	/* save them on stack */
	swi	r2, r1, PT_R2
	swi	r3, r1, PT_R3	/* r3: _always_ in clobber list; see unistd.h */
	swi	r4, r1, PT_R4	/* r4: _always_ in clobber list; see unistd.h */
	swi	r5, r1, PT_R5
	swi	r6, r1, PT_R6
	swi	r7, r1, PT_R7
	swi	r8, r1, PT_R8
	swi	r9, r1, PT_R9
	swi	r10, r1, PT_R10
	swi	r11, r1, PT_R11
	/* r12: _always_ in clobber list; see unistd.h */
	swi	r12, r1, PT_R12
	swi	r13, r1, PT_R13
	/* r14: _always_ in clobber list; see unistd.h */
	swi	r14, r1, PT_R14
	/* but we want to return to the next inst. */
	addik	r14, r14, 0x4
	swi	r14, r1, PT_PC	/* increment by 4 and store in pc */
	swi	r15, r1, PT_R15
	swi	r16, r1, PT_R16
	swi	r17, r1, PT_R17
	swi	r18, r1, PT_R18
	swi	r19, r1, PT_R19
	swi	r20, r1, PT_R20
	swi	r21, r1, PT_R21
	swi	r22, r1, PT_R22
	swi	r23, r1, PT_R23
	swi	r24, r1, PT_R24
	swi	r25, r1, PT_R25
	swi	r26, r1, PT_R26
	swi	r27, r1, PT_R27
	swi	r28, r1, PT_R28
	swi	r29, r1, PT_R29
	swi	r30, r1, PT_R30
	swi	r31, r1, PT_R31

	disable_irq
	nop		/* make sure IE bit is in effect */
	clear_bip	/* once IE is in effect it is safe to clear BIP */
	nop

	/* special purpose registers */
	mfs	r11, rmsr
	swi	r11, r1, PT_MSR
	mfs	r11, rear
	swi	r11, r1, PT_EAR
	mfs	r11, resr
	swi	r11, r1, PT_ESR
	mfs	r11, rfsr
	swi	r11, r1, PT_FSR
	/* reload original stack pointer and save it */
	lwi	r11, r0, PER_CPU(ENTRY_SP)
	swi	r11, r1, PT_R1
	/* update mode indicator we are in kernel mode */
	addik	r11, r0, 1
	swi	r11, r0, PER_CPU(KM)
	/* restore r31 */
	lwi	r31, r0, PER_CPU(CURRENT_SAVE)
	/* re-enable interrupts now we are in kernel mode */
	enable_irq

	/* See if the system call number is valid. */
	addi	r11, r12, -__NR_syscalls
	bgei	r11, 1f			/* return to user if not valid */
	/* Figure out which function to use for this system call. */
	/* Note Microblaze barrel shift is optional, so don't rely on it */
	add	r12, r12, r12		/* convert num -> ptr */
	add	r12, r12, r12
	lwi	r12, r12, sys_call_table	/* Get function pointer */
	la	r15, r0, ret_to_user-8	/* set return address */
	bra	r12			/* Make the system call. */
	bri	0			/* won't reach here */
1:
	brid	ret_to_user		/* jump to syscall epilogue */
	addi	r3, r0, -ENOSYS		/* set errno in delay slot */

/*
 * Debug traps are like a system call, but entered via brki r14, 0x60.
 * All we need to do is send the SIGTRAP signal to current; ptrace and
 * do_signal will handle the rest.
 */
ENTRY(_debug_exception)
	swi	r1, r0, PER_CPU(ENTRY_SP)	/* save the current sp */
	lwi	r1, r0, PER_CPU(CURRENT_SAVE)	/* get the saved current */
	lwi	r1, r1, TS_THREAD_INFO		/* get the thread info */
	addik	r1, r1, THREAD_SIZE - PT_SIZE	/* get the kernel stack */
	swi	r11, r0, PER_CPU(R11_SAVE)	/* temporarily save r11 */
	lwi	r11, r0, PER_CPU(KM)		/* load mode indicator */
//save_context:
	swi	r11, r1, PT_MODE	/* store the mode */
	lwi	r11, r0, PER_CPU(R11_SAVE)	/* reload r11 */
	/* save them on stack */
	swi	r2, r1, PT_R2
	swi	r3, r1, PT_R3	/* r3: _always_ in clobber list; see unistd.h */
	swi	r4, r1, PT_R4	/* r4: _always_ in clobber list; see unistd.h */
	swi	r5, r1, PT_R5
	swi	r6, r1, PT_R6
	swi	r7, r1, PT_R7
	swi	r8, r1, PT_R8
	swi	r9, r1, PT_R9
	swi	r10, r1, PT_R10
	swi	r11, r1, PT_R11
	/* r12: _always_ in clobber list; see unistd.h */
	swi	r12, r1, PT_R12
	swi	r13, r1, PT_R13
	/* r14: _always_ in clobber list; see unistd.h */
	swi	r14, r1, PT_R14
	swi	r14, r1, PT_PC	/* Will return to interrupted instruction */
	swi	r15, r1, PT_R15
	swi	r16, r1, PT_R16
	swi	r17, r1, PT_R17
	swi	r18, r1, PT_R18
	swi	r19, r1, PT_R19
	swi	r20, r1, PT_R20
	swi	r21, r1, PT_R21
	swi	r22, r1, PT_R22
	swi	r23, r1, PT_R23
	swi	r24, r1, PT_R24
	swi	r25, r1, PT_R25
	swi	r26, r1, PT_R26
	swi	r27, r1, PT_R27
	swi	r28, r1, PT_R28
	swi	r29, r1, PT_R29
	swi	r30, r1, PT_R30
	swi	r31, r1, PT_R31

	disable_irq
	nop		/* make sure IE bit is in effect */
	clear_bip	/* once IE is in effect it is safe to clear BIP */
	nop

	/* special purpose registers */
	mfs	r11, rmsr
	swi	r11, r1, PT_MSR
	mfs	r11, rear
	swi	r11, r1, PT_EAR
	mfs	r11, resr
	swi	r11, r1, PT_ESR
	mfs	r11, rfsr
	swi	r11, r1, PT_FSR
	/* reload original stack pointer and save it */
	lwi	r11, r0, PER_CPU(ENTRY_SP)
	swi	r11, r1, PT_R1
	/* update mode indicator we are in kernel mode */
	addik	r11, r0, 1
	swi	r11, r0, PER_CPU(KM)
	/* restore r31 */
	lwi	r31, r0, PER_CPU(CURRENT_SAVE)
	/* re-enable interrupts now we are in kernel mode */
	enable_irq

	addi	r5, r0, SIGTRAP		/* sending the trap signal */
	add	r6, r0, r31		/* to current */
	bralid	r15, send_sig
	add	r7, r0, r0		/* 3rd param zero */

	/* Restore r3/r4 to work around how ret_to_user works */
	lwi	r3, r1, PT_R3
	lwi	r4, r1, PT_R4
	bri	ret_to_user

ENTRY(_break)
	bri	0

/* struct task_struct *_switch_to(struct thread_info *prev,
					struct thread_info *next); */
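/*
 * Save the dedicated and non-volatile registers of 'prev' into its
 * thread_info cpu_context, make 'next' the per-CPU current task, then
 * restore 'next' from its cpu_context and return on its kernel stack.
 * Volatile registers are not touched here; the caller already saved them.
 */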
ENTRY(_switch_to)
	/* prepare return value */
	addk	r3, r0, r31

	/* save registers in cpu_context */
	/* use r11 and r12, volatile registers, as temp register */
	addik	r11, r5, TI_CPU_CONTEXT
	swi	r1, r11, CC_R1
	swi	r2, r11, CC_R2
	/* skip volatile registers.
	 * they are saved on stack when we jumped to _switch_to() */
	/* dedicated registers */
	swi	r13, r11, CC_R13
	swi	r14, r11, CC_R14
	swi	r15, r11, CC_R15
	swi	r16, r11, CC_R16
	swi	r17, r11, CC_R17
	swi	r18, r11, CC_R18
	/* save non-volatile registers */
	swi	r19, r11, CC_R19
	swi	r20, r11, CC_R20
	swi	r21, r11, CC_R21
	swi	r22, r11, CC_R22
	swi	r23, r11, CC_R23
	swi	r24, r11, CC_R24
	swi	r25, r11, CC_R25
	swi	r26, r11, CC_R26
	swi	r27, r11, CC_R27
	swi	r28, r11, CC_R28
	swi	r29, r11, CC_R29
	swi	r30, r11, CC_R30
	/* special purpose registers */
	mfs	r12, rmsr
	swi	r12, r11, CC_MSR
	mfs	r12, rear
	swi	r12, r11, CC_EAR
	mfs	r12, resr
	swi	r12, r11, CC_ESR
	mfs	r12, rfsr
	swi	r12, r11, CC_FSR

	/* update r31, the current */
	lwi	r31, r6, TI_TASK
	swi	r31, r0, PER_CPU(CURRENT_SAVE)

	/* get new process' cpu context and restore */
	addik	r11, r6, TI_CPU_CONTEXT

	/* special purpose registers */
	lwi	r12, r11, CC_FSR
	mts	rfsr, r12
	lwi	r12, r11, CC_ESR
	mts	resr, r12
	lwi	r12, r11, CC_EAR
	mts	rear, r12
	lwi	r12, r11, CC_MSR
	mts	rmsr, r12
	/* non-volatile registers */
	lwi	r30, r11, CC_R30
	lwi	r29, r11, CC_R29
	lwi	r28, r11, CC_R28
	lwi	r27, r11, CC_R27
	lwi	r26, r11, CC_R26
	lwi	r25, r11, CC_R25
	lwi	r24, r11, CC_R24
	lwi	r23, r11, CC_R23
	lwi	r22, r11, CC_R22
	lwi	r21, r11, CC_R21
	lwi	r20, r11, CC_R20
	lwi	r19, r11, CC_R19
	/* dedicated registers */
	lwi	r18, r11, CC_R18
	lwi	r17, r11, CC_R17
	lwi	r16, r11, CC_R16
	lwi	r15, r11, CC_R15
	lwi	r14, r11, CC_R14
	lwi	r13, r11, CC_R13
	/* skip volatile registers */
	lwi	r2, r11, CC_R2
	lwi	r1, r11, CC_R1

	rtsd	r15, 8
	nop
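
/*
 * First return path of a newly forked child: let schedule_tail() finish
 * the scheduler bookkeeping, force a return value of 0 in r3 and drop
 * into the common user-space return path.
 */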
ENTRY(ret_from_fork)
	addk	r5, r0, r3
	addk	r6, r0, r1
	brlid	r15, schedule_tail
	nop
	swi	r31, r1, PT_R31		/* save r31 in user context. */
					/* will soon be restored to r31 in ret_to_user */
	addk	r3, r0, r0
	brid	ret_to_user
	nop
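
/*
 * Extra work on the way back to user space: reschedule if
 * TIF_NEED_RESCHED is set and deliver pending signals through do_signal()
 * before rejoining the register restore in ret_to_user.
 */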
work_pending:
	andi	r11, r19, _TIF_NEED_RESCHED
	beqi	r11, 1f
	bralid	r15, schedule
	nop
1:	andi	r11, r19, _TIF_SIGPENDING
	beqi	r11, no_work_pending
	addk	r5, r1, r0
	addik	r7, r0, 1
	bralid	r15, do_signal
	addk	r6, r0, r0
	bri	no_work_pending

ENTRY(ret_to_user)
	disable_irq

	swi	r4, r1, PT_R4		/* return val */
	swi	r3, r1, PT_R3		/* return val */

	lwi	r6, r31, TS_THREAD_INFO	/* get thread info */
	lwi	r19, r6, TI_FLAGS	/* get flags in thread info */
	bnei	r19, work_pending	/* do an extra work if any bits are set */

no_work_pending:
	disable_irq

	/* save r31 */
	swi	r31, r0, PER_CPU(CURRENT_SAVE)
	/* save mode indicator */
	lwi	r18, r1, PT_MODE
	swi	r18, r0, PER_CPU(KM)
//restore_context:
	/* special purpose registers */
	lwi	r18, r1, PT_FSR
	mts	rfsr, r18
	lwi	r18, r1, PT_ESR
	mts	resr, r18
	lwi	r18, r1, PT_EAR
	mts	rear, r18
	lwi	r18, r1, PT_MSR
	mts	rmsr, r18

	lwi	r31, r1, PT_R31
	lwi	r30, r1, PT_R30
	lwi	r29, r1, PT_R29
	lwi	r28, r1, PT_R28
	lwi	r27, r1, PT_R27
	lwi	r26, r1, PT_R26
	lwi	r25, r1, PT_R25
	lwi	r24, r1, PT_R24
	lwi	r23, r1, PT_R23
	lwi	r22, r1, PT_R22
	lwi	r21, r1, PT_R21
	lwi	r20, r1, PT_R20
	lwi	r19, r1, PT_R19
	lwi	r18, r1, PT_R18
	lwi	r17, r1, PT_R17
	lwi	r16, r1, PT_R16
	lwi	r15, r1, PT_R15
	lwi	r14, r1, PT_PC
	lwi	r13, r1, PT_R13
	lwi	r12, r1, PT_R12
	lwi	r11, r1, PT_R11
	lwi	r10, r1, PT_R10
	lwi	r9, r1, PT_R9
	lwi	r8, r1, PT_R8
	lwi	r7, r1, PT_R7
	lwi	r6, r1, PT_R6
	lwi	r5, r1, PT_R5
	lwi	r4, r1, PT_R4		/* return val */
	lwi	r3, r1, PT_R3		/* return val */
	lwi	r2, r1, PT_R2
	lwi	r1, r1, PT_R1

	rtid	r14, 0
	nop
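
/*
 * Wrappers for system calls that need the pt_regs frame (the current r1)
 * as an extra argument: load it into the proper argument register and
 * tail-call the C implementation.
 */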
sys_vfork:
	brid	microblaze_vfork
	addk	r5, r1, r0

sys_clone:
	brid	microblaze_clone
	addk	r7, r1, r0

sys_execve:
	brid	microblaze_execve
	addk	r8, r1, r0

sys_rt_sigreturn_wrapper:
	brid	sys_rt_sigreturn
	addk	r5, r1, r0

sys_rt_sigsuspend_wrapper:
	brid	sys_rt_sigsuspend
	addk	r7, r1, r0

/* Interrupt vector table */
	.section	.init.ivt, "ax"
	.org	0x0
	brai	_reset
	brai	_user_exception
	brai	_interrupt
	brai	_break
	brai	_hw_exception_handler
	.org	0x60
	brai	_debug_exception

	.section	.rodata, "a"
#include "syscall_table.S"

syscall_table_size=(.-sys_call_table)