  1. /*
  2. * arch/ppc/kernel/misc.S
  3. *
  4. *
  5. *
  6. * This file contains miscellaneous low-level functions.
  7. * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
  8. *
  9. * Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
  10. * and Paul Mackerras.
  11. * Adapted for iSeries by Mike Corrigan (mikejc@us.ibm.com)
  12. * PPC64 updates by Dave Engebretsen (engebret@us.ibm.com)
  13. *
  14. * This program is free software; you can redistribute it and/or
  15. * modify it under the terms of the GNU General Public License
  16. * as published by the Free Software Foundation; either version
  17. * 2 of the License, or (at your option) any later version.
  18. *
  19. */
  20. #include <linux/config.h>
  21. #include <linux/sys.h>
  22. #include <asm/unistd.h>
  23. #include <asm/errno.h>
  24. #include <asm/processor.h>
  25. #include <asm/page.h>
  26. #include <asm/cache.h>
  27. #include <asm/ppc_asm.h>
  28. #include <asm/asm-offsets.h>
  29. #include <asm/cputable.h>
  30. .text
  31. /*
  32. * Returns (address we were linked at) - (address we are running at)
  33. * for use before the text and data are mapped to KERNELBASE.
  34. */
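/*
 * Illustrative addition (not in the original source): a hedged sketch of how
 * early boot C code typically uses this value to turn a linked address into
 * one that works before relocation; the variable names are hypothetical.
 *
 *	unsigned long offset = reloc_offset();
 *	struct cpu_spec *specs = (struct cpu_spec *)
 *		((unsigned long)cpu_specs - offset);
 */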
  35. _GLOBAL(reloc_offset)
  36. mflr r0
  37. bl 1f
  38. 1: mflr r3
  39. LOADADDR(r4,1b)
  40. sub r3,r4,r3
  41. mtlr r0
  42. blr
  43. _GLOBAL(get_msr)
  44. mfmsr r3
  45. blr
  46. _GLOBAL(get_dar)
  47. mfdar r3
  48. blr
  49. _GLOBAL(get_srr0)
  50. mfsrr0 r3
  51. blr
  52. _GLOBAL(get_srr1)
  53. mfsrr1 r3
  54. blr
  55. _GLOBAL(get_sp)
  56. mr r3,r1
  57. blr
  58. #ifdef CONFIG_IRQSTACKS
  59. _GLOBAL(call_do_softirq)
  60. mflr r0
  61. std r0,16(r1)
  62. stdu r1,THREAD_SIZE-112(r3)
  63. mr r1,r3
  64. bl .__do_softirq
  65. ld r1,0(r1)
  66. ld r0,16(r1)
  67. mtlr r0
  68. blr
  69. _GLOBAL(call_handle_IRQ_event)
  70. mflr r0
  71. std r0,16(r1)
  72. stdu r1,THREAD_SIZE-112(r6)
  73. mr r1,r6
  74. bl .handle_IRQ_event
  75. ld r1,0(r1)
  76. ld r0,16(r1)
  77. mtlr r0
  78. blr
  79. #endif /* CONFIG_IRQSTACKS */
  80. /*
  81. * To be called by C code which needs to do some operations with MMU
  82. * disabled. Note that interrupts have to be disabled by the caller
  83. * prior to calling us. The code called _MUST_ be in the RMO of course
  84. * and part of the linear mapping as we don't attempt to translate the
  85. * stack pointer at all. The function is called with the stack switched
  86. * to this CPU's emergency stack.
  87. *
  88. * prototype is void *call_with_mmu_off(void *func, void *data);
  89. *
  90. * the called function is expected to be of the form
  91. *
  92. * void *called(void *data);
  93. */
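/*
 * Illustrative addition (not in the original source): a hedged caller-side
 * sketch; function and variable names are hypothetical.  The caller disables
 * interrupts itself and passes a function whose code and data are reachable
 * in real mode.
 *
 *	static void *dart_probe_real(void *data)
 *	{
 *		// runs with MSR_IR/MSR_DR clear; no vmalloc/ioremap access
 *		return data;
 *	}
 *
 *	local_irq_save(flags);
 *	ret = call_with_mmu_off((void *)dart_probe_real, data);
 *	local_irq_restore(flags);
 */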
  94. _GLOBAL(call_with_mmu_off)
  95. mflr r0 /* get link, save it on stackframe */
  96. std r0,16(r1)
  97. mr r5,r1 /* save old stack ptr */
  98. ld r1,PACAEMERGSP(r13) /* get emerg. stack */
  99. subi r1,r1,STACK_FRAME_OVERHEAD
  100. std r0,16(r1) /* save link on emerg. stack */
  101. std r5,0(r1) /* save old stack ptr in backchain */
  102. ld r3,0(r3) /* get to real function ptr (assume same TOC) */
  103. bl 2f /* we need LR to return, continue at label 2 */
  104. ld r0,16(r1) /* we return here from the call, get LR and */
  105. ld r1,0(r1) /* .. old stack ptr */
  106. mtspr SPRN_SRR0,r0 /* and get back to virtual mode with these */
  107. mfmsr r4
  108. ori r4,r4,MSR_IR|MSR_DR
  109. mtspr SPRN_SRR1,r4
  110. rfid
  111. 2: mtspr SPRN_SRR0,r3 /* coming from above, enter real mode */
  112. mr r3,r4 /* get parameter */
  113. mfmsr r0
  114. ori r0,r0,MSR_IR|MSR_DR
  115. xori r0,r0,MSR_IR|MSR_DR
  116. mtspr SPRN_SRR1,r0
  117. rfid
  118. .section ".toc","aw"
  119. PPC64_CACHES:
  120. .tc ppc64_caches[TC],ppc64_caches
  121. .section ".text"
  122. /*
  123. * Write any modified data cache blocks out to memory
  124. * and invalidate the corresponding instruction cache blocks.
  125. *
  126. * flush_icache_range(unsigned long start, unsigned long stop)
  127. *
  128. * flush all bytes from start through stop-1 inclusive
  129. */
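/*
 * Illustrative addition (not in the original source): the usual calling
 * pattern after patching kernel text, so the i-cache sees the new
 * instruction (variable names are hypothetical).
 *
 *	*(unsigned int *)patch_addr = new_insn;
 *	flush_icache_range((unsigned long)patch_addr,
 *			   (unsigned long)patch_addr + 4);
 */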
  130. _KPROBE(__flush_icache_range)
  131. /*
  132. * Flush the data cache to memory
  133. *
  134. * Different systems have different cache line sizes
  135. * and in some cases i-cache and d-cache line sizes differ from
  136. * each other.
  137. */
  138. ld r10,PPC64_CACHES@toc(r2)
  139. lwz r7,DCACHEL1LINESIZE(r10)/* Get cache line size */
  140. addi r5,r7,-1
  141. andc r6,r3,r5 /* round low to line bdy */
  142. subf r8,r6,r4 /* compute length */
  143. add r8,r8,r5 /* ensure we get enough */
  144. lwz r9,DCACHEL1LOGLINESIZE(r10) /* Get log-2 of cache line size */
  145. srw. r8,r8,r9 /* compute line count */
  146. beqlr /* nothing to do? */
  147. mtctr r8
  148. 1: dcbst 0,r6
  149. add r6,r6,r7
  150. bdnz 1b
  151. sync
  152. /* Now invalidate the instruction cache */
  153. lwz r7,ICACHEL1LINESIZE(r10) /* Get Icache line size */
  154. addi r5,r7,-1
  155. andc r6,r3,r5 /* round low to line bdy */
  156. subf r8,r6,r4 /* compute length */
  157. add r8,r8,r5
  158. lwz r9,ICACHEL1LOGLINESIZE(r10) /* Get log-2 of Icache line size */
  159. srw. r8,r8,r9 /* compute line count */
  160. beqlr /* nothing to do? */
  161. mtctr r8
  162. 2: icbi 0,r6
  163. add r6,r6,r7
  164. bdnz 2b
  165. isync
  166. blr
  167. .previous .text
  168. /*
  169. * Like above, but only do the D-cache.
  170. *
  171. * flush_dcache_range(unsigned long start, unsigned long stop)
  172. *
  173. * flush all bytes from start to stop-1 inclusive
  174. */
  175. _GLOBAL(flush_dcache_range)
  176. /*
  177. * Flush the data cache to memory
  178. *
  179. * Different systems have different cache line sizes
  180. */
  181. ld r10,PPC64_CACHES@toc(r2)
  182. lwz r7,DCACHEL1LINESIZE(r10) /* Get dcache line size */
  183. addi r5,r7,-1
  184. andc r6,r3,r5 /* round low to line bdy */
  185. subf r8,r6,r4 /* compute length */
  186. add r8,r8,r5 /* ensure we get enough */
  187. lwz r9,DCACHEL1LOGLINESIZE(r10) /* Get log-2 of dcache line size */
  188. srw. r8,r8,r9 /* compute line count */
  189. beqlr /* nothing to do? */
  190. mtctr r8
  191. 0: dcbst 0,r6
  192. add r6,r6,r7
  193. bdnz 0b
  194. sync
  195. blr
  196. /*
  197. * Like above, but works on non-mapped physical addresses.
  198. * Use only for non-LPAR setups! It also assumes real mode
  199. * is cacheable. Used for flushing out the DART before using
  200. * it as uncacheable memory.
  201. *
  202. * flush_dcache_phys_range(unsigned long start, unsigned long stop)
  203. *
  204. * flush all bytes from start to stop-1 inclusive
  205. */
  206. _GLOBAL(flush_dcache_phys_range)
  207. ld r10,PPC64_CACHES@toc(r2)
  208. lwz r7,DCACHEL1LINESIZE(r10) /* Get dcache line size */
  209. addi r5,r7,-1
  210. andc r6,r3,r5 /* round low to line bdy */
  211. subf r8,r6,r4 /* compute length */
  212. add r8,r8,r5 /* ensure we get enough */
  213. lwz r9,DCACHEL1LOGLINESIZE(r10) /* Get log-2 of dcache line size */
  214. srw. r8,r8,r9 /* compute line count */
  215. beqlr /* nothing to do? */
  216. mfmsr r5 /* Disable MMU Data Relocation */
  217. ori r0,r5,MSR_DR
  218. xori r0,r0,MSR_DR
  219. sync
  220. mtmsr r0
  221. sync
  222. isync
  223. mtctr r8
  224. 0: dcbst 0,r6
  225. add r6,r6,r7
  226. bdnz 0b
  227. sync
  228. isync
  229. mtmsr r5 /* Re-enable MMU Data Relocation */
  230. sync
  231. isync
  232. blr
  233. _GLOBAL(flush_inval_dcache_range)
  234. ld r10,PPC64_CACHES@toc(r2)
  235. lwz r7,DCACHEL1LINESIZE(r10) /* Get dcache line size */
  236. addi r5,r7,-1
  237. andc r6,r3,r5 /* round low to line bdy */
  238. subf r8,r6,r4 /* compute length */
  239. add r8,r8,r5 /* ensure we get enough */
  240. lwz r9,DCACHEL1LOGLINESIZE(r10)/* Get log-2 of dcache line size */
  241. srw. r8,r8,r9 /* compute line count */
  242. beqlr /* nothing to do? */
  243. sync
  244. isync
  245. mtctr r8
  246. 0: dcbf 0,r6
  247. add r6,r6,r7
  248. bdnz 0b
  249. sync
  250. isync
  251. blr
  252. /*
  253. * Flush a particular page from the data cache to RAM.
  254. * Note: this is necessary because the instruction cache does *not*
  255. * snoop from the data cache.
  256. *
  257. * void __flush_dcache_icache(void *page)
  258. */
  259. _GLOBAL(__flush_dcache_icache)
  260. /*
  261. * Flush the data cache to memory
  262. *
  263. * Different systems have different cache line sizes
  264. */
  265. /* Flush the dcache */
  266. ld r7,PPC64_CACHES@toc(r2)
  267. clrrdi r3,r3,PAGE_SHIFT /* Page align */
  268. lwz r4,DCACHEL1LINESPERPAGE(r7) /* Get # dcache lines per page */
  269. lwz r5,DCACHEL1LINESIZE(r7) /* Get dcache line size */
  270. mr r6,r3
  271. mtctr r4
  272. 0: dcbst 0,r6
  273. add r6,r6,r5
  274. bdnz 0b
  275. sync
  276. /* Now invalidate the icache */
  277. lwz r4,ICACHEL1LINESPERPAGE(r7) /* Get # icache lines per page */
  278. lwz r5,ICACHEL1LINESIZE(r7) /* Get icache line size */
  279. mtctr r4
  280. 1: icbi 0,r3
  281. add r3,r3,r5
  282. bdnz 1b
  283. isync
  284. blr
  285. /*
  286. * I/O string operations
  287. *
  288. * insb(port, buf, len)
  289. * outsb(port, buf, len)
  290. * insw(port, buf, len)
  291. * outsw(port, buf, len)
  292. * insl(port, buf, len)
  293. * outsl(port, buf, len)
  294. * insw_ns(port, buf, len)
  295. * outsw_ns(port, buf, len)
  296. * insl_ns(port, buf, len)
  297. * outsl_ns(port, buf, len)
  298. *
  299. * The *_ns versions don't do byte-swapping.
  300. */
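/*
 * Illustrative addition (not in the original source): a rough, hedged C
 * equivalent of _insw.  Each iteration corresponds to the lhbrx/eieio/sthu
 * sequence below: a byte-reversed 16-bit load from the (little-endian) port,
 * stored to the buffer in CPU byte order.
 *
 *	void insw_sketch(const volatile u16 __iomem *port, u16 *buf, long count)
 *	{
 *		while (count-- > 0)
 *			*buf++ = le16_to_cpu(*port);	// lhbrx; eieio orders I/O
 *	}
 */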
  301. _GLOBAL(_insb)
  302. cmpwi 0,r5,0
  303. mtctr r5
  304. subi r4,r4,1
  305. blelr-
  306. 00: lbz r5,0(r3)
  307. eieio
  308. stbu r5,1(r4)
  309. bdnz 00b
  310. twi 0,r5,0
  311. isync
  312. blr
  313. _GLOBAL(_outsb)
  314. cmpwi 0,r5,0
  315. mtctr r5
  316. subi r4,r4,1
  317. blelr-
  318. 00: lbzu r5,1(r4)
  319. stb r5,0(r3)
  320. bdnz 00b
  321. sync
  322. blr
  323. _GLOBAL(_insw)
  324. cmpwi 0,r5,0
  325. mtctr r5
  326. subi r4,r4,2
  327. blelr-
  328. 00: lhbrx r5,0,r3
  329. eieio
  330. sthu r5,2(r4)
  331. bdnz 00b
  332. twi 0,r5,0
  333. isync
  334. blr
  335. _GLOBAL(_outsw)
  336. cmpwi 0,r5,0
  337. mtctr r5
  338. subi r4,r4,2
  339. blelr-
  340. 00: lhzu r5,2(r4)
  341. sthbrx r5,0,r3
  342. bdnz 00b
  343. sync
  344. blr
  345. _GLOBAL(_insl)
  346. cmpwi 0,r5,0
  347. mtctr r5
  348. subi r4,r4,4
  349. blelr-
  350. 00: lwbrx r5,0,r3
  351. eieio
  352. stwu r5,4(r4)
  353. bdnz 00b
  354. twi 0,r5,0
  355. isync
  356. blr
  357. _GLOBAL(_outsl)
  358. cmpwi 0,r5,0
  359. mtctr r5
  360. subi r4,r4,4
  361. blelr-
  362. 00: lwzu r5,4(r4)
  363. stwbrx r5,0,r3
  364. bdnz 00b
  365. sync
  366. blr
  367. /* _GLOBAL(ide_insw) now in drivers/ide/ide-iops.c */
  368. _GLOBAL(_insw_ns)
  369. cmpwi 0,r5,0
  370. mtctr r5
  371. subi r4,r4,2
  372. blelr-
  373. 00: lhz r5,0(r3)
  374. eieio
  375. sthu r5,2(r4)
  376. bdnz 00b
  377. twi 0,r5,0
  378. isync
  379. blr
  380. /* _GLOBAL(ide_outsw) now in drivers/ide/ide-iops.c */
  381. _GLOBAL(_outsw_ns)
  382. cmpwi 0,r5,0
  383. mtctr r5
  384. subi r4,r4,2
  385. blelr-
  386. 00: lhzu r5,2(r4)
  387. sth r5,0(r3)
  388. bdnz 00b
  389. sync
  390. blr
  391. _GLOBAL(_insl_ns)
  392. cmpwi 0,r5,0
  393. mtctr r5
  394. subi r4,r4,4
  395. blelr-
  396. 00: lwz r5,0(r3)
  397. eieio
  398. stwu r5,4(r4)
  399. bdnz 00b
  400. twi 0,r5,0
  401. isync
  402. blr
  403. _GLOBAL(_outsl_ns)
  404. cmpwi 0,r5,0
  405. mtctr r5
  406. subi r4,r4,4
  407. blelr-
  408. 00: lwzu r5,4(r4)
  409. stw r5,0(r3)
  410. bdnz 00b
  411. sync
  412. blr
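/*
 * cvt_fd / cvt_df: single <-> double conversion helpers used by the FP code.
 * Hedged description added for clarity; the prototypes below are
 * reconstructed from the register usage, not quoted from a header:
 *
 *	void cvt_fd(float *from, double *to, double *fpscr_slot);
 *	void cvt_df(double *from, float *to, double *fpscr_slot);
 *
 * Each routine loads the saved FPSCR image from the 8-byte slot at r5,
 * performs the conversion through fr0, then writes the updated FPSCR back.
 */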
  413. _GLOBAL(cvt_fd)
  414. lfd 0,0(r5) /* load up fpscr value */
  415. mtfsf 0xff,0
  416. lfs 0,0(r3)
  417. stfd 0,0(r4)
  418. mffs 0 /* save new fpscr value */
  419. stfd 0,0(r5)
  420. blr
  421. _GLOBAL(cvt_df)
  422. lfd 0,0(r5) /* load up fpscr value */
  423. mtfsf 0xff,0
  424. lfd 0,0(r3)
  425. stfs 0,0(r4)
  426. mffs 0 /* save new fpscr value */
  427. stfd 0,0(r5)
  428. blr
  429. /*
  430. * identify_cpu: identify the CPU from its PVR and call its setup_cpu function
  431. * In: r3 = base of the cpu_specs array
  432. * r4 = address of cur_cpu_spec
  433. * r5 = relocation offset
  434. */
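/*
 * Illustrative addition (not in the original source): a rough C equivalent
 * of the loop below, with field names matching the asm-offsets used here.
 * The relocation handling (r5) is elided for clarity.
 *
 *	unsigned int pvr = mfspr(SPRN_PVR);
 *	struct cpu_spec *s;
 *	for (s = cpu_specs; (pvr & s->pvr_mask) != s->pvr_value; s++)
 *		;				// relies on a catch-all last entry
 *	cur_cpu_spec = s;
 *	s->cpu_setup(offset, s);		// tail call via mtctr/bctr
 */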
  435. _GLOBAL(identify_cpu)
  436. mfpvr r7
  437. 1:
  438. lwz r8,CPU_SPEC_PVR_MASK(r3)
  439. and r8,r8,r7
  440. lwz r9,CPU_SPEC_PVR_VALUE(r3)
  441. cmplw 0,r9,r8
  442. beq 1f
  443. addi r3,r3,CPU_SPEC_ENTRY_SIZE
  444. b 1b
  445. 1:
  446. add r0,r3,r5
  447. std r0,0(r4)
  448. ld r4,CPU_SPEC_SETUP(r3)
  449. sub r4,r4,r5
  450. ld r4,0(r4)
  451. sub r4,r4,r5
  452. mtctr r4
  453. /* Calling convention for cpu setup is r3=offset, r4=cur_cpu_spec */
  454. mr r4,r3
  455. mr r3,r5
  456. bctr
  457. /*
  458. * do_cpu_ftr_fixups - goes through the list of CPU feature fixups
  459. * and writes nops over sections of code that don't apply to this CPU.
  460. * r3 = data offset (not changed)
  461. */
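/*
 * Illustrative addition (not in the original source): a hedged sketch of the
 * 32-byte fixup record this loop walks; the layout matches the -32/-24/-16/-8
 * offsets used below, but the struct name is hypothetical.
 *
 *	struct ftr_fixup_entry {
 *		unsigned long	mask;		// feature bits the entry tests
 *		unsigned long	value;		// keep code if (features & mask) == value
 *		void		*start, *end;	// text range to overwrite with nops
 *	};
 *
 * For every entry between __start___ftr_fixup and __stop___ftr_fixup whose
 * condition fails, the words in [start, end) are overwritten with 0x60000000
 * (nop), and the i-cache is flushed when the CPU has split I/D caches.
 */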
  462. _GLOBAL(do_cpu_ftr_fixups)
  463. /* Get CPU 0 features */
  464. LOADADDR(r6,cur_cpu_spec)
  465. sub r6,r6,r3
  466. ld r4,0(r6)
  467. sub r4,r4,r3
  468. ld r4,CPU_SPEC_FEATURES(r4)
  469. /* Get the fixup table */
  470. LOADADDR(r6,__start___ftr_fixup)
  471. sub r6,r6,r3
  472. LOADADDR(r7,__stop___ftr_fixup)
  473. sub r7,r7,r3
  474. /* Do the fixup */
  475. 1: cmpld r6,r7
  476. bgelr
  477. addi r6,r6,32
  478. ld r8,-32(r6) /* mask */
  479. and r8,r8,r4
  480. ld r9,-24(r6) /* value */
  481. cmpld r8,r9
  482. beq 1b
  483. ld r8,-16(r6) /* section begin */
  484. ld r9,-8(r6) /* section end */
  485. subf. r9,r8,r9
  486. beq 1b
  487. /* write nops over the section of code */
  488. /* todo: if large section, add a branch at the start of it */
  489. srwi r9,r9,2
  490. mtctr r9
  491. sub r8,r8,r3
  492. lis r0,0x60000000@h /* nop */
  493. 3: stw r0,0(r8)
  494. andi. r10,r4,CPU_FTR_SPLIT_ID_CACHE@l
  495. beq 2f
  496. dcbst 0,r8 /* suboptimal, but simpler */
  497. sync
  498. icbi 0,r8
  499. 2: addi r8,r8,4
  500. bdnz 3b
  501. sync /* additional sync needed on g4 */
  502. isync
  503. b 1b
  504. #if defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE)
  505. /*
  506. * Do an IO access in real mode
  507. */
  508. _GLOBAL(real_readb)
  509. mfmsr r7
  510. ori r0,r7,MSR_DR
  511. xori r0,r0,MSR_DR
  512. sync
  513. mtmsrd r0
  514. sync
  515. isync
  516. mfspr r6,SPRN_HID4
  517. rldicl r5,r6,32,0
  518. ori r5,r5,0x100
  519. rldicl r5,r5,32,0
  520. sync
  521. mtspr SPRN_HID4,r5
  522. isync
  523. slbia
  524. isync
  525. lbz r3,0(r3)
  526. sync
  527. mtspr SPRN_HID4,r6
  528. isync
  529. slbia
  530. isync
  531. mtmsrd r7
  532. sync
  533. isync
  534. blr
  535. /*
  536. * Do an IO access in real mode
  537. */
  538. _GLOBAL(real_writeb)
  539. mfmsr r7
  540. ori r0,r7,MSR_DR
  541. xori r0,r0,MSR_DR
  542. sync
  543. mtmsrd r0
  544. sync
  545. isync
  546. mfspr r6,SPRN_HID4
  547. rldicl r5,r6,32,0
  548. ori r5,r5,0x100
  549. rldicl r5,r5,32,0
  550. sync
  551. mtspr SPRN_HID4,r5
  552. isync
  553. slbia
  554. isync
  555. stb r3,0(r4)
  556. sync
  557. mtspr SPRN_HID4,r6
  558. isync
  559. slbia
  560. isync
  561. mtmsrd r7
  562. sync
  563. isync
  564. blr
  565. #endif /* defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE) */
  566. /*
  567. * Create a kernel thread
  568. * kernel_thread(fn, arg, flags)
  569. */
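/*
 * Illustrative addition (not in the original source): a hedged caller-side
 * sketch; the thread function name is hypothetical.  CLONE_VM and
 * CLONE_UNTRACED are OR-ed into the flags by the code below, and the child
 * calls fn(arg) and then exits.
 *
 *	static int my_kthread(void *arg)
 *	{
 *		// kernel-thread body
 *		return 0;
 *	}
 *
 *	pid = kernel_thread(my_kthread, NULL, CLONE_FS | CLONE_FILES);
 */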
  570. _GLOBAL(kernel_thread)
  571. std r29,-24(r1)
  572. std r30,-16(r1)
  573. stdu r1,-STACK_FRAME_OVERHEAD(r1)
  574. mr r29,r3
  575. mr r30,r4
  576. ori r3,r5,CLONE_VM /* flags */
  577. oris r3,r3,(CLONE_UNTRACED>>16)
  578. li r4,0 /* new sp (unused) */
  579. li r0,__NR_clone
  580. sc
  581. cmpdi 0,r3,0 /* parent or child? */
  582. bne 1f /* return if parent */
  583. li r0,0
  584. stdu r0,-STACK_FRAME_OVERHEAD(r1)
  585. ld r2,8(r29)
  586. ld r29,0(r29)
  587. mtlr r29 /* fn addr in lr */
  588. mr r3,r30 /* load arg and call fn */
  589. blrl
  590. li r0,__NR_exit /* exit after child exits */
  591. li r3,0
  592. sc
  593. 1: addi r1,r1,STACK_FRAME_OVERHEAD
  594. ld r29,-24(r1)
  595. ld r30,-16(r1)
  596. blr
  597. /*
  598. * disable_kernel_fp()
  599. * Disable the FPU.
  600. */
  601. _GLOBAL(disable_kernel_fp)
  602. mfmsr r3
  603. rldicl r0,r3,(63-MSR_FP_LG),1
  604. rldicl r3,r0,(MSR_FP_LG+1),0
  605. mtmsrd r3 /* disable use of fpu now */
  606. isync
  607. blr
  608. /*
  609. * giveup_fpu(tsk)
  610. * Disable FP for the task given as the argument,
  611. * and save the floating-point registers in its thread_struct.
  612. * Enables the FPU for use in the kernel on return.
  613. */
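/*
 * Illustrative addition (not in the original source): a hedged sketch of the
 * usual lazy-FPU pattern on the C side, roughly what enable_kernel_fp() does
 * (details differ between SMP and UP builds).
 *
 *	#ifdef CONFIG_SMP
 *		if (current->thread.regs && (current->thread.regs->msr & MSR_FP))
 *			giveup_fpu(current);
 *		else
 *			giveup_fpu(NULL);	// just enables FP in the MSR
 *	#else
 *		giveup_fpu(last_task_used_math);
 *	#endif
 */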
  614. _GLOBAL(giveup_fpu)
  615. mfmsr r5
  616. ori r5,r5,MSR_FP
  617. mtmsrd r5 /* enable use of fpu now */
  618. isync
  619. cmpdi 0,r3,0
  620. beqlr- /* if no previous owner, done */
  621. addi r3,r3,THREAD /* want THREAD of task */
  622. ld r5,PT_REGS(r3)
  623. cmpdi 0,r5,0
  624. SAVE_32FPRS(0, r3)
  625. mffs fr0
  626. stfd fr0,THREAD_FPSCR(r3)
  627. beq 1f
  628. ld r4,_MSR-STACK_FRAME_OVERHEAD(r5)
  629. li r3,MSR_FP|MSR_FE0|MSR_FE1
  630. andc r4,r4,r3 /* disable FP for previous task */
  631. std r4,_MSR-STACK_FRAME_OVERHEAD(r5)
  632. 1:
  633. #ifndef CONFIG_SMP
  634. li r5,0
  635. ld r4,last_task_used_math@got(r2)
  636. std r5,0(r4)
  637. #endif /* CONFIG_SMP */
  638. blr
  639. #ifdef CONFIG_ALTIVEC
  640. #if 0 /* this has no callers for now */
  641. /*
  642. * disable_kernel_altivec()
  643. * Disable the VMX.
  644. */
  645. _GLOBAL(disable_kernel_altivec)
  646. mfmsr r3
  647. rldicl r0,r3,(63-MSR_VEC_LG),1
  648. rldicl r3,r0,(MSR_VEC_LG+1),0
  649. mtmsrd r3 /* disable use of VMX now */
  650. isync
  651. blr
  652. #endif /* 0 */
  653. /*
  654. * giveup_altivec(tsk)
  655. * Disable VMX for the task given as the argument,
  656. * and save the vector registers in its thread_struct.
  657. * Enables the VMX for use in the kernel on return.
  658. */
  659. _GLOBAL(giveup_altivec)
  660. mfmsr r5
  661. oris r5,r5,MSR_VEC@h
  662. mtmsrd r5 /* enable use of VMX now */
  663. isync
  664. cmpdi 0,r3,0
  665. beqlr- /* if no previous owner, done */
  666. addi r3,r3,THREAD /* want THREAD of task */
  667. ld r5,PT_REGS(r3)
  668. cmpdi 0,r5,0
  669. SAVE_32VRS(0,r4,r3)
  670. mfvscr vr0
  671. li r4,THREAD_VSCR
  672. stvx vr0,r4,r3
  673. beq 1f
  674. ld r4,_MSR-STACK_FRAME_OVERHEAD(r5)
  675. lis r3,MSR_VEC@h
  676. andc r4,r4,r3 /* disable VMX for previous task */
  677. std r4,_MSR-STACK_FRAME_OVERHEAD(r5)
  678. 1:
  679. #ifndef CONFIG_SMP
  680. li r5,0
  681. ld r4,last_task_used_altivec@got(r2)
  682. std r5,0(r4)
  683. #endif /* CONFIG_SMP */
  684. blr
  685. #endif /* CONFIG_ALTIVEC */
  686. _GLOBAL(__setup_cpu_power3)
  687. blr
  688. _GLOBAL(execve)
  689. li r0,__NR_execve
  690. sc
  691. bnslr
  692. neg r3,r3
  693. blr
  694. /* kexec_wait(phys_cpu)
  695. *
  696. * wait for the flag to change, indicating this kernel is going away but
  697. * the slave code for the next one is at addresses 0 to 0x100.
  698. *
  699. * This is used by all slaves.
  700. *
  701. * Physical (hardware) cpu id should be in r3.
  702. */
  703. _GLOBAL(kexec_wait)
  704. bl 1f
  705. 1: mflr r5
  706. addi r5,r5,kexec_flag-1b
  707. 99: HMT_LOW
  708. #ifdef CONFIG_KEXEC /* use no memory without kexec */
  709. lwz r4,0(r5)
  710. cmpwi 0,r4,0
  711. bnea 0x60
  712. #endif
  713. b 99b
  714. /* this can be in text because we won't change it until we are
  715. * running in real mode anyway
  716. */
  717. kexec_flag:
  718. .long 0
  719. #ifdef CONFIG_KEXEC
  720. /* kexec_smp_wait(void)
  721. *
  722. * call with interrupts off
  723. * note: this is a terminal routine, it does not save lr
  724. *
  725. * get phys id from paca
  726. * set paca id to -1 to say we got here
  727. * switch to real mode
  728. * join other cpus in kexec_wait(phys_id)
  729. */
  730. _GLOBAL(kexec_smp_wait)
  731. lhz r3,PACAHWCPUID(r13)
  732. li r4,-1
  733. sth r4,PACAHWCPUID(r13) /* let others know we left */
  734. bl real_mode
  735. b .kexec_wait
  736. /*
  737. * switch to real mode (turn mmu off)
  738. * we use the early kernel trick that the hardware ignores bits
  739. * 0 and 1 (big endian) of the effective address in real mode
  740. *
  741. * don't overwrite r3 here, it is live for kexec_wait above.
  742. */
  743. real_mode: /* assume normal blr return */
  744. 1: li r9,MSR_RI
  745. li r10,MSR_DR|MSR_IR
  746. mflr r11 /* return address to SRR0 */
  747. mfmsr r12
  748. andc r9,r12,r9
  749. andc r10,r12,r10
  750. mtmsrd r9,1
  751. mtspr SPRN_SRR1,r10
  752. mtspr SPRN_SRR0,r11
  753. rfid
  754. /*
  755. * kexec_sequence(newstack, start, image, control, clear_all())
  756. *
  757. * does the grungy work with stack switching and real mode switches
  758. * also does simple calls to other code
  759. */
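/*
 * Illustrative addition (not in the original source): the C-side prototype
 * implied by the comment above and the register usage below; the exact
 * argument types are assumptions.
 *
 *	void kexec_sequence(void *newstack, unsigned long start,
 *			    void *image, void *control,
 *			    void (*clear_all)(void));
 *
 * newstack is the dedicated kexec stack, start the physical entry point of
 * the new kernel, image the kimage, control an (unused here) control page,
 * and clear_all the hash-clearing hook this code calls through its function
 * descriptor (ppc_md.hash_clear_all in the comment below).
 */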
  760. _GLOBAL(kexec_sequence)
  761. mflr r0
  762. std r0,16(r1)
  763. /* switch stacks to newstack -- &kexec_stack.stack */
  764. stdu r1,THREAD_SIZE-112(r3)
  765. mr r1,r3
  766. li r0,0
  767. std r0,16(r1)
  768. /* save regs for local vars on new stack.
  769. * yes, we won't go back, but ...
  770. */
  771. std r31,-8(r1)
  772. std r30,-16(r1)
  773. std r29,-24(r1)
  774. std r28,-32(r1)
  775. std r27,-40(r1)
  776. std r26,-48(r1)
  777. std r25,-56(r1)
  778. stdu r1,-112-64(r1)
  779. /* save args into preserved regs */
  780. mr r31,r3 /* newstack (both) */
  781. mr r30,r4 /* start (real) */
  782. mr r29,r5 /* image (virt) */
  783. mr r28,r6 /* control, unused */
  784. mr r27,r7 /* clear_all() fn desc */
  785. mr r26,r8 /* spare */
  786. lhz r25,PACAHWCPUID(r13) /* get our phys cpu from paca */
  787. /* disable interrupts, we are overwriting kernel data next */
  788. mfmsr r3
  789. rlwinm r3,r3,0,17,15
  790. mtmsrd r3,1
  791. /* copy dest pages, flush whole dest image */
  792. mr r3,r29
  793. bl .kexec_copy_flush /* (image) */
  794. /* turn off mmu */
  795. bl real_mode
  796. /* clear out hardware hash page table and tlb */
  797. ld r5,0(r27) /* deref function descriptor */
  798. mtctr r5
  799. bctrl /* ppc_md.hash_clear_all(void); */
  800. /*
  801. * kexec image calling is:
  802. * the first 0x100 bytes of the entry point are copied to 0
  803. *
  804. * all slaves branch to slave = 0x60 (absolute)
  805. * slave(phys_cpu_id);
  806. *
  807. * master goes to start = entry point
  808. * start(phys_cpu_id, start, 0);
  809. *
  810. *
  811. * a wrapper is needed to call existing kernels, here is an approximate
  812. * description of one method:
  813. *
  814. * v2: (2.6.10)
  815. * start will be near the boot_block (maybe 0x100 bytes before it?)
  816. * it will have a 0x60, which will b to boot_block, where it will wait
  817. * and 0 will store phys into struct boot-block and load r3 from there,
  818. * copy kernel 0-0x100 and tell slaves to back down to 0x60 again
  819. *
  820. * v1: (2.6.9)
  821. * boot block will have all cpus scanning device tree to see if they
  822. * are the boot cpu ?????
  823. * other device tree differences (prop sizes, va vs pa, etc)...
  824. */
  825. /* copy 0x100 bytes starting at start to 0 */
  826. li r3,0
  827. mr r4,r30
  828. li r5,0x100
  829. li r6,0
  830. bl .copy_and_flush /* (dest, src, copy limit, start offset) */
  831. 1: /* assume normal blr return */
  832. /* release other cpus to the new kernel secondary start at 0x60 */
  833. mflr r5
  834. li r6,1
  835. stw r6,kexec_flag-1b(5)
  836. mr r3,r25 # my phys cpu
  837. mr r4,r30 # start, aka phys mem offset
  838. mtlr 4
  839. li r5,0
  840. blr /* image->start(physid, image->start, 0); */
  841. #endif /* CONFIG_KEXEC */
  842. /* Why isn't this a) automatic, b) written in 'C'? */
  843. .balign 8
  844. _GLOBAL(sys_call_table32)
  845. .llong .sys_restart_syscall /* 0 */
  846. .llong .sys_exit
  847. .llong .ppc_fork
  848. .llong .sys_read
  849. .llong .sys_write
  850. .llong .compat_sys_open /* 5 */
  851. .llong .sys_close
  852. .llong .compat_sys_waitpid
  853. .llong .compat_sys_creat
  854. .llong .sys_link
  855. .llong .sys_unlink /* 10 */
  856. .llong .compat_sys_execve
  857. .llong .sys_chdir
  858. .llong .compat_sys_time
  859. .llong .sys_mknod
  860. .llong .sys_chmod /* 15 */
  861. .llong .sys_lchown
  862. .llong .sys_ni_syscall /* old break syscall */
  863. .llong .sys_ni_syscall /* old stat syscall */
  864. .llong .ppc32_lseek
  865. .llong .sys_getpid /* 20 */
  866. .llong .compat_sys_mount
  867. .llong .sys_oldumount
  868. .llong .sys_setuid
  869. .llong .sys_getuid
  870. .llong .compat_sys_stime /* 25 */
  871. .llong .compat_sys_ptrace
  872. .llong .sys_alarm
  873. .llong .sys_ni_syscall /* old fstat syscall */
  874. .llong .compat_sys_pause
  875. .llong .compat_sys_utime /* 30 */
  876. .llong .sys_ni_syscall /* old stty syscall */
  877. .llong .sys_ni_syscall /* old gtty syscall */
  878. .llong .compat_sys_access
  879. .llong .compat_sys_nice
  880. .llong .sys_ni_syscall /* 35 - old ftime syscall */
  881. .llong .sys_sync
  882. .llong .compat_sys_kill
  883. .llong .sys_rename
  884. .llong .compat_sys_mkdir
  885. .llong .sys_rmdir /* 40 */
  886. .llong .sys_dup
  887. .llong .sys_pipe
  888. .llong .compat_sys_times
  889. .llong .sys_ni_syscall /* old prof syscall */
  890. .llong .sys_brk /* 45 */
  891. .llong .sys_setgid
  892. .llong .sys_getgid
  893. .llong .sys_signal
  894. .llong .sys_geteuid
  895. .llong .sys_getegid /* 50 */
  896. .llong .sys_acct
  897. .llong .sys_umount
  898. .llong .sys_ni_syscall /* old lock syscall */
  899. .llong .compat_sys_ioctl
  900. .llong .compat_sys_fcntl /* 55 */
  901. .llong .sys_ni_syscall /* old mpx syscall */
  902. .llong .compat_sys_setpgid
  903. .llong .sys_ni_syscall /* old ulimit syscall */
  904. .llong .sys_olduname
  905. .llong .compat_sys_umask /* 60 */
  906. .llong .sys_chroot
  907. .llong .sys_ustat
  908. .llong .sys_dup2
  909. .llong .sys_getppid
  910. .llong .sys_getpgrp /* 65 */
  911. .llong .sys_setsid
  912. .llong .compat_sys_sigaction
  913. .llong .sys_sgetmask
  914. .llong .compat_sys_ssetmask
  915. .llong .sys_setreuid /* 70 */
  916. .llong .sys_setregid
  917. .llong .ppc32_sigsuspend
  918. .llong .compat_sys_sigpending
  919. .llong .compat_sys_sethostname
  920. .llong .compat_sys_setrlimit /* 75 */
  921. .llong .compat_sys_old_getrlimit
  922. .llong .compat_sys_getrusage
  923. .llong .compat_sys_gettimeofday
  924. .llong .compat_sys_settimeofday
  925. .llong .compat_sys_getgroups /* 80 */
  926. .llong .compat_sys_setgroups
  927. .llong .sys_ni_syscall /* old select syscall */
  928. .llong .sys_symlink
  929. .llong .sys_ni_syscall /* old lstat syscall */
  930. .llong .compat_sys_readlink /* 85 */
  931. .llong .sys_uselib
  932. .llong .sys_swapon
  933. .llong .sys_reboot
  934. .llong .old32_readdir
  935. .llong .sys_mmap /* 90 */
  936. .llong .sys_munmap
  937. .llong .sys_truncate
  938. .llong .sys_ftruncate
  939. .llong .sys_fchmod
  940. .llong .sys_fchown /* 95 */
  941. .llong .compat_sys_getpriority
  942. .llong .compat_sys_setpriority
  943. .llong .sys_ni_syscall /* old profil syscall */
  944. .llong .compat_sys_statfs
  945. .llong .compat_sys_fstatfs /* 100 */
  946. .llong .sys_ni_syscall /* old ioperm syscall */
  947. .llong .compat_sys_socketcall
  948. .llong .compat_sys_syslog
  949. .llong .compat_sys_setitimer
  950. .llong .compat_sys_getitimer /* 105 */
  951. .llong .compat_sys_newstat
  952. .llong .compat_sys_newlstat
  953. .llong .compat_sys_newfstat
  954. .llong .sys_uname
  955. .llong .sys_ni_syscall /* 110 old iopl syscall */
  956. .llong .sys_vhangup
  957. .llong .sys_ni_syscall /* old idle syscall */
  958. .llong .sys_ni_syscall /* old vm86 syscall */
  959. .llong .compat_sys_wait4
  960. .llong .sys_swapoff /* 115 */
  961. .llong .compat_sys_sysinfo
  962. .llong .sys32_ipc
  963. .llong .sys_fsync
  964. .llong .ppc32_sigreturn
  965. .llong .ppc_clone /* 120 */
  966. .llong .compat_sys_setdomainname
  967. .llong .ppc_newuname
  968. .llong .sys_ni_syscall /* old modify_ldt syscall */
  969. .llong .compat_sys_adjtimex
  970. .llong .sys_mprotect /* 125 */
  971. .llong .compat_sys_sigprocmask
  972. .llong .sys_ni_syscall /* old create_module syscall */
  973. .llong .sys_init_module
  974. .llong .sys_delete_module
  975. .llong .sys_ni_syscall /* 130 old get_kernel_syms syscall */
  976. .llong .sys_quotactl
  977. .llong .compat_sys_getpgid
  978. .llong .sys_fchdir
  979. .llong .sys_bdflush
  980. .llong .compat_sys_sysfs /* 135 */
  981. .llong .ppc64_personality
  982. .llong .sys_ni_syscall /* for afs_syscall */
  983. .llong .sys_setfsuid
  984. .llong .sys_setfsgid
  985. .llong .sys_llseek /* 140 */
  986. .llong .compat_sys_getdents
  987. .llong .ppc32_select
  988. .llong .sys_flock
  989. .llong .sys_msync
  990. .llong .compat_sys_readv /* 145 */
  991. .llong .compat_sys_writev
  992. .llong .compat_sys_getsid
  993. .llong .sys_fdatasync
  994. .llong .compat_sys_sysctl
  995. .llong .sys_mlock /* 150 */
  996. .llong .sys_munlock
  997. .llong .sys_mlockall
  998. .llong .sys_munlockall
  999. .llong .compat_sys_sched_setparam
  1000. .llong .compat_sys_sched_getparam /* 155 */
  1001. .llong .compat_sys_sched_setscheduler
  1002. .llong .compat_sys_sched_getscheduler
  1003. .llong .sys_sched_yield
  1004. .llong .compat_sys_sched_get_priority_max
  1005. .llong .compat_sys_sched_get_priority_min /* 160 */
  1006. .llong .compat_sys_sched_rr_get_interval
  1007. .llong .compat_sys_nanosleep
  1008. .llong .sys_mremap
  1009. .llong .sys_setresuid
  1010. .llong .sys_getresuid /* 165 */
  1011. .llong .sys_ni_syscall /* old query_module syscall */
  1012. .llong .sys_poll
  1013. .llong .compat_sys_nfsservctl
  1014. .llong .sys_setresgid
  1015. .llong .sys_getresgid /* 170 */
  1016. .llong .compat_sys_prctl
  1017. .llong .ppc32_rt_sigreturn
  1018. .llong .compat_sys_rt_sigaction
  1019. .llong .compat_sys_rt_sigprocmask
  1020. .llong .compat_sys_rt_sigpending /* 175 */
  1021. .llong .compat_sys_rt_sigtimedwait
  1022. .llong .compat_sys_rt_sigqueueinfo
  1023. .llong .ppc32_rt_sigsuspend
  1024. .llong .compat_sys_pread64
  1025. .llong .compat_sys_pwrite64 /* 180 */
  1026. .llong .sys_chown
  1027. .llong .sys_getcwd
  1028. .llong .sys_capget
  1029. .llong .sys_capset
  1030. .llong .compat_sys_sigaltstack /* 185 */
  1031. .llong .compat_sys_sendfile
  1032. .llong .sys_ni_syscall /* reserved for streams1 */
  1033. .llong .sys_ni_syscall /* reserved for streams2 */
  1034. .llong .ppc_vfork
  1035. .llong .compat_sys_getrlimit /* 190 */
  1036. .llong .compat_sys_readahead
  1037. .llong .compat_sys_mmap2
  1038. .llong .compat_sys_truncate64
  1039. .llong .compat_sys_ftruncate64
  1040. .llong .sys_stat64 /* 195 */
  1041. .llong .sys_lstat64
  1042. .llong .sys_fstat64
  1043. .llong .compat_sys_pciconfig_read
  1044. .llong .compat_sys_pciconfig_write
  1045. .llong .compat_sys_pciconfig_iobase /* 200 - pciconfig_iobase */
  1046. .llong .sys_ni_syscall /* reserved for MacOnLinux */
  1047. .llong .sys_getdents64
  1048. .llong .sys_pivot_root
  1049. .llong .compat_sys_fcntl64
  1050. .llong .sys_madvise /* 205 */
  1051. .llong .sys_mincore
  1052. .llong .sys_gettid
  1053. .llong .sys_tkill
  1054. .llong .sys_setxattr
  1055. .llong .sys_lsetxattr /* 210 */
  1056. .llong .sys_fsetxattr
  1057. .llong .sys_getxattr
  1058. .llong .sys_lgetxattr
  1059. .llong .sys_fgetxattr
  1060. .llong .sys_listxattr /* 215 */
  1061. .llong .sys_llistxattr
  1062. .llong .sys_flistxattr
  1063. .llong .sys_removexattr
  1064. .llong .sys_lremovexattr
  1065. .llong .sys_fremovexattr /* 220 */
  1066. .llong .compat_sys_futex
  1067. .llong .compat_sys_sched_setaffinity
  1068. .llong .compat_sys_sched_getaffinity
  1069. .llong .sys_ni_syscall
  1070. .llong .sys_ni_syscall /* 225 - reserved for tux */
  1071. .llong .compat_sys_sendfile64
  1072. .llong .compat_sys_io_setup
  1073. .llong .sys_io_destroy
  1074. .llong .compat_sys_io_getevents
  1075. .llong .compat_sys_io_submit
  1076. .llong .sys_io_cancel
  1077. .llong .sys_set_tid_address
  1078. .llong .ppc32_fadvise64
  1079. .llong .sys_exit_group
  1080. .llong .ppc32_lookup_dcookie /* 235 */
  1081. .llong .sys_epoll_create
  1082. .llong .sys_epoll_ctl
  1083. .llong .sys_epoll_wait
  1084. .llong .sys_remap_file_pages
  1085. .llong .ppc32_timer_create /* 240 */
  1086. .llong .compat_sys_timer_settime
  1087. .llong .compat_sys_timer_gettime
  1088. .llong .sys_timer_getoverrun
  1089. .llong .sys_timer_delete
  1090. .llong .compat_sys_clock_settime/* 245 */
  1091. .llong .compat_sys_clock_gettime
  1092. .llong .compat_sys_clock_getres
  1093. .llong .compat_sys_clock_nanosleep
  1094. .llong .ppc32_swapcontext
  1095. .llong .compat_sys_tgkill /* 250 */
  1096. .llong .compat_sys_utimes
  1097. .llong .compat_sys_statfs64
  1098. .llong .compat_sys_fstatfs64
  1099. .llong .ppc_fadvise64_64 /* 32bit only fadvise64_64 */
  1100. .llong .ppc_rtas /* 255 */
  1101. .llong .sys_ni_syscall /* 256 reserved for sys_debug_setcontext */
  1102. .llong .sys_ni_syscall /* 257 reserved for vserver */
  1103. .llong .sys_ni_syscall /* 258 reserved for new sys_remap_file_pages */
  1104. .llong .compat_sys_mbind
  1105. .llong .compat_sys_get_mempolicy /* 260 */
  1106. .llong .compat_sys_set_mempolicy
  1107. .llong .compat_sys_mq_open
  1108. .llong .sys_mq_unlink
  1109. .llong .compat_sys_mq_timedsend
  1110. .llong .compat_sys_mq_timedreceive /* 265 */
  1111. .llong .compat_sys_mq_notify
  1112. .llong .compat_sys_mq_getsetattr
  1113. .llong .compat_sys_kexec_load
  1114. .llong .compat_sys_add_key
  1115. .llong .compat_sys_request_key /* 270 */
  1116. .llong .compat_sys_keyctl
  1117. .llong .compat_sys_waitid
  1118. .llong .compat_sys_ioprio_set
  1119. .llong .compat_sys_ioprio_get
  1120. .llong .sys_inotify_init /* 275 */
  1121. .llong .sys_inotify_add_watch
  1122. .llong .sys_inotify_rm_watch
  1123. .balign 8
  1124. _GLOBAL(sys_call_table)
  1125. .llong .sys_restart_syscall /* 0 */
  1126. .llong .sys_exit
  1127. .llong .ppc_fork
  1128. .llong .sys_read
  1129. .llong .sys_write
  1130. .llong .sys_open /* 5 */
  1131. .llong .sys_close
  1132. .llong .sys_waitpid
  1133. .llong .sys_creat
  1134. .llong .sys_link
  1135. .llong .sys_unlink /* 10 */
  1136. .llong .sys_execve
  1137. .llong .sys_chdir
  1138. .llong .sys64_time
  1139. .llong .sys_mknod
  1140. .llong .sys_chmod /* 15 */
  1141. .llong .sys_lchown
  1142. .llong .sys_ni_syscall /* old break syscall */
  1143. .llong .sys_ni_syscall /* old stat syscall */
  1144. .llong .sys_lseek
  1145. .llong .sys_getpid /* 20 */
  1146. .llong .sys_mount
  1147. .llong .sys_ni_syscall /* old umount syscall */
  1148. .llong .sys_setuid
  1149. .llong .sys_getuid
  1150. .llong .sys_stime /* 25 */
  1151. .llong .sys_ptrace
  1152. .llong .sys_alarm
  1153. .llong .sys_ni_syscall /* old fstat syscall */
  1154. .llong .sys_pause
  1155. .llong .sys_utime /* 30 */
  1156. .llong .sys_ni_syscall /* old stty syscall */
  1157. .llong .sys_ni_syscall /* old gtty syscall */
  1158. .llong .sys_access
  1159. .llong .sys_nice
  1160. .llong .sys_ni_syscall /* 35 - old ftime syscall */
  1161. .llong .sys_sync
  1162. .llong .sys_kill
  1163. .llong .sys_rename
  1164. .llong .sys_mkdir
  1165. .llong .sys_rmdir /* 40 */
  1166. .llong .sys_dup
  1167. .llong .sys_pipe
  1168. .llong .sys_times
  1169. .llong .sys_ni_syscall /* old prof syscall */
  1170. .llong .sys_brk /* 45 */
  1171. .llong .sys_setgid
  1172. .llong .sys_getgid
  1173. .llong .sys_signal
  1174. .llong .sys_geteuid
  1175. .llong .sys_getegid /* 50 */
  1176. .llong .sys_acct
  1177. .llong .sys_umount
  1178. .llong .sys_ni_syscall /* old lock syscall */
  1179. .llong .sys_ioctl
  1180. .llong .sys_fcntl /* 55 */
  1181. .llong .sys_ni_syscall /* old mpx syscall */
  1182. .llong .sys_setpgid
  1183. .llong .sys_ni_syscall /* old ulimit syscall */
  1184. .llong .sys_ni_syscall /* old uname syscall */
  1185. .llong .sys_umask /* 60 */
  1186. .llong .sys_chroot
  1187. .llong .sys_ustat
  1188. .llong .sys_dup2
  1189. .llong .sys_getppid
  1190. .llong .sys_getpgrp /* 65 */
  1191. .llong .sys_setsid
  1192. .llong .sys_ni_syscall
  1193. .llong .sys_sgetmask
  1194. .llong .sys_ssetmask
  1195. .llong .sys_setreuid /* 70 */
  1196. .llong .sys_setregid
  1197. .llong .sys_ni_syscall
  1198. .llong .sys_ni_syscall
  1199. .llong .sys_sethostname
  1200. .llong .sys_setrlimit /* 75 */
  1201. .llong .sys_ni_syscall /* old getrlimit syscall */
  1202. .llong .sys_getrusage
  1203. .llong .sys_gettimeofday
  1204. .llong .sys_settimeofday
  1205. .llong .sys_getgroups /* 80 */
  1206. .llong .sys_setgroups
  1207. .llong .sys_ni_syscall /* old select syscall */
  1208. .llong .sys_symlink
  1209. .llong .sys_ni_syscall /* old lstat syscall */
  1210. .llong .sys_readlink /* 85 */
  1211. .llong .sys_uselib
  1212. .llong .sys_swapon
  1213. .llong .sys_reboot
  1214. .llong .sys_ni_syscall /* old readdir syscall */
  1215. .llong .sys_mmap /* 90 */
  1216. .llong .sys_munmap
  1217. .llong .sys_truncate
  1218. .llong .sys_ftruncate
  1219. .llong .sys_fchmod
  1220. .llong .sys_fchown /* 95 */
  1221. .llong .sys_getpriority
  1222. .llong .sys_setpriority
  1223. .llong .sys_ni_syscall /* old profil syscall holder */
  1224. .llong .sys_statfs
  1225. .llong .sys_fstatfs /* 100 */
  1226. .llong .sys_ni_syscall /* old ioperm syscall */
  1227. .llong .sys_socketcall
  1228. .llong .sys_syslog
  1229. .llong .sys_setitimer
  1230. .llong .sys_getitimer /* 105 */
  1231. .llong .sys_newstat
  1232. .llong .sys_newlstat
  1233. .llong .sys_newfstat
  1234. .llong .sys_ni_syscall /* old uname syscall */
  1235. .llong .sys_ni_syscall /* 110 old iopl syscall */
  1236. .llong .sys_vhangup
  1237. .llong .sys_ni_syscall /* old idle syscall */
  1238. .llong .sys_ni_syscall /* old vm86 syscall */
  1239. .llong .sys_wait4
  1240. .llong .sys_swapoff /* 115 */
  1241. .llong .sys_sysinfo
  1242. .llong .sys_ipc
  1243. .llong .sys_fsync
  1244. .llong .sys_ni_syscall
  1245. .llong .ppc_clone /* 120 */
  1246. .llong .sys_setdomainname
  1247. .llong .ppc_newuname
  1248. .llong .sys_ni_syscall /* old modify_ldt syscall */
  1249. .llong .sys_adjtimex
  1250. .llong .sys_mprotect /* 125 */
  1251. .llong .sys_ni_syscall
  1252. .llong .sys_ni_syscall /* old create_module syscall */
  1253. .llong .sys_init_module
  1254. .llong .sys_delete_module
  1255. .llong .sys_ni_syscall /* 130 old get_kernel_syms syscall */
  1256. .llong .sys_quotactl
  1257. .llong .sys_getpgid
  1258. .llong .sys_fchdir
  1259. .llong .sys_bdflush
  1260. .llong .sys_sysfs /* 135 */
  1261. .llong .ppc64_personality
  1262. .llong .sys_ni_syscall /* for afs_syscall */
  1263. .llong .sys_setfsuid
  1264. .llong .sys_setfsgid
  1265. .llong .sys_llseek /* 140 */
  1266. .llong .sys_getdents
  1267. .llong .sys_select
  1268. .llong .sys_flock
  1269. .llong .sys_msync
  1270. .llong .sys_readv /* 145 */
  1271. .llong .sys_writev
  1272. .llong .sys_getsid
  1273. .llong .sys_fdatasync
  1274. .llong .sys_sysctl
  1275. .llong .sys_mlock /* 150 */
  1276. .llong .sys_munlock
  1277. .llong .sys_mlockall
  1278. .llong .sys_munlockall
  1279. .llong .sys_sched_setparam
  1280. .llong .sys_sched_getparam /* 155 */
  1281. .llong .sys_sched_setscheduler
  1282. .llong .sys_sched_getscheduler
  1283. .llong .sys_sched_yield
  1284. .llong .sys_sched_get_priority_max
  1285. .llong .sys_sched_get_priority_min /* 160 */
  1286. .llong .sys_sched_rr_get_interval
  1287. .llong .sys_nanosleep
  1288. .llong .sys_mremap
  1289. .llong .sys_setresuid
  1290. .llong .sys_getresuid /* 165 */
  1291. .llong .sys_ni_syscall /* old query_module syscall */
  1292. .llong .sys_poll
  1293. .llong .sys_nfsservctl
  1294. .llong .sys_setresgid
  1295. .llong .sys_getresgid /* 170 */
  1296. .llong .sys_prctl
  1297. .llong .ppc64_rt_sigreturn
  1298. .llong .sys_rt_sigaction
  1299. .llong .sys_rt_sigprocmask
  1300. .llong .sys_rt_sigpending /* 175 */
  1301. .llong .sys_rt_sigtimedwait
  1302. .llong .sys_rt_sigqueueinfo
  1303. .llong .ppc64_rt_sigsuspend
  1304. .llong .sys_pread64
  1305. .llong .sys_pwrite64 /* 180 */
  1306. .llong .sys_chown
  1307. .llong .sys_getcwd
  1308. .llong .sys_capget
  1309. .llong .sys_capset
  1310. .llong .sys_sigaltstack /* 185 */
  1311. .llong .sys_sendfile64
  1312. .llong .sys_ni_syscall /* reserved for streams1 */
  1313. .llong .sys_ni_syscall /* reserved for streams2 */
  1314. .llong .ppc_vfork
  1315. .llong .sys_getrlimit /* 190 */
  1316. .llong .sys_readahead
  1317. .llong .sys_ni_syscall /* 32bit only mmap2 */
  1318. .llong .sys_ni_syscall /* 32bit only truncate64 */
  1319. .llong .sys_ni_syscall /* 32bit only ftruncate64 */
  1320. .llong .sys_ni_syscall /* 195 - 32bit only stat64 */
  1321. .llong .sys_ni_syscall /* 32bit only lstat64 */
  1322. .llong .sys_ni_syscall /* 32bit only fstat64 */
  1323. .llong .sys_pciconfig_read
  1324. .llong .sys_pciconfig_write
  1325. .llong .sys_pciconfig_iobase /* 200 - pciconfig_iobase */
  1326. .llong .sys_ni_syscall /* reserved for MacOnLinux */
  1327. .llong .sys_getdents64
  1328. .llong .sys_pivot_root
  1329. .llong .sys_ni_syscall /* 32bit only fcntl64 */
  1330. .llong .sys_madvise /* 205 */
  1331. .llong .sys_mincore
  1332. .llong .sys_gettid
  1333. .llong .sys_tkill
  1334. .llong .sys_setxattr
  1335. .llong .sys_lsetxattr /* 210 */
  1336. .llong .sys_fsetxattr
  1337. .llong .sys_getxattr
  1338. .llong .sys_lgetxattr
  1339. .llong .sys_fgetxattr
  1340. .llong .sys_listxattr /* 215 */
  1341. .llong .sys_llistxattr
  1342. .llong .sys_flistxattr
  1343. .llong .sys_removexattr
  1344. .llong .sys_lremovexattr
  1345. .llong .sys_fremovexattr /* 220 */
  1346. .llong .sys_futex
  1347. .llong .sys_sched_setaffinity
  1348. .llong .sys_sched_getaffinity
  1349. .llong .sys_ni_syscall
  1350. .llong .sys_ni_syscall /* 225 - reserved for tux */
  1351. .llong .sys_ni_syscall /* 32bit only sendfile64 */
  1352. .llong .sys_io_setup
  1353. .llong .sys_io_destroy
  1354. .llong .sys_io_getevents
  1355. .llong .sys_io_submit /* 230 */
  1356. .llong .sys_io_cancel
  1357. .llong .sys_set_tid_address
  1358. .llong .sys_fadvise64
  1359. .llong .sys_exit_group
  1360. .llong .sys_lookup_dcookie /* 235 */
  1361. .llong .sys_epoll_create
  1362. .llong .sys_epoll_ctl
  1363. .llong .sys_epoll_wait
  1364. .llong .sys_remap_file_pages
  1365. .llong .sys_timer_create /* 240 */
  1366. .llong .sys_timer_settime
  1367. .llong .sys_timer_gettime
  1368. .llong .sys_timer_getoverrun
  1369. .llong .sys_timer_delete
  1370. .llong .sys_clock_settime /* 245 */
  1371. .llong .sys_clock_gettime
  1372. .llong .sys_clock_getres
  1373. .llong .sys_clock_nanosleep
  1374. .llong .ppc64_swapcontext
  1375. .llong .sys_tgkill /* 250 */
  1376. .llong .sys_utimes
  1377. .llong .sys_statfs64
  1378. .llong .sys_fstatfs64
  1379. .llong .sys_ni_syscall /* 32bit only fadvise64_64 */
  1380. .llong .ppc_rtas /* 255 */
  1381. .llong .sys_ni_syscall /* 256 reserved for sys_debug_setcontext */
  1382. .llong .sys_ni_syscall /* 257 reserved for vserver */
  1383. .llong .sys_ni_syscall /* 258 reserved for new sys_remap_file_pages */
  1384. .llong .sys_mbind
  1385. .llong .sys_get_mempolicy /* 260 */
  1386. .llong .sys_set_mempolicy
  1387. .llong .sys_mq_open
  1388. .llong .sys_mq_unlink
  1389. .llong .sys_mq_timedsend
  1390. .llong .sys_mq_timedreceive /* 265 */
  1391. .llong .sys_mq_notify
  1392. .llong .sys_mq_getsetattr
  1393. .llong .sys_kexec_load
  1394. .llong .sys_add_key
  1395. .llong .sys_request_key /* 270 */
  1396. .llong .sys_keyctl
  1397. .llong .sys_waitid
  1398. .llong .sys_ioprio_set
  1399. .llong .sys_ioprio_get
  1400. .llong .sys_inotify_init /* 275 */
  1401. .llong .sys_inotify_add_watch
  1402. .llong .sys_inotify_rm_watch