/* arch/ppc/kernel/misc.S */
  1. /*
  2. * This file contains miscellaneous low-level functions.
  3. * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
  4. *
  5. * Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
  6. * and Paul Mackerras.
  7. *
  8. * This program is free software; you can redistribute it and/or
  9. * modify it under the terms of the GNU General Public License
  10. * as published by the Free Software Foundation; either version
  11. * 2 of the License, or (at your option) any later version.
  12. *
  13. */
  14. #include <linux/config.h>
  15. #include <linux/sys.h>
  16. #include <asm/unistd.h>
  17. #include <asm/errno.h>
  18. #include <asm/processor.h>
  19. #include <asm/page.h>
  20. #include <asm/cache.h>
  21. #include <asm/cputable.h>
  22. #include <asm/mmu.h>
  23. #include <asm/ppc_asm.h>
  24. #include <asm/thread_info.h>
  25. #include <asm/asm-offsets.h>
	.text
	.align	5

/*
 * __delay(loops) -- busy-wait for `loops' iterations of the bdnz loop.
 * In:  r3 = loop count.  Returns immediately when r3 == 0.
 * Clobbers: CTR, cr0.
 */
_GLOBAL(__delay)
	cmpwi	0,r3,0			/* zero count? */
	mtctr	r3			/* CTR = loop count */
	beqlr				/* nothing to do for 0 */
1:	bdnz	1b			/* spin: decrement CTR until 0 */
	blr
/*
 * Returns (address we're running at) - (address we were linked at)
 * for use before the text and data are mapped to KERNELBASE.
 * Out: r3 = relocation offset.  Clobbers: r0, r4.  LR is preserved.
 */
_GLOBAL(reloc_offset)
	mflr	r0			/* save return address */
	bl	1f			/* LR = runtime address of label 1 */
1:	mflr	r3
	lis	r4,1b@ha		/* r4 = link-time address of label 1 */
	addi	r4,r4,1b@l
	subf	r3,r4,r3		/* offset = runtime - link-time */
	mtlr	r0			/* restore return address */
	blr
/*
 * add_reloc_offset(x) returns x + reloc_offset().
 * In:  r3 = x.  Out: r3 = x + (runtime - link-time address).
 * Clobbers: r0, r4, r5.  LR is preserved.
 */
_GLOBAL(add_reloc_offset)
	mflr	r0			/* save return address */
	bl	1f			/* LR = runtime address of label 1 */
1:	mflr	r5
	lis	r4,1b@ha		/* r4 = link-time address of label 1 */
	addi	r4,r4,1b@l
	subf	r5,r4,r5		/* r5 = relocation offset */
	add	r3,r3,r5
	mtlr	r0			/* restore return address */
	blr
/*
 * sub_reloc_offset(x) returns x - reloc_offset().
 * In:  r3 = x.  Out: r3 = x - (runtime - link-time address).
 * Clobbers: r0, r4, r5.  LR is preserved.
 */
_GLOBAL(sub_reloc_offset)
	mflr	r0			/* save return address */
	bl	1f			/* LR = runtime address of label 1 */
1:	mflr	r5
	lis	r4,1b@ha		/* r4 = link-time address of label 1 */
	addi	r4,r4,1b@l
	subf	r5,r4,r5		/* r5 = relocation offset */
	subf	r3,r5,r3
	mtlr	r0			/* restore return address */
	blr
/*
 * reloc_got2 runs through the .got2 section adding an offset
 * to each entry.
 * In:  r3 = offset to add to every 32-bit .got2 entry.
 * Clobbers: r0, r4, r7, r8, CTR, cr0.  LR saved in r11.
 */
_GLOBAL(reloc_got2)
	mflr	r11			/* save return address */
	lis	r7,__got2_start@ha
	addi	r7,r7,__got2_start@l
	lis	r8,__got2_end@ha
	addi	r8,r8,__got2_end@l
	subf	r8,r7,r8		/* size of .got2 in bytes */
	srwi.	r8,r8,2			/* number of 4-byte entries */
	beqlr				/* empty section -- nothing to do */
	mtctr	r8
	bl	1f			/* compute our own relocation offset */
1:	mflr	r0
	lis	r4,1b@ha
	addi	r4,r4,1b@l
	subf	r0,r4,r0		/* r0 = runtime - link-time */
	add	r7,r0,r7		/* relocate the table pointer itself */
2:	lwz	r0,0(r7)
	add	r0,r0,r3		/* adjust each entry by r3 */
	stw	r0,0(r7)
	addi	r7,r7,4
	bdnz	2b
	mtlr	r11
	blr
/*
 * identify_cpu,
 * called with r3 = data offset and r4 = CPU number
 * doesn't change r3
 *
 * Scans cpu_specs[] for an entry whose (PVR & mask) == value and stores
 * the matching entry's un-relocated address in cur_cpu_spec.
 * NOTE(review): the scan has no terminator check -- it assumes
 * cpu_specs ends with a catch-all entry; verify against cpu_specs[].
 * Clobbers: r5, r6, r7, r8, cr0.
 */
_GLOBAL(identify_cpu)
	addis	r8,r3,cpu_specs@ha
	addi	r8,r8,cpu_specs@l
	mfpvr	r7			/* processor version register */
1:
	lwz	r5,CPU_SPEC_PVR_MASK(r8)
	and	r5,r5,r7		/* mask off don't-care PVR bits */
	lwz	r6,CPU_SPEC_PVR_VALUE(r8)
	cmplw	0,r6,r5
	beq	1f			/* found a matching spec */
	addi	r8,r8,CPU_SPEC_ENTRY_SIZE
	b	1b
1:
	addis	r6,r3,cur_cpu_spec@ha
	addi	r6,r6,cur_cpu_spec@l
	sub	r8,r8,r3		/* store the un-relocated pointer */
	stw	r8,0(r6)
	blr
/*
 * do_cpu_ftr_fixups - goes through the list of CPU feature fixups
 * and writes nop's over sections of code that don't apply for this cpu.
 * r3 = data offset (not changed)
 *
 * Each 16-byte fixup entry is: feature mask, expected value,
 * section begin, section end.  When (features & mask) != value the
 * covered instructions are overwritten with nops and, on split-cache
 * CPUs, flushed from the d-cache / invalidated in the i-cache.
 * Clobbers: r0, r4-r10, CTR, cr0.
 */
_GLOBAL(do_cpu_ftr_fixups)
	/* Get CPU 0 features */
	addis	r6,r3,cur_cpu_spec@ha
	addi	r6,r6,cur_cpu_spec@l
	lwz	r4,0(r6)
	add	r4,r4,r3		/* relocate the spec pointer */
	lwz	r4,CPU_SPEC_FEATURES(r4)
	/* Get the fixup table */
	addis	r6,r3,__start___ftr_fixup@ha
	addi	r6,r6,__start___ftr_fixup@l
	addis	r7,r3,__stop___ftr_fixup@ha
	addi	r7,r7,__stop___ftr_fixup@l
	/* Do the fixup */
1:	cmplw	0,r6,r7			/* end of table reached? */
	bgelr
	addi	r6,r6,16		/* advance to next entry */
	lwz	r8,-16(r6)		/* mask */
	and	r8,r8,r4
	lwz	r9,-12(r6)		/* value */
	cmplw	0,r8,r9
	beq	1b			/* feature present: leave code alone */
	lwz	r8,-8(r6)		/* section begin */
	lwz	r9,-4(r6)		/* section end */
	subf.	r9,r8,r9
	beq	1b			/* empty section */
	/* write nops over the section of code */
	/* todo: if large section, add a branch at the start of it */
	srwi	r9,r9,2			/* bytes -> instruction count */
	mtctr	r9
	add	r8,r8,r3		/* relocate section address */
	lis	r0,0x60000000@h		/* nop */
3:	stw	r0,0(r8)
	andi.	r10,r4,CPU_FTR_SPLIT_ID_CACHE@l
	beq	2f			/* unified cache: no flush needed */
	dcbst	0,r8			/* suboptimal, but simpler */
	sync
	icbi	0,r8
2:	addi	r8,r8,4
	bdnz	3b
	sync				/* additional sync needed on g4 */
	isync
	b	1b
/*
 * call_setup_cpu - call the setup_cpu function for this cpu
 * r3 = data offset, r24 = cpu number
 *
 * Setup function is called with:
 * r3 = data offset
 * r4 = ptr to CPU spec (relocated)
 *
 * Returns directly when the spec has no setup function (NULL entry).
 */
_GLOBAL(call_setup_cpu)
	addis	r4,r3,cur_cpu_spec@ha
	addi	r4,r4,cur_cpu_spec@l
	lwz	r4,0(r4)
	add	r4,r4,r3		/* relocate the spec pointer */
	lwz	r5,CPU_SPEC_SETUP(r4)
	cmpi	0,r5,0			/* NULL setup function? */
	add	r5,r5,r3		/* relocate it (add leaves cr0 alone) */
	beqlr
	mtctr	r5
	bctr				/* tail-call the setup function */
#if defined(CONFIG_CPU_FREQ_PMAC) && defined(CONFIG_6xx)
/* This gets called by via-pmu.c to switch the PLL selection
 * on 750fx CPU. This function should really be moved to some
 * other place (as is most of the cpufreq code in via-pmu).
 *
 * In: r3 = PLL selection (0 = PLL0, non-zero = PLL1).
 * Runs with external interrupts disabled for the duration.
 */
_GLOBAL(low_choose_750fx_pll)
	/* Clear MSR:EE */
	mfmsr	r7
	rlwinm	r0,r7,0,17,15		/* mask out MSR_EE (bit 16) */
	mtmsr	r0
	/* If switching to PLL1, disable HID0:BTIC */
	cmplwi	cr0,r3,0
	beq	1f
	mfspr	r5,SPRN_HID0
	rlwinm	r5,r5,0,27,25		/* clear HID0:BTIC (bit 26) */
	sync
	mtspr	SPRN_HID0,r5
	isync
	sync
1:
	/* Calc new HID1 value */
	mfspr	r4,SPRN_HID1
	rlwinm	r5,r3,16,15,15		/* Build a HID1:PS bit from parameter */
	rlwinm	r4,r4,0,16,14		/* Clear out HID1:PS from value read */
	or	r4,r4,r5
	mtspr	SPRN_HID1,r4
	/* Store new HID1 image for the nap/wake code */
	rlwinm	r6,r1,0,0,18		/* thread_info from stack (8K stacks) */
	lwz	r6,TI_CPU(r6)		/* current CPU number */
	slwi	r6,r6,2			/* cpu * sizeof(u32) */
	addis	r6,r6,nap_save_hid1@ha
	stw	r4,nap_save_hid1@l(r6)
	/* If switching to PLL0, enable HID0:BTIC */
	cmplwi	cr0,r3,0
	bne	1f
	mfspr	r5,SPRN_HID0
	ori	r5,r5,HID0_BTIC
	sync
	mtspr	SPRN_HID0,r5
	isync
	sync
1:
	/* Return, restoring the original MSR (re-enables EE if it was set) */
	mtmsr	r7
	blr
/*
 * low_choose_7447a_dfs - set the 7447A dynamic frequency switch bit.
 * In: r3 = DFS selection (0/1), inserted into HID1 bit 9.
 * External interrupts are disabled around the HID1 update.
 */
_GLOBAL(low_choose_7447a_dfs)
	/* Clear MSR:EE */
	mfmsr	r7
	rlwinm	r0,r7,0,17,15		/* mask out MSR_EE (bit 16) */
	mtmsr	r0
	/* Calc new HID1 value */
	mfspr	r4,SPRN_HID1
	insrwi	r4,r3,1,9		/* insert parameter into bit 9 */
	sync
	mtspr	SPRN_HID1,r4
	sync
	isync
	/* Return, restoring the original MSR */
	mtmsr	r7
	blr
#endif /* CONFIG_CPU_FREQ_PMAC && CONFIG_6xx */
/*
 * complement mask on the msr then "or" some values on.
 * _nmask_and_or_msr(nmask, value_to_or)
 * In: r3 = bits to clear, r4 = bits to set.  Clobbers r0.
 */
_GLOBAL(_nmask_and_or_msr)
	mfmsr	r0			/* Get current msr */
	andc	r0,r0,r3		/* And off the bits set in r3 (first parm) */
	or	r0,r0,r4		/* Or on the bits in r4 (second parm) */
	SYNC				/* Some chip revs have problems here... */
	mtmsr	r0			/* Update machine state */
	isync
	blr				/* Done */
/*
 * Flush MMU TLB -- invalidate every TLB entry.
 * Register usage differs per configuration branch; on the classic-hash
 * SMP path r0, r7-r10 and CTR/cr0 are clobbered and mmu_hash_lock is
 * taken with translation disabled.
 */
_GLOBAL(_tlbia)
#if defined(CONFIG_40x)
	sync			/* Flush to memory before changing mapping */
	tlbia
	isync			/* Flush shadow TLB */
#elif defined(CONFIG_44x)
	li	r3,0
	sync
	/* Load high watermark */
	lis	r4,tlb_44x_hwater@ha
	lwz	r5,tlb_44x_hwater@l(r4)
1:	tlbwe	r3,r3,PPC44x_TLB_PAGEID	/* write entry r3 invalid (V clear) */
	addi	r3,r3,1
	cmpw	0,r3,r5
	ble	1b
	isync
#elif defined(CONFIG_FSL_BOOKE)
	/* Invalidate all entries in TLB0 */
	li	r3, 0x04
	tlbivax	0,3
	/* Invalidate all entries in TLB1 */
	li	r3, 0x0c
	tlbivax	0,3
	/* Invalidate all entries in TLB2 */
	li	r3, 0x14
	tlbivax	0,3
	/* Invalidate all entries in TLB3 */
	li	r3, 0x1c
	tlbivax	0,3
	msync
#ifdef CONFIG_SMP
	tlbsync
#endif /* CONFIG_SMP */
#else /* !(CONFIG_40x || CONFIG_44x || CONFIG_FSL_BOOKE) */
#if defined(CONFIG_SMP)
	rlwinm	r8,r1,0,0,18	/* thread_info from stack pointer */
	lwz	r8,TI_CPU(r8)
	oris	r8,r8,10	/* lock token: cpu | 0x000a0000 */
	mfmsr	r10		/* save MSR for restore below */
	SYNC
	rlwinm	r0,r10,0,17,15	/* clear bit 16 (MSR_EE) */
	rlwinm	r0,r0,0,28,26	/* clear DR */
	mtmsr	r0
	SYNC_601
	isync
	lis	r9,mmu_hash_lock@h
	ori	r9,r9,mmu_hash_lock@l
	tophys(r9,r9)		/* physical address: translation is off */
10:	lwarx	r7,0,r9		/* spin until mmu_hash_lock is free */
	cmpwi	0,r7,0
	bne-	10b
	stwcx.	r8,0,r9		/* claim it with our token */
	bne-	10b		/* lost reservation -- retry */
	sync
	tlbia
	sync
	TLBSYNC
	li	r0,0
	stw	r0,0(r9)	/* clear mmu_hash_lock */
	mtmsr	r10		/* restore EE/DR */
	SYNC_601
	isync
#else /* CONFIG_SMP */
	sync
	tlbia
	sync
#endif /* CONFIG_SMP */
#endif /* ! defined(CONFIG_40x) */
	blr
/*
 * Flush MMU TLB for a particular address
 * In: r3 = effective address whose translation is to be invalidated.
 * On the classic-hash SMP path r0, r7-r10 and cr0 are clobbered and
 * mmu_hash_lock is taken with translation disabled.
 */
_GLOBAL(_tlbie)
#if defined(CONFIG_40x)
	tlbsx.	r3, 0, r3	/* find TLB index for this EA */
	bne	10f		/* no entry: nothing to invalidate */
	sync
/* There are only 64 TLB entries, so r3 < 64, which means bit 25 is clear.
 * Since 25 is the V bit in the TLB_TAG, loading this value will invalidate
 * the TLB entry. */
	tlbwe	r3, r3, TLB_TAG
	isync
10:
#elif defined(CONFIG_44x)
	mfspr	r4,SPRN_MMUCR
	mfspr	r5,SPRN_PID		/* Get PID */
	rlwimi	r4,r5,0,24,31		/* Set TID */
	mtspr	SPRN_MMUCR,r4
	tlbsx.	r3, 0, r3		/* find TLB index for this EA */
	bne	10f			/* no entry: nothing to invalidate */
	sync
	/* There are only 64 TLB entries, so r3 < 64,
	 * which means bit 22, is clear.  Since 22 is
	 * the V bit in the TLB_PAGEID, loading this
	 * value will invalidate the TLB entry.
	 */
	tlbwe	r3, r3, PPC44x_TLB_PAGEID
	isync
10:
#elif defined(CONFIG_FSL_BOOKE)
	rlwinm	r4, r3, 0, 0, 19	/* page-align the address */
	ori	r5, r4, 0x08	/* TLBSEL = 1 */
	ori	r6, r4, 0x10	/* TLBSEL = 2 */
	ori	r7, r4, 0x18	/* TLBSEL = 3 */
	tlbivax	0, r4
	tlbivax	0, r5
	tlbivax	0, r6
	tlbivax	0, r7
	msync
#if defined(CONFIG_SMP)
	tlbsync
#endif /* CONFIG_SMP */
#else /* !(CONFIG_40x || CONFIG_44x || CONFIG_FSL_BOOKE) */
#if defined(CONFIG_SMP)
	rlwinm	r8,r1,0,0,18	/* thread_info from stack pointer */
	lwz	r8,TI_CPU(r8)
	oris	r8,r8,11	/* lock token: cpu | 0x000b0000 */
	mfmsr	r10		/* save MSR for restore below */
	SYNC
	rlwinm	r0,r10,0,17,15	/* clear bit 16 (MSR_EE) */
	rlwinm	r0,r0,0,28,26	/* clear DR */
	mtmsr	r0
	SYNC_601
	isync
	lis	r9,mmu_hash_lock@h
	ori	r9,r9,mmu_hash_lock@l
	tophys(r9,r9)		/* physical address: translation is off */
10:	lwarx	r7,0,r9		/* spin until mmu_hash_lock is free */
	cmpwi	0,r7,0
	bne-	10b
	stwcx.	r8,0,r9		/* claim it with our token */
	bne-	10b		/* lost reservation -- retry */
	eieio
	tlbie	r3
	sync
	TLBSYNC
	li	r0,0
	stw	r0,0(r9)	/* clear mmu_hash_lock */
	mtmsr	r10		/* restore EE/DR */
	SYNC_601
	isync
#else /* CONFIG_SMP */
	tlbie	r3
	sync
#endif /* CONFIG_SMP */
#endif /* ! CONFIG_40x */
	blr
  412. /*
  413. * Flush instruction cache.
  414. * This is a no-op on the 601.
  415. */
  416. _GLOBAL(flush_instruction_cache)
  417. #if defined(CONFIG_8xx)
  418. isync
  419. lis r5, IDC_INVALL@h
  420. mtspr SPRN_IC_CST, r5
  421. #elif defined(CONFIG_4xx)
  422. #ifdef CONFIG_403GCX
  423. li r3, 512
  424. mtctr r3
  425. lis r4, KERNELBASE@h
  426. 1: iccci 0, r4
  427. addi r4, r4, 16
  428. bdnz 1b
  429. #else
  430. lis r3, KERNELBASE@h
  431. iccci 0,r3
  432. #endif
  433. #elif CONFIG_FSL_BOOKE
  434. BEGIN_FTR_SECTION
  435. mfspr r3,SPRN_L1CSR0
  436. ori r3,r3,L1CSR0_CFI|L1CSR0_CLFC
  437. /* msync; isync recommended here */
  438. mtspr SPRN_L1CSR0,r3
  439. isync
  440. blr
  441. END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)
  442. mfspr r3,SPRN_L1CSR1
  443. ori r3,r3,L1CSR1_ICFI|L1CSR1_ICLFR
  444. mtspr SPRN_L1CSR1,r3
  445. #else
  446. mfspr r3,SPRN_PVR
  447. rlwinm r3,r3,16,16,31
  448. cmpwi 0,r3,1
  449. beqlr /* for 601, do nothing */
  450. /* 603/604 processor - use invalidate-all bit in HID0 */
  451. mfspr r3,SPRN_HID0
  452. ori r3,r3,HID0_ICFI
  453. mtspr SPRN_HID0,r3
  454. #endif /* CONFIG_8xx/4xx */
  455. isync
  456. blr
/*
 * Write any modified data cache blocks out to memory
 * and invalidate the corresponding instruction cache blocks.
 * This is a no-op on the 601.
 *
 * flush_icache_range(unsigned long start, unsigned long stop)
 * Clobbers: r3-r6, CTR, cr0.
 */
_GLOBAL(flush_icache_range)
BEGIN_FTR_SECTION
	blr				/* for 601, do nothing */
END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)
	li	r5,L1_CACHE_BYTES-1
	andc	r3,r3,r5		/* align start down to a cache line */
	subf	r4,r3,r4		/* byte length */
	add	r4,r4,r5		/* round up */
	srwi.	r4,r4,L1_CACHE_SHIFT	/* number of lines */
	beqlr				/* empty range */
	mtctr	r4
	mr	r6,r3			/* keep start for the icbi pass */
1:	dcbst	0,r3			/* push dirty lines to memory */
	addi	r3,r3,L1_CACHE_BYTES
	bdnz	1b
	sync				/* wait for dcbst's to get to ram */
	mtctr	r4
2:	icbi	0,r6			/* invalidate icache lines */
	addi	r6,r6,L1_CACHE_BYTES
	bdnz	2b
	sync				/* additional sync needed on g4 */
	isync
	blr
/*
 * Write any modified data cache blocks out to memory.
 * Does not invalidate the corresponding cache lines (especially for
 * any corresponding instruction cache).
 *
 * clean_dcache_range(unsigned long start, unsigned long stop)
 * Clobbers: r3-r5, CTR, cr0.
 */
_GLOBAL(clean_dcache_range)
	li	r5,L1_CACHE_BYTES-1
	andc	r3,r3,r5		/* align start down to a cache line */
	subf	r4,r3,r4		/* byte length */
	add	r4,r4,r5		/* round up */
	srwi.	r4,r4,L1_CACHE_SHIFT	/* number of lines */
	beqlr				/* empty range */
	mtctr	r4
1:	dcbst	0,r3			/* store (clean) a line, keep it valid */
	addi	r3,r3,L1_CACHE_BYTES
	bdnz	1b
	sync				/* wait for dcbst's to get to ram */
	blr
/*
 * Write any modified data cache blocks out to memory and invalidate them.
 * Does not invalidate the corresponding instruction cache blocks.
 *
 * flush_dcache_range(unsigned long start, unsigned long stop)
 * Clobbers: r3-r5, CTR, cr0.
 */
_GLOBAL(flush_dcache_range)
	li	r5,L1_CACHE_BYTES-1
	andc	r3,r3,r5		/* align start down to a cache line */
	subf	r4,r3,r4		/* byte length */
	add	r4,r4,r5		/* round up */
	srwi.	r4,r4,L1_CACHE_SHIFT	/* number of lines */
	beqlr				/* empty range */
	mtctr	r4
1:	dcbf	0,r3			/* flush: write back and invalidate */
	addi	r3,r3,L1_CACHE_BYTES
	bdnz	1b
	sync				/* wait for dcbf's to get to ram */
	blr
/*
 * Like above, but invalidate the D-cache. This is used by the 8xx
 * to invalidate the cache so the PPC core doesn't get stale data
 * from the CPM (no cache snooping here :-).
 *
 * invalidate_dcache_range(unsigned long start, unsigned long stop)
 * Clobbers: r3-r5, CTR, cr0.
 */
_GLOBAL(invalidate_dcache_range)
	li	r5,L1_CACHE_BYTES-1
	andc	r3,r3,r5		/* align start down to a cache line */
	subf	r4,r3,r4		/* byte length */
	add	r4,r4,r5		/* round up */
	srwi.	r4,r4,L1_CACHE_SHIFT	/* number of lines */
	beqlr				/* empty range */
	mtctr	r4
1:	dcbi	0,r3			/* invalidate line without writeback */
	addi	r3,r3,L1_CACHE_BYTES
	bdnz	1b
	sync				/* wait for dcbi's to complete */
	blr
#ifdef CONFIG_NOT_COHERENT_CACHE
/*
 * 40x cores have 8K or 16K dcache and 32 byte line size.
 * 44x has a 32K dcache and 32 byte line size.
 * 8xx has 1, 2, 4, 8K variants.
 * For now, cover the worst case of the 44x.
 * Must be called with external interrupts disabled.
 *
 * Displaces the whole d-cache by reading 2x its size worth of
 * kernel text/data; displaced dirty lines are written back by hw.
 * Clobbers: r3, r4, r5, CTR.
 */
#define CACHE_NWAYS	64
#define CACHE_NLINES	16
_GLOBAL(flush_dcache_all)
	li	r4, (2 * CACHE_NWAYS * CACHE_NLINES)
	mtctr	r4
	lis	r5, KERNELBASE@h
1:	lwz	r3, 0(r5)		/* Load one word from every line */
	addi	r5, r5, L1_CACHE_BYTES
	bdnz	1b
	blr
#endif /* CONFIG_NOT_COHERENT_CACHE */
/*
 * Flush a particular page from the data cache to RAM.
 * Note: this is necessary because the instruction cache does *not*
 * snoop from the data cache.
 * This is a no-op on the 601 which has a unified cache.
 *
 * void __flush_dcache_icache(void *page)
 * Clobbers: r3, r4, r6, CTR.
 */
_GLOBAL(__flush_dcache_icache)
BEGIN_FTR_SECTION
	blr				/* for 601, do nothing */
END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)
	rlwinm	r3,r3,0,0,19		/* Get page base address */
	li	r4,4096/L1_CACHE_BYTES	/* Number of lines in a page */
	mtctr	r4
	mr	r6,r3			/* keep base for the icbi pass */
0:	dcbst	0,r3			/* Write line to ram */
	addi	r3,r3,L1_CACHE_BYTES
	bdnz	0b
	sync				/* wait for stores to complete */
	mtctr	r4
1:	icbi	0,r6			/* invalidate icache line */
	addi	r6,r6,L1_CACHE_BYTES
	bdnz	1b
	sync
	isync
	blr
/*
 * Flush a particular page from the data cache to RAM, identified
 * by its physical address. We turn off the MMU so we can just use
 * the physical address (this may be a highmem page without a kernel
 * mapping).
 *
 * void __flush_dcache_icache_phys(unsigned long physaddr)
 * Clobbers: r0, r3, r4, r6, r10, CTR.  Runs briefly with DR off.
 */
_GLOBAL(__flush_dcache_icache_phys)
BEGIN_FTR_SECTION
	blr				/* for 601, do nothing */
END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)
	mfmsr	r10			/* save MSR */
	rlwinm	r0,r10,0,28,26		/* clear DR */
	mtmsr	r0
	isync
	rlwinm	r3,r3,0,0,19		/* Get page base address */
	li	r4,4096/L1_CACHE_BYTES	/* Number of lines in a page */
	mtctr	r4
	mr	r6,r3			/* keep base for the icbi pass */
0:	dcbst	0,r3			/* Write line to ram */
	addi	r3,r3,L1_CACHE_BYTES
	bdnz	0b
	sync
	mtctr	r4
1:	icbi	0,r6			/* invalidate icache line */
	addi	r6,r6,L1_CACHE_BYTES
	bdnz	1b
	sync
	mtmsr	r10			/* restore DR */
	isync
	blr
/*
 * Clear pages using the dcbz instruction, which doesn't cause any
 * memory traffic (except to write out any cache lines which get
 * displaced). This only works on cacheable memory.
 *
 * void clear_pages(void *page, int order) ;
 * Clobbers: r0, r3 (and r4 on 8xx), CTR.
 */
_GLOBAL(clear_pages)
	li	r0,4096/L1_CACHE_BYTES
	slw	r0,r0,r4		/* lines per page << order */
	mtctr	r0
#ifdef CONFIG_8xx
	li	r4, 0
1:	stw	r4, 0(r3)		/* 8xx path clears by plain stores */
	stw	r4, 4(r3)
	stw	r4, 8(r3)
	stw	r4, 12(r3)
#else
1:	dcbz	0,r3			/* zero a whole cache line at once */
#endif
	addi	r3,r3,L1_CACHE_BYTES
	bdnz	1b
	blr
/*
 * Copy a whole page. We use the dcbz instruction on the destination
 * to reduce memory traffic (it eliminates the unnecessary reads of
 * the destination into cache). This requires that the destination
 * is cacheable.
 *
 * In: r3 = dest, r4 = src.  Both are biased by -4 below so the
 * update-form loads/stores in COPY_16_BYTES can use offsets 4..16.
 */
#define COPY_16_BYTES \
	lwz	r6,4(r4); \
	lwz	r7,8(r4); \
	lwz	r8,12(r4); \
	lwzu	r9,16(r4); \
	stw	r6,4(r3); \
	stw	r7,8(r3); \
	stw	r8,12(r3); \
	stwu	r9,16(r3)
_GLOBAL(copy_page)
	addi	r3,r3,-4		/* bias pointers for update forms */
	addi	r4,r4,-4
#ifdef CONFIG_8xx
	/* don't use prefetch on 8xx */
	li	r0,4096/L1_CACHE_BYTES
	mtctr	r0
1:	COPY_16_BYTES
	bdnz	1b
	blr
#else /* not 8xx, we can prefetch */
	li	r5,4
#if MAX_COPY_PREFETCH > 1
	li	r0,MAX_COPY_PREFETCH	/* prime the prefetch queue */
	li	r11,4
	mtctr	r0
11:	dcbt	r11,r4
	addi	r11,r11,L1_CACHE_BYTES
	bdnz	11b
#else /* MAX_COPY_PREFETCH == 1 */
	dcbt	r5,r4
	li	r11,L1_CACHE_BYTES+4
#endif /* MAX_COPY_PREFETCH */
	li	r0,4096/L1_CACHE_BYTES - MAX_COPY_PREFETCH
	crclr	4*cr0+eq		/* eq clear => a final pass remains */
2:
	mtctr	r0
1:
	dcbt	r11,r4			/* prefetch a line ahead of the copy */
	dcbz	r5,r3			/* avoid reading dest line from ram */
	COPY_16_BYTES
#if L1_CACHE_BYTES >= 32
	COPY_16_BYTES
#if L1_CACHE_BYTES >= 64
	COPY_16_BYTES
	COPY_16_BYTES
#if L1_CACHE_BYTES >= 128
	COPY_16_BYTES
	COPY_16_BYTES
	COPY_16_BYTES
	COPY_16_BYTES
#endif
#endif
#endif
	bdnz	1b
	beqlr				/* second pass done -> return */
	crnot	4*cr0+eq,4*cr0+eq	/* mark final pass */
	li	r0,MAX_COPY_PREFETCH	/* copy the already-prefetched tail */
	li	r11,4
	b	2b
#endif /* CONFIG_8xx */
/*
 * void atomic_clear_mask(atomic_t mask, atomic_t *addr)
 * void atomic_set_mask(atomic_t mask, atomic_t *addr);
 * Atomically clears the bits in r3 at *r4 via lwarx/stwcx.
 */
_GLOBAL(atomic_clear_mask)
10:	lwarx	r5,0,r4			/* load-reserve *addr */
	andc	r5,r5,r3		/* clear the mask bits */
	PPC405_ERR77(0,r4)		/* 405 erratum #77 workaround */
	stwcx.	r5,0,r4			/* store conditionally */
	bne-	10b			/* lost reservation -- retry */
	blr
/* Atomically sets the bits in r3 at *r4 via lwarx/stwcx. */
_GLOBAL(atomic_set_mask)
10:	lwarx	r5,0,r4			/* load-reserve *addr */
	or	r5,r5,r3		/* set the mask bits */
	PPC405_ERR77(0,r4)		/* 405 erratum #77 workaround */
	stwcx.	r5,0,r4			/* store conditionally */
	bne-	10b			/* lost reservation -- retry */
	blr
/*
 * I/O string operations
 *
 * insb(port, buf, len)
 * outsb(port, buf, len)
 * insw(port, buf, len)
 * outsw(port, buf, len)
 * insl(port, buf, len)
 * outsl(port, buf, len)
 * insw_ns(port, buf, len)
 * outsw_ns(port, buf, len)
 * insl_ns(port, buf, len)
 * outsl_ns(port, buf, len)
 *
 * The *_ns versions don't do byte-swapping.
 * Common calling convention: r3 = port address, r4 = buffer, r5 = count.
 * Each returns immediately when count <= 0.
 */
_GLOBAL(_insb)
	cmpwi	0,r5,0			/* count <= 0? */
	mtctr	r5
	subi	r4,r4,1			/* pre-bias for stbu */
	blelr-
00:	lbz	r5,0(r3)		/* read one byte from the port */
	eieio				/* order I/O accesses */
	stbu	r5,1(r4)		/* append to buffer */
	bdnz	00b
	blr
/* Write r5 bytes from buffer r4 to port r3 (see I/O header above _insb). */
_GLOBAL(_outsb)
	cmpwi	0,r5,0			/* count <= 0? */
	mtctr	r5
	subi	r4,r4,1			/* pre-bias for lbzu */
	blelr-
00:	lbzu	r5,1(r4)		/* next byte from buffer */
	stb	r5,0(r3)		/* write it to the port */
	eieio				/* order I/O accesses */
	bdnz	00b
	blr
/* Read r5 byte-swapped halfwords from port r3 into buffer r4. */
_GLOBAL(_insw)
	cmpwi	0,r5,0			/* count <= 0? */
	mtctr	r5
	subi	r4,r4,2			/* pre-bias for sthu */
	blelr-
00:	lhbrx	r5,0,r3			/* load halfword byte-reversed */
	eieio				/* order I/O accesses */
	sthu	r5,2(r4)
	bdnz	00b
	blr
/* Write r5 byte-swapped halfwords from buffer r4 to port r3. */
_GLOBAL(_outsw)
	cmpwi	0,r5,0			/* count <= 0? */
	mtctr	r5
	subi	r4,r4,2			/* pre-bias for lhzu */
	blelr-
00:	lhzu	r5,2(r4)
	eieio				/* order I/O accesses */
	sthbrx	r5,0,r3			/* store halfword byte-reversed */
	bdnz	00b
	blr
/* Read r5 byte-swapped words from port r3 into buffer r4. */
_GLOBAL(_insl)
	cmpwi	0,r5,0			/* count <= 0? */
	mtctr	r5
	subi	r4,r4,4			/* pre-bias for stwu */
	blelr-
00:	lwbrx	r5,0,r3			/* load word byte-reversed */
	eieio				/* order I/O accesses */
	stwu	r5,4(r4)
	bdnz	00b
	blr
/* Write r5 byte-swapped words from buffer r4 to port r3. */
_GLOBAL(_outsl)
	cmpwi	0,r5,0			/* count <= 0? */
	mtctr	r5
	subi	r4,r4,4			/* pre-bias for lwzu */
	blelr-
00:	lwzu	r5,4(r4)
	stwbrx	r5,0,r3			/* store word byte-reversed */
	eieio				/* order I/O accesses */
	bdnz	00b
	blr
/* Read r5 halfwords (no byte swap) from port r3 into buffer r4. */
_GLOBAL(__ide_mm_insw)
_GLOBAL(_insw_ns)
	cmpwi	0,r5,0			/* count <= 0? */
	mtctr	r5
	subi	r4,r4,2			/* pre-bias for sthu */
	blelr-
00:	lhz	r5,0(r3)
	eieio				/* order I/O accesses */
	sthu	r5,2(r4)
	bdnz	00b
	blr
/* Write r5 halfwords (no byte swap) from buffer r4 to port r3. */
_GLOBAL(__ide_mm_outsw)
_GLOBAL(_outsw_ns)
	cmpwi	0,r5,0			/* count <= 0? */
	mtctr	r5
	subi	r4,r4,2			/* pre-bias for lhzu */
	blelr-
00:	lhzu	r5,2(r4)
	sth	r5,0(r3)
	eieio				/* order I/O accesses */
	bdnz	00b
	blr
/* Read r5 words (no byte swap) from port r3 into buffer r4. */
_GLOBAL(__ide_mm_insl)
_GLOBAL(_insl_ns)
	cmpwi	0,r5,0			/* count <= 0? */
	mtctr	r5
	subi	r4,r4,4			/* pre-bias for stwu */
	blelr-
00:	lwz	r5,0(r3)
	eieio				/* order I/O accesses */
	stwu	r5,4(r4)
	bdnz	00b
	blr
/* Write r5 words (no byte swap) from buffer r4 to port r3. */
_GLOBAL(__ide_mm_outsl)
_GLOBAL(_outsl_ns)
	cmpwi	0,r5,0			/* count <= 0? */
	mtctr	r5
	subi	r4,r4,4			/* pre-bias for lwzu */
	blelr-
00:	lwzu	r5,4(r4)
	stw	r5,0(r3)
	eieio				/* order I/O accesses */
	bdnz	00b
	blr
/*
 * Extended precision shifts.
 *
 * Updated to be valid for shift counts from 0 to 63 inclusive.
 * -- Gabriel
 *
 * R3/R4 has 64 bit value (R3 = MSW, R4 = LSW)
 * R5 has shift count
 * result in R3/R4
 *
 * ashrdi3: arithmetic right shift (sign propagation)
 * lshrdi3: logical right shift
 * ashldi3: left shift
 * (PPC32 shift instructions give 0 for counts 32..63, which these
 * routines exploit; see per-line notes.)
 */
_GLOBAL(__ashrdi3)
	subfic	r6,r5,32
	srw	r4,r4,r5	# LSW = count > 31 ? 0 : LSW >> count
	addi	r7,r5,32	# could be xori, or addi with -32
	slw	r6,r3,r6	# t1 = count > 31 ? 0 : MSW << (32-count)
	rlwinm	r8,r7,0,32	# t3 = (count < 32) ? 32 : 0
	sraw	r7,r3,r7	# t2 = MSW >> (count-32)
	or	r4,r4,r6	# LSW |= t1
	slw	r7,r7,r8	# t2 = (count < 32) ? 0 : t2
	sraw	r3,r3,r5	# MSW = MSW >> count
	or	r4,r4,r7	# LSW |= t2
	blr
/* 64-bit left shift: r3/r4 (MSW/LSW) <<= r5, count 0..63 valid. */
_GLOBAL(__ashldi3)
	subfic	r6,r5,32
	slw	r3,r3,r5	# MSW = count > 31 ? 0 : MSW << count
	addi	r7,r5,32	# could be xori, or addi with -32
	srw	r6,r4,r6	# t1 = count > 31 ? 0 : LSW >> (32-count)
	slw	r7,r4,r7	# t2 = count < 32 ? 0 : LSW << (count-32)
	or	r3,r3,r6	# MSW |= t1
	slw	r4,r4,r5	# LSW = LSW << count
	or	r3,r3,r7	# MSW |= t2
	blr
/* 64-bit logical right shift: r3/r4 (MSW/LSW) >>= r5, count 0..63 valid. */
_GLOBAL(__lshrdi3)
	subfic	r6,r5,32
	srw	r4,r4,r5	# LSW = count > 31 ? 0 : LSW >> count
	addi	r7,r5,32	# could be xori, or addi with -32
	slw	r6,r3,r6	# t1 = count > 31 ? 0 : MSW << (32-count)
	srw	r7,r3,r7	# t2 = count < 32 ? 0 : MSW >> (count-32)
	or	r4,r4,r6	# LSW |= t1
	srw	r3,r3,r5	# MSW = MSW >> count
	or	r4,r4,r7	# LSW |= t2
	blr
/* int abs(int x) -- branch-free absolute value: (x ^ m) - m, m = x >> 31. */
_GLOBAL(abs)
	srawi	r4,r3,31	/* r4 = x < 0 ? -1 : 0 */
	xor	r3,r3,r4	/* conditional one's complement */
	sub	r3,r3,r4	/* +1 when x was negative */
	blr
/* _get_SP() -- return the current stack pointer in r3. */
_GLOBAL(_get_SP)
	mr	r3,r1		/* Close enough */
	blr
/*
 * These are used in the alignment trap handler when emulating
 * single-precision loads and stores.
 * We restore and save the fpscr so the task gets the same result
 * and exceptions as if the cpu had performed the load or store.
 *
 * cvt_fd: r3 = src (float *), r4 = dst (double *), r5 points 4 bytes
 * past the saved 8-byte FPSCR image (hence the -4 offsets).
 * NOTE(review): exact r5 layout assumed from the -4(r5) accesses --
 * confirm against the alignment-trap caller.  Clobbers fr0.
 */
#ifdef CONFIG_PPC_FPU
_GLOBAL(cvt_fd)
	lfd	0,-4(r5)	/* load up fpscr value */
	mtfsf	0xff,0
	lfs	0,0(r3)		/* single-precision load converts to double */
	stfd	0,0(r4)
	mffs	0		/* save new fpscr value */
	stfd	0,-4(r5)
	blr
/*
 * cvt_df: r3 = src (double *), r4 = dst (float *), r5 as for cvt_fd.
 * Stores with round-to-single; FPSCR is restored/saved around the op.
 */
_GLOBAL(cvt_df)
	lfd	0,-4(r5)	/* load up fpscr value */
	mtfsf	0xff,0
	lfd	0,0(r3)
	stfs	0,0(r4)		/* store rounds double to single */
	mffs	0		/* save new fpscr value */
	stfd	0,-4(r5)
	blr
#endif
/*
 * Create a kernel thread
 * kernel_thread(fn, arg, flags)
 * r3 = fn, r4 = arg, r5 = clone flags; returns the clone() result
 * (child pid or negative error) to the parent in r3.  The child
 * calls fn(arg) on a fresh stack frame and exits if it returns.
 */
_GLOBAL(kernel_thread)
	stwu	r1,-16(r1)		/* frame for the two saved regs */
	stw	r30,8(r1)
	stw	r31,12(r1)
	mr	r30,r3			/* function */
	mr	r31,r4			/* argument */
	ori	r3,r5,CLONE_VM		/* flags */
	oris	r3,r3,CLONE_UNTRACED>>16
	li	r4,0			/* new sp (unused) */
	li	r0,__NR_clone
	sc
	cmpwi	0,r3,0			/* parent or child? */
	bne	1f			/* return if parent */
	li	r0,0			/* make top-level stack frame */
	stwu	r0,-16(r1)
	mtlr	r30			/* fn addr in lr */
	mr	r3,r31			/* load arg and call fn */
	PPC440EP_ERR42			/* 440EP erratum workaround */
	blrl
	li	r0,__NR_exit		/* exit if function returns */
	li	r3,0
	sc
1:	lwz	r30,8(r1)
	lwz	r31,12(r1)
	addi	r1,r1,16
	blr
/*
 * This routine is just here to keep GCC happy - sigh...
 * (some toolchains emit a call to __main from main; it must exist.)
 */
_GLOBAL(__main)
	blr
/*
 * SYSCALL(name) expands to a stub invoking syscall __NR_<name>.
 * On error (summary-overflow set by the kernel on return from sc)
 * it stores the error code in `errno' and returns -1.
 */
#define SYSCALL(name) \
_GLOBAL(name) \
	li	r0,__NR_##name; \
	sc; \
	bnslr; \
	lis	r4,errno@ha; \
	stw	r3,errno@l(r4); \
	li	r3,-1; \
	blr
SYSCALL(execve)
  974. /* Why isn't this a) automatic, b) written in 'C'? */
	.data
	.align	4
/*
 * System call dispatch table: entry N is the handler for syscall
 * number N (the number userspace loads into r0 -- see SYSCALL() above).
 * The slot order is userspace ABI: never reorder or delete entries.
 * Retired or never-implemented slots keep sys_ni_syscall as a
 * placeholder.  Every fifth entry is tagged with its index for easy
 * counting.
 */
_GLOBAL(sys_call_table)
	.long sys_restart_syscall /* 0 */
	.long sys_exit
	.long ppc_fork
	.long sys_read
	.long sys_write
	.long sys_open /* 5 */
	.long sys_close
	.long sys_waitpid
	.long sys_creat
	.long sys_link
	.long sys_unlink /* 10 */
	.long sys_execve
	.long sys_chdir
	.long sys_time
	.long sys_mknod
	.long sys_chmod /* 15 */
	.long sys_lchown
	.long sys_ni_syscall /* old break syscall holder */
	.long sys_stat
	.long sys_lseek
	.long sys_getpid /* 20 */
	.long sys_mount
	.long sys_oldumount
	.long sys_setuid
	.long sys_getuid
	.long sys_stime /* 25 */
	.long sys_ptrace
	.long sys_alarm
	.long sys_fstat
	.long sys_pause
	.long sys_utime /* 30 */
	.long sys_ni_syscall /* old stty syscall holder */
	.long sys_ni_syscall /* old gtty syscall holder */
	.long sys_access
	.long sys_nice
	.long sys_ni_syscall /* 35 */ /* old ftime syscall holder */
	.long sys_sync
	.long sys_kill
	.long sys_rename
	.long sys_mkdir
	.long sys_rmdir /* 40 */
	.long sys_dup
	.long sys_pipe
	.long sys_times
	.long sys_ni_syscall /* old prof syscall holder */
	.long sys_brk /* 45 */
	.long sys_setgid
	.long sys_getgid
	.long sys_signal
	.long sys_geteuid
	.long sys_getegid /* 50 */
	.long sys_acct
	.long sys_umount /* recycled never used phys() */
	.long sys_ni_syscall /* old lock syscall holder */
	.long sys_ioctl
	.long sys_fcntl /* 55 */
	.long sys_ni_syscall /* old mpx syscall holder */
	.long sys_setpgid
	.long sys_ni_syscall /* old ulimit syscall holder */
	.long sys_olduname
	.long sys_umask /* 60 */
	.long sys_chroot
	.long sys_ustat
	.long sys_dup2
	.long sys_getppid
	.long sys_getpgrp /* 65 */
	.long sys_setsid
	.long sys_sigaction
	.long sys_sgetmask
	.long sys_ssetmask
	.long sys_setreuid /* 70 */
	.long sys_setregid
	.long ppc_sigsuspend
	.long sys_sigpending
	.long sys_sethostname
	.long sys_setrlimit /* 75 */
	.long sys_old_getrlimit
	.long sys_getrusage
	.long sys_gettimeofday
	.long sys_settimeofday
	.long sys_getgroups /* 80 */
	.long sys_setgroups
	.long ppc_select
	.long sys_symlink
	.long sys_lstat
	.long sys_readlink /* 85 */
	.long sys_uselib
	.long sys_swapon
	.long sys_reboot
	.long old_readdir
	.long sys_mmap /* 90 */
	.long sys_munmap
	.long sys_truncate
	.long sys_ftruncate
	.long sys_fchmod
	.long sys_fchown /* 95 */
	.long sys_getpriority
	.long sys_setpriority
	.long sys_ni_syscall /* old profil syscall holder */
	.long sys_statfs
	.long sys_fstatfs /* 100 */
	.long sys_ni_syscall /* 101 -- unimplemented on this arch */
	.long sys_socketcall
	.long sys_syslog
	.long sys_setitimer
	.long sys_getitimer /* 105 */
	.long sys_newstat
	.long sys_newlstat
	.long sys_newfstat
	.long sys_uname
	.long sys_ni_syscall /* 110 */
	.long sys_vhangup
	.long sys_ni_syscall /* old 'idle' syscall */
	.long sys_ni_syscall /* 113 -- unimplemented on this arch */
	.long sys_wait4
	.long sys_swapoff /* 115 */
	.long sys_sysinfo
	.long sys_ipc
	.long sys_fsync
	.long sys_sigreturn
	.long ppc_clone /* 120 */
	.long sys_setdomainname
	.long sys_newuname
	.long sys_ni_syscall /* 123 -- unimplemented on this arch */
	.long sys_adjtimex
	.long sys_mprotect /* 125 */
	.long sys_sigprocmask
	.long sys_ni_syscall /* old sys_create_module */
	.long sys_init_module
	.long sys_delete_module
	.long sys_ni_syscall /* old sys_get_kernel_syms */ /* 130 */
	.long sys_quotactl
	.long sys_getpgid
	.long sys_fchdir
	.long sys_bdflush
	.long sys_sysfs /* 135 */
	.long sys_personality
	.long sys_ni_syscall /* for afs_syscall */
	.long sys_setfsuid
	.long sys_setfsgid
	.long sys_llseek /* 140 */
	.long sys_getdents
	.long ppc_select
	.long sys_flock
	.long sys_msync
	.long sys_readv /* 145 */
	.long sys_writev
	.long sys_getsid
	.long sys_fdatasync
	.long sys_sysctl
	.long sys_mlock /* 150 */
	.long sys_munlock
	.long sys_mlockall
	.long sys_munlockall
	.long sys_sched_setparam
	.long sys_sched_getparam /* 155 */
	.long sys_sched_setscheduler
	.long sys_sched_getscheduler
	.long sys_sched_yield
	.long sys_sched_get_priority_max
	.long sys_sched_get_priority_min /* 160 */
	.long sys_sched_rr_get_interval
	.long sys_nanosleep
	.long sys_mremap
	.long sys_setresuid
	.long sys_getresuid /* 165 */
	.long sys_ni_syscall /* old sys_query_module */
	.long sys_poll
	.long sys_nfsservctl
	.long sys_setresgid
	.long sys_getresgid /* 170 */
	.long sys_prctl
	.long sys_rt_sigreturn
	.long sys_rt_sigaction
	.long sys_rt_sigprocmask
	.long sys_rt_sigpending /* 175 */
	.long sys_rt_sigtimedwait
	.long sys_rt_sigqueueinfo
	.long ppc_rt_sigsuspend
	.long sys_pread64
	.long sys_pwrite64 /* 180 */
	.long sys_chown
	.long sys_getcwd
	.long sys_capget
	.long sys_capset
	.long sys_sigaltstack /* 185 */
	.long sys_sendfile
	.long sys_ni_syscall /* streams1 */
	.long sys_ni_syscall /* streams2 */
	.long ppc_vfork
	.long sys_getrlimit /* 190 */
	.long sys_readahead
	.long sys_mmap2
	.long sys_truncate64
	.long sys_ftruncate64
	.long sys_stat64 /* 195 */
	.long sys_lstat64
	.long sys_fstat64
	.long sys_pciconfig_read
	.long sys_pciconfig_write
	.long sys_pciconfig_iobase /* 200 */
	.long sys_ni_syscall /* 201 - reserved - MacOnLinux - new */
	.long sys_getdents64
	.long sys_pivot_root
	.long sys_fcntl64
	.long sys_madvise /* 205 */
	.long sys_mincore
	.long sys_gettid
	.long sys_tkill
	.long sys_setxattr
	.long sys_lsetxattr /* 210 */
	.long sys_fsetxattr
	.long sys_getxattr
	.long sys_lgetxattr
	.long sys_fgetxattr
	.long sys_listxattr /* 215 */
	.long sys_llistxattr
	.long sys_flistxattr
	.long sys_removexattr
	.long sys_lremovexattr
	.long sys_fremovexattr /* 220 */
	.long sys_futex
	.long sys_sched_setaffinity
	.long sys_sched_getaffinity
	.long sys_ni_syscall /* 224 -- unimplemented on this arch */
	.long sys_ni_syscall /* 225 - reserved for Tux */
	.long sys_sendfile64
	.long sys_io_setup
	.long sys_io_destroy
	.long sys_io_getevents
	.long sys_io_submit /* 230 */
	.long sys_io_cancel
	.long sys_set_tid_address
	.long sys_fadvise64
	.long sys_exit_group
	.long sys_lookup_dcookie /* 235 */
	.long sys_epoll_create
	.long sys_epoll_ctl
	.long sys_epoll_wait
	.long sys_remap_file_pages
	.long sys_timer_create /* 240 */
	.long sys_timer_settime
	.long sys_timer_gettime
	.long sys_timer_getoverrun
	.long sys_timer_delete
	.long sys_clock_settime /* 245 */
	.long sys_clock_gettime
	.long sys_clock_getres
	.long sys_clock_nanosleep
	.long ppc_swapcontext
	.long sys_tgkill /* 250 */
	.long sys_utimes
	.long sys_statfs64
	.long sys_fstatfs64
	.long ppc_fadvise64_64
	.long sys_ni_syscall /* 255 - rtas (used on ppc64) */
	.long sys_debug_setcontext
	.long sys_ni_syscall /* 257 reserved for vserver */
	.long sys_ni_syscall /* 258 reserved for new sys_remap_file_pages */
	.long sys_ni_syscall /* 259 reserved for new sys_mbind */
	.long sys_ni_syscall /* 260 reserved for new sys_get_mempolicy */
	.long sys_ni_syscall /* 261 reserved for new sys_set_mempolicy */
	.long sys_mq_open
	.long sys_mq_unlink
	.long sys_mq_timedsend
	.long sys_mq_timedreceive /* 265 */
	.long sys_mq_notify
	.long sys_mq_getsetattr
	.long sys_kexec_load
	.long sys_add_key
	.long sys_request_key /* 270 */
	.long sys_keyctl
	.long sys_waitid
	.long sys_ioprio_set
	.long sys_ioprio_get
	.long sys_inotify_init /* 275 */
	.long sys_inotify_add_watch
	.long sys_inotify_rm_watch