/* (extraction artifacts removed: file-size banner and concatenated page-numbering residue) */
  1. /*
  2. * This file contains miscellaneous low-level functions.
  3. * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
  4. *
  5. * Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
  6. * and Paul Mackerras.
  7. *
  8. * This program is free software; you can redistribute it and/or
  9. * modify it under the terms of the GNU General Public License
  10. * as published by the Free Software Foundation; either version
  11. * 2 of the License, or (at your option) any later version.
  12. *
  13. */
  14. #include <linux/config.h>
  15. #include <linux/sys.h>
  16. #include <asm/unistd.h>
  17. #include <asm/errno.h>
  18. #include <asm/processor.h>
  19. #include <asm/page.h>
  20. #include <asm/cache.h>
  21. #include <asm/cputable.h>
  22. #include <asm/mmu.h>
  23. #include <asm/ppc_asm.h>
  24. #include <asm/thread_info.h>
  25. #include <asm/asm-offsets.h>
	.text
	.align	5

/*
 * void __delay(unsigned long loops)
 * Busy-wait for 'loops' iterations using the count register.
 * r3 = iteration count; returns immediately if it is zero.
 */
_GLOBAL(__delay)
	cmpwi	0,r3,0
	mtctr	r3
	beqlr			/* zero count: nothing to do */
1:	bdnz	1b		/* spin until CTR reaches zero */
	blr
/*
 * Returns (address we're running at) - (address we were linked at)
 * for use before the text and data are mapped to KERNELBASE.
 * Result in r3; clobbers r0, r4.
 */
_GLOBAL(reloc_offset)
	mflr	r0		/* save LR; the bl below clobbers it */
	bl	1f
1:	mflr	r3		/* r3 = runtime address of label 1 */
	lis	r4,1b@ha
	addi	r4,r4,1b@l	/* r4 = link-time address of label 1 */
	subf	r3,r4,r3	/* r3 = runtime - link-time */
	mtlr	r0
	blr
/*
 * add_reloc_offset(x) returns x + reloc_offset().
 * r3 = x; result in r3; clobbers r0, r4, r5.
 */
_GLOBAL(add_reloc_offset)
	mflr	r0		/* save LR; the bl below clobbers it */
	bl	1f
1:	mflr	r5		/* r5 = runtime address of label 1 */
	lis	r4,1b@ha
	addi	r4,r4,1b@l	/* r4 = link-time address of label 1 */
	subf	r5,r4,r5	/* r5 = relocation offset */
	add	r3,r3,r5
	mtlr	r0
	blr
/*
 * sub_reloc_offset(x) returns x - reloc_offset().
 * r3 = x; result in r3; clobbers r0, r4, r5.
 */
_GLOBAL(sub_reloc_offset)
	mflr	r0		/* save LR; the bl below clobbers it */
	bl	1f
1:	mflr	r5		/* r5 = runtime address of label 1 */
	lis	r4,1b@ha
	addi	r4,r4,1b@l	/* r4 = link-time address of label 1 */
	subf	r5,r4,r5	/* r5 = relocation offset */
	subf	r3,r5,r3
	mtlr	r0
	blr
/*
 * reloc_got2 runs through the .got2 section adding an offset
 * to each entry.
 * r3 = offset to add; clobbers r0, r4, r7, r8, r11, ctr.
 */
_GLOBAL(reloc_got2)
	mflr	r11
	lis	r7,__got2_start@ha
	addi	r7,r7,__got2_start@l
	lis	r8,__got2_end@ha
	addi	r8,r8,__got2_end@l
	subf	r8,r7,r8
	srwi.	r8,r8,2		/* r8 = number of 4-byte GOT entries */
	beqlr			/* empty .got2: nothing to do */
	mtctr	r8
	bl	1f
1:	mflr	r0
	lis	r4,1b@ha
	addi	r4,r4,1b@l
	subf	r0,r4,r0	/* r0 = current relocation offset */
	add	r7,r0,r7	/* relocate the table pointer itself */
2:	lwz	r0,0(r7)
	add	r0,r0,r3	/* adjust each entry by the offset */
	stw	r0,0(r7)
	addi	r7,r7,4
	bdnz	2b
	mtlr	r11
	blr
/*
 * identify_cpu,
 * called with r3 = data offset and r4 = CPU number
 * doesn't change r3
 *
 * Scans cpu_specs[] for an entry whose (PVR & mask) matches this
 * CPU's PVR, then stores that entry's unrelocated address into
 * cur_cpu_spec[r4].  Clobbers r4-r8.
 */
_GLOBAL(identify_cpu)
	addis	r8,r3,cpu_specs@ha
	addi	r8,r8,cpu_specs@l
	mfpvr	r7		/* r7 = processor version register */
1:
	lwz	r5,CPU_SPEC_PVR_MASK(r8)
	and	r5,r5,r7
	lwz	r6,CPU_SPEC_PVR_VALUE(r8)
	cmplw	0,r6,r5
	beq	1f		/* found a matching entry */
	addi	r8,r8,CPU_SPEC_ENTRY_SIZE
	b	1b		/* NOTE(review): loop is unbounded — presumably the
				 * table's last entry matches any PVR; verify */
1:
	addis	r6,r3,cur_cpu_spec@ha
	addi	r6,r6,cur_cpu_spec@l
	slwi	r4,r4,2		/* word index into the pointer array */
	sub	r8,r8,r3	/* store the unrelocated address */
	stwx	r8,r4,r6
	blr
/*
 * do_cpu_ftr_fixups - goes through the list of CPU feature fixups
 * and writes nop's over sections of code that don't apply for this cpu.
 * r3 = data offset (not changed)
 * Clobbers r0, r4-r10, ctr.
 */
_GLOBAL(do_cpu_ftr_fixups)
	/* Get CPU 0 features */
	addis	r6,r3,cur_cpu_spec@ha
	addi	r6,r6,cur_cpu_spec@l
	lwz	r4,0(r6)
	add	r4,r4,r3	/* relocate the spec pointer */
	lwz	r4,CPU_SPEC_FEATURES(r4)
	/* Get the fixup table */
	addis	r6,r3,__start___ftr_fixup@ha
	addi	r6,r6,__start___ftr_fixup@l
	addis	r7,r3,__stop___ftr_fixup@ha
	addi	r7,r7,__stop___ftr_fixup@l
	/* Do the fixup: each entry is 16 bytes (mask, value, begin, end) */
1:	cmplw	0,r6,r7
	bgelr			/* reached end of table: done */
	addi	r6,r6,16
	lwz	r8,-16(r6)	/* mask */
	and	r8,r8,r4
	lwz	r9,-12(r6)	/* value */
	cmplw	0,r8,r9
	beq	1b		/* features match: leave the code alone */
	lwz	r8,-8(r6)	/* section begin */
	lwz	r9,-4(r6)	/* section end */
	subf.	r9,r8,r9
	beq	1b		/* empty section */
	/* write nops over the section of code */
	/* todo: if large section, add a branch at the start of it */
	srwi	r9,r9,2		/* byte count -> instruction count */
	mtctr	r9
	add	r8,r8,r3
	lis	r0,0x60000000@h	/* nop */
3:	stw	r0,0(r8)
	andi.	r10,r4,CPU_FTR_SPLIT_ID_CACHE@l
	beq	2f		/* unified cache: no icache maintenance needed */
	dcbst	0,r8		/* suboptimal, but simpler */
	sync
	icbi	0,r8
2:	addi	r8,r8,4
	bdnz	3b
	sync			/* additional sync needed on g4 */
	isync
	b	1b
/*
 * call_setup_cpu - call the setup_cpu function for this cpu
 * r3 = data offset, r24 = cpu number
 *
 * Setup function is called with:
 * r3 = data offset
 * r4 = CPU number
 * r5 = ptr to CPU spec (relocated)
 *
 * Tail-calls via bctr, so the setup function returns directly to
 * our caller.  Clobbers r4-r6, ctr.
 */
_GLOBAL(call_setup_cpu)
	addis	r5,r3,cur_cpu_spec@ha
	addi	r5,r5,cur_cpu_spec@l
	slwi	r4,r24,2
	lwzx	r5,r4,r5	/* r5 = cur_cpu_spec[cpu] (unrelocated) */
	add	r5,r5,r3	/* relocate the spec pointer */
	lwz	r6,CPU_SPEC_SETUP(r5)
	add	r6,r6,r3	/* relocate the function pointer */
	mtctr	r6
	mr	r4,r24
	bctr			/* tail-call setup(offset, cpu, spec) */
#if defined(CONFIG_CPU_FREQ_PMAC) && defined(CONFIG_6xx)

/* This gets called by via-pmu.c to switch the PLL selection
 * on 750fx CPU. This function should really be moved to some
 * other place (as most of the cpufreq code in via-pmu
 *
 * r3 = PLL to select (0 = PLL0, nonzero = PLL1).
 * Runs with external interrupts disabled (MSR:EE cleared for the
 * duration); original MSR is restored before returning.
 */
_GLOBAL(low_choose_750fx_pll)
	/* Clear MSR:EE */
	mfmsr	r7
	rlwinm	r0,r7,0,17,15
	mtmsr	r0
	/* If switching to PLL1, disable HID0:BTIC */
	cmplwi	cr0,r3,0
	beq	1f
	mfspr	r5,SPRN_HID0
	rlwinm	r5,r5,0,27,25	/* clear HID0:BTIC (bit 26) */
	sync
	mtspr	SPRN_HID0,r5
	isync
	sync
1:
	/* Calc new HID1 value */
	mfspr	r4,SPRN_HID1
	rlwinm	r5,r3,16,15,15	/* build a HID1:PS bit from the parameter */
	rlwinm	r4,r4,0,16,14	/* clear out HID1:PS from the value read */
	or	r4,r4,r5
	mtspr	SPRN_HID1,r4
	/* Store new HID1 image so it survives nap/wakeup */
	rlwinm	r6,r1,0,0,18	/* r6 = current thread_info */
	lwz	r6,TI_CPU(r6)
	slwi	r6,r6,2		/* word index by CPU number */
	addis	r6,r6,nap_save_hid1@ha
	stw	r4,nap_save_hid1@l(r6)
	/* If switching to PLL0, enable HID0:BTIC */
	cmplwi	cr0,r3,0
	bne	1f
	mfspr	r5,SPRN_HID0
	ori	r5,r5,HID0_BTIC
	sync
	mtspr	SPRN_HID0,r5
	isync
	sync
1:
	/* Return, restoring the original MSR (re-enables EE if it was set) */
	mtmsr	r7
	blr

/*
 * Switch the 7447A dynamic frequency switching (DFS) divider.
 * r3 = value inserted into HID1 bit 9.
 */
_GLOBAL(low_choose_7447a_dfs)
	/* Clear MSR:EE */
	mfmsr	r7
	rlwinm	r0,r7,0,17,15
	mtmsr	r0
	/* Calc new HID1 value */
	mfspr	r4,SPRN_HID1
	insrwi	r4,r3,1,9	/* insert parameter into bit 9 */
	sync
	mtspr	SPRN_HID1,r4
	sync
	isync
	/* Return, restoring the original MSR */
	mtmsr	r7
	blr

#endif /* CONFIG_CPU_FREQ_PMAC && CONFIG_6xx */
/*
 * complement mask on the msr then "or" some values on.
 * _nmask_and_or_msr(nmask, value_to_or)
 * r3 = bits to clear, r4 = bits to set.  Clobbers r0.
 */
_GLOBAL(_nmask_and_or_msr)
	mfmsr	r0		/* Get current msr */
	andc	r0,r0,r3	/* And off the bits set in r3 (first parm) */
	or	r0,r0,r4	/* Or on the bits in r4 (second parm) */
	SYNC			/* Some chip revs have problems here... */
	mtmsr	r0		/* Update machine state */
	isync
	blr			/* Done */
/*
 * Flush MMU TLB (all entries).
 * On hash-MMU SMP, takes mmu_hash_lock with translation and
 * interrupts disabled so the flush is atomic w.r.t. hashtable updates.
 */
_GLOBAL(_tlbia)
#if defined(CONFIG_40x)
	sync			/* Flush to memory before changing mapping */
	tlbia
	isync			/* Flush shadow TLB */
#elif defined(CONFIG_44x)
	li	r3,0
	sync
	/* Load high watermark */
	lis	r4,tlb_44x_hwater@ha
	lwz	r5,tlb_44x_hwater@l(r4)
1:	tlbwe	r3,r3,PPC44x_TLB_PAGEID		/* invalidate entry r3 */
	addi	r3,r3,1
	cmpw	0,r3,r5
	ble	1b
	isync
#elif defined(CONFIG_FSL_BOOKE)
	/* Invalidate all entries in TLB0 */
	li	r3, 0x04
	tlbivax	0,3
	/* Invalidate all entries in TLB1 */
	li	r3, 0x0c
	tlbivax	0,3
	/* Invalidate all entries in TLB2 */
	li	r3, 0x14
	tlbivax	0,3
	/* Invalidate all entries in TLB3 */
	li	r3, 0x1c
	tlbivax	0,3
	msync
#ifdef CONFIG_SMP
	tlbsync
#endif /* CONFIG_SMP */
#else /* !(CONFIG_40x || CONFIG_44x || CONFIG_FSL_BOOKE) */
#if defined(CONFIG_SMP)
	rlwinm	r8,r1,0,0,18	/* r8 = current thread_info */
	lwz	r8,TI_CPU(r8)	/* r8 = our cpu number */
	oris	r8,r8,10	/* lock token: cpu# | 0x000a0000 */
	mfmsr	r10
	SYNC
	rlwinm	r0,r10,0,17,15	/* clear bit 16 (MSR_EE) */
	rlwinm	r0,r0,0,28,26	/* clear DR */
	mtmsr	r0
	SYNC_601
	isync
	lis	r9,mmu_hash_lock@h
	ori	r9,r9,mmu_hash_lock@l
	tophys(r9,r9)		/* DR is off: use the physical address */
10:	lwarx	r7,0,r9		/* spin until we take mmu_hash_lock */
	cmpwi	0,r7,0
	bne-	10b
	stwcx.	r8,0,r9
	bne-	10b
	sync
	tlbia
	sync
	TLBSYNC
	li	r0,0
	stw	r0,0(r9)	/* clear mmu_hash_lock */
	mtmsr	r10		/* restore original MSR */
	SYNC_601
	isync
#else /* CONFIG_SMP */
	sync
	tlbia
	sync
#endif /* CONFIG_SMP */
#endif /* ! defined(CONFIG_40x) */
	blr
/*
 * Flush MMU TLB for a particular address.
 * r3 = effective address to invalidate.
 */
_GLOBAL(_tlbie)
#if defined(CONFIG_40x)
	tlbsx.	r3, 0, r3	/* find the matching entry, if any */
	bne	10f		/* no match: nothing to do */
	sync
	/* There are only 64 TLB entries, so r3 < 64, which means bit 25 is clear.
	 * Since 25 is the V bit in the TLB_TAG, loading this value will invalidate
	 * the TLB entry. */
	tlbwe	r3, r3, TLB_TAG
	isync
10:
#elif defined(CONFIG_44x)
	mfspr	r4,SPRN_MMUCR
	mfspr	r5,SPRN_PID	/* Get PID */
	rlwimi	r4,r5,0,24,31	/* Set TID */
	mtspr	SPRN_MMUCR,r4
	tlbsx.	r3, 0, r3	/* find the matching entry, if any */
	bne	10f		/* no match: nothing to do */
	sync
	/* There are only 64 TLB entries, so r3 < 64,
	 * which means bit 22, is clear. Since 22 is
	 * the V bit in the TLB_PAGEID, loading this
	 * value will invalidate the TLB entry.
	 */
	tlbwe	r3, r3, PPC44x_TLB_PAGEID
	isync
10:
#elif defined(CONFIG_FSL_BOOKE)
	rlwinm	r4, r3, 0, 0, 19	/* page-align the address */
	ori	r5, r4, 0x08	/* TLBSEL = 1 */
	ori	r6, r4, 0x10	/* TLBSEL = 2 */
	ori	r7, r4, 0x18	/* TLBSEL = 3 */
	tlbivax	0, r4
	tlbivax	0, r5
	tlbivax	0, r6
	tlbivax	0, r7
	msync
#if defined(CONFIG_SMP)
	tlbsync
#endif /* CONFIG_SMP */
#else /* !(CONFIG_40x || CONFIG_44x || CONFIG_FSL_BOOKE) */
#if defined(CONFIG_SMP)
	rlwinm	r8,r1,0,0,18	/* r8 = current thread_info */
	lwz	r8,TI_CPU(r8)	/* r8 = our cpu number */
	oris	r8,r8,11	/* lock token: cpu# | 0x000b0000 */
	mfmsr	r10
	SYNC
	rlwinm	r0,r10,0,17,15	/* clear bit 16 (MSR_EE) */
	rlwinm	r0,r0,0,28,26	/* clear DR */
	mtmsr	r0
	SYNC_601
	isync
	lis	r9,mmu_hash_lock@h
	ori	r9,r9,mmu_hash_lock@l
	tophys(r9,r9)		/* DR is off: use the physical address */
10:	lwarx	r7,0,r9		/* spin until we take mmu_hash_lock */
	cmpwi	0,r7,0
	bne-	10b
	stwcx.	r8,0,r9
	bne-	10b
	eieio
	tlbie	r3
	sync
	TLBSYNC
	li	r0,0
	stw	r0,0(r9)	/* clear mmu_hash_lock */
	mtmsr	r10		/* restore original MSR */
	SYNC_601
	isync
#else /* CONFIG_SMP */
	tlbie	r3
	sync
#endif /* CONFIG_SMP */
#endif /* ! CONFIG_40x */
	blr
  414. /*
  415. * Flush instruction cache.
  416. * This is a no-op on the 601.
  417. */
  418. _GLOBAL(flush_instruction_cache)
  419. #if defined(CONFIG_8xx)
  420. isync
  421. lis r5, IDC_INVALL@h
  422. mtspr SPRN_IC_CST, r5
  423. #elif defined(CONFIG_4xx)
  424. #ifdef CONFIG_403GCX
  425. li r3, 512
  426. mtctr r3
  427. lis r4, KERNELBASE@h
  428. 1: iccci 0, r4
  429. addi r4, r4, 16
  430. bdnz 1b
  431. #else
  432. lis r3, KERNELBASE@h
  433. iccci 0,r3
  434. #endif
  435. #elif CONFIG_FSL_BOOKE
  436. BEGIN_FTR_SECTION
  437. mfspr r3,SPRN_L1CSR0
  438. ori r3,r3,L1CSR0_CFI|L1CSR0_CLFC
  439. /* msync; isync recommended here */
  440. mtspr SPRN_L1CSR0,r3
  441. isync
  442. blr
  443. END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)
  444. mfspr r3,SPRN_L1CSR1
  445. ori r3,r3,L1CSR1_ICFI|L1CSR1_ICLFR
  446. mtspr SPRN_L1CSR1,r3
  447. #else
  448. mfspr r3,SPRN_PVR
  449. rlwinm r3,r3,16,16,31
  450. cmpwi 0,r3,1
  451. beqlr /* for 601, do nothing */
  452. /* 603/604 processor - use invalidate-all bit in HID0 */
  453. mfspr r3,SPRN_HID0
  454. ori r3,r3,HID0_ICFI
  455. mtspr SPRN_HID0,r3
  456. #endif /* CONFIG_8xx/4xx */
  457. isync
  458. blr
/*
 * Write any modified data cache blocks out to memory
 * and invalidate the corresponding instruction cache blocks.
 * This is a no-op on the 601.
 *
 * flush_icache_range(unsigned long start, unsigned long stop)
 * r3 = start, r4 = stop.  Clobbers r3-r6, ctr.
 */
_GLOBAL(flush_icache_range)
BEGIN_FTR_SECTION
	blr			/* for 601, do nothing */
END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)
	li	r5,L1_CACHE_LINE_SIZE-1
	andc	r3,r3,r5	/* align start down to a cache line */
	subf	r4,r3,r4
	add	r4,r4,r5
	srwi.	r4,r4,LG_L1_CACHE_LINE_SIZE	/* r4 = line count */
	beqlr			/* empty range: nothing to do */
	mtctr	r4
	mr	r6,r3		/* keep start for the icbi pass */
1:	dcbst	0,r3
	addi	r3,r3,L1_CACHE_LINE_SIZE
	bdnz	1b
	sync			/* wait for dcbst's to get to ram */
	mtctr	r4
2:	icbi	0,r6
	addi	r6,r6,L1_CACHE_LINE_SIZE
	bdnz	2b
	sync			/* additional sync needed on g4 */
	isync
	blr
/*
 * Write any modified data cache blocks out to memory.
 * Does not invalidate the corresponding cache lines (especially for
 * any corresponding instruction cache).
 *
 * clean_dcache_range(unsigned long start, unsigned long stop)
 * r3 = start, r4 = stop.  Clobbers r3-r5, ctr.
 */
_GLOBAL(clean_dcache_range)
	li	r5,L1_CACHE_LINE_SIZE-1
	andc	r3,r3,r5	/* align start down to a cache line */
	subf	r4,r3,r4
	add	r4,r4,r5
	srwi.	r4,r4,LG_L1_CACHE_LINE_SIZE	/* r4 = line count */
	beqlr			/* empty range: nothing to do */
	mtctr	r4
1:	dcbst	0,r3
	addi	r3,r3,L1_CACHE_LINE_SIZE
	bdnz	1b
	sync			/* wait for dcbst's to get to ram */
	blr
/*
 * Write any modified data cache blocks out to memory and invalidate them.
 * Does not invalidate the corresponding instruction cache blocks.
 *
 * flush_dcache_range(unsigned long start, unsigned long stop)
 * r3 = start, r4 = stop.  Clobbers r3-r5, ctr.
 */
_GLOBAL(flush_dcache_range)
	li	r5,L1_CACHE_LINE_SIZE-1
	andc	r3,r3,r5	/* align start down to a cache line */
	subf	r4,r3,r4
	add	r4,r4,r5
	srwi.	r4,r4,LG_L1_CACHE_LINE_SIZE	/* r4 = line count */
	beqlr			/* empty range: nothing to do */
	mtctr	r4
1:	dcbf	0,r3
	addi	r3,r3,L1_CACHE_LINE_SIZE
	bdnz	1b
	sync			/* wait for dcbf's to get to ram */
	blr
/*
 * Like above, but invalidate the D-cache. This is used by the 8xx
 * to invalidate the cache so the PPC core doesn't get stale data
 * from the CPM (no cache snooping here :-).
 *
 * invalidate_dcache_range(unsigned long start, unsigned long stop)
 * r3 = start, r4 = stop.  Clobbers r3-r5, ctr.
 * Note: dcbi discards modified data in the range without writeback.
 */
_GLOBAL(invalidate_dcache_range)
	li	r5,L1_CACHE_LINE_SIZE-1
	andc	r3,r3,r5	/* align start down to a cache line */
	subf	r4,r3,r4
	add	r4,r4,r5
	srwi.	r4,r4,LG_L1_CACHE_LINE_SIZE	/* r4 = line count */
	beqlr			/* empty range: nothing to do */
	mtctr	r4
1:	dcbi	0,r3
	addi	r3,r3,L1_CACHE_LINE_SIZE
	bdnz	1b
	sync			/* wait for dcbi's to get to ram */
	blr
#ifdef CONFIG_NOT_COHERENT_CACHE
/*
 * 40x cores have 8K or 16K dcache and 32 byte line size.
 * 44x has a 32K dcache and 32 byte line size.
 * 8xx has 1, 2, 4, 8K variants.
 * For now, cover the worst case of the 44x.
 * Must be called with external interrupts disabled.
 *
 * Works by loading enough distinct kernel lines (2x the cache
 * capacity) to displace every dirty line.  Clobbers r3-r5, ctr.
 */
#define CACHE_NWAYS	64
#define CACHE_NLINES	16

_GLOBAL(flush_dcache_all)
	li	r4, (2 * CACHE_NWAYS * CACHE_NLINES)
	mtctr	r4
	lis	r5, KERNELBASE@h
1:	lwz	r3, 0(r5)	/* Load one word from every line */
	addi	r5, r5, L1_CACHE_LINE_SIZE
	bdnz	1b
	blr
#endif /* CONFIG_NOT_COHERENT_CACHE */
/*
 * Flush a particular page from the data cache to RAM.
 * Note: this is necessary because the instruction cache does *not*
 * snoop from the data cache.
 * This is a no-op on the 601 which has a unified cache.
 *
 * void __flush_dcache_icache(void *page)
 * r3 = any address within the page.  Clobbers r3, r4, r6, ctr.
 */
_GLOBAL(__flush_dcache_icache)
BEGIN_FTR_SECTION
	blr			/* for 601, do nothing */
END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)
	rlwinm	r3,r3,0,0,19	/* Get page base address */
	li	r4,4096/L1_CACHE_LINE_SIZE	/* Number of lines in a page */
	mtctr	r4
	mr	r6,r3		/* keep base for the icbi pass */
0:	dcbst	0,r3		/* Write line to ram */
	addi	r3,r3,L1_CACHE_LINE_SIZE
	bdnz	0b
	sync			/* wait for dcbst's before invalidating */
	mtctr	r4
1:	icbi	0,r6
	addi	r6,r6,L1_CACHE_LINE_SIZE
	bdnz	1b
	sync
	isync
	blr
/*
 * Flush a particular page from the data cache to RAM, identified
 * by its physical address. We turn off the MMU so we can just use
 * the physical address (this may be a highmem page without a kernel
 * mapping).
 *
 * void __flush_dcache_icache_phys(unsigned long physaddr)
 * r3 = physical address within the page.  Clobbers r0, r3, r4, r6,
 * r10, ctr.  Runs briefly with data translation (MSR:DR) off.
 */
_GLOBAL(__flush_dcache_icache_phys)
BEGIN_FTR_SECTION
	blr			/* for 601, do nothing */
END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)
	mfmsr	r10
	rlwinm	r0,r10,0,28,26	/* clear DR */
	mtmsr	r0
	isync
	rlwinm	r3,r3,0,0,19	/* Get page base address */
	li	r4,4096/L1_CACHE_LINE_SIZE	/* Number of lines in a page */
	mtctr	r4
	mr	r6,r3		/* keep base for the icbi pass */
0:	dcbst	0,r3		/* Write line to ram */
	addi	r3,r3,L1_CACHE_LINE_SIZE
	bdnz	0b
	sync			/* wait for dcbst's before invalidating */
	mtctr	r4
1:	icbi	0,r6
	addi	r6,r6,L1_CACHE_LINE_SIZE
	bdnz	1b
	sync
	mtmsr	r10		/* restore DR */
	isync
	blr
/*
 * Clear pages using the dcbz instruction, which doesn't cause any
 * memory traffic (except to write out any cache lines which get
 * displaced). This only works on cacheable memory.
 *
 * void clear_pages(void *page, int order) ;
 * r3 = page base, r4 = order.  Clobbers r0, r3, r4, ctr.
 */
_GLOBAL(clear_pages)
	li	r0,4096/L1_CACHE_LINE_SIZE
	slw	r0,r0,r4	/* lines per page << order = total lines */
	mtctr	r0
#ifdef CONFIG_8xx
	li	r4, 0
1:	stw	r4, 0(r3)	/* 8xx: clear one 16-byte line with stores */
	stw	r4, 4(r3)
	stw	r4, 8(r3)
	stw	r4, 12(r3)
#else
1:	dcbz	0,r3
#endif
	addi	r3,r3,L1_CACHE_LINE_SIZE
	bdnz	1b
	blr
/*
 * Copy a whole page. We use the dcbz instruction on the destination
 * to reduce memory traffic (it eliminates the unnecessary reads of
 * the destination into cache). This requires that the destination
 * is cacheable.
 * r3 = destination page, r4 = source page.
 */
#define COPY_16_BYTES		\
	lwz	r6,4(r4);	\
	lwz	r7,8(r4);	\
	lwz	r8,12(r4);	\
	lwzu	r9,16(r4);	\
	stw	r6,4(r3);	\
	stw	r7,8(r3);	\
	stw	r8,12(r3);	\
	stwu	r9,16(r3)

_GLOBAL(copy_page)
	addi	r3,r3,-4	/* pre-bias pointers for lwzu/stwu */
	addi	r4,r4,-4
#ifdef CONFIG_8xx
	/* don't use prefetch on 8xx */
	li	r0,4096/L1_CACHE_LINE_SIZE
	mtctr	r0
1:	COPY_16_BYTES
	bdnz	1b
	blr
#else	/* not 8xx, we can prefetch */
	li	r5,4
#if MAX_COPY_PREFETCH > 1
	li	r0,MAX_COPY_PREFETCH
	li	r11,4
	mtctr	r0
11:	dcbt	r11,r4		/* prime MAX_COPY_PREFETCH source lines */
	addi	r11,r11,L1_CACHE_LINE_SIZE
	bdnz	11b
#else /* MAX_COPY_PREFETCH == 1 */
	dcbt	r5,r4
	li	r11,L1_CACHE_LINE_SIZE+4
#endif /* MAX_COPY_PREFETCH */
	li	r0,4096/L1_CACHE_LINE_SIZE - MAX_COPY_PREFETCH
	crclr	4*cr0+eq	/* eq clear: main pass still to run */
2:
	mtctr	r0
1:
	dcbt	r11,r4		/* prefetch ahead on the source */
	dcbz	r5,r3		/* zero dest line so it isn't read in */
	COPY_16_BYTES
#if L1_CACHE_LINE_SIZE >= 32
	COPY_16_BYTES
#if L1_CACHE_LINE_SIZE >= 64
	COPY_16_BYTES
	COPY_16_BYTES
#if L1_CACHE_LINE_SIZE >= 128
	COPY_16_BYTES
	COPY_16_BYTES
	COPY_16_BYTES
	COPY_16_BYTES
#endif
#endif
#endif
	bdnz	1b
	beqlr			/* second (tail) pass done: return */
	crnot	4*cr0+eq,4*cr0+eq
	li	r0,MAX_COPY_PREFETCH	/* copy the lines already prefetched */
	li	r11,4
	b	2b
#endif	/* CONFIG_8xx */
/*
 * void atomic_clear_mask(atomic_t mask, atomic_t *addr)
 * void atomic_set_mask(atomic_t mask, atomic_t *addr);
 * r3 = mask, r4 = addr.  lwarx/stwcx. retry loops.
 */
_GLOBAL(atomic_clear_mask)
10:	lwarx	r5,0,r4
	andc	r5,r5,r3	/* *addr &= ~mask */
	PPC405_ERR77(0,r4)
	stwcx.	r5,0,r4
	bne-	10b		/* lost reservation: retry */
	blr
_GLOBAL(atomic_set_mask)
10:	lwarx	r5,0,r4
	or	r5,r5,r3	/* *addr |= mask */
	PPC405_ERR77(0,r4)
	stwcx.	r5,0,r4
	bne-	10b		/* lost reservation: retry */
	blr
/*
 * I/O string operations
 *
 * insb(port, buf, len)
 * outsb(port, buf, len)
 * insw(port, buf, len)
 * outsw(port, buf, len)
 * insl(port, buf, len)
 * outsl(port, buf, len)
 * insw_ns(port, buf, len)
 * outsw_ns(port, buf, len)
 * insl_ns(port, buf, len)
 * outsl_ns(port, buf, len)
 *
 * The *_ns versions don't do byte-swapping.
 *
 * In all of these: r3 = port address, r4 = buffer, r5 = count.
 * The buffer pointer is pre-biased so the update forms (stbu, lhzu,
 * ...) can step through it; eieio orders the device accesses.
 * Returns immediately when count <= 0.
 */
_GLOBAL(_insb)
	cmpwi	0,r5,0
	mtctr	r5
	subi	r4,r4,1
	blelr-			/* count <= 0: nothing to do */
00:	lbz	r5,0(r3)
	eieio
	stbu	r5,1(r4)
	bdnz	00b
	blr

_GLOBAL(_outsb)
	cmpwi	0,r5,0
	mtctr	r5
	subi	r4,r4,1
	blelr-
00:	lbzu	r5,1(r4)
	stb	r5,0(r3)
	eieio
	bdnz	00b
	blr

_GLOBAL(_insw)
	cmpwi	0,r5,0
	mtctr	r5
	subi	r4,r4,2
	blelr-
00:	lhbrx	r5,0,r3		/* byte-reversed load from the port */
	eieio
	sthu	r5,2(r4)
	bdnz	00b
	blr

_GLOBAL(_outsw)
	cmpwi	0,r5,0
	mtctr	r5
	subi	r4,r4,2
	blelr-
00:	lhzu	r5,2(r4)
	eieio
	sthbrx	r5,0,r3		/* byte-reversed store to the port */
	bdnz	00b
	blr

_GLOBAL(_insl)
	cmpwi	0,r5,0
	mtctr	r5
	subi	r4,r4,4
	blelr-
00:	lwbrx	r5,0,r3		/* byte-reversed load from the port */
	eieio
	stwu	r5,4(r4)
	bdnz	00b
	blr

_GLOBAL(_outsl)
	cmpwi	0,r5,0
	mtctr	r5
	subi	r4,r4,4
	blelr-
00:	lwzu	r5,4(r4)
	stwbrx	r5,0,r3		/* byte-reversed store to the port */
	eieio
	bdnz	00b
	blr

_GLOBAL(__ide_mm_insw)
_GLOBAL(_insw_ns)
	cmpwi	0,r5,0
	mtctr	r5
	subi	r4,r4,2
	blelr-
00:	lhz	r5,0(r3)	/* native byte order */
	eieio
	sthu	r5,2(r4)
	bdnz	00b
	blr

_GLOBAL(__ide_mm_outsw)
_GLOBAL(_outsw_ns)
	cmpwi	0,r5,0
	mtctr	r5
	subi	r4,r4,2
	blelr-
00:	lhzu	r5,2(r4)
	sth	r5,0(r3)	/* native byte order */
	eieio
	bdnz	00b
	blr

_GLOBAL(__ide_mm_insl)
_GLOBAL(_insl_ns)
	cmpwi	0,r5,0
	mtctr	r5
	subi	r4,r4,4
	blelr-
00:	lwz	r5,0(r3)	/* native byte order */
	eieio
	stwu	r5,4(r4)
	bdnz	00b
	blr

_GLOBAL(__ide_mm_outsl)
_GLOBAL(_outsl_ns)
	cmpwi	0,r5,0
	mtctr	r5
	subi	r4,r4,4
	blelr-
00:	lwzu	r5,4(r4)
	stw	r5,0(r3)	/* native byte order */
	eieio
	bdnz	00b
	blr
/*
 * Extended precision shifts.
 *
 * Updated to be valid for shift counts from 0 to 63 inclusive.
 * -- Gabriel
 *
 * R3/R4 has 64 bit value (R3 = most-significant word)
 * R5 has shift count
 * result in R3/R4
 *
 * ashrdi3: arithmetic right shift (sign propagation)
 * lshrdi3: logical right shift
 * ashldi3: left shift
 *
 * These rely on srw/slw producing 0 for counts > 31, so the
 * two partial results can simply be OR-ed together.
 */
_GLOBAL(__ashrdi3)
	subfic	r6,r5,32
	srw	r4,r4,r5	# LSW = count > 31 ? 0 : LSW >> count
	addi	r7,r5,32	# could be xori, or addi with -32
	slw	r6,r3,r6	# t1 = count > 31 ? 0 : MSW << (32-count)
	rlwinm	r8,r7,0,32	# t3 = (count < 32) ? 32 : 0
	sraw	r7,r3,r7	# t2 = MSW >> (count-32)
	or	r4,r4,r6	# LSW |= t1
	slw	r7,r7,r8	# t2 = (count < 32) ? 0 : t2
	sraw	r3,r3,r5	# MSW = MSW >> count
	or	r4,r4,r7	# LSW |= t2
	blr

_GLOBAL(__ashldi3)
	subfic	r6,r5,32
	slw	r3,r3,r5	# MSW = count > 31 ? 0 : MSW << count
	addi	r7,r5,32	# could be xori, or addi with -32
	srw	r6,r4,r6	# t1 = count > 31 ? 0 : LSW >> (32-count)
	slw	r7,r4,r7	# t2 = count < 32 ? 0 : LSW << (count-32)
	or	r3,r3,r6	# MSW |= t1
	slw	r4,r4,r5	# LSW = LSW << count
	or	r3,r3,r7	# MSW |= t2
	blr

_GLOBAL(__lshrdi3)
	subfic	r6,r5,32
	srw	r4,r4,r5	# LSW = count > 31 ? 0 : LSW >> count
	addi	r7,r5,32	# could be xori, or addi with -32
	slw	r6,r3,r6	# t1 = count > 31 ? 0 : MSW << (32-count)
	srw	r7,r3,r7	# t2 = count < 32 ? 0 : MSW >> (count-32)
	or	r4,r4,r6	# LSW |= t1
	srw	r3,r3,r5	# MSW = MSW >> count
	or	r4,r4,r7	# LSW |= t2
	blr
/*
 * int abs(int x) — branchless absolute value.
 * r3 = x; result in r3; clobbers r4.
 */
_GLOBAL(abs)
	srawi	r4,r3,31	/* r4 = (x < 0) ? -1 : 0 */
	xor	r3,r3,r4
	sub	r3,r3,r4	/* (x ^ m) - m == |x| */
	blr

/*
 * Return the caller's stack pointer in r3.
 */
_GLOBAL(_get_SP)
	mr	r3,r1		/* Close enough */
	blr
/*
 * These are used in the alignment trap handler when emulating
 * single-precision loads and stores.
 * We restore and save the fpscr so the task gets the same result
 * and exceptions as if the cpu had performed the load or store.
 *
 * r3 = source address, r4 = destination address,
 * r5 = address just past the saved fpscr image (read/written at -4).
 * Clobbers fr0.
 */
#ifdef CONFIG_PPC_FPU
_GLOBAL(cvt_fd)
	lfd	0,-4(r5)	/* load up fpscr value */
	mtfsf	0xff,0
	lfs	0,0(r3)		/* single-precision load, widens to double */
	stfd	0,0(r4)
	mffs	0		/* save new fpscr value */
	stfd	0,-4(r5)
	blr
_GLOBAL(cvt_df)
	lfd	0,-4(r5)	/* load up fpscr value */
	mtfsf	0xff,0
	lfd	0,0(r3)
	stfs	0,0(r4)		/* single-precision store, rounds */
	mffs	0		/* save new fpscr value */
	stfd	0,-4(r5)
	blr
#endif
/*
 * Create a kernel thread
 * kernel_thread(fn, arg, flags)
 * r3 = fn, r4 = arg, r5 = clone flags.
 * Parent returns the clone() result in r3; the child calls fn(arg)
 * and invokes exit() if fn ever returns.
 */
_GLOBAL(kernel_thread)
	stwu	r1,-16(r1)
	stw	r30,8(r1)	/* save callee-saved regs we use */
	stw	r31,12(r1)
	mr	r30,r3		/* function */
	mr	r31,r4		/* argument */
	ori	r3,r5,CLONE_VM	/* flags */
	oris	r3,r3,CLONE_UNTRACED>>16
	li	r4,0		/* new sp (unused) */
	li	r0,__NR_clone
	sc
	cmpwi	0,r3,0		/* parent or child? */
	bne	1f		/* return if parent */
	li	r0,0		/* make top-level stack frame */
	stwu	r0,-16(r1)
	mtlr	r30		/* fn addr in lr */
	mr	r3,r31		/* load arg and call fn */
	PPC440EP_ERR42
	blrl
	li	r0,__NR_exit	/* exit if function returns */
	li	r3,0
	sc
1:	lwz	r30,8(r1)
	lwz	r31,12(r1)
	addi	r1,r1,16
	blr
/*
 * This routine is just here to keep GCC happy - sigh...
 */
_GLOBAL(__main)
	blr

/*
 * SYSCALL(name): emit a caller for syscall __NR_<name>.
 * On failure (SO bit set after sc), stores the error code in
 * errno and returns -1, libc-style.
 */
#define SYSCALL(name) \
_GLOBAL(name) \
	li	r0,__NR_##name; \
	sc; \
	bnslr; \
	lis	r4,errno@ha; \
	stw	r3,errno@l(r4); \
	li	r3,-1; \
	blr

SYSCALL(execve)
  976. /* Why isn't this a) automatic, b) written in 'C'? */
  977. .data
  978. .align 4
  979. _GLOBAL(sys_call_table)
/*
 * System-call dispatch table, indexed by syscall number (r0 at 'sc').
 * Each entry is one 32-bit handler address ('.long').  The POSITION of
 * every entry is userspace ABI: never reorder or delete lines; retired
 * or reserved slots keep their position as sys_ni_syscall.  The
 * /* NN */ comments mark every fifth slot as a reading aid.
 * ppc_*-prefixed entries are arch-specific wrappers (fork/clone/vfork,
 * select, sigsuspend, swapcontext, fadvise64_64) rather than the
 * generic sys_* implementations.
 */
  980. .long sys_restart_syscall /* 0 */
  981. .long sys_exit
  982. .long ppc_fork
  983. .long sys_read
  984. .long sys_write
  985. .long sys_open /* 5 */
  986. .long sys_close
  987. .long sys_waitpid
  988. .long sys_creat
  989. .long sys_link
  990. .long sys_unlink /* 10 */
  991. .long sys_execve
  992. .long sys_chdir
  993. .long sys_time
  994. .long sys_mknod
  995. .long sys_chmod /* 15 */
  996. .long sys_lchown
  997. .long sys_ni_syscall /* old break syscall holder */
  998. .long sys_stat
  999. .long sys_lseek
  1000. .long sys_getpid /* 20 */
  1001. .long sys_mount
  1002. .long sys_oldumount
  1003. .long sys_setuid
  1004. .long sys_getuid
  1005. .long sys_stime /* 25 */
  1006. .long sys_ptrace
  1007. .long sys_alarm
  1008. .long sys_fstat
  1009. .long sys_pause
  1010. .long sys_utime /* 30 */
  1011. .long sys_ni_syscall /* old stty syscall holder */
  1012. .long sys_ni_syscall /* old gtty syscall holder */
  1013. .long sys_access
  1014. .long sys_nice
  1015. .long sys_ni_syscall /* 35 */ /* old ftime syscall holder */
  1016. .long sys_sync
  1017. .long sys_kill
  1018. .long sys_rename
  1019. .long sys_mkdir
  1020. .long sys_rmdir /* 40 */
  1021. .long sys_dup
  1022. .long sys_pipe
  1023. .long sys_times
  1024. .long sys_ni_syscall /* old prof syscall holder */
  1025. .long sys_brk /* 45 */
  1026. .long sys_setgid
  1027. .long sys_getgid
  1028. .long sys_signal
  1029. .long sys_geteuid
  1030. .long sys_getegid /* 50 */
  1031. .long sys_acct
  1032. .long sys_umount /* recycled never used phys() */
  1033. .long sys_ni_syscall /* old lock syscall holder */
  1034. .long sys_ioctl
  1035. .long sys_fcntl /* 55 */
  1036. .long sys_ni_syscall /* old mpx syscall holder */
  1037. .long sys_setpgid
  1038. .long sys_ni_syscall /* old ulimit syscall holder */
  1039. .long sys_olduname
  1040. .long sys_umask /* 60 */
  1041. .long sys_chroot
  1042. .long sys_ustat
  1043. .long sys_dup2
  1044. .long sys_getppid
  1045. .long sys_getpgrp /* 65 */
  1046. .long sys_setsid
  1047. .long sys_sigaction
  1048. .long sys_sgetmask
  1049. .long sys_ssetmask
  1050. .long sys_setreuid /* 70 */
  1051. .long sys_setregid
  1052. .long ppc_sigsuspend
  1053. .long sys_sigpending
  1054. .long sys_sethostname
  1055. .long sys_setrlimit /* 75 */
  1056. .long sys_old_getrlimit
  1057. .long sys_getrusage
  1058. .long sys_gettimeofday
  1059. .long sys_settimeofday
  1060. .long sys_getgroups /* 80 */
  1061. .long sys_setgroups
  1062. .long ppc_select
  1063. .long sys_symlink
  1064. .long sys_lstat
  1065. .long sys_readlink /* 85 */
  1066. .long sys_uselib
  1067. .long sys_swapon
  1068. .long sys_reboot
  1069. .long old_readdir
  1070. .long sys_mmap /* 90 */
  1071. .long sys_munmap
  1072. .long sys_truncate
  1073. .long sys_ftruncate
  1074. .long sys_fchmod
  1075. .long sys_fchown /* 95 */
  1076. .long sys_getpriority
  1077. .long sys_setpriority
  1078. .long sys_ni_syscall /* old profil syscall holder */
  1079. .long sys_statfs
  1080. .long sys_fstatfs /* 100 */
  1081. .long sys_ni_syscall
  1082. .long sys_socketcall
  1083. .long sys_syslog
  1084. .long sys_setitimer
  1085. .long sys_getitimer /* 105 */
  1086. .long sys_newstat
  1087. .long sys_newlstat
  1088. .long sys_newfstat
  1089. .long sys_uname
  1090. .long sys_ni_syscall /* 110 */
  1091. .long sys_vhangup
  1092. .long sys_ni_syscall /* old 'idle' syscall */
  1093. .long sys_ni_syscall
  1094. .long sys_wait4
  1095. .long sys_swapoff /* 115 */
  1096. .long sys_sysinfo
  1097. .long sys_ipc
  1098. .long sys_fsync
  1099. .long sys_sigreturn
  1100. .long ppc_clone /* 120 */
  1101. .long sys_setdomainname
  1102. .long sys_newuname
  1103. .long sys_ni_syscall
  1104. .long sys_adjtimex
  1105. .long sys_mprotect /* 125 */
  1106. .long sys_sigprocmask
  1107. .long sys_ni_syscall /* old sys_create_module */
  1108. .long sys_init_module
  1109. .long sys_delete_module
  1110. .long sys_ni_syscall /* old sys_get_kernel_syms */ /* 130 */
  1111. .long sys_quotactl
  1112. .long sys_getpgid
  1113. .long sys_fchdir
  1114. .long sys_bdflush
  1115. .long sys_sysfs /* 135 */
  1116. .long sys_personality
  1117. .long sys_ni_syscall /* for afs_syscall */
  1118. .long sys_setfsuid
  1119. .long sys_setfsgid
  1120. .long sys_llseek /* 140 */
  1121. .long sys_getdents
  1122. .long ppc_select
  1123. .long sys_flock
  1124. .long sys_msync
  1125. .long sys_readv /* 145 */
  1126. .long sys_writev
  1127. .long sys_getsid
  1128. .long sys_fdatasync
  1129. .long sys_sysctl
  1130. .long sys_mlock /* 150 */
  1131. .long sys_munlock
  1132. .long sys_mlockall
  1133. .long sys_munlockall
  1134. .long sys_sched_setparam
  1135. .long sys_sched_getparam /* 155 */
  1136. .long sys_sched_setscheduler
  1137. .long sys_sched_getscheduler
  1138. .long sys_sched_yield
  1139. .long sys_sched_get_priority_max
  1140. .long sys_sched_get_priority_min /* 160 */
  1141. .long sys_sched_rr_get_interval
  1142. .long sys_nanosleep
  1143. .long sys_mremap
  1144. .long sys_setresuid
  1145. .long sys_getresuid /* 165 */
  1146. .long sys_ni_syscall /* old sys_query_module */
  1147. .long sys_poll
  1148. .long sys_nfsservctl
  1149. .long sys_setresgid
  1150. .long sys_getresgid /* 170 */
  1151. .long sys_prctl
  1152. .long sys_rt_sigreturn
  1153. .long sys_rt_sigaction
  1154. .long sys_rt_sigprocmask
  1155. .long sys_rt_sigpending /* 175 */
  1156. .long sys_rt_sigtimedwait
  1157. .long sys_rt_sigqueueinfo
  1158. .long ppc_rt_sigsuspend
  1159. .long sys_pread64
  1160. .long sys_pwrite64 /* 180 */
  1161. .long sys_chown
  1162. .long sys_getcwd
  1163. .long sys_capget
  1164. .long sys_capset
  1165. .long sys_sigaltstack /* 185 */
  1166. .long sys_sendfile
  1167. .long sys_ni_syscall /* streams1 */
  1168. .long sys_ni_syscall /* streams2 */
  1169. .long ppc_vfork
  1170. .long sys_getrlimit /* 190 */
  1171. .long sys_readahead
  1172. .long sys_mmap2
  1173. .long sys_truncate64
  1174. .long sys_ftruncate64
  1175. .long sys_stat64 /* 195 */
  1176. .long sys_lstat64
  1177. .long sys_fstat64
  1178. .long sys_pciconfig_read
  1179. .long sys_pciconfig_write
  1180. .long sys_pciconfig_iobase /* 200 */
  1181. .long sys_ni_syscall /* 201 - reserved - MacOnLinux - new */
  1182. .long sys_getdents64
  1183. .long sys_pivot_root
  1184. .long sys_fcntl64
  1185. .long sys_madvise /* 205 */
  1186. .long sys_mincore
  1187. .long sys_gettid
  1188. .long sys_tkill
  1189. .long sys_setxattr
  1190. .long sys_lsetxattr /* 210 */
  1191. .long sys_fsetxattr
  1192. .long sys_getxattr
  1193. .long sys_lgetxattr
  1194. .long sys_fgetxattr
  1195. .long sys_listxattr /* 215 */
  1196. .long sys_llistxattr
  1197. .long sys_flistxattr
  1198. .long sys_removexattr
  1199. .long sys_lremovexattr
  1200. .long sys_fremovexattr /* 220 */
  1201. .long sys_futex
  1202. .long sys_sched_setaffinity
  1203. .long sys_sched_getaffinity
  1204. .long sys_ni_syscall
  1205. .long sys_ni_syscall /* 225 - reserved for Tux */
  1206. .long sys_sendfile64
  1207. .long sys_io_setup
  1208. .long sys_io_destroy
  1209. .long sys_io_getevents
  1210. .long sys_io_submit /* 230 */
  1211. .long sys_io_cancel
  1212. .long sys_set_tid_address
  1213. .long sys_fadvise64
  1214. .long sys_exit_group
  1215. .long sys_lookup_dcookie /* 235 */
  1216. .long sys_epoll_create
  1217. .long sys_epoll_ctl
  1218. .long sys_epoll_wait
  1219. .long sys_remap_file_pages
  1220. .long sys_timer_create /* 240 */
  1221. .long sys_timer_settime
  1222. .long sys_timer_gettime
  1223. .long sys_timer_getoverrun
  1224. .long sys_timer_delete
  1225. .long sys_clock_settime /* 245 */
  1226. .long sys_clock_gettime
  1227. .long sys_clock_getres
  1228. .long sys_clock_nanosleep
  1229. .long ppc_swapcontext
  1230. .long sys_tgkill /* 250 */
  1231. .long sys_utimes
  1232. .long sys_statfs64
  1233. .long sys_fstatfs64
  1234. .long ppc_fadvise64_64
  1235. .long sys_ni_syscall /* 255 - rtas (used on ppc64) */
  1236. .long sys_debug_setcontext
  1237. .long sys_ni_syscall /* 257 reserved for vserver */
  1238. .long sys_ni_syscall /* 258 reserved for new sys_remap_file_pages */
  1239. .long sys_ni_syscall /* 259 reserved for new sys_mbind */
  1240. .long sys_ni_syscall /* 260 reserved for new sys_get_mempolicy */
  1241. .long sys_ni_syscall /* 261 reserved for new sys_set_mempolicy */
  1242. .long sys_mq_open
  1243. .long sys_mq_unlink
  1244. .long sys_mq_timedsend
  1245. .long sys_mq_timedreceive /* 265 */
  1246. .long sys_mq_notify
  1247. .long sys_mq_getsetattr
  1248. .long sys_kexec_load
  1249. .long sys_add_key
  1250. .long sys_request_key /* 270 */
  1251. .long sys_keyctl
  1252. .long sys_waitid
  1253. .long sys_ioprio_set
  1254. .long sys_ioprio_get
  1255. .long sys_inotify_init /* 275 */
  1256. .long sys_inotify_add_watch
  1257. .long sys_inotify_rm_watch