/* arch/ppc/kernel/misc.S — miscellaneous PPC32 low-level routines */
  1. /*
  2. * This file contains miscellaneous low-level functions.
  3. * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
  4. *
  5. * Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
  6. * and Paul Mackerras.
  7. *
  8. * This program is free software; you can redistribute it and/or
  9. * modify it under the terms of the GNU General Public License
  10. * as published by the Free Software Foundation; either version
  11. * 2 of the License, or (at your option) any later version.
  12. *
  13. */
  14. #include <linux/config.h>
  15. #include <linux/sys.h>
  16. #include <asm/unistd.h>
  17. #include <asm/errno.h>
  18. #include <asm/processor.h>
  19. #include <asm/page.h>
  20. #include <asm/cache.h>
  21. #include <asm/cputable.h>
  22. #include <asm/mmu.h>
  23. #include <asm/ppc_asm.h>
  24. #include <asm/thread_info.h>
  25. #include <asm/offsets.h>
  26. .text
  27. .align 5
  28. _GLOBAL(__delay)
  29. cmpwi 0,r3,0
  30. mtctr r3
  31. beqlr
  32. 1: bdnz 1b
  33. blr
  34. /*
  35. * Returns (address we're running at) - (address we were linked at)
  36. * for use before the text and data are mapped to KERNELBASE.
  37. */
  38. _GLOBAL(reloc_offset)
  39. mflr r0
  40. bl 1f
  41. 1: mflr r3
  42. lis r4,1b@ha
  43. addi r4,r4,1b@l
  44. subf r3,r4,r3
  45. mtlr r0
  46. blr
  47. /*
  48. * add_reloc_offset(x) returns x + reloc_offset().
  49. */
  50. _GLOBAL(add_reloc_offset)
  51. mflr r0
  52. bl 1f
  53. 1: mflr r5
  54. lis r4,1b@ha
  55. addi r4,r4,1b@l
  56. subf r5,r4,r5
  57. add r3,r3,r5
  58. mtlr r0
  59. blr
  60. /*
  61. * sub_reloc_offset(x) returns x - reloc_offset().
  62. */
  63. _GLOBAL(sub_reloc_offset)
  64. mflr r0
  65. bl 1f
  66. 1: mflr r5
  67. lis r4,1b@ha
  68. addi r4,r4,1b@l
  69. subf r5,r4,r5
  70. subf r3,r5,r3
  71. mtlr r0
  72. blr
  73. /*
  74. * reloc_got2 runs through the .got2 section adding an offset
  75. * to each entry.
  76. */
  77. _GLOBAL(reloc_got2)
  78. mflr r11
  79. lis r7,__got2_start@ha
  80. addi r7,r7,__got2_start@l
  81. lis r8,__got2_end@ha
  82. addi r8,r8,__got2_end@l
  83. subf r8,r7,r8
  84. srwi. r8,r8,2
  85. beqlr
  86. mtctr r8
  87. bl 1f
  88. 1: mflr r0
  89. lis r4,1b@ha
  90. addi r4,r4,1b@l
  91. subf r0,r4,r0
  92. add r7,r0,r7
  93. 2: lwz r0,0(r7)
  94. add r0,r0,r3
  95. stw r0,0(r7)
  96. addi r7,r7,4
  97. bdnz 2b
  98. mtlr r11
  99. blr
  100. /*
  101. * identify_cpu,
  102. * called with r3 = data offset and r4 = CPU number
  103. * doesn't change r3
  104. */
  105. _GLOBAL(identify_cpu)
  106. addis r8,r3,cpu_specs@ha
  107. addi r8,r8,cpu_specs@l
  108. mfpvr r7
  109. 1:
  110. lwz r5,CPU_SPEC_PVR_MASK(r8)
  111. and r5,r5,r7
  112. lwz r6,CPU_SPEC_PVR_VALUE(r8)
  113. cmplw 0,r6,r5
  114. beq 1f
  115. addi r8,r8,CPU_SPEC_ENTRY_SIZE
  116. b 1b
  117. 1:
  118. addis r6,r3,cur_cpu_spec@ha
  119. addi r6,r6,cur_cpu_spec@l
  120. slwi r4,r4,2
  121. sub r8,r8,r3
  122. stwx r8,r4,r6
  123. blr
  124. /*
  125. * do_cpu_ftr_fixups - goes through the list of CPU feature fixups
  126. * and writes nop's over sections of code that don't apply for this cpu.
  127. * r3 = data offset (not changed)
  128. */
  129. _GLOBAL(do_cpu_ftr_fixups)
  130. /* Get CPU 0 features */
  131. addis r6,r3,cur_cpu_spec@ha
  132. addi r6,r6,cur_cpu_spec@l
  133. lwz r4,0(r6)
  134. add r4,r4,r3
  135. lwz r4,CPU_SPEC_FEATURES(r4)
  136. /* Get the fixup table */
  137. addis r6,r3,__start___ftr_fixup@ha
  138. addi r6,r6,__start___ftr_fixup@l
  139. addis r7,r3,__stop___ftr_fixup@ha
  140. addi r7,r7,__stop___ftr_fixup@l
  141. /* Do the fixup */
  142. 1: cmplw 0,r6,r7
  143. bgelr
  144. addi r6,r6,16
  145. lwz r8,-16(r6) /* mask */
  146. and r8,r8,r4
  147. lwz r9,-12(r6) /* value */
  148. cmplw 0,r8,r9
  149. beq 1b
  150. lwz r8,-8(r6) /* section begin */
  151. lwz r9,-4(r6) /* section end */
  152. subf. r9,r8,r9
  153. beq 1b
  154. /* write nops over the section of code */
  155. /* todo: if large section, add a branch at the start of it */
  156. srwi r9,r9,2
  157. mtctr r9
  158. add r8,r8,r3
  159. lis r0,0x60000000@h /* nop */
  160. 3: stw r0,0(r8)
  161. andi. r10,r4,CPU_FTR_SPLIT_ID_CACHE@l
  162. beq 2f
  163. dcbst 0,r8 /* suboptimal, but simpler */
  164. sync
  165. icbi 0,r8
  166. 2: addi r8,r8,4
  167. bdnz 3b
  168. sync /* additional sync needed on g4 */
  169. isync
  170. b 1b
  171. /*
  172. * call_setup_cpu - call the setup_cpu function for this cpu
  173. * r3 = data offset, r24 = cpu number
  174. *
  175. * Setup function is called with:
  176. * r3 = data offset
  177. * r4 = CPU number
  178. * r5 = ptr to CPU spec (relocated)
  179. */
  180. _GLOBAL(call_setup_cpu)
  181. addis r5,r3,cur_cpu_spec@ha
  182. addi r5,r5,cur_cpu_spec@l
  183. slwi r4,r24,2
  184. lwzx r5,r4,r5
  185. add r5,r5,r3
  186. lwz r6,CPU_SPEC_SETUP(r5)
  187. add r6,r6,r3
  188. mtctr r6
  189. mr r4,r24
  190. bctr
  191. #if defined(CONFIG_CPU_FREQ_PMAC) && defined(CONFIG_6xx)
  192. /* This gets called by via-pmu.c to switch the PLL selection
  193. * on 750fx CPU. This function should really be moved to some
  194. * other place (as most of the cpufreq code in via-pmu
  195. */
  196. _GLOBAL(low_choose_750fx_pll)
  197. /* Clear MSR:EE */
  198. mfmsr r7
  199. rlwinm r0,r7,0,17,15
  200. mtmsr r0
  201. /* If switching to PLL1, disable HID0:BTIC */
  202. cmplwi cr0,r3,0
  203. beq 1f
  204. mfspr r5,SPRN_HID0
  205. rlwinm r5,r5,0,27,25
  206. sync
  207. mtspr SPRN_HID0,r5
  208. isync
  209. sync
  210. 1:
  211. /* Calc new HID1 value */
  212. mfspr r4,SPRN_HID1 /* Build a HID1:PS bit from parameter */
  213. rlwinm r5,r3,16,15,15 /* Clear out HID1:PS from value read */
  214. rlwinm r4,r4,0,16,14 /* Could have I used rlwimi here ? */
  215. or r4,r4,r5
  216. mtspr SPRN_HID1,r4
  217. /* Store new HID1 image */
  218. rlwinm r6,r1,0,0,18
  219. lwz r6,TI_CPU(r6)
  220. slwi r6,r6,2
  221. addis r6,r6,nap_save_hid1@ha
  222. stw r4,nap_save_hid1@l(r6)
  223. /* If switching to PLL0, enable HID0:BTIC */
  224. cmplwi cr0,r3,0
  225. bne 1f
  226. mfspr r5,SPRN_HID0
  227. ori r5,r5,HID0_BTIC
  228. sync
  229. mtspr SPRN_HID0,r5
  230. isync
  231. sync
  232. 1:
  233. /* Return */
  234. mtmsr r7
  235. blr
  236. _GLOBAL(low_choose_7447a_dfs)
  237. /* Clear MSR:EE */
  238. mfmsr r7
  239. rlwinm r0,r7,0,17,15
  240. mtmsr r0
  241. /* Calc new HID1 value */
  242. mfspr r4,SPRN_HID1
  243. insrwi r4,r3,1,9 /* insert parameter into bit 9 */
  244. sync
  245. mtspr SPRN_HID1,r4
  246. sync
  247. isync
  248. /* Return */
  249. mtmsr r7
  250. blr
  251. #endif /* CONFIG_CPU_FREQ_PMAC && CONFIG_6xx */
  252. /* void local_save_flags_ptr(unsigned long *flags) */
  253. _GLOBAL(local_save_flags_ptr)
  254. mfmsr r4
  255. stw r4,0(r3)
  256. blr
  257. /*
  258. * Need these nops here for taking over save/restore to
  259. * handle lost intrs
  260. * -- Cort
  261. */
  262. nop
  263. nop
  264. nop
  265. nop
  266. nop
  267. nop
  268. nop
  269. nop
  270. nop
  271. nop
  272. nop
  273. nop
  274. nop
  275. nop
  276. nop
  277. nop
  278. nop
  279. _GLOBAL(local_save_flags_ptr_end)
  280. /* void local_irq_restore(unsigned long flags) */
  281. _GLOBAL(local_irq_restore)
  282. /*
  283. * Just set/clear the MSR_EE bit through restore/flags but do not
  284. * change anything else. This is needed by the RT system and makes
  285. * sense anyway.
  286. * -- Cort
  287. */
  288. mfmsr r4
  289. /* Copy all except the MSR_EE bit from r4 (current MSR value)
  290. to r3. This is the sort of thing the rlwimi instruction is
  291. designed for. -- paulus. */
  292. rlwimi r3,r4,0,17,15
  293. /* Check if things are setup the way we want _already_. */
  294. cmpw 0,r3,r4
  295. beqlr
  296. 1: SYNC
  297. mtmsr r3
  298. SYNC
  299. blr
  300. nop
  301. nop
  302. nop
  303. nop
  304. nop
  305. nop
  306. nop
  307. nop
  308. nop
  309. nop
  310. nop
  311. nop
  312. nop
  313. nop
  314. nop
  315. nop
  316. nop
  317. nop
  318. nop
  319. _GLOBAL(local_irq_restore_end)
  320. _GLOBAL(local_irq_disable)
  321. mfmsr r0 /* Get current interrupt state */
  322. rlwinm r3,r0,16+1,32-1,31 /* Extract old value of 'EE' */
  323. rlwinm r0,r0,0,17,15 /* clear MSR_EE in r0 */
  324. SYNC /* Some chip revs have problems here... */
  325. mtmsr r0 /* Update machine state */
  326. blr /* Done */
  327. /*
  328. * Need these nops here for taking over save/restore to
  329. * handle lost intrs
  330. * -- Cort
  331. */
  332. nop
  333. nop
  334. nop
  335. nop
  336. nop
  337. nop
  338. nop
  339. nop
  340. nop
  341. nop
  342. nop
  343. nop
  344. nop
  345. nop
  346. nop
  347. _GLOBAL(local_irq_disable_end)
  348. _GLOBAL(local_irq_enable)
  349. mfmsr r3 /* Get current state */
  350. ori r3,r3,MSR_EE /* Turn on 'EE' bit */
  351. SYNC /* Some chip revs have problems here... */
  352. mtmsr r3 /* Update machine state */
  353. blr
  354. /*
  355. * Need these nops here for taking over save/restore to
  356. * handle lost intrs
  357. * -- Cort
  358. */
  359. nop
  360. nop
  361. nop
  362. nop
  363. nop
  364. nop
  365. nop
  366. nop
  367. nop
  368. nop
  369. nop
  370. nop
  371. nop
  372. nop
  373. nop
  374. nop
  375. _GLOBAL(local_irq_enable_end)
  376. /*
  377. * complement mask on the msr then "or" some values on.
  378. * _nmask_and_or_msr(nmask, value_to_or)
  379. */
  380. _GLOBAL(_nmask_and_or_msr)
  381. mfmsr r0 /* Get current msr */
  382. andc r0,r0,r3 /* And off the bits set in r3 (first parm) */
  383. or r0,r0,r4 /* Or on the bits in r4 (second parm) */
  384. SYNC /* Some chip revs have problems here... */
  385. mtmsr r0 /* Update machine state */
  386. isync
  387. blr /* Done */
  388. /*
  389. * Flush MMU TLB
  390. */
  391. _GLOBAL(_tlbia)
  392. #if defined(CONFIG_40x)
  393. sync /* Flush to memory before changing mapping */
  394. tlbia
  395. isync /* Flush shadow TLB */
  396. #elif defined(CONFIG_44x)
  397. li r3,0
  398. sync
  399. /* Load high watermark */
  400. lis r4,tlb_44x_hwater@ha
  401. lwz r5,tlb_44x_hwater@l(r4)
  402. 1: tlbwe r3,r3,PPC44x_TLB_PAGEID
  403. addi r3,r3,1
  404. cmpw 0,r3,r5
  405. ble 1b
  406. isync
  407. #elif defined(CONFIG_FSL_BOOKE)
  408. /* Invalidate all entries in TLB0 */
  409. li r3, 0x04
  410. tlbivax 0,3
  411. /* Invalidate all entries in TLB1 */
  412. li r3, 0x0c
  413. tlbivax 0,3
  414. /* Invalidate all entries in TLB2 */
  415. li r3, 0x14
  416. tlbivax 0,3
  417. /* Invalidate all entries in TLB3 */
  418. li r3, 0x1c
  419. tlbivax 0,3
  420. msync
  421. #ifdef CONFIG_SMP
  422. tlbsync
  423. #endif /* CONFIG_SMP */
  424. #else /* !(CONFIG_40x || CONFIG_44x || CONFIG_FSL_BOOKE) */
  425. #if defined(CONFIG_SMP)
  426. rlwinm r8,r1,0,0,18
  427. lwz r8,TI_CPU(r8)
  428. oris r8,r8,10
  429. mfmsr r10
  430. SYNC
  431. rlwinm r0,r10,0,17,15 /* clear bit 16 (MSR_EE) */
  432. rlwinm r0,r0,0,28,26 /* clear DR */
  433. mtmsr r0
  434. SYNC_601
  435. isync
  436. lis r9,mmu_hash_lock@h
  437. ori r9,r9,mmu_hash_lock@l
  438. tophys(r9,r9)
  439. 10: lwarx r7,0,r9
  440. cmpwi 0,r7,0
  441. bne- 10b
  442. stwcx. r8,0,r9
  443. bne- 10b
  444. sync
  445. tlbia
  446. sync
  447. TLBSYNC
  448. li r0,0
  449. stw r0,0(r9) /* clear mmu_hash_lock */
  450. mtmsr r10
  451. SYNC_601
  452. isync
  453. #else /* CONFIG_SMP */
  454. sync
  455. tlbia
  456. sync
  457. #endif /* CONFIG_SMP */
  458. #endif /* ! defined(CONFIG_40x) */
  459. blr
  460. /*
  461. * Flush MMU TLB for a particular address
  462. */
  463. _GLOBAL(_tlbie)
  464. #if defined(CONFIG_40x)
  465. tlbsx. r3, 0, r3
  466. bne 10f
  467. sync
  468. /* There are only 64 TLB entries, so r3 < 64, which means bit 25 is clear.
  469. * Since 25 is the V bit in the TLB_TAG, loading this value will invalidate
  470. * the TLB entry. */
  471. tlbwe r3, r3, TLB_TAG
  472. isync
  473. 10:
  474. #elif defined(CONFIG_44x)
  475. mfspr r4,SPRN_MMUCR
  476. mfspr r5,SPRN_PID /* Get PID */
  477. rlwimi r4,r5,0,24,31 /* Set TID */
  478. mtspr SPRN_MMUCR,r4
  479. tlbsx. r3, 0, r3
  480. bne 10f
  481. sync
  482. /* There are only 64 TLB entries, so r3 < 64,
  483. * which means bit 22, is clear. Since 22 is
  484. * the V bit in the TLB_PAGEID, loading this
  485. * value will invalidate the TLB entry.
  486. */
  487. tlbwe r3, r3, PPC44x_TLB_PAGEID
  488. isync
  489. 10:
  490. #elif defined(CONFIG_FSL_BOOKE)
  491. rlwinm r4, r3, 0, 0, 19
  492. ori r5, r4, 0x08 /* TLBSEL = 1 */
  493. ori r6, r4, 0x10 /* TLBSEL = 2 */
  494. ori r7, r4, 0x18 /* TLBSEL = 3 */
  495. tlbivax 0, r4
  496. tlbivax 0, r5
  497. tlbivax 0, r6
  498. tlbivax 0, r7
  499. msync
  500. #if defined(CONFIG_SMP)
  501. tlbsync
  502. #endif /* CONFIG_SMP */
  503. #else /* !(CONFIG_40x || CONFIG_44x || CONFIG_FSL_BOOKE) */
  504. #if defined(CONFIG_SMP)
  505. rlwinm r8,r1,0,0,18
  506. lwz r8,TI_CPU(r8)
  507. oris r8,r8,11
  508. mfmsr r10
  509. SYNC
  510. rlwinm r0,r10,0,17,15 /* clear bit 16 (MSR_EE) */
  511. rlwinm r0,r0,0,28,26 /* clear DR */
  512. mtmsr r0
  513. SYNC_601
  514. isync
  515. lis r9,mmu_hash_lock@h
  516. ori r9,r9,mmu_hash_lock@l
  517. tophys(r9,r9)
  518. 10: lwarx r7,0,r9
  519. cmpwi 0,r7,0
  520. bne- 10b
  521. stwcx. r8,0,r9
  522. bne- 10b
  523. eieio
  524. tlbie r3
  525. sync
  526. TLBSYNC
  527. li r0,0
  528. stw r0,0(r9) /* clear mmu_hash_lock */
  529. mtmsr r10
  530. SYNC_601
  531. isync
  532. #else /* CONFIG_SMP */
  533. tlbie r3
  534. sync
  535. #endif /* CONFIG_SMP */
  536. #endif /* ! CONFIG_40x */
  537. blr
  538. /*
  539. * Flush instruction cache.
  540. * This is a no-op on the 601.
  541. */
  542. _GLOBAL(flush_instruction_cache)
  543. #if defined(CONFIG_8xx)
  544. isync
  545. lis r5, IDC_INVALL@h
  546. mtspr SPRN_IC_CST, r5
  547. #elif defined(CONFIG_4xx)
  548. #ifdef CONFIG_403GCX
  549. li r3, 512
  550. mtctr r3
  551. lis r4, KERNELBASE@h
  552. 1: iccci 0, r4
  553. addi r4, r4, 16
  554. bdnz 1b
  555. #else
  556. lis r3, KERNELBASE@h
  557. iccci 0,r3
  558. #endif
  559. #elif CONFIG_FSL_BOOKE
  560. BEGIN_FTR_SECTION
  561. mfspr r3,SPRN_L1CSR0
  562. ori r3,r3,L1CSR0_CFI|L1CSR0_CLFC
  563. /* msync; isync recommended here */
  564. mtspr SPRN_L1CSR0,r3
  565. isync
  566. blr
  567. END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)
  568. mfspr r3,SPRN_L1CSR1
  569. ori r3,r3,L1CSR1_ICFI|L1CSR1_ICLFR
  570. mtspr SPRN_L1CSR1,r3
  571. #else
  572. mfspr r3,SPRN_PVR
  573. rlwinm r3,r3,16,16,31
  574. cmpwi 0,r3,1
  575. beqlr /* for 601, do nothing */
  576. /* 603/604 processor - use invalidate-all bit in HID0 */
  577. mfspr r3,SPRN_HID0
  578. ori r3,r3,HID0_ICFI
  579. mtspr SPRN_HID0,r3
  580. #endif /* CONFIG_8xx/4xx */
  581. isync
  582. blr
  583. /*
  584. * Write any modified data cache blocks out to memory
  585. * and invalidate the corresponding instruction cache blocks.
  586. * This is a no-op on the 601.
  587. *
  588. * flush_icache_range(unsigned long start, unsigned long stop)
  589. */
  590. _GLOBAL(flush_icache_range)
  591. BEGIN_FTR_SECTION
  592. blr /* for 601, do nothing */
  593. END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)
  594. li r5,L1_CACHE_LINE_SIZE-1
  595. andc r3,r3,r5
  596. subf r4,r3,r4
  597. add r4,r4,r5
  598. srwi. r4,r4,LG_L1_CACHE_LINE_SIZE
  599. beqlr
  600. mtctr r4
  601. mr r6,r3
  602. 1: dcbst 0,r3
  603. addi r3,r3,L1_CACHE_LINE_SIZE
  604. bdnz 1b
  605. sync /* wait for dcbst's to get to ram */
  606. mtctr r4
  607. 2: icbi 0,r6
  608. addi r6,r6,L1_CACHE_LINE_SIZE
  609. bdnz 2b
  610. sync /* additional sync needed on g4 */
  611. isync
  612. blr
  613. /*
  614. * Write any modified data cache blocks out to memory.
  615. * Does not invalidate the corresponding cache lines (especially for
  616. * any corresponding instruction cache).
  617. *
  618. * clean_dcache_range(unsigned long start, unsigned long stop)
  619. */
  620. _GLOBAL(clean_dcache_range)
  621. li r5,L1_CACHE_LINE_SIZE-1
  622. andc r3,r3,r5
  623. subf r4,r3,r4
  624. add r4,r4,r5
  625. srwi. r4,r4,LG_L1_CACHE_LINE_SIZE
  626. beqlr
  627. mtctr r4
  628. 1: dcbst 0,r3
  629. addi r3,r3,L1_CACHE_LINE_SIZE
  630. bdnz 1b
  631. sync /* wait for dcbst's to get to ram */
  632. blr
  633. /*
  634. * Write any modified data cache blocks out to memory and invalidate them.
  635. * Does not invalidate the corresponding instruction cache blocks.
  636. *
  637. * flush_dcache_range(unsigned long start, unsigned long stop)
  638. */
  639. _GLOBAL(flush_dcache_range)
  640. li r5,L1_CACHE_LINE_SIZE-1
  641. andc r3,r3,r5
  642. subf r4,r3,r4
  643. add r4,r4,r5
  644. srwi. r4,r4,LG_L1_CACHE_LINE_SIZE
  645. beqlr
  646. mtctr r4
  647. 1: dcbf 0,r3
  648. addi r3,r3,L1_CACHE_LINE_SIZE
  649. bdnz 1b
  650. sync /* wait for dcbst's to get to ram */
  651. blr
  652. /*
  653. * Like above, but invalidate the D-cache. This is used by the 8xx
  654. * to invalidate the cache so the PPC core doesn't get stale data
  655. * from the CPM (no cache snooping here :-).
  656. *
  657. * invalidate_dcache_range(unsigned long start, unsigned long stop)
  658. */
  659. _GLOBAL(invalidate_dcache_range)
  660. li r5,L1_CACHE_LINE_SIZE-1
  661. andc r3,r3,r5
  662. subf r4,r3,r4
  663. add r4,r4,r5
  664. srwi. r4,r4,LG_L1_CACHE_LINE_SIZE
  665. beqlr
  666. mtctr r4
  667. 1: dcbi 0,r3
  668. addi r3,r3,L1_CACHE_LINE_SIZE
  669. bdnz 1b
  670. sync /* wait for dcbi's to get to ram */
  671. blr
  672. #ifdef CONFIG_NOT_COHERENT_CACHE
  673. /*
  674. * 40x cores have 8K or 16K dcache and 32 byte line size.
  675. * 44x has a 32K dcache and 32 byte line size.
  676. * 8xx has 1, 2, 4, 8K variants.
  677. * For now, cover the worst case of the 44x.
  678. * Must be called with external interrupts disabled.
  679. */
  680. #define CACHE_NWAYS 64
  681. #define CACHE_NLINES 16
  682. _GLOBAL(flush_dcache_all)
  683. li r4, (2 * CACHE_NWAYS * CACHE_NLINES)
  684. mtctr r4
  685. lis r5, KERNELBASE@h
  686. 1: lwz r3, 0(r5) /* Load one word from every line */
  687. addi r5, r5, L1_CACHE_LINE_SIZE
  688. bdnz 1b
  689. blr
  690. #endif /* CONFIG_NOT_COHERENT_CACHE */
  691. /*
  692. * Flush a particular page from the data cache to RAM.
  693. * Note: this is necessary because the instruction cache does *not*
  694. * snoop from the data cache.
  695. * This is a no-op on the 601 which has a unified cache.
  696. *
  697. * void __flush_dcache_icache(void *page)
  698. */
  699. _GLOBAL(__flush_dcache_icache)
  700. BEGIN_FTR_SECTION
  701. blr /* for 601, do nothing */
  702. END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)
  703. rlwinm r3,r3,0,0,19 /* Get page base address */
  704. li r4,4096/L1_CACHE_LINE_SIZE /* Number of lines in a page */
  705. mtctr r4
  706. mr r6,r3
  707. 0: dcbst 0,r3 /* Write line to ram */
  708. addi r3,r3,L1_CACHE_LINE_SIZE
  709. bdnz 0b
  710. sync
  711. mtctr r4
  712. 1: icbi 0,r6
  713. addi r6,r6,L1_CACHE_LINE_SIZE
  714. bdnz 1b
  715. sync
  716. isync
  717. blr
  718. /*
  719. * Flush a particular page from the data cache to RAM, identified
  720. * by its physical address. We turn off the MMU so we can just use
  721. * the physical address (this may be a highmem page without a kernel
  722. * mapping).
  723. *
  724. * void __flush_dcache_icache_phys(unsigned long physaddr)
  725. */
  726. _GLOBAL(__flush_dcache_icache_phys)
  727. BEGIN_FTR_SECTION
  728. blr /* for 601, do nothing */
  729. END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)
  730. mfmsr r10
  731. rlwinm r0,r10,0,28,26 /* clear DR */
  732. mtmsr r0
  733. isync
  734. rlwinm r3,r3,0,0,19 /* Get page base address */
  735. li r4,4096/L1_CACHE_LINE_SIZE /* Number of lines in a page */
  736. mtctr r4
  737. mr r6,r3
  738. 0: dcbst 0,r3 /* Write line to ram */
  739. addi r3,r3,L1_CACHE_LINE_SIZE
  740. bdnz 0b
  741. sync
  742. mtctr r4
  743. 1: icbi 0,r6
  744. addi r6,r6,L1_CACHE_LINE_SIZE
  745. bdnz 1b
  746. sync
  747. mtmsr r10 /* restore DR */
  748. isync
  749. blr
  750. /*
  751. * Clear pages using the dcbz instruction, which doesn't cause any
  752. * memory traffic (except to write out any cache lines which get
  753. * displaced). This only works on cacheable memory.
  754. *
  755. * void clear_pages(void *page, int order) ;
  756. */
  757. _GLOBAL(clear_pages)
  758. li r0,4096/L1_CACHE_LINE_SIZE
  759. slw r0,r0,r4
  760. mtctr r0
  761. #ifdef CONFIG_8xx
  762. li r4, 0
  763. 1: stw r4, 0(r3)
  764. stw r4, 4(r3)
  765. stw r4, 8(r3)
  766. stw r4, 12(r3)
  767. #else
  768. 1: dcbz 0,r3
  769. #endif
  770. addi r3,r3,L1_CACHE_LINE_SIZE
  771. bdnz 1b
  772. blr
  773. /*
  774. * Copy a whole page. We use the dcbz instruction on the destination
  775. * to reduce memory traffic (it eliminates the unnecessary reads of
  776. * the destination into cache). This requires that the destination
  777. * is cacheable.
  778. */
  779. #define COPY_16_BYTES \
  780. lwz r6,4(r4); \
  781. lwz r7,8(r4); \
  782. lwz r8,12(r4); \
  783. lwzu r9,16(r4); \
  784. stw r6,4(r3); \
  785. stw r7,8(r3); \
  786. stw r8,12(r3); \
  787. stwu r9,16(r3)
  788. _GLOBAL(copy_page)
  789. addi r3,r3,-4
  790. addi r4,r4,-4
  791. #ifdef CONFIG_8xx
  792. /* don't use prefetch on 8xx */
  793. li r0,4096/L1_CACHE_LINE_SIZE
  794. mtctr r0
  795. 1: COPY_16_BYTES
  796. bdnz 1b
  797. blr
  798. #else /* not 8xx, we can prefetch */
  799. li r5,4
  800. #if MAX_COPY_PREFETCH > 1
  801. li r0,MAX_COPY_PREFETCH
  802. li r11,4
  803. mtctr r0
  804. 11: dcbt r11,r4
  805. addi r11,r11,L1_CACHE_LINE_SIZE
  806. bdnz 11b
  807. #else /* MAX_COPY_PREFETCH == 1 */
  808. dcbt r5,r4
  809. li r11,L1_CACHE_LINE_SIZE+4
  810. #endif /* MAX_COPY_PREFETCH */
  811. li r0,4096/L1_CACHE_LINE_SIZE - MAX_COPY_PREFETCH
  812. crclr 4*cr0+eq
  813. 2:
  814. mtctr r0
  815. 1:
  816. dcbt r11,r4
  817. dcbz r5,r3
  818. COPY_16_BYTES
  819. #if L1_CACHE_LINE_SIZE >= 32
  820. COPY_16_BYTES
  821. #if L1_CACHE_LINE_SIZE >= 64
  822. COPY_16_BYTES
  823. COPY_16_BYTES
  824. #if L1_CACHE_LINE_SIZE >= 128
  825. COPY_16_BYTES
  826. COPY_16_BYTES
  827. COPY_16_BYTES
  828. COPY_16_BYTES
  829. #endif
  830. #endif
  831. #endif
  832. bdnz 1b
  833. beqlr
  834. crnot 4*cr0+eq,4*cr0+eq
  835. li r0,MAX_COPY_PREFETCH
  836. li r11,4
  837. b 2b
  838. #endif /* CONFIG_8xx */
  839. /*
  840. * void atomic_clear_mask(atomic_t mask, atomic_t *addr)
  841. * void atomic_set_mask(atomic_t mask, atomic_t *addr);
  842. */
  843. _GLOBAL(atomic_clear_mask)
  844. 10: lwarx r5,0,r4
  845. andc r5,r5,r3
  846. PPC405_ERR77(0,r4)
  847. stwcx. r5,0,r4
  848. bne- 10b
  849. blr
  850. _GLOBAL(atomic_set_mask)
  851. 10: lwarx r5,0,r4
  852. or r5,r5,r3
  853. PPC405_ERR77(0,r4)
  854. stwcx. r5,0,r4
  855. bne- 10b
  856. blr
  857. /*
  858. * I/O string operations
  859. *
  860. * insb(port, buf, len)
  861. * outsb(port, buf, len)
  862. * insw(port, buf, len)
  863. * outsw(port, buf, len)
  864. * insl(port, buf, len)
  865. * outsl(port, buf, len)
  866. * insw_ns(port, buf, len)
  867. * outsw_ns(port, buf, len)
  868. * insl_ns(port, buf, len)
  869. * outsl_ns(port, buf, len)
  870. *
  871. * The *_ns versions don't do byte-swapping.
  872. */
  873. _GLOBAL(_insb)
  874. cmpwi 0,r5,0
  875. mtctr r5
  876. subi r4,r4,1
  877. blelr-
  878. 00: lbz r5,0(r3)
  879. eieio
  880. stbu r5,1(r4)
  881. bdnz 00b
  882. blr
  883. _GLOBAL(_outsb)
  884. cmpwi 0,r5,0
  885. mtctr r5
  886. subi r4,r4,1
  887. blelr-
  888. 00: lbzu r5,1(r4)
  889. stb r5,0(r3)
  890. eieio
  891. bdnz 00b
  892. blr
  893. _GLOBAL(_insw)
  894. cmpwi 0,r5,0
  895. mtctr r5
  896. subi r4,r4,2
  897. blelr-
  898. 00: lhbrx r5,0,r3
  899. eieio
  900. sthu r5,2(r4)
  901. bdnz 00b
  902. blr
  903. _GLOBAL(_outsw)
  904. cmpwi 0,r5,0
  905. mtctr r5
  906. subi r4,r4,2
  907. blelr-
  908. 00: lhzu r5,2(r4)
  909. eieio
  910. sthbrx r5,0,r3
  911. bdnz 00b
  912. blr
  913. _GLOBAL(_insl)
  914. cmpwi 0,r5,0
  915. mtctr r5
  916. subi r4,r4,4
  917. blelr-
  918. 00: lwbrx r5,0,r3
  919. eieio
  920. stwu r5,4(r4)
  921. bdnz 00b
  922. blr
  923. _GLOBAL(_outsl)
  924. cmpwi 0,r5,0
  925. mtctr r5
  926. subi r4,r4,4
  927. blelr-
  928. 00: lwzu r5,4(r4)
  929. stwbrx r5,0,r3
  930. eieio
  931. bdnz 00b
  932. blr
  933. _GLOBAL(__ide_mm_insw)
  934. _GLOBAL(_insw_ns)
  935. cmpwi 0,r5,0
  936. mtctr r5
  937. subi r4,r4,2
  938. blelr-
  939. 00: lhz r5,0(r3)
  940. eieio
  941. sthu r5,2(r4)
  942. bdnz 00b
  943. blr
  944. _GLOBAL(__ide_mm_outsw)
  945. _GLOBAL(_outsw_ns)
  946. cmpwi 0,r5,0
  947. mtctr r5
  948. subi r4,r4,2
  949. blelr-
  950. 00: lhzu r5,2(r4)
  951. sth r5,0(r3)
  952. eieio
  953. bdnz 00b
  954. blr
  955. _GLOBAL(__ide_mm_insl)
  956. _GLOBAL(_insl_ns)
  957. cmpwi 0,r5,0
  958. mtctr r5
  959. subi r4,r4,4
  960. blelr-
  961. 00: lwz r5,0(r3)
  962. eieio
  963. stwu r5,4(r4)
  964. bdnz 00b
  965. blr
  966. _GLOBAL(__ide_mm_outsl)
  967. _GLOBAL(_outsl_ns)
  968. cmpwi 0,r5,0
  969. mtctr r5
  970. subi r4,r4,4
  971. blelr-
  972. 00: lwzu r5,4(r4)
  973. stw r5,0(r3)
  974. eieio
  975. bdnz 00b
  976. blr
  977. /*
  978. * Extended precision shifts.
  979. *
  980. * Updated to be valid for shift counts from 0 to 63 inclusive.
  981. * -- Gabriel
  982. *
  983. * R3/R4 has 64 bit value
  984. * R5 has shift count
  985. * result in R3/R4
  986. *
  987. * ashrdi3: arithmetic right shift (sign propagation)
  988. * lshrdi3: logical right shift
  989. * ashldi3: left shift
  990. */
  991. _GLOBAL(__ashrdi3)
  992. subfic r6,r5,32
  993. srw r4,r4,r5 # LSW = count > 31 ? 0 : LSW >> count
  994. addi r7,r5,32 # could be xori, or addi with -32
  995. slw r6,r3,r6 # t1 = count > 31 ? 0 : MSW << (32-count)
  996. rlwinm r8,r7,0,32 # t3 = (count < 32) ? 32 : 0
  997. sraw r7,r3,r7 # t2 = MSW >> (count-32)
  998. or r4,r4,r6 # LSW |= t1
  999. slw r7,r7,r8 # t2 = (count < 32) ? 0 : t2
  1000. sraw r3,r3,r5 # MSW = MSW >> count
  1001. or r4,r4,r7 # LSW |= t2
  1002. blr
  1003. _GLOBAL(__ashldi3)
  1004. subfic r6,r5,32
  1005. slw r3,r3,r5 # MSW = count > 31 ? 0 : MSW << count
  1006. addi r7,r5,32 # could be xori, or addi with -32
  1007. srw r6,r4,r6 # t1 = count > 31 ? 0 : LSW >> (32-count)
  1008. slw r7,r4,r7 # t2 = count < 32 ? 0 : LSW << (count-32)
  1009. or r3,r3,r6 # MSW |= t1
  1010. slw r4,r4,r5 # LSW = LSW << count
  1011. or r3,r3,r7 # MSW |= t2
  1012. blr
  1013. _GLOBAL(__lshrdi3)
  1014. subfic r6,r5,32
  1015. srw r4,r4,r5 # LSW = count > 31 ? 0 : LSW >> count
  1016. addi r7,r5,32 # could be xori, or addi with -32
  1017. slw r6,r3,r6 # t1 = count > 31 ? 0 : MSW << (32-count)
  1018. srw r7,r3,r7 # t2 = count < 32 ? 0 : MSW >> (count-32)
  1019. or r4,r4,r6 # LSW |= t1
  1020. srw r3,r3,r5 # MSW = MSW >> count
  1021. or r4,r4,r7 # LSW |= t2
  1022. blr
  1023. _GLOBAL(abs)
  1024. srawi r4,r3,31
  1025. xor r3,r3,r4
  1026. sub r3,r3,r4
  1027. blr
  1028. _GLOBAL(_get_SP)
  1029. mr r3,r1 /* Close enough */
  1030. blr
  1031. /*
  1032. * These are used in the alignment trap handler when emulating
  1033. * single-precision loads and stores.
  1034. * We restore and save the fpscr so the task gets the same result
  1035. * and exceptions as if the cpu had performed the load or store.
  1036. */
  1037. #ifdef CONFIG_PPC_FPU
  1038. _GLOBAL(cvt_fd)
  1039. lfd 0,-4(r5) /* load up fpscr value */
  1040. mtfsf 0xff,0
  1041. lfs 0,0(r3)
  1042. stfd 0,0(r4)
  1043. mffs 0 /* save new fpscr value */
  1044. stfd 0,-4(r5)
  1045. blr
  1046. _GLOBAL(cvt_df)
  1047. lfd 0,-4(r5) /* load up fpscr value */
  1048. mtfsf 0xff,0
  1049. lfd 0,0(r3)
  1050. stfs 0,0(r4)
  1051. mffs 0 /* save new fpscr value */
  1052. stfd 0,-4(r5)
  1053. blr
  1054. #endif
  1055. /*
  1056. * Create a kernel thread
  1057. * kernel_thread(fn, arg, flags)
  1058. */
  1059. _GLOBAL(kernel_thread)
  1060. stwu r1,-16(r1)
  1061. stw r30,8(r1)
  1062. stw r31,12(r1)
  1063. mr r30,r3 /* function */
  1064. mr r31,r4 /* argument */
  1065. ori r3,r5,CLONE_VM /* flags */
  1066. oris r3,r3,CLONE_UNTRACED>>16
  1067. li r4,0 /* new sp (unused) */
  1068. li r0,__NR_clone
  1069. sc
  1070. cmpwi 0,r3,0 /* parent or child? */
  1071. bne 1f /* return if parent */
  1072. li r0,0 /* make top-level stack frame */
  1073. stwu r0,-16(r1)
  1074. mtlr r30 /* fn addr in lr */
  1075. mr r3,r31 /* load arg and call fn */
  1076. PPC440EP_ERR42
  1077. blrl
  1078. li r0,__NR_exit /* exit if function returns */
  1079. li r3,0
  1080. sc
  1081. 1: lwz r30,8(r1)
  1082. lwz r31,12(r1)
  1083. addi r1,r1,16
  1084. blr
  1085. /*
  1086. * This routine is just here to keep GCC happy - sigh...
  1087. */
  1088. _GLOBAL(__main)
  1089. blr
/*
 * SYSCALL(name): emit a libc-style stub for syscall 'name'.
 * Invokes the kernel with 'sc'; bnslr returns directly on success
 * (CR0 summary-overflow clear).  On failure the error code in r3 is
 * stored into 'errno' and -1 is returned.
 */
#define SYSCALL(name) \
_GLOBAL(name) \
	li	r0,__NR_##name; \
	sc; \
	bnslr; \
	lis	r4,errno@ha; \
	stw	r3,errno@l(r4); \
	li	r3,-1; \
	blr
SYSCALL(execve)
  1100. /* Why isn't this a) automatic, b) written in 'C'? */
  1101. .data
  1102. .align 4
  1103. _GLOBAL(sys_call_table)
  1104. .long sys_restart_syscall /* 0 */
  1105. .long sys_exit
  1106. .long ppc_fork
  1107. .long sys_read
  1108. .long sys_write
  1109. .long sys_open /* 5 */
  1110. .long sys_close
  1111. .long sys_waitpid
  1112. .long sys_creat
  1113. .long sys_link
  1114. .long sys_unlink /* 10 */
  1115. .long sys_execve
  1116. .long sys_chdir
  1117. .long sys_time
  1118. .long sys_mknod
  1119. .long sys_chmod /* 15 */
  1120. .long sys_lchown
  1121. .long sys_ni_syscall /* old break syscall holder */
  1122. .long sys_stat
  1123. .long sys_lseek
  1124. .long sys_getpid /* 20 */
  1125. .long sys_mount
  1126. .long sys_oldumount
  1127. .long sys_setuid
  1128. .long sys_getuid
  1129. .long sys_stime /* 25 */
  1130. .long sys_ptrace
  1131. .long sys_alarm
  1132. .long sys_fstat
  1133. .long sys_pause
  1134. .long sys_utime /* 30 */
  1135. .long sys_ni_syscall /* old stty syscall holder */
  1136. .long sys_ni_syscall /* old gtty syscall holder */
  1137. .long sys_access
  1138. .long sys_nice
  1139. .long sys_ni_syscall /* 35 */ /* old ftime syscall holder */
  1140. .long sys_sync
  1141. .long sys_kill
  1142. .long sys_rename
  1143. .long sys_mkdir
  1144. .long sys_rmdir /* 40 */
  1145. .long sys_dup
  1146. .long sys_pipe
  1147. .long sys_times
  1148. .long sys_ni_syscall /* old prof syscall holder */
  1149. .long sys_brk /* 45 */
  1150. .long sys_setgid
  1151. .long sys_getgid
  1152. .long sys_signal
  1153. .long sys_geteuid
  1154. .long sys_getegid /* 50 */
  1155. .long sys_acct
  1156. .long sys_umount /* recycled never used phys() */
  1157. .long sys_ni_syscall /* old lock syscall holder */
  1158. .long sys_ioctl
  1159. .long sys_fcntl /* 55 */
  1160. .long sys_ni_syscall /* old mpx syscall holder */
  1161. .long sys_setpgid
  1162. .long sys_ni_syscall /* old ulimit syscall holder */
  1163. .long sys_olduname
  1164. .long sys_umask /* 60 */
  1165. .long sys_chroot
  1166. .long sys_ustat
  1167. .long sys_dup2
  1168. .long sys_getppid
  1169. .long sys_getpgrp /* 65 */
  1170. .long sys_setsid
  1171. .long sys_sigaction
  1172. .long sys_sgetmask
  1173. .long sys_ssetmask
  1174. .long sys_setreuid /* 70 */
  1175. .long sys_setregid
  1176. .long ppc_sigsuspend
  1177. .long sys_sigpending
  1178. .long sys_sethostname
  1179. .long sys_setrlimit /* 75 */
  1180. .long sys_old_getrlimit
  1181. .long sys_getrusage
  1182. .long sys_gettimeofday
  1183. .long sys_settimeofday
  1184. .long sys_getgroups /* 80 */
  1185. .long sys_setgroups
  1186. .long ppc_select
  1187. .long sys_symlink
  1188. .long sys_lstat
  1189. .long sys_readlink /* 85 */
  1190. .long sys_uselib
  1191. .long sys_swapon
  1192. .long sys_reboot
  1193. .long old_readdir
  1194. .long sys_mmap /* 90 */
  1195. .long sys_munmap
  1196. .long sys_truncate
  1197. .long sys_ftruncate
  1198. .long sys_fchmod
  1199. .long sys_fchown /* 95 */
  1200. .long sys_getpriority
  1201. .long sys_setpriority
  1202. .long sys_ni_syscall /* old profil syscall holder */
  1203. .long sys_statfs
  1204. .long sys_fstatfs /* 100 */
  1205. .long sys_ni_syscall
  1206. .long sys_socketcall
  1207. .long sys_syslog
  1208. .long sys_setitimer
  1209. .long sys_getitimer /* 105 */
  1210. .long sys_newstat
  1211. .long sys_newlstat
  1212. .long sys_newfstat
  1213. .long sys_uname
  1214. .long sys_ni_syscall /* 110 */
  1215. .long sys_vhangup
  1216. .long sys_ni_syscall /* old 'idle' syscall */
  1217. .long sys_ni_syscall
  1218. .long sys_wait4
  1219. .long sys_swapoff /* 115 */
  1220. .long sys_sysinfo
  1221. .long sys_ipc
  1222. .long sys_fsync
  1223. .long sys_sigreturn
  1224. .long ppc_clone /* 120 */
  1225. .long sys_setdomainname
  1226. .long sys_newuname
  1227. .long sys_ni_syscall
  1228. .long sys_adjtimex
  1229. .long sys_mprotect /* 125 */
  1230. .long sys_sigprocmask
  1231. .long sys_ni_syscall /* old sys_create_module */
  1232. .long sys_init_module
  1233. .long sys_delete_module
  1234. .long sys_ni_syscall /* old sys_get_kernel_syms */ /* 130 */
  1235. .long sys_quotactl
  1236. .long sys_getpgid
  1237. .long sys_fchdir
  1238. .long sys_bdflush
  1239. .long sys_sysfs /* 135 */
  1240. .long sys_personality
  1241. .long sys_ni_syscall /* for afs_syscall */
  1242. .long sys_setfsuid
  1243. .long sys_setfsgid
  1244. .long sys_llseek /* 140 */
  1245. .long sys_getdents
  1246. .long ppc_select
  1247. .long sys_flock
  1248. .long sys_msync
  1249. .long sys_readv /* 145 */
  1250. .long sys_writev
  1251. .long sys_getsid
  1252. .long sys_fdatasync
  1253. .long sys_sysctl
  1254. .long sys_mlock /* 150 */
  1255. .long sys_munlock
  1256. .long sys_mlockall
  1257. .long sys_munlockall
  1258. .long sys_sched_setparam
  1259. .long sys_sched_getparam /* 155 */
  1260. .long sys_sched_setscheduler
  1261. .long sys_sched_getscheduler
  1262. .long sys_sched_yield
  1263. .long sys_sched_get_priority_max
  1264. .long sys_sched_get_priority_min /* 160 */
  1265. .long sys_sched_rr_get_interval
  1266. .long sys_nanosleep
  1267. .long sys_mremap
  1268. .long sys_setresuid
  1269. .long sys_getresuid /* 165 */
  1270. .long sys_ni_syscall /* old sys_query_module */
  1271. .long sys_poll
  1272. .long sys_nfsservctl
  1273. .long sys_setresgid
  1274. .long sys_getresgid /* 170 */
  1275. .long sys_prctl
  1276. .long sys_rt_sigreturn
  1277. .long sys_rt_sigaction
  1278. .long sys_rt_sigprocmask
  1279. .long sys_rt_sigpending /* 175 */
  1280. .long sys_rt_sigtimedwait
  1281. .long sys_rt_sigqueueinfo
  1282. .long ppc_rt_sigsuspend
  1283. .long sys_pread64
  1284. .long sys_pwrite64 /* 180 */
  1285. .long sys_chown
  1286. .long sys_getcwd
  1287. .long sys_capget
  1288. .long sys_capset
  1289. .long sys_sigaltstack /* 185 */
  1290. .long sys_sendfile
  1291. .long sys_ni_syscall /* streams1 */
  1292. .long sys_ni_syscall /* streams2 */
  1293. .long ppc_vfork
  1294. .long sys_getrlimit /* 190 */
  1295. .long sys_readahead
  1296. .long sys_mmap2
  1297. .long sys_truncate64
  1298. .long sys_ftruncate64
  1299. .long sys_stat64 /* 195 */
  1300. .long sys_lstat64
  1301. .long sys_fstat64
  1302. .long sys_pciconfig_read
  1303. .long sys_pciconfig_write
  1304. .long sys_pciconfig_iobase /* 200 */
  1305. .long sys_ni_syscall /* 201 - reserved - MacOnLinux - new */
  1306. .long sys_getdents64
  1307. .long sys_pivot_root
  1308. .long sys_fcntl64
  1309. .long sys_madvise /* 205 */
  1310. .long sys_mincore
  1311. .long sys_gettid
  1312. .long sys_tkill
  1313. .long sys_setxattr
  1314. .long sys_lsetxattr /* 210 */
  1315. .long sys_fsetxattr
  1316. .long sys_getxattr
  1317. .long sys_lgetxattr
  1318. .long sys_fgetxattr
  1319. .long sys_listxattr /* 215 */
  1320. .long sys_llistxattr
  1321. .long sys_flistxattr
  1322. .long sys_removexattr
  1323. .long sys_lremovexattr
  1324. .long sys_fremovexattr /* 220 */
  1325. .long sys_futex
  1326. .long sys_sched_setaffinity
  1327. .long sys_sched_getaffinity
  1328. .long sys_ni_syscall
  1329. .long sys_ni_syscall /* 225 - reserved for Tux */
  1330. .long sys_sendfile64
  1331. .long sys_io_setup
  1332. .long sys_io_destroy
  1333. .long sys_io_getevents
  1334. .long sys_io_submit /* 230 */
  1335. .long sys_io_cancel
  1336. .long sys_set_tid_address
  1337. .long sys_fadvise64
  1338. .long sys_exit_group
  1339. .long sys_lookup_dcookie /* 235 */
  1340. .long sys_epoll_create
  1341. .long sys_epoll_ctl
  1342. .long sys_epoll_wait
  1343. .long sys_remap_file_pages
  1344. .long sys_timer_create /* 240 */
  1345. .long sys_timer_settime
  1346. .long sys_timer_gettime
  1347. .long sys_timer_getoverrun
  1348. .long sys_timer_delete
  1349. .long sys_clock_settime /* 245 */
  1350. .long sys_clock_gettime
  1351. .long sys_clock_getres
  1352. .long sys_clock_nanosleep
  1353. .long ppc_swapcontext
  1354. .long sys_tgkill /* 250 */
  1355. .long sys_utimes
  1356. .long sys_statfs64
  1357. .long sys_fstatfs64
  1358. .long ppc_fadvise64_64
  1359. .long sys_ni_syscall /* 255 - rtas (used on ppc64) */
  1360. .long sys_debug_setcontext
  1361. .long sys_ni_syscall /* 257 reserved for vserver */
  1362. .long sys_ni_syscall /* 258 reserved for new sys_remap_file_pages */
  1363. .long sys_ni_syscall /* 259 reserved for new sys_mbind */
  1364. .long sys_ni_syscall /* 260 reserved for new sys_get_mempolicy */
  1365. .long sys_ni_syscall /* 261 reserved for new sys_set_mempolicy */
  1366. .long sys_mq_open
  1367. .long sys_mq_unlink
  1368. .long sys_mq_timedsend
  1369. .long sys_mq_timedreceive /* 265 */
  1370. .long sys_mq_notify
  1371. .long sys_mq_getsetattr
  1372. .long sys_kexec_load
  1373. .long sys_add_key
  1374. .long sys_request_key /* 270 */
  1375. .long sys_keyctl
  1376. .long sys_waitid
  1377. .long sys_ioprio_set
  1378. .long sys_ioprio_get
  1379. .long sys_inotify_init /* 275 */
  1380. .long sys_inotify_add_watch
  1381. .long sys_inotify_rm_watch