/*
 * This file contains miscellaneous low-level functions.
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
 * and Paul Mackerras.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <linux/sys.h>
#include <asm/unistd.h>
#include <asm/errno.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/cache.h>
#include <asm/cputable.h>
#include <asm/mmu.h>
#include <asm/ppc_asm.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>

#ifdef CONFIG_8xx
#define ISYNC_8xx isync
#else
#define ISYNC_8xx
#endif

	.text

	.align	5
_GLOBAL(__delay)
	cmpwi	0,r3,0
	mtctr	r3
	beqlr
1:	bdnz	1b
	blr

/*
 * Returns (address we're running at) - (address we were linked at)
 * for use before the text and data are mapped to KERNELBASE.
 */
_GLOBAL(reloc_offset)
	mflr	r0
	bl	1f
1:	mflr	r3
	lis	r4,1b@ha
	addi	r4,r4,1b@l
	subf	r3,r4,r3
	mtlr	r0
	blr
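
/*
 * How the reloc_* helpers find "where we are": "bl 1f" deposits the
 * run-time address of label 1 in the link register, while lis/addi
 * materialize the link-time address of the same label.  Their
 * difference is the relocation offset.  No memory is touched, so this
 * is safe before the MMU maps the kernel at KERNELBASE.
 */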
  51. /*
  52. * add_reloc_offset(x) returns x + reloc_offset().
  53. */
  54. _GLOBAL(add_reloc_offset)
  55. mflr r0
  56. bl 1f
  57. 1: mflr r5
  58. lis r4,1b@ha
  59. addi r4,r4,1b@l
  60. subf r5,r4,r5
  61. add r3,r3,r5
  62. mtlr r0
  63. blr
  64. /*
  65. * sub_reloc_offset(x) returns x - reloc_offset().
  66. */
  67. _GLOBAL(sub_reloc_offset)
  68. mflr r0
  69. bl 1f
  70. 1: mflr r5
  71. lis r4,1b@ha
  72. addi r4,r4,1b@l
  73. subf r5,r4,r5
  74. subf r3,r5,r3
  75. mtlr r0
  76. blr
  77. /*
  78. * reloc_got2 runs through the .got2 section adding an offset
  79. * to each entry.
  80. */
  81. _GLOBAL(reloc_got2)
  82. mflr r11
  83. lis r7,__got2_start@ha
  84. addi r7,r7,__got2_start@l
  85. lis r8,__got2_end@ha
  86. addi r8,r8,__got2_end@l
  87. subf r8,r7,r8
  88. srwi. r8,r8,2
  89. beqlr
  90. mtctr r8
  91. bl 1f
  92. 1: mflr r0
  93. lis r4,1b@ha
  94. addi r4,r4,1b@l
  95. subf r0,r4,r0
  96. add r7,r0,r7
  97. 2: lwz r0,0(r7)
  98. add r0,r0,r3
  99. stw r0,0(r7)
  100. addi r7,r7,4
  101. bdnz 2b
  102. mtlr r11
  103. blr
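
/*
 * reloc_got2 in rough C (illustrative only; the real code must itself
 * run position-independent, hence the assembly):
 *
 *	char *p = (char *)__got2_start + reloc_offset();
 *	long n  = (__got2_end - __got2_start) / 4;	// one word per entry
 *	while (n--) {
 *		*(long *)p += offset;			// "offset" is the r3 argument
 *		p += 4;
 *	}
 */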

/*
 * call_setup_cpu - call the setup_cpu function for this cpu
 * r3 = data offset, r24 = cpu number
 *
 * Setup function is called with:
 *   r3 = data offset
 *   r4 = ptr to CPU spec (relocated)
 */
_GLOBAL(call_setup_cpu)
	addis	r4,r3,cur_cpu_spec@ha
	addi	r4,r4,cur_cpu_spec@l
	lwz	r4,0(r4)
	add	r4,r4,r3
	lwz	r5,CPU_SPEC_SETUP(r4)
	cmpi	0,r5,0
	add	r5,r5,r3
	beqlr
	mtctr	r5
	bctr

/*
 * Clear the MSR bits that are set in nmask, then OR on the bits
 * in value_to_or:
 *	_nmask_and_or_msr(nmask, value_to_or)
 */
_GLOBAL(_nmask_and_or_msr)
	mfmsr	r0		/* Get current msr */
	andc	r0,r0,r3	/* And off the bits set in r3 (first parm) */
	or	r0,r0,r4	/* Or on the bits in r4 (second parm) */
	SYNC			/* Some chip revs have problems here... */
	mtmsr	r0		/* Update machine state */
	isync
	blr			/* Done */
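
/*
 * Example use (illustrative): _nmask_and_or_msr(MSR_EE, 0) clears
 * MSR_EE and so disables external interrupts; _nmask_and_or_msr(0,
 * MSR_EE) turns them back on.  The trailing isync makes the new MSR
 * context take effect before the caller's next instruction.
 */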

/*
 * Flush MMU TLB
 */
_GLOBAL(_tlbia)
#if defined(CONFIG_40x)
	sync			/* Flush to memory before changing mapping */
	tlbia
	isync			/* Flush shadow TLB */
#elif defined(CONFIG_44x)
	li	r3,0
	sync

	/* Load high watermark */
	lis	r4,tlb_44x_hwater@ha
	lwz	r5,tlb_44x_hwater@l(r4)

1:	tlbwe	r3,r3,PPC44x_TLB_PAGEID
	addi	r3,r3,1
	cmpw	0,r3,r5
	ble	1b

	isync
#elif defined(CONFIG_FSL_BOOKE)
	/* Invalidate all entries in TLB0 */
	li	r3, 0x04
	tlbivax	0,3
	/* Invalidate all entries in TLB1 */
	li	r3, 0x0c
	tlbivax	0,3
	/* Invalidate all entries in TLB2 */
	li	r3, 0x14
	tlbivax	0,3
	/* Invalidate all entries in TLB3 */
	li	r3, 0x1c
	tlbivax	0,3
	msync
#ifdef CONFIG_SMP
	tlbsync
#endif /* CONFIG_SMP */
#else /* !(CONFIG_40x || CONFIG_44x || CONFIG_FSL_BOOKE) */
#if defined(CONFIG_SMP)
	rlwinm	r8,r1,0,0,18
	lwz	r8,TI_CPU(r8)
	oris	r8,r8,10
	mfmsr	r10
	SYNC
	rlwinm	r0,r10,0,17,15	/* clear bit 16 (MSR_EE) */
	rlwinm	r0,r0,0,28,26	/* clear DR */
	mtmsr	r0
	SYNC_601
	isync
	lis	r9,mmu_hash_lock@h
	ori	r9,r9,mmu_hash_lock@l
	tophys(r9,r9)
10:	lwarx	r7,0,r9
	cmpwi	0,r7,0
	bne-	10b
	stwcx.	r8,0,r9
	bne-	10b
	sync
	tlbia
	sync
	TLBSYNC
	li	r0,0
	stw	r0,0(r9)	/* clear mmu_hash_lock */
	mtmsr	r10
	SYNC_601
	isync
#else /* CONFIG_SMP */
	sync
	tlbia
	sync
#endif /* CONFIG_SMP */
#endif /* ! defined(CONFIG_40x) */
	blr

/*
 * Flush MMU TLB for a particular address
 */
_GLOBAL(_tlbie)
#if defined(CONFIG_40x)
	tlbsx.	r3, 0, r3
	bne	10f
	sync
	/* There are only 64 TLB entries, so r3 < 64, which means bit 25 is clear.
	 * Since 25 is the V bit in the TLB_TAG, loading this value will invalidate
	 * the TLB entry. */
	tlbwe	r3, r3, TLB_TAG
	isync
10:
#elif defined(CONFIG_44x)
	mfspr	r4,SPRN_MMUCR
	mfspr	r5,SPRN_PID		/* Get PID */
	rlwimi	r4,r5,0,24,31		/* Set TID */

	/* We have to run the search with interrupts disabled, even critical
	 * and debug interrupts (in fact the only critical exceptions we have
	 * are debug and machine check).  Otherwise an interrupt which causes
	 * a TLB miss can clobber the MMUCR between the mtspr and the tlbsx. */
	mfmsr	r5
	lis	r6,(MSR_EE|MSR_CE|MSR_ME|MSR_DE)@ha
	addi	r6,r6,(MSR_EE|MSR_CE|MSR_ME|MSR_DE)@l
	andc	r6,r5,r6
	mtmsr	r6
	mtspr	SPRN_MMUCR,r4
	tlbsx.	r3, 0, r3
	mtmsr	r5
	bne	10f
	sync
	/* There are only 64 TLB entries, so r3 < 64, which means bit 22
	 * is clear.  Since 22 is the V bit in the TLB_PAGEID, loading this
	 * value will invalidate the TLB entry.
	 */
	tlbwe	r3, r3, PPC44x_TLB_PAGEID
	isync
10:
#elif defined(CONFIG_FSL_BOOKE)
	rlwinm	r4, r3, 0, 0, 19
	ori	r5, r4, 0x08	/* TLBSEL = 1 */
	ori	r6, r4, 0x10	/* TLBSEL = 2 */
	ori	r7, r4, 0x18	/* TLBSEL = 3 */
	tlbivax	0, r4
	tlbivax	0, r5
	tlbivax	0, r6
	tlbivax	0, r7
	msync
#if defined(CONFIG_SMP)
	tlbsync
#endif /* CONFIG_SMP */
#else /* !(CONFIG_40x || CONFIG_44x || CONFIG_FSL_BOOKE) */
#if defined(CONFIG_SMP)
	rlwinm	r8,r1,0,0,18
	lwz	r8,TI_CPU(r8)
	oris	r8,r8,11
	mfmsr	r10
	SYNC
	rlwinm	r0,r10,0,17,15	/* clear bit 16 (MSR_EE) */
	rlwinm	r0,r0,0,28,26	/* clear DR */
	mtmsr	r0
	SYNC_601
	isync
	lis	r9,mmu_hash_lock@h
	ori	r9,r9,mmu_hash_lock@l
	tophys(r9,r9)
10:	lwarx	r7,0,r9
	cmpwi	0,r7,0
	bne-	10b
	stwcx.	r8,0,r9
	bne-	10b
	eieio
	tlbie	r3
	sync
	TLBSYNC
	li	r0,0
	stw	r0,0(r9)	/* clear mmu_hash_lock */
	mtmsr	r10
	SYNC_601
	isync
#else /* CONFIG_SMP */
	tlbie	r3
	sync
#endif /* CONFIG_SMP */
#endif /* ! CONFIG_40x */
	blr

/*
 * Flush instruction cache.
 * This is a no-op on the 601.
 */
_GLOBAL(flush_instruction_cache)
#if defined(CONFIG_8xx)
	isync
	lis	r5, IDC_INVALL@h
	mtspr	SPRN_IC_CST, r5
#elif defined(CONFIG_4xx)
#ifdef CONFIG_403GCX
	li	r3, 512
	mtctr	r3
	lis	r4, KERNELBASE@h
1:	iccci	0, r4
	addi	r4, r4, 16
	bdnz	1b
#else
	lis	r3, KERNELBASE@h
	iccci	0,r3
#endif
#elif defined(CONFIG_FSL_BOOKE)
BEGIN_FTR_SECTION
	mfspr	r3,SPRN_L1CSR0
	ori	r3,r3,L1CSR0_CFI|L1CSR0_CLFC
	/* msync; isync recommended here */
	mtspr	SPRN_L1CSR0,r3
	isync
	blr
END_FTR_SECTION_IFSET(CPU_FTR_UNIFIED_ID_CACHE)
	mfspr	r3,SPRN_L1CSR1
	ori	r3,r3,L1CSR1_ICFI|L1CSR1_ICLFR
	mtspr	SPRN_L1CSR1,r3
#else
	mfspr	r3,SPRN_PVR
	rlwinm	r3,r3,16,16,31
	cmpwi	0,r3,1
	beqlr			/* for 601, do nothing */
	/* 603/604 processor - use invalidate-all bit in HID0 */
	mfspr	r3,SPRN_HID0
	ori	r3,r3,HID0_ICFI
	mtspr	SPRN_HID0,r3
#endif /* CONFIG_8xx/4xx */
	isync
	blr

/*
 * Write any modified data cache blocks out to memory
 * and invalidate the corresponding instruction cache blocks.
 * This is a no-op on the 601.
 *
 * __flush_icache_range(unsigned long start, unsigned long stop)
 */
_GLOBAL(__flush_icache_range)
BEGIN_FTR_SECTION
	blr				/* for 601, do nothing */
END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
	li	r5,L1_CACHE_BYTES-1
	andc	r3,r3,r5
	subf	r4,r3,r4
	add	r4,r4,r5
	srwi.	r4,r4,L1_CACHE_SHIFT
	beqlr
	mtctr	r4
	mr	r6,r3
1:	dcbst	0,r3
	addi	r3,r3,L1_CACHE_BYTES
	bdnz	1b
	sync				/* wait for dcbst's to get to ram */
	mtctr	r4
2:	icbi	0,r6
	addi	r6,r6,L1_CACHE_BYTES
	bdnz	2b
	sync				/* additional sync needed on g4 */
	isync
	blr
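
/*
 * The same routine in rough C, to make the line arithmetic explicit
 * (illustrative only; start/stop are the r3/r4 arguments):
 *
 *	start &= ~(L1_CACHE_BYTES - 1);		// round down to a line
 *	n = (stop - start + L1_CACHE_BYTES - 1) >> L1_CACHE_SHIFT;
 *	for (i = 0; i < n; i++)
 *		dcbst(start + i * L1_CACHE_BYTES);	// push dirty data out
 *	sync();					// wait until it reaches memory
 *	for (i = 0; i < n; i++)
 *		icbi(start + i * L1_CACHE_BYTES);	// invalidate i-cache lines
 *	sync(); isync();			// discard prefetched instructions
 */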

/*
 * Write any modified data cache blocks out to memory.
 * Does not invalidate the corresponding cache lines (especially for
 * any corresponding instruction cache).
 *
 * clean_dcache_range(unsigned long start, unsigned long stop)
 */
_GLOBAL(clean_dcache_range)
	li	r5,L1_CACHE_BYTES-1
	andc	r3,r3,r5
	subf	r4,r3,r4
	add	r4,r4,r5
	srwi.	r4,r4,L1_CACHE_SHIFT
	beqlr
	mtctr	r4

1:	dcbst	0,r3
	addi	r3,r3,L1_CACHE_BYTES
	bdnz	1b
	sync				/* wait for dcbst's to get to ram */
	blr

/*
 * Write any modified data cache blocks out to memory and invalidate them.
 * Does not invalidate the corresponding instruction cache blocks.
 *
 * flush_dcache_range(unsigned long start, unsigned long stop)
 */
_GLOBAL(flush_dcache_range)
	li	r5,L1_CACHE_BYTES-1
	andc	r3,r3,r5
	subf	r4,r3,r4
	add	r4,r4,r5
	srwi.	r4,r4,L1_CACHE_SHIFT
	beqlr
	mtctr	r4

1:	dcbf	0,r3
	addi	r3,r3,L1_CACHE_BYTES
	bdnz	1b
	sync				/* wait for dcbf's to get to ram */
	blr

/*
 * Like above, but invalidate the D-cache.  This is used by the 8xx
 * to invalidate the cache so the PPC core doesn't get stale data
 * from the CPM (no cache snooping here :-).
 *
 * invalidate_dcache_range(unsigned long start, unsigned long stop)
 */
_GLOBAL(invalidate_dcache_range)
	li	r5,L1_CACHE_BYTES-1
	andc	r3,r3,r5
	subf	r4,r3,r4
	add	r4,r4,r5
	srwi.	r4,r4,L1_CACHE_SHIFT
	beqlr
	mtctr	r4

1:	dcbi	0,r3
	addi	r3,r3,L1_CACHE_BYTES
	bdnz	1b
	sync				/* wait for dcbi's to get to ram */
	blr
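
/*
 * Summary of the three range primitives above (standard PowerPC
 * cache-op semantics):
 *   clean_dcache_range      - dcbst: write dirty lines back, keep them valid
 *   flush_dcache_range      - dcbf:  write dirty lines back, then invalidate
 *   invalidate_dcache_range - dcbi:  invalidate without writing back; any
 *                                    modified data in the range is lost
 */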

#ifdef CONFIG_NOT_COHERENT_CACHE
/*
 * 40x cores have 8K or 16K dcache and 32 byte line size.
 * 44x has a 32K dcache and 32 byte line size.
 * 8xx has 1, 2, 4, 8K variants.
 * For now, cover the worst case of the 44x.
 * Must be called with external interrupts disabled.
 */
#define CACHE_NWAYS	64
#define CACHE_NLINES	16

_GLOBAL(flush_dcache_all)
	li	r4, (2 * CACHE_NWAYS * CACHE_NLINES)
	mtctr	r4
	lis	r5, KERNELBASE@h
1:	lwz	r3, 0(r5)		/* Load one word from every line */
	addi	r5, r5, L1_CACHE_BYTES
	bdnz	1b
	blr
#endif /* CONFIG_NOT_COHERENT_CACHE */
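
/*
 * Why the factor of two: 2 * CACHE_NWAYS * CACHE_NLINES = 2048 lines,
 * i.e. 64KB of sequential loads at 32 bytes per line - twice the 32K
 * 44x dcache - so every existing line is displaced (and written back
 * if dirty) regardless of the replacement policy.
 */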

/*
 * Flush a particular page from the data cache to RAM.
 * Note: this is necessary because the instruction cache does *not*
 * snoop from the data cache.
 * This is a no-op on the 601 which has a unified cache.
 *
 *	void __flush_dcache_icache(void *page)
 */
_GLOBAL(__flush_dcache_icache)
BEGIN_FTR_SECTION
	blr				/* for 601, do nothing */
END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
	rlwinm	r3,r3,0,0,19		/* Get page base address */
	li	r4,4096/L1_CACHE_BYTES	/* Number of lines in a page */
	mtctr	r4
	mr	r6,r3
0:	dcbst	0,r3			/* Write line to ram */
	addi	r3,r3,L1_CACHE_BYTES
	bdnz	0b
	sync
	mtctr	r4
1:	icbi	0,r6
	addi	r6,r6,L1_CACHE_BYTES
	bdnz	1b
	sync
	isync
	blr

/*
 * Flush a particular page from the data cache to RAM, identified
 * by its physical address.  We turn off the MMU so we can just use
 * the physical address (this may be a highmem page without a kernel
 * mapping).
 *
 *	void __flush_dcache_icache_phys(unsigned long physaddr)
 */
_GLOBAL(__flush_dcache_icache_phys)
BEGIN_FTR_SECTION
	blr				/* for 601, do nothing */
END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
	mfmsr	r10
	rlwinm	r0,r10,0,28,26		/* clear DR */
	mtmsr	r0
	isync
	rlwinm	r3,r3,0,0,19		/* Get page base address */
	li	r4,4096/L1_CACHE_BYTES	/* Number of lines in a page */
	mtctr	r4
	mr	r6,r3
0:	dcbst	0,r3			/* Write line to ram */
	addi	r3,r3,L1_CACHE_BYTES
	bdnz	0b
	sync
	mtctr	r4
1:	icbi	0,r6
	addi	r6,r6,L1_CACHE_BYTES
	bdnz	1b
	sync
	mtmsr	r10			/* restore DR */
	isync
	blr

/*
 * Clear pages using the dcbz instruction, which doesn't cause any
 * memory traffic (except to write out any cache lines which get
 * displaced).  This only works on cacheable memory.
 *
 * void clear_pages(void *page, int order);
 */
_GLOBAL(clear_pages)
	li	r0,4096/L1_CACHE_BYTES
	slw	r0,r0,r4
	mtctr	r0
#ifdef CONFIG_8xx
	li	r4, 0
1:	stw	r4, 0(r3)
	stw	r4, 4(r3)
	stw	r4, 8(r3)
	stw	r4, 12(r3)
#else
1:	dcbz	0,r3
#endif
	addi	r3,r3,L1_CACHE_BYTES
	bdnz	1b
	blr
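
/*
 * clear_pages(p, order) zeroes 2^order contiguous pages starting at p;
 * the count register is preloaded with (4096 / L1_CACHE_BYTES) << order
 * line iterations.  (On these PPC32 kernels, clear_page(p) is typically
 * just clear_pages(p, 0) - see the clear_page definition in asm/page.h.)
 */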

/*
 * Copy a whole page.  We use the dcbz instruction on the destination
 * to reduce memory traffic (it eliminates the unnecessary reads of
 * the destination into cache).  This requires that the destination
 * is cacheable.
 */
#define COPY_16_BYTES		\
	lwz	r6,4(r4);	\
	lwz	r7,8(r4);	\
	lwz	r8,12(r4);	\
	lwzu	r9,16(r4);	\
	stw	r6,4(r3);	\
	stw	r7,8(r3);	\
	stw	r8,12(r3);	\
	stwu	r9,16(r3)

_GLOBAL(copy_page)
	addi	r3,r3,-4
	addi	r4,r4,-4

#ifdef CONFIG_8xx
	/* don't use prefetch on 8xx */
	li	r0,4096/L1_CACHE_BYTES
	mtctr	r0
1:	COPY_16_BYTES
	bdnz	1b
	blr

#else	/* not 8xx, we can prefetch */
	li	r5,4

#if MAX_COPY_PREFETCH > 1
	li	r0,MAX_COPY_PREFETCH
	li	r11,4
	mtctr	r0
11:	dcbt	r11,r4
	addi	r11,r11,L1_CACHE_BYTES
	bdnz	11b
#else /* MAX_COPY_PREFETCH == 1 */
	dcbt	r5,r4
	li	r11,L1_CACHE_BYTES+4
#endif /* MAX_COPY_PREFETCH */
	li	r0,4096/L1_CACHE_BYTES - MAX_COPY_PREFETCH
	crclr	4*cr0+eq
2:
	mtctr	r0
1:
	dcbt	r11,r4
	dcbz	r5,r3
	COPY_16_BYTES
#if L1_CACHE_BYTES >= 32
	COPY_16_BYTES
#if L1_CACHE_BYTES >= 64
	COPY_16_BYTES
	COPY_16_BYTES
#if L1_CACHE_BYTES >= 128
	COPY_16_BYTES
	COPY_16_BYTES
	COPY_16_BYTES
	COPY_16_BYTES
#endif
#endif
#endif
	bdnz	1b
	beqlr
	crnot	4*cr0+eq,4*cr0+eq
	li	r0,MAX_COPY_PREFETCH
	li	r11,4
	b	2b
#endif	/* CONFIG_8xx */
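
/*
 * Shape of the copy loop above: source and destination pointers are
 * pre-biased by -4 so lwzu/stwu can use constant offsets 4..16 and
 * auto-increment in one instruction.  Each iteration prefetches the
 * source line MAX_COPY_PREFETCH lines ahead (dcbt), establishes the
 * destination line without reading it from memory (dcbz), then copies
 * one full cache line 16 bytes at a time.  The cr0 eq flag tells the
 * main pass from the final MAX_COPY_PREFETCH lines, which are copied
 * with the prefetch offset reset so there is no look-ahead past the
 * end of the page.
 */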

/*
 * void atomic_clear_mask(atomic_t mask, atomic_t *addr)
 * void atomic_set_mask(atomic_t mask, atomic_t *addr)
 */
_GLOBAL(atomic_clear_mask)
10:	lwarx	r5,0,r4
	andc	r5,r5,r3
	PPC405_ERR77(0,r4)
	stwcx.	r5,0,r4
	bne-	10b
	blr
_GLOBAL(atomic_set_mask)
10:	lwarx	r5,0,r4
	or	r5,r5,r3
	PPC405_ERR77(0,r4)
	stwcx.	r5,0,r4
	bne-	10b
	blr
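
/*
 * Both routines are load-reserve/store-conditional loops: lwarx takes
 * a reservation on the word at *addr, the mask is applied in a
 * register, and stwcx. succeeds only if nothing else wrote the word in
 * between; on failure (cr0 eq clear) the bne- retries.  PPC405_ERR77
 * is an erratum workaround (typically a dcbt before the stwcx. on
 * affected 405 cores, and empty elsewhere).
 */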

/*
 * I/O string operations
 *
 * insb(port, buf, len)
 * outsb(port, buf, len)
 * insw(port, buf, len)
 * outsw(port, buf, len)
 * insl(port, buf, len)
 * outsl(port, buf, len)
 * insw_ns(port, buf, len)
 * outsw_ns(port, buf, len)
 * insl_ns(port, buf, len)
 * outsl_ns(port, buf, len)
 *
 * The *_ns versions don't do byte-swapping.
 */
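
/*
 * Each accessor below follows the same pattern: r3 = port address,
 * r4 = buffer (pre-decremented so the update forms lbzu/stbu etc. can
 * be used), ctr = count.  The eieio after every transfer keeps the I/O
 * accesses strictly ordered.  The numbered loads and stores are listed
 * in the __ex_table sections so that a fault on the port access
 * branches to the fixup blr instead of oopsing.
 */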

_GLOBAL(_insb)
	cmpwi	0,r5,0
	mtctr	r5
	subi	r4,r4,1
	blelr-
00:	lbz	r5,0(r3)
01:	eieio
02:	stbu	r5,1(r4)
	ISYNC_8xx
	.section .fixup,"ax"
03:	blr
	.text
	.section __ex_table, "a"
		.align	2
		.long	00b, 03b
		.long	01b, 03b
		.long	02b, 03b
	.text
	bdnz	00b
	blr

_GLOBAL(_outsb)
	cmpwi	0,r5,0
	mtctr	r5
	subi	r4,r4,1
	blelr-
00:	lbzu	r5,1(r4)
01:	stb	r5,0(r3)
02:	eieio
	ISYNC_8xx
	.section .fixup,"ax"
03:	blr
	.text
	.section __ex_table, "a"
		.align	2
		.long	00b, 03b
		.long	01b, 03b
		.long	02b, 03b
	.text
	bdnz	00b
	blr

_GLOBAL(_insw_ns)
	cmpwi	0,r5,0
	mtctr	r5
	subi	r4,r4,2
	blelr-
00:	lhz	r5,0(r3)
01:	eieio
02:	sthu	r5,2(r4)
	ISYNC_8xx
	.section .fixup,"ax"
03:	blr
	.text
	.section __ex_table, "a"
		.align	2
		.long	00b, 03b
		.long	01b, 03b
		.long	02b, 03b
	.text
	bdnz	00b
	blr

_GLOBAL(_outsw_ns)
	cmpwi	0,r5,0
	mtctr	r5
	subi	r4,r4,2
	blelr-
00:	lhzu	r5,2(r4)
01:	sth	r5,0(r3)
02:	eieio
	ISYNC_8xx
	.section .fixup,"ax"
03:	blr
	.text
	.section __ex_table, "a"
		.align	2
		.long	00b, 03b
		.long	01b, 03b
		.long	02b, 03b
	.text
	bdnz	00b
	blr

_GLOBAL(_insl_ns)
	cmpwi	0,r5,0
	mtctr	r5
	subi	r4,r4,4
	blelr-
00:	lwz	r5,0(r3)
01:	eieio
02:	stwu	r5,4(r4)
	ISYNC_8xx
	.section .fixup,"ax"
03:	blr
	.text
	.section __ex_table, "a"
		.align	2
		.long	00b, 03b
		.long	01b, 03b
		.long	02b, 03b
	.text
	bdnz	00b
	blr

_GLOBAL(_outsl_ns)
	cmpwi	0,r5,0
	mtctr	r5
	subi	r4,r4,4
	blelr-
00:	lwzu	r5,4(r4)
01:	stw	r5,0(r3)
02:	eieio
	ISYNC_8xx
	.section .fixup,"ax"
03:	blr
	.text
	.section __ex_table, "a"
		.align	2
		.long	00b, 03b
		.long	01b, 03b
		.long	02b, 03b
	.text
	bdnz	00b
	blr

/*
 * Extended precision shifts.
 *
 * Updated to be valid for shift counts from 0 to 63 inclusive.
 * -- Gabriel
 *
 * R3/R4 has 64 bit value
 * R5    has shift count
 * result in R3/R4
 *
 *  ashrdi3: arithmetic right shift (sign propagation)
 *  lshrdi3: logical right shift
 *  ashldi3: left shift
 */
_GLOBAL(__ashrdi3)
	subfic	r6,r5,32
	srw	r4,r4,r5	# LSW = count > 31 ? 0 : LSW >> count
	addi	r7,r5,32	# could be xori, or addi with -32
	slw	r6,r3,r6	# t1 = count > 31 ? 0 : MSW << (32-count)
	rlwinm	r8,r7,0,32	# t3 = (count < 32) ? 32 : 0
	sraw	r7,r3,r7	# t2 = MSW >> (count-32)
	or	r4,r4,r6	# LSW |= t1
	slw	r7,r7,r8	# t2 = (count < 32) ? 0 : t2
	sraw	r3,r3,r5	# MSW = MSW >> count
	or	r4,r4,r7	# LSW |= t2
	blr

_GLOBAL(__ashldi3)
	subfic	r6,r5,32
	slw	r3,r3,r5	# MSW = count > 31 ? 0 : MSW << count
	addi	r7,r5,32	# could be xori, or addi with -32
	srw	r6,r4,r6	# t1 = count > 31 ? 0 : LSW >> (32-count)
	slw	r7,r4,r7	# t2 = count < 32 ? 0 : LSW << (count-32)
	or	r3,r3,r6	# MSW |= t1
	slw	r4,r4,r5	# LSW = LSW << count
	or	r3,r3,r7	# MSW |= t2
	blr

_GLOBAL(__lshrdi3)
	subfic	r6,r5,32
	srw	r4,r4,r5	# LSW = count > 31 ? 0 : LSW >> count
	addi	r7,r5,32	# could be xori, or addi with -32
	slw	r6,r3,r6	# t1 = count > 31 ? 0 : MSW << (32-count)
	srw	r7,r3,r7	# t2 = count < 32 ? 0 : MSW >> (count-32)
	or	r4,r4,r6	# LSW |= t1
	srw	r3,r3,r5	# MSW = MSW >> count
	or	r4,r4,r7	# LSW |= t2
	blr
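
/*
 * Worked example for __lshrdi3 with count = 40 (value in R3:R4 as
 * MSW:LSW):
 *   srw  LSW,40      -> 0         (srw/slw give 0 for counts 32..63)
 *   slw  MSW,(32-40) -> 0         (32-40 wraps to an effective count >= 32)
 *   srw  MSW,(40-32) -> MSW >> 8  (this supplies the surviving bits)
 * so LSW = MSW >> 8 and MSW = MSW >> 40 = 0, the correct 64-bit result.
 * The same two-word decomposition underlies all three routines;
 * __ashrdi3 additionally needs the rlwinm/slw pair to kill t2 when
 * count < 32, because sraw with an effective count >= 32 replicates
 * the sign bit instead of producing 0.
 */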

_GLOBAL(abs)
	srawi	r4,r3,31
	xor	r3,r3,r4
	sub	r3,r3,r4
	blr

_GLOBAL(_get_SP)
	mr	r3,r1		/* Close enough */
	blr

/*
 * Create a kernel thread
 *   kernel_thread(fn, arg, flags)
 */
_GLOBAL(kernel_thread)
	stwu	r1,-16(r1)
	stw	r30,8(r1)
	stw	r31,12(r1)
	mr	r30,r3		/* function */
	mr	r31,r4		/* argument */
	ori	r3,r5,CLONE_VM	/* flags */
	oris	r3,r3,CLONE_UNTRACED>>16
	li	r4,0		/* new sp (unused) */
	li	r0,__NR_clone
	sc
	cmpwi	0,r3,0		/* parent or child? */
	bne	1f		/* return if parent */
	li	r0,0		/* make top-level stack frame */
	stwu	r0,-16(r1)
	mtlr	r30		/* fn addr in lr */
	mr	r3,r31		/* load arg and call fn */
	PPC440EP_ERR42
	blrl
	li	r0,__NR_exit	/* exit if function returns */
	li	r3,0
	sc
1:	lwz	r30,8(r1)
	lwz	r31,12(r1)
	addi	r1,r1,16
	blr
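
/*
 * C-level view of the above (prototype as declared elsewhere in the
 * kernel): int kernel_thread(int (*fn)(void *), void *arg,
 * unsigned long flags).  It issues clone(flags | CLONE_VM |
 * CLONE_UNTRACED) directly via sc; in the child (r3 == 0) it builds an
 * empty top-level stack frame, calls fn(arg), and exits with
 * sys_exit(0) if fn ever returns.  The parent gets the child's pid
 * back in r3.
 */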

_GLOBAL(kernel_execve)
	li	r0,__NR_execve
	sc
	bnslr
	neg	r3,r3
	blr

/*
 * This routine is just here to keep GCC happy - sigh...
 */
_GLOBAL(__main)
	blr