/*
 * This file contains miscellaneous low-level functions.
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
 * and Paul Mackerras.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <linux/sys.h>
#include <asm/unistd.h>
#include <asm/errno.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/cache.h>
#include <asm/cputable.h>
#include <asm/mmu.h>
#include <asm/ppc_asm.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>

#ifdef CONFIG_8xx
#define ISYNC_8xx isync
#else
#define ISYNC_8xx
#endif

	.text

	.align	5
_GLOBAL(__delay)
	cmpwi	0,r3,0
	mtctr	r3
	beqlr
1:	bdnz	1b
	blr

/*
 * Returns (address we're running at) - (address we were linked at)
 * for use before the text and data are mapped to KERNELBASE.
 */
_GLOBAL(reloc_offset)
	mflr	r0
	bl	1f
1:	mflr	r3
	lis	r4,1b@ha
	addi	r4,r4,1b@l
	subf	r3,r4,r3
	mtlr	r0
	blr

/*
 * add_reloc_offset(x) returns x + reloc_offset().
 */
_GLOBAL(add_reloc_offset)
	mflr	r0
	bl	1f
1:	mflr	r5
	lis	r4,1b@ha
	addi	r4,r4,1b@l
	subf	r5,r4,r5
	add	r3,r3,r5
	mtlr	r0
	blr

/*
 * sub_reloc_offset(x) returns x - reloc_offset().
 */
_GLOBAL(sub_reloc_offset)
	mflr	r0
	bl	1f
1:	mflr	r5
	lis	r4,1b@ha
	addi	r4,r4,1b@l
	subf	r5,r4,r5
	subf	r3,r5,r3
	mtlr	r0
	blr
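
/*
 * Usage sketch (illustrative, not in the original file): early boot C
 * code uses these to turn link-time addresses into addresses that work
 * before the kernel is mapped at KERNELBASE. The cpu_specs table name
 * is an assumption for the example:
 *
 *	unsigned long offset = reloc_offset();
 *	struct cpu_spec *s = (struct cpu_spec *)
 *		add_reloc_offset((unsigned long)&cpu_specs[0]);
 */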

/*
 * reloc_got2 runs through the .got2 section adding an offset
 * to each entry.
 */
_GLOBAL(reloc_got2)
	mflr	r11
	lis	r7,__got2_start@ha
	addi	r7,r7,__got2_start@l
	lis	r8,__got2_end@ha
	addi	r8,r8,__got2_end@l
	subf	r8,r7,r8
	srwi.	r8,r8,2
	beqlr
	mtctr	r8
	bl	1f
1:	mflr	r0
	lis	r4,1b@ha
	addi	r4,r4,1b@l
	subf	r0,r4,r0
	add	r7,r0,r7
2:	lwz	r0,0(r7)
	add	r0,r0,r3
	stw	r0,0(r7)
	addi	r7,r7,4
	bdnz	2b
	mtlr	r11
	blr
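
/*
 * In C terms, the loop above is roughly (a sketch; __got2_start and
 * __got2_end are the linker symbols used above, `offset` is the r3
 * argument):
 *
 *	unsigned long *p;
 *	for (p = (unsigned long *)__got2_start;
 *	     p < (unsigned long *)__got2_end; p++)
 *		*p += offset;
 */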

/*
 * call_setup_cpu - call the setup_cpu function for this cpu
 * r3 = data offset, r24 = cpu number
 *
 * Setup function is called with:
 *   r3 = data offset
 *   r4 = ptr to CPU spec (relocated)
 */
_GLOBAL(call_setup_cpu)
	addis	r4,r3,cur_cpu_spec@ha
	addi	r4,r4,cur_cpu_spec@l
	lwz	r4,0(r4)
	add	r4,r4,r3
	lwz	r5,CPU_SPEC_SETUP(r4)
	cmpi	0,r5,0
	add	r5,r5,r3
	beqlr
	mtctr	r5
	bctr
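
/*
 * Roughly equivalent C (a sketch; the struct and field names are
 * assumed from the CPU_SPEC_SETUP offset used above):
 *
 *	void call_setup_cpu(unsigned long offset)
 *	{
 *		struct cpu_spec *s;
 *		unsigned long fn;
 *
 *		s = *(struct cpu_spec **)((char *)&cur_cpu_spec + offset);
 *		s = (struct cpu_spec *)((char *)s + offset);
 *		fn = (unsigned long)s->cpu_setup;	// checked, then relocated
 *		if (fn)
 *			((void (*)(unsigned long, struct cpu_spec *))
 *			 (fn + offset))(offset, s);
 *	}
 */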

/*
 * complement mask on the msr then "or" some values on.
 *     _nmask_and_or_msr(nmask, value_to_or)
 */
_GLOBAL(_nmask_and_or_msr)
	mfmsr	r0		/* Get current msr */
	andc	r0,r0,r3	/* And off the bits set in r3 (first parm) */
	or	r0,r0,r4	/* Or on the bits in r4 (second parm) */
	SYNC			/* Some chip revs have problems here... */
	mtmsr	r0		/* Update machine state */
	isync
	blr			/* Done */
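
/*
 * Usage sketch (illustrative, not in the original file): since bits in
 * the first argument are cleared and bits in the second are set, C code
 * can toggle MSR bits like this:
 *
 *	_nmask_and_or_msr(MSR_EE, 0);	// disable external interrupts
 *	_nmask_and_or_msr(0, MSR_EE);	// enable them again
 */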

/*
 * Flush MMU TLB
 */
_GLOBAL(_tlbia)
#if defined(CONFIG_40x)
	sync			/* Flush to memory before changing mapping */
	tlbia
	isync			/* Flush shadow TLB */
#elif defined(CONFIG_44x)
	li	r3,0
	sync

	/* Load high watermark */
	lis	r4,tlb_44x_hwater@ha
	lwz	r5,tlb_44x_hwater@l(r4)

1:	tlbwe	r3,r3,PPC44x_TLB_PAGEID
	addi	r3,r3,1
	cmpw	0,r3,r5
	ble	1b

	isync
#elif defined(CONFIG_FSL_BOOKE)
	/* Invalidate all entries in TLB0 */
	li	r3, 0x04
	tlbivax	0,3
	/* Invalidate all entries in TLB1 */
	li	r3, 0x0c
	tlbivax	0,3
	/* Invalidate all entries in TLB2 */
	li	r3, 0x14
	tlbivax	0,3
	/* Invalidate all entries in TLB3 */
	li	r3, 0x1c
	tlbivax	0,3
	msync
#ifdef CONFIG_SMP
	tlbsync
#endif /* CONFIG_SMP */
#else /* !(CONFIG_40x || CONFIG_44x || CONFIG_FSL_BOOKE) */
#if defined(CONFIG_SMP)
	rlwinm	r8,r1,0,0,18		/* current thread_info */
	lwz	r8,TI_CPU(r8)
	oris	r8,r8,10		/* lock token: cpu # | 10 << 16 */
	mfmsr	r10
	SYNC
	rlwinm	r0,r10,0,17,15		/* clear bit 16 (MSR_EE) */
	rlwinm	r0,r0,0,28,26		/* clear DR */
	mtmsr	r0
	SYNC_601
	isync
	lis	r9,mmu_hash_lock@h
	ori	r9,r9,mmu_hash_lock@l
	tophys(r9,r9)
10:	lwarx	r7,0,r9			/* spin until mmu_hash_lock is free */
	cmpwi	0,r7,0
	bne-	10b
	stwcx.	r8,0,r9
	bne-	10b
	sync
	tlbia
	sync
	TLBSYNC
	li	r0,0
	stw	r0,0(r9)		/* clear mmu_hash_lock */
	mtmsr	r10
	SYNC_601
	isync
#else /* CONFIG_SMP */
	sync
	tlbia
	sync
#endif /* CONFIG_SMP */
#endif /* ! defined(CONFIG_40x) */
	blr

/*
 * Flush MMU TLB for a particular address
 */
_GLOBAL(_tlbie)
#if defined(CONFIG_40x)
	/* We run the search with interrupts disabled because we have to change
	 * the PID and I don't want to preempt when that happens.
	 */
	mfmsr	r5
	mfspr	r6,SPRN_PID
	wrteei	0
	mtspr	SPRN_PID,r4
	tlbsx.	r3, 0, r3
	mtspr	SPRN_PID,r6
	wrtee	r5
	bne	10f
	sync
	/* There are only 64 TLB entries, so r3 < 64, which means bit 25 is clear.
	 * Since 25 is the V bit in the TLB_TAG, loading this value will invalidate
	 * the TLB entry. */
	tlbwe	r3, r3, TLB_TAG
	isync
10:
#elif defined(CONFIG_44x)
	mfspr	r5,SPRN_MMUCR
	rlwimi	r5,r4,0,24,31		/* Set TID */

	/* We have to run the search with interrupts disabled, even critical
	 * and debug interrupts (in fact the only critical exceptions we have
	 * are debug and machine check). Otherwise an interrupt which causes
	 * a TLB miss can clobber the MMUCR between the mtspr and the tlbsx. */
	mfmsr	r4
	lis	r6,(MSR_EE|MSR_CE|MSR_ME|MSR_DE)@ha
	addi	r6,r6,(MSR_EE|MSR_CE|MSR_ME|MSR_DE)@l
	andc	r6,r4,r6
	mtmsr	r6
	mtspr	SPRN_MMUCR,r5
	tlbsx.	r3, 0, r3
	mtmsr	r4
	bne	10f
	sync
	/* There are only 64 TLB entries, so r3 < 64,
	 * which means bit 22 is clear.  Since 22 is
	 * the V bit in the TLB_PAGEID, loading this
	 * value will invalidate the TLB entry.
	 */
	tlbwe	r3, r3, PPC44x_TLB_PAGEID
	isync
10:
#elif defined(CONFIG_FSL_BOOKE)
	rlwinm	r4, r3, 0, 0, 19
	ori	r5, r4, 0x08	/* TLBSEL = 1 */
	ori	r6, r4, 0x10	/* TLBSEL = 2 */
	ori	r7, r4, 0x18	/* TLBSEL = 3 */
	tlbivax	0, r4
	tlbivax	0, r5
	tlbivax	0, r6
	tlbivax	0, r7
	msync
#if defined(CONFIG_SMP)
	tlbsync
#endif /* CONFIG_SMP */
#else /* !(CONFIG_40x || CONFIG_44x || CONFIG_FSL_BOOKE) */
#if defined(CONFIG_SMP)
	rlwinm	r8,r1,0,0,18		/* current thread_info */
	lwz	r8,TI_CPU(r8)
	oris	r8,r8,11		/* lock token: cpu # | 11 << 16 */
	mfmsr	r10
	SYNC
	rlwinm	r0,r10,0,17,15		/* clear bit 16 (MSR_EE) */
	rlwinm	r0,r0,0,28,26		/* clear DR */
	mtmsr	r0
	SYNC_601
	isync
	lis	r9,mmu_hash_lock@h
	ori	r9,r9,mmu_hash_lock@l
	tophys(r9,r9)
10:	lwarx	r7,0,r9			/* spin until mmu_hash_lock is free */
	cmpwi	0,r7,0
	bne-	10b
	stwcx.	r8,0,r9
	bne-	10b
	eieio
	tlbie	r3
	sync
	TLBSYNC
	li	r0,0
	stw	r0,0(r9)		/* clear mmu_hash_lock */
	mtmsr	r10
	SYNC_601
	isync
#else /* CONFIG_SMP */
	tlbie	r3
	sync
#endif /* CONFIG_SMP */
#endif /* ! CONFIG_40x */
	blr
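
/*
 * Usage sketch (illustrative): the mm layer reaches these through the
 * flush_tlb_* helpers. Prototypes below are assumptions for the
 * example; note that on 4xx/44x the PID to search is passed in r4 as a
 * second argument:
 *
 *	void _tlbia(void);		// invalidate the whole TLB
 *	void _tlbie(unsigned long va);	// invalidate one page translation
 */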

/*
 * Flush instruction cache.
 * This is a no-op on the 601.
 */
_GLOBAL(flush_instruction_cache)
#if defined(CONFIG_8xx)
	isync
	lis	r5, IDC_INVALL@h
	mtspr	SPRN_IC_CST, r5
#elif defined(CONFIG_4xx)
#ifdef CONFIG_403GCX
	li	r3, 512
	mtctr	r3
	lis	r4, KERNELBASE@h
1:	iccci	0, r4
	addi	r4, r4, 16
	bdnz	1b
#else
	lis	r3, KERNELBASE@h
	iccci	0,r3
#endif
#elif defined(CONFIG_FSL_BOOKE)
BEGIN_FTR_SECTION
	mfspr	r3,SPRN_L1CSR0
	ori	r3,r3,L1CSR0_CFI|L1CSR0_CLFC
	/* msync; isync recommended here */
	mtspr	SPRN_L1CSR0,r3
	isync
	blr
END_FTR_SECTION_IFSET(CPU_FTR_UNIFIED_ID_CACHE)
	mfspr	r3,SPRN_L1CSR1
	ori	r3,r3,L1CSR1_ICFI|L1CSR1_ICLFR
	mtspr	SPRN_L1CSR1,r3
#else
	mfspr	r3,SPRN_PVR
	rlwinm	r3,r3,16,16,31
	cmpwi	0,r3,1
	beqlr			/* for 601, do nothing */
	/* 603/604 processor - use invalidate-all bit in HID0 */
	mfspr	r3,SPRN_HID0
	ori	r3,r3,HID0_ICFI
	mtspr	SPRN_HID0,r3
#endif /* CONFIG_8xx/4xx */
	isync
	blr

/*
 * Write any modified data cache blocks out to memory
 * and invalidate the corresponding instruction cache blocks.
 * This is a no-op on the 601.
 *
 * __flush_icache_range(unsigned long start, unsigned long stop)
 */
_GLOBAL(__flush_icache_range)
BEGIN_FTR_SECTION
	blr				/* for 601, do nothing */
END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
	li	r5,L1_CACHE_BYTES-1
	andc	r3,r3,r5
	subf	r4,r3,r4
	add	r4,r4,r5
	srwi.	r4,r4,L1_CACHE_SHIFT
	beqlr
	mtctr	r4
	mr	r6,r3
1:	dcbst	0,r3
	addi	r3,r3,L1_CACHE_BYTES
	bdnz	1b
	sync				/* wait for dcbst's to get to ram */
	mtctr	r4
2:	icbi	0,r6
	addi	r6,r6,L1_CACHE_BYTES
	bdnz	2b
	sync				/* additional sync needed on g4 */
	isync
	blr
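
/*
 * Usage sketch (illustrative): anything that stores instructions (a
 * module loader, breakpoint insertion) must clean the D-cache and
 * invalidate the stale I-cache lines before executing them, normally
 * via the flush_icache_range() wrapper:
 *
 *	memcpy(dst, code, len);
 *	flush_icache_range((unsigned long)dst, (unsigned long)dst + len);
 */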

/*
 * Write any modified data cache blocks out to memory.
 * Does not invalidate the corresponding cache lines (especially for
 * any corresponding instruction cache).
 *
 * clean_dcache_range(unsigned long start, unsigned long stop)
 */
_GLOBAL(clean_dcache_range)
	li	r5,L1_CACHE_BYTES-1
	andc	r3,r3,r5
	subf	r4,r3,r4
	add	r4,r4,r5
	srwi.	r4,r4,L1_CACHE_SHIFT
	beqlr
	mtctr	r4
1:	dcbst	0,r3
	addi	r3,r3,L1_CACHE_BYTES
	bdnz	1b
	sync				/* wait for dcbst's to get to ram */
	blr

/*
 * Write any modified data cache blocks out to memory and invalidate them.
 * Does not invalidate the corresponding instruction cache blocks.
 *
 * flush_dcache_range(unsigned long start, unsigned long stop)
 */
_GLOBAL(flush_dcache_range)
	li	r5,L1_CACHE_BYTES-1
	andc	r3,r3,r5
	subf	r4,r3,r4
	add	r4,r4,r5
	srwi.	r4,r4,L1_CACHE_SHIFT
	beqlr
	mtctr	r4
1:	dcbf	0,r3
	addi	r3,r3,L1_CACHE_BYTES
	bdnz	1b
	sync				/* wait for dcbf's to get to ram */
	blr

/*
 * Like above, but invalidate the D-cache. This is used by the 8xx
 * to invalidate the cache so the PPC core doesn't get stale data
 * from the CPM (no cache snooping here :-).
 *
 * invalidate_dcache_range(unsigned long start, unsigned long stop)
 */
_GLOBAL(invalidate_dcache_range)
	li	r5,L1_CACHE_BYTES-1
	andc	r3,r3,r5
	subf	r4,r3,r4
	add	r4,r4,r5
	srwi.	r4,r4,L1_CACHE_SHIFT
	beqlr
	mtctr	r4
1:	dcbi	0,r3
	addi	r3,r3,L1_CACHE_BYTES
	bdnz	1b
	sync				/* wait for dcbi's to get to ram */
	blr
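
/*
 * Usage sketch (illustrative): on non-snooping parts these three are
 * the building blocks for DMA to/from cacheable memory. `start`/`stop`
 * bound the buffer; the pairing below is the usual convention:
 *
 *	clean_dcache_range(start, stop);	// before the device reads the buffer
 *	invalidate_dcache_range(start, stop);	// before the CPU reads DMA'd data
 *	flush_dcache_range(start, stop);	// write back and leave no stale copy
 */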

#ifdef CONFIG_NOT_COHERENT_CACHE
/*
 * 40x cores have 8K or 16K dcache and 32 byte line size.
 * 44x has a 32K dcache and 32 byte line size.
 * 8xx has 1, 2, 4, 8K variants.
 * For now, cover the worst case of the 44x.
 * Must be called with external interrupts disabled.
 */
#define CACHE_NWAYS	64
#define CACHE_NLINES	16

_GLOBAL(flush_dcache_all)
	li	r4, (2 * CACHE_NWAYS * CACHE_NLINES)
	mtctr	r4
	lis	r5, KERNELBASE@h
1:	lwz	r3, 0(r5)		/* Load one word from every line */
	addi	r5, r5, L1_CACHE_BYTES
	bdnz	1b
	blr
#endif /* CONFIG_NOT_COHERENT_CACHE */

/*
 * Flush a particular page from the data cache to RAM.
 * Note: this is necessary because the instruction cache does *not*
 * snoop from the data cache.
 * This is a no-op on the 601 which has a unified cache.
 *
 *	void __flush_dcache_icache(void *page)
 */
_GLOBAL(__flush_dcache_icache)
BEGIN_FTR_SECTION
	blr				/* for 601, do nothing */
END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
	rlwinm	r3,r3,0,0,19		/* Get page base address */
	li	r4,4096/L1_CACHE_BYTES	/* Number of lines in a page */
	mtctr	r4
	mr	r6,r3
0:	dcbst	0,r3			/* Write line to ram */
	addi	r3,r3,L1_CACHE_BYTES
	bdnz	0b
	sync
#ifndef CONFIG_44x
	/* We don't flush the icache on 44x. Those have a virtual icache
	 * and we don't have access to the virtual address here (it's
	 * not the page vaddr but where it's mapped in user space). The
	 * flushing of the icache on these is handled elsewhere, when
	 * a change in the address space occurs, before returning to
	 * user space
	 */
	mtctr	r4
1:	icbi	0,r6
	addi	r6,r6,L1_CACHE_BYTES
	bdnz	1b
	sync
	isync
#endif /* CONFIG_44x */
	blr

/*
 * Flush a particular page from the data cache to RAM, identified
 * by its physical address. We turn off the MMU so we can just use
 * the physical address (this may be a highmem page without a kernel
 * mapping).
 *
 *	void __flush_dcache_icache_phys(unsigned long physaddr)
 */
_GLOBAL(__flush_dcache_icache_phys)
BEGIN_FTR_SECTION
	blr				/* for 601, do nothing */
END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
	mfmsr	r10
	rlwinm	r0,r10,0,28,26		/* clear DR */
	mtmsr	r0
	isync
	rlwinm	r3,r3,0,0,19		/* Get page base address */
	li	r4,4096/L1_CACHE_BYTES	/* Number of lines in a page */
	mtctr	r4
	mr	r6,r3
0:	dcbst	0,r3			/* Write line to ram */
	addi	r3,r3,L1_CACHE_BYTES
	bdnz	0b
	sync
	mtctr	r4
1:	icbi	0,r6
	addi	r6,r6,L1_CACHE_BYTES
	bdnz	1b
	sync
	mtmsr	r10			/* restore DR */
	isync
	blr
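
/*
 * Usage sketch (illustrative): callers pick the virtual or physical
 * form depending on whether the page has a kernel mapping; a sketch
 * assuming the standard page helpers:
 *
 *	if (!PageHighMem(page))
 *		__flush_dcache_icache(page_address(page));
 *	else
 *		__flush_dcache_icache_phys(page_to_pfn(page) << PAGE_SHIFT);
 */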

/*
 * Clear pages using the dcbz instruction, which doesn't cause any
 * memory traffic (except to write out any cache lines which get
 * displaced). This only works on cacheable memory.
 *
 * void clear_pages(void *page, int order);
 */
_GLOBAL(clear_pages)
	li	r0,4096/L1_CACHE_BYTES
	slw	r0,r0,r4
	mtctr	r0
#ifdef CONFIG_8xx
	li	r4, 0
1:	stw	r4, 0(r3)
	stw	r4, 4(r3)
	stw	r4, 8(r3)
	stw	r4, 12(r3)
#else
1:	dcbz	0,r3
#endif
	addi	r3,r3,L1_CACHE_BYTES
	bdnz	1b
	blr
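
/*
 * Usage sketch (illustrative): order 0 clears a single 4K page, order 1
 * two contiguous pages, and so on; clear_page() is conventionally a
 * thin wrapper over the order-0 case:
 *
 *	clear_pages(page_address(page), 0);
 */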

/*
 * Copy a whole page. We use the dcbz instruction on the destination
 * to reduce memory traffic (it eliminates the unnecessary reads of
 * the destination into cache). This requires that the destination
 * is cacheable.
 */
#define COPY_16_BYTES		\
	lwz	r6,4(r4);	\
	lwz	r7,8(r4);	\
	lwz	r8,12(r4);	\
	lwzu	r9,16(r4);	\
	stw	r6,4(r3);	\
	stw	r7,8(r3);	\
	stw	r8,12(r3);	\
	stwu	r9,16(r3)

_GLOBAL(copy_page)
	addi	r3,r3,-4
	addi	r4,r4,-4

#ifdef CONFIG_8xx
	/* don't use prefetch on 8xx */
	li	r0,4096/L1_CACHE_BYTES
	mtctr	r0
1:	COPY_16_BYTES
	bdnz	1b
	blr

#else /* not 8xx, we can prefetch */
	li	r5,4

#if MAX_COPY_PREFETCH > 1
	li	r0,MAX_COPY_PREFETCH
	li	r11,4
	mtctr	r0
11:	dcbt	r11,r4
	addi	r11,r11,L1_CACHE_BYTES
	bdnz	11b
#else /* MAX_COPY_PREFETCH == 1 */
	dcbt	r5,r4
	li	r11,L1_CACHE_BYTES+4
#endif /* MAX_COPY_PREFETCH */
	li	r0,4096/L1_CACHE_BYTES - MAX_COPY_PREFETCH
	crclr	4*cr0+eq
2:
	mtctr	r0
1:
	dcbt	r11,r4
	dcbz	r5,r3
	COPY_16_BYTES
#if L1_CACHE_BYTES >= 32
	COPY_16_BYTES
#if L1_CACHE_BYTES >= 64
	COPY_16_BYTES
	COPY_16_BYTES
#if L1_CACHE_BYTES >= 128
	COPY_16_BYTES
	COPY_16_BYTES
	COPY_16_BYTES
	COPY_16_BYTES
#endif
#endif
#endif
	bdnz	1b
	beqlr
	crnot	4*cr0+eq,4*cr0+eq
	li	r0,MAX_COPY_PREFETCH
	li	r11,4
	b	2b
#endif /* CONFIG_8xx */
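
/*
 * Usage sketch (illustrative): both arguments must be page-aligned,
 * cacheable kernel virtual addresses, e.g.:
 *
 *	copy_page(page_address(dst_page), page_address(src_page));
 */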

/*
 * void atomic_clear_mask(atomic_t mask, atomic_t *addr)
 * void atomic_set_mask(atomic_t mask, atomic_t *addr)
 */
_GLOBAL(atomic_clear_mask)
10:	lwarx	r5,0,r4
	andc	r5,r5,r3
	PPC405_ERR77(0,r4)
	stwcx.	r5,0,r4
	bne-	10b
	blr

_GLOBAL(atomic_set_mask)
10:	lwarx	r5,0,r4
	or	r5,r5,r3
	PPC405_ERR77(0,r4)
	stwcx.	r5,0,r4
	bne-	10b
	blr
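
/*
 * Usage sketch (illustrative): an atomic read-modify-write of a whole
 * word built on lwarx/stwcx.; the flag word and mask below are
 * hypothetical:
 *
 *	atomic_clear_mask(PENDING_MASK, &flags);	// flags &= ~PENDING_MASK
 *	atomic_set_mask(PENDING_MASK, &flags);		// flags |= PENDING_MASK
 */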

/*
 * I/O string operations
 *
 * insb(port, buf, len)
 * outsb(port, buf, len)
 * insw(port, buf, len)
 * outsw(port, buf, len)
 * insl(port, buf, len)
 * outsl(port, buf, len)
 * insw_ns(port, buf, len)
 * outsw_ns(port, buf, len)
 * insl_ns(port, buf, len)
 * outsl_ns(port, buf, len)
 *
 * The *_ns versions don't do byte-swapping.
 */
_GLOBAL(_insb)
	cmpwi	0,r5,0
	mtctr	r5
	subi	r4,r4,1
	blelr-
00:	lbz	r5,0(r3)
01:	eieio
02:	stbu	r5,1(r4)
	ISYNC_8xx
	.section .fixup,"ax"
03:	blr
	.text
	.section __ex_table, "a"
	.align	2
	.long	00b, 03b
	.long	01b, 03b
	.long	02b, 03b
	.text
	bdnz	00b
	blr

_GLOBAL(_outsb)
	cmpwi	0,r5,0
	mtctr	r5
	subi	r4,r4,1
	blelr-
00:	lbzu	r5,1(r4)
01:	stb	r5,0(r3)
02:	eieio
	ISYNC_8xx
	.section .fixup,"ax"
03:	blr
	.text
	.section __ex_table, "a"
	.align	2
	.long	00b, 03b
	.long	01b, 03b
	.long	02b, 03b
	.text
	bdnz	00b
	blr

_GLOBAL(_insw_ns)
	cmpwi	0,r5,0
	mtctr	r5
	subi	r4,r4,2
	blelr-
00:	lhz	r5,0(r3)
01:	eieio
02:	sthu	r5,2(r4)
	ISYNC_8xx
	.section .fixup,"ax"
03:	blr
	.text
	.section __ex_table, "a"
	.align	2
	.long	00b, 03b
	.long	01b, 03b
	.long	02b, 03b
	.text
	bdnz	00b
	blr

_GLOBAL(_outsw_ns)
	cmpwi	0,r5,0
	mtctr	r5
	subi	r4,r4,2
	blelr-
00:	lhzu	r5,2(r4)
01:	sth	r5,0(r3)
02:	eieio
	ISYNC_8xx
	.section .fixup,"ax"
03:	blr
	.text
	.section __ex_table, "a"
	.align	2
	.long	00b, 03b
	.long	01b, 03b
	.long	02b, 03b
	.text
	bdnz	00b
	blr

_GLOBAL(_insl_ns)
	cmpwi	0,r5,0
	mtctr	r5
	subi	r4,r4,4
	blelr-
00:	lwz	r5,0(r3)
01:	eieio
02:	stwu	r5,4(r4)
	ISYNC_8xx
	.section .fixup,"ax"
03:	blr
	.text
	.section __ex_table, "a"
	.align	2
	.long	00b, 03b
	.long	01b, 03b
	.long	02b, 03b
	.text
	bdnz	00b
	blr

_GLOBAL(_outsl_ns)
	cmpwi	0,r5,0
	mtctr	r5
	subi	r4,r4,4
	blelr-
00:	lwzu	r5,4(r4)
01:	stw	r5,0(r3)
02:	eieio
	ISYNC_8xx
	.section .fixup,"ax"
03:	blr
	.text
	.section __ex_table, "a"
	.align	2
	.long	00b, 03b
	.long	01b, 03b
	.long	02b, 03b
	.text
	bdnz	00b
	blr
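
/*
 * Usage sketch (illustrative): drivers reach these through the insb()/
 * outsw()-style wrappers in io.h, e.g. to drain a 16-bit device FIFO
 * (the register name is hypothetical):
 *
 *	insw(ioaddr + DATA_REG, buf, len / 2);	// read len bytes, a word at a time
 *	outsw(ioaddr + DATA_REG, buf, len / 2);	// write them back out
 */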

/*
 * Extended precision shifts.
 *
 * Updated to be valid for shift counts from 0 to 63 inclusive.
 * -- Gabriel
 *
 * R3/R4 has 64 bit value
 * R5 has shift count
 * result in R3/R4
 *
 *  ashrdi3: arithmetic right shift (sign propagation)
 *  lshrdi3: logical right shift
 *  ashldi3: left shift
 */
_GLOBAL(__ashrdi3)
	subfic	r6,r5,32
	srw	r4,r4,r5	# LSW = count > 31 ? 0 : LSW >> count
	addi	r7,r5,32	# could be xori, or addi with -32
	slw	r6,r3,r6	# t1 = count > 31 ? 0 : MSW << (32-count)
	rlwinm	r8,r7,0,32	# t3 = (count < 32) ? 32 : 0
	sraw	r7,r3,r7	# t2 = MSW >> (count-32)
	or	r4,r4,r6	# LSW |= t1
	slw	r7,r7,r8	# t2 = (count < 32) ? 0 : t2
	sraw	r3,r3,r5	# MSW = MSW >> count
	or	r4,r4,r7	# LSW |= t2
	blr

_GLOBAL(__ashldi3)
	subfic	r6,r5,32
	slw	r3,r3,r5	# MSW = count > 31 ? 0 : MSW << count
	addi	r7,r5,32	# could be xori, or addi with -32
	srw	r6,r4,r6	# t1 = count > 31 ? 0 : LSW >> (32-count)
	slw	r7,r4,r7	# t2 = count < 32 ? 0 : LSW << (count-32)
	or	r3,r3,r6	# MSW |= t1
	slw	r4,r4,r5	# LSW = LSW << count
	or	r3,r3,r7	# MSW |= t2
	blr

_GLOBAL(__lshrdi3)
	subfic	r6,r5,32
	srw	r4,r4,r5	# LSW = count > 31 ? 0 : LSW >> count
	addi	r7,r5,32	# could be xori, or addi with -32
	slw	r6,r3,r6	# t1 = count > 31 ? 0 : MSW << (32-count)
	srw	r7,r3,r7	# t2 = count < 32 ? 0 : MSW >> (count-32)
	or	r4,r4,r6	# LSW |= t1
	srw	r3,r3,r5	# MSW = MSW >> count
	or	r4,r4,r7	# LSW |= t2
	blr
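
/*
 * Usage sketch (illustrative): these are libgcc helpers, so 32-bit gcc
 * calls them behind the scenes for 64-bit shifts by a variable count;
 * plain C like the following ends up here:
 *
 *	long long ashr(long long v, int n)
 *	{
 *		return v >> n;		// compiles to a call to __ashrdi3
 *	}
 */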

_GLOBAL(abs)
	srawi	r4,r3,31
	xor	r3,r3,r4
	sub	r3,r3,r4
	blr

_GLOBAL(_get_SP)
	mr	r3,r1		/* Close enough */
	blr

/*
 * Create a kernel thread
 *   kernel_thread(fn, arg, flags)
 */
_GLOBAL(kernel_thread)
	stwu	r1,-16(r1)
	stw	r30,8(r1)
	stw	r31,12(r1)
	mr	r30,r3		/* function */
	mr	r31,r4		/* argument */
	ori	r3,r5,CLONE_VM	/* flags */
	oris	r3,r3,CLONE_UNTRACED>>16
	li	r4,0		/* new sp (unused) */
	li	r0,__NR_clone
	sc
	cmpwi	0,r3,0		/* parent or child? */
	bne	1f		/* return if parent */
	li	r0,0		/* make top-level stack frame */
	stwu	r0,-16(r1)
	mtlr	r30		/* fn addr in lr */
	mr	r3,r31		/* load arg and call fn */
	PPC440EP_ERR42
	blrl
	li	r0,__NR_exit	/* exit if function returns */
	li	r3,0
	sc
1:	lwz	r30,8(r1)
	lwz	r31,12(r1)
	addi	r1,r1,16
	blr
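
/*
 * Usage sketch (illustrative): the classic caller passes a function,
 * its argument and any extra clone flags; CLONE_VM|CLONE_UNTRACED are
 * OR'd in above. The worker names are hypothetical:
 *
 *	static int worker(void *arg) { do_work(arg); return 0; }
 *
 *	kernel_thread(worker, NULL, CLONE_FS | CLONE_FILES);
 */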

_GLOBAL(kernel_execve)
	li	r0,__NR_execve
	sc
	bnslr
	neg	r3,r3
	blr

/*
 * This routine is just here to keep GCC happy - sigh...
 */
_GLOBAL(__main)
	blr