
/*
 * This file contains miscellaneous low-level functions.
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
 * and Paul Mackerras.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */
#include <linux/sys.h>
#include <asm/unistd.h>
#include <asm/errno.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/cache.h>
#include <asm/cputable.h>
#include <asm/mmu.h>
#include <asm/ppc_asm.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>

#ifdef CONFIG_8xx
#define ISYNC_8xx isync
#else
#define ISYNC_8xx
#endif

	.text

	.align	5
_GLOBAL(__delay)
	cmpwi	0,r3,0
	mtctr	r3
	beqlr
1:	bdnz	1b
	blr
/*
 * Returns (address we're running at) - (address we were linked at)
 * for use before the text and data are mapped to KERNELBASE.
 */
_GLOBAL(reloc_offset)
	mflr	r0
	bl	1f
1:	mflr	r3
	lis	r4,1b@ha
	addi	r4,r4,1b@l
	subf	r3,r4,r3
	mtlr	r0
	blr

/*
 * add_reloc_offset(x) returns x + reloc_offset().
 */
_GLOBAL(add_reloc_offset)
	mflr	r0
	bl	1f
1:	mflr	r5
	lis	r4,1b@ha
	addi	r4,r4,1b@l
	subf	r5,r4,r5
	add	r3,r3,r5
	mtlr	r0
	blr

/*
 * sub_reloc_offset(x) returns x - reloc_offset().
 */
_GLOBAL(sub_reloc_offset)
	mflr	r0
	bl	1f
1:	mflr	r5
	lis	r4,1b@ha
	addi	r4,r4,1b@l
	subf	r5,r4,r5
	subf	r3,r5,r3
	mtlr	r0
	blr
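
/*
 * C-level sketch of how these are typically used early in boot
 * (hypothetical caller, for illustration only; "ptr" is any link-time
 * address that must be dereferenced before relocation):
 *
 *	unsigned long offset = reloc_offset();
 *	void *p = (void *)add_reloc_offset((unsigned long)ptr);
 */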
/*
 * reloc_got2 runs through the .got2 section adding an offset
 * to each entry.
 */
_GLOBAL(reloc_got2)
	mflr	r11
	lis	r7,__got2_start@ha
	addi	r7,r7,__got2_start@l
	lis	r8,__got2_end@ha
	addi	r8,r8,__got2_end@l
	subf	r8,r7,r8
	srwi.	r8,r8,2
	beqlr
	mtctr	r8
	bl	1f
1:	mflr	r0
	lis	r4,1b@ha
	addi	r4,r4,1b@l
	subf	r0,r4,r0
	add	r7,r0,r7
2:	lwz	r0,0(r7)
	add	r0,r0,r3
	stw	r0,0(r7)
	addi	r7,r7,4
	bdnz	2b
	mtlr	r11
	blr

/*
 * call_setup_cpu - call the setup_cpu function for this cpu
 * r3 = data offset, r24 = cpu number
 *
 * Setup function is called with:
 * r3 = data offset
 * r4 = ptr to CPU spec (relocated)
 */
_GLOBAL(call_setup_cpu)
	addis	r4,r3,cur_cpu_spec@ha
	addi	r4,r4,cur_cpu_spec@l
	lwz	r4,0(r4)
	add	r4,r4,r3
	lwz	r5,CPU_SPEC_SETUP(r4)
	cmpi	0,r5,0
	add	r5,r5,r3
	beqlr
	mtctr	r5
	bctr
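
/*
 * Rough C equivalent (a sketch only, ignoring the pointer fixups by
 * "offset"; the hook is the field at CPU_SPEC_SETUP in struct cpu_spec):
 *
 *	if (cur_cpu_spec->cpu_setup)
 *		cur_cpu_spec->cpu_setup(offset, cur_cpu_spec);
 */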
/*
 * complement mask on the msr then "or" some values on.
 *	_nmask_and_or_msr(nmask, value_to_or)
 */
_GLOBAL(_nmask_and_or_msr)
	mfmsr	r0		/* Get current msr */
	andc	r0,r0,r3	/* And off the bits set in r3 (first parm) */
	or	r0,r0,r4	/* Or on the bits in r4 (second parm) */
	SYNC			/* Some chip revs have problems here... */
	mtmsr	r0		/* Update machine state */
	isync
	blr			/* Done */
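
/*
 * In C-like pseudocode this is just (a sketch):
 *	mtmsr((mfmsr() & ~nmask) | value_to_or);
 * with the SYNC/isync required around the MSR update.
 */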
/*
 * Flush MMU TLB
 */
_GLOBAL(_tlbia)
#if defined(CONFIG_40x)
	sync			/* Flush to memory before changing mapping */
	tlbia
	isync			/* Flush shadow TLB */
#elif defined(CONFIG_44x)
	li	r3,0
	sync

	/* Load high watermark */
	lis	r4,tlb_44x_hwater@ha
	lwz	r5,tlb_44x_hwater@l(r4)

1:	tlbwe	r3,r3,PPC44x_TLB_PAGEID
	addi	r3,r3,1
	cmpw	0,r3,r5
	ble	1b

	isync
#elif defined(CONFIG_FSL_BOOKE)
	/* Invalidate all entries in TLB0 */
	li	r3, 0x04
	tlbivax	0,3
	/* Invalidate all entries in TLB1 */
	li	r3, 0x0c
	tlbivax	0,3
	/* Invalidate all entries in TLB2 */
	li	r3, 0x14
	tlbivax	0,3
	/* Invalidate all entries in TLB3 */
	li	r3, 0x1c
	tlbivax	0,3
	msync
#ifdef CONFIG_SMP
	tlbsync
#endif /* CONFIG_SMP */
#else /* !(CONFIG_40x || CONFIG_44x || CONFIG_FSL_BOOKE) */
#if defined(CONFIG_SMP)
	rlwinm	r8,r1,0,0,18
	lwz	r8,TI_CPU(r8)
	oris	r8,r8,10
	mfmsr	r10
	SYNC
	rlwinm	r0,r10,0,17,15	/* clear bit 16 (MSR_EE) */
	rlwinm	r0,r0,0,28,26	/* clear DR */
	mtmsr	r0
	SYNC_601
	isync
	lis	r9,mmu_hash_lock@h
	ori	r9,r9,mmu_hash_lock@l
	tophys(r9,r9)
10:	lwarx	r7,0,r9
	cmpwi	0,r7,0
	bne-	10b
	stwcx.	r8,0,r9
	bne-	10b
	sync
	tlbia
	sync
	TLBSYNC
	li	r0,0
	stw	r0,0(r9)	/* clear mmu_hash_lock */
	mtmsr	r10
	SYNC_601
	isync
#else /* CONFIG_SMP */
	sync
	tlbia
	sync
#endif /* CONFIG_SMP */
#endif /* ! defined(CONFIG_40x) */
	blr
/*
 * Flush MMU TLB for a particular address
 */
_GLOBAL(_tlbie)
#if defined(CONFIG_40x)
	tlbsx.	r3, 0, r3
	bne	10f
	sync
	/* There are only 64 TLB entries, so r3 < 64, which means bit 25 is clear.
	 * Since 25 is the V bit in the TLB_TAG, loading this value will invalidate
	 * the TLB entry. */
	tlbwe	r3, r3, TLB_TAG
	isync
10:
#elif defined(CONFIG_44x)
	mfspr	r4,SPRN_MMUCR
	mfspr	r5,SPRN_PID		/* Get PID */
	rlwimi	r4,r5,0,24,31		/* Set TID */
	mtspr	SPRN_MMUCR,r4

	tlbsx.	r3, 0, r3
	bne	10f
	sync
	/* There are only 64 TLB entries, so r3 < 64,
	 * which means bit 22 is clear.  Since 22 is
	 * the V bit in the TLB_PAGEID, loading this
	 * value will invalidate the TLB entry.
	 */
	tlbwe	r3, r3, PPC44x_TLB_PAGEID
	isync
10:
#elif defined(CONFIG_FSL_BOOKE)
	rlwinm	r4, r3, 0, 0, 19
	ori	r5, r4, 0x08	/* TLBSEL = 1 */
	ori	r6, r4, 0x10	/* TLBSEL = 2 */
	ori	r7, r4, 0x18	/* TLBSEL = 3 */
	tlbivax	0, r4
	tlbivax	0, r5
	tlbivax	0, r6
	tlbivax	0, r7
	msync
#if defined(CONFIG_SMP)
	tlbsync
#endif /* CONFIG_SMP */
#else /* !(CONFIG_40x || CONFIG_44x || CONFIG_FSL_BOOKE) */
#if defined(CONFIG_SMP)
	rlwinm	r8,r1,0,0,18
	lwz	r8,TI_CPU(r8)
	oris	r8,r8,11
	mfmsr	r10
	SYNC
	rlwinm	r0,r10,0,17,15	/* clear bit 16 (MSR_EE) */
	rlwinm	r0,r0,0,28,26	/* clear DR */
	mtmsr	r0
	SYNC_601
	isync
	lis	r9,mmu_hash_lock@h
	ori	r9,r9,mmu_hash_lock@l
	tophys(r9,r9)
10:	lwarx	r7,0,r9
	cmpwi	0,r7,0
	bne-	10b
	stwcx.	r8,0,r9
	bne-	10b
	eieio
	tlbie	r3
	sync
	TLBSYNC
	li	r0,0
	stw	r0,0(r9)	/* clear mmu_hash_lock */
	mtmsr	r10
	SYNC_601
	isync
#else /* CONFIG_SMP */
	tlbie	r3
	sync
#endif /* CONFIG_SMP */
#endif /* ! CONFIG_40x */
	blr
/*
 * Flush instruction cache.
 * This is a no-op on the 601.
 */
_GLOBAL(flush_instruction_cache)
#if defined(CONFIG_8xx)
	isync
	lis	r5, IDC_INVALL@h
	mtspr	SPRN_IC_CST, r5
#elif defined(CONFIG_4xx)
#ifdef CONFIG_403GCX
	li	r3, 512
	mtctr	r3
	lis	r4, KERNELBASE@h
1:	iccci	0, r4
	addi	r4, r4, 16
	bdnz	1b
#else
	lis	r3, KERNELBASE@h
	iccci	0,r3
#endif
#elif defined(CONFIG_FSL_BOOKE)
BEGIN_FTR_SECTION
	mfspr	r3,SPRN_L1CSR0
	ori	r3,r3,L1CSR0_CFI|L1CSR0_CLFC
	/* msync; isync recommended here */
	mtspr	SPRN_L1CSR0,r3
	isync
	blr
END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)
	mfspr	r3,SPRN_L1CSR1
	ori	r3,r3,L1CSR1_ICFI|L1CSR1_ICLFR
	mtspr	SPRN_L1CSR1,r3
#else
	mfspr	r3,SPRN_PVR
	rlwinm	r3,r3,16,16,31
	cmpwi	0,r3,1
	beqlr			/* for 601, do nothing */
	/* 603/604 processor - use invalidate-all bit in HID0 */
	mfspr	r3,SPRN_HID0
	ori	r3,r3,HID0_ICFI
	mtspr	SPRN_HID0,r3
#endif /* CONFIG_8xx/4xx */
	isync
	blr
/*
 * Write any modified data cache blocks out to memory
 * and invalidate the corresponding instruction cache blocks.
 * This is a no-op on the 601.
 *
 * __flush_icache_range(unsigned long start, unsigned long stop)
 */
_GLOBAL(__flush_icache_range)
BEGIN_FTR_SECTION
	blr				/* for 601, do nothing */
END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)
	li	r5,L1_CACHE_BYTES-1
	andc	r3,r3,r5
	subf	r4,r3,r4
	add	r4,r4,r5
	srwi.	r4,r4,L1_CACHE_SHIFT
	beqlr
	mtctr	r4
	mr	r6,r3
1:	dcbst	0,r3
	addi	r3,r3,L1_CACHE_BYTES
	bdnz	1b
	sync				/* wait for dcbst's to get to ram */
	mtctr	r4
2:	icbi	0,r6
	addi	r6,r6,L1_CACHE_BYTES
	bdnz	2b
	sync				/* additional sync needed on g4 */
	isync
	blr
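
/*
 * Typical use after modifying kernel text (a sketch; addr and len are
 * hypothetical): __flush_icache_range(addr, addr + len) pushes the
 * modified data-cache lines to memory and discards any stale
 * instruction-cache copies before the new code is executed.
 */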
/*
 * Write any modified data cache blocks out to memory.
 * Does not invalidate the corresponding cache lines (especially for
 * any corresponding instruction cache).
 *
 * clean_dcache_range(unsigned long start, unsigned long stop)
 */
_GLOBAL(clean_dcache_range)
	li	r5,L1_CACHE_BYTES-1
	andc	r3,r3,r5
	subf	r4,r3,r4
	add	r4,r4,r5
	srwi.	r4,r4,L1_CACHE_SHIFT
	beqlr
	mtctr	r4

1:	dcbst	0,r3
	addi	r3,r3,L1_CACHE_BYTES
	bdnz	1b
	sync				/* wait for dcbst's to get to ram */
	blr
/*
 * Write any modified data cache blocks out to memory and invalidate them.
 * Does not invalidate the corresponding instruction cache blocks.
 *
 * flush_dcache_range(unsigned long start, unsigned long stop)
 */
_GLOBAL(flush_dcache_range)
	li	r5,L1_CACHE_BYTES-1
	andc	r3,r3,r5
	subf	r4,r3,r4
	add	r4,r4,r5
	srwi.	r4,r4,L1_CACHE_SHIFT
	beqlr
	mtctr	r4

1:	dcbf	0,r3
	addi	r3,r3,L1_CACHE_BYTES
	bdnz	1b
	sync				/* wait for dcbf's to get to ram */
	blr
/*
 * Like above, but invalidate the D-cache.  This is used by the 8xx
 * to invalidate the cache so the PPC core doesn't get stale data
 * from the CPM (no cache snooping here :-).
 *
 * invalidate_dcache_range(unsigned long start, unsigned long stop)
 */
_GLOBAL(invalidate_dcache_range)
	li	r5,L1_CACHE_BYTES-1
	andc	r3,r3,r5
	subf	r4,r3,r4
	add	r4,r4,r5
	srwi.	r4,r4,L1_CACHE_SHIFT
	beqlr
	mtctr	r4

1:	dcbi	0,r3
	addi	r3,r3,L1_CACHE_BYTES
	bdnz	1b
	sync				/* wait for dcbi's to complete */
	blr
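
/*
 * A sketch of how these three pair with DMA on non-snooping hardware
 * (buf and len are hypothetical):
 *
 *	clean_dcache_range(buf, buf + len);	  before a device reads buf
 *	invalidate_dcache_range(buf, buf + len);  before the CPU reads data
 *						  a device wrote into buf
 *	flush_dcache_range(buf, buf + len);	  when both apply
 */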
#ifdef CONFIG_NOT_COHERENT_CACHE
/*
 * 40x cores have 8K or 16K dcache and 32 byte line size.
 * 44x has a 32K dcache and 32 byte line size.
 * 8xx has 1, 2, 4, 8K variants.
 * For now, cover the worst case of the 44x.
 * Must be called with external interrupts disabled.
 */
#define CACHE_NWAYS	64
#define CACHE_NLINES	16

_GLOBAL(flush_dcache_all)
	li	r4, (2 * CACHE_NWAYS * CACHE_NLINES)
	mtctr	r4
	lis	r5, KERNELBASE@h
1:	lwz	r3, 0(r5)		/* Load one word from every line */
	addi	r5, r5, L1_CACHE_BYTES
	bdnz	1b
	blr
#endif /* CONFIG_NOT_COHERENT_CACHE */
/*
 * Flush a particular page from the data cache to RAM.
 * Note: this is necessary because the instruction cache does *not*
 * snoop from the data cache.
 * This is a no-op on the 601 which has a unified cache.
 *
 *	void __flush_dcache_icache(void *page)
 */
_GLOBAL(__flush_dcache_icache)
BEGIN_FTR_SECTION
	blr				/* for 601, do nothing */
END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)
	rlwinm	r3,r3,0,0,19		/* Get page base address */
	li	r4,4096/L1_CACHE_BYTES	/* Number of lines in a page */
	mtctr	r4
	mr	r6,r3
0:	dcbst	0,r3			/* Write line to ram */
	addi	r3,r3,L1_CACHE_BYTES
	bdnz	0b
	sync
	mtctr	r4
1:	icbi	0,r6
	addi	r6,r6,L1_CACHE_BYTES
	bdnz	1b
	sync
	isync
	blr

/*
 * Flush a particular page from the data cache to RAM, identified
 * by its physical address.  We turn off the MMU so we can just use
 * the physical address (this may be a highmem page without a kernel
 * mapping).
 *
 *	void __flush_dcache_icache_phys(unsigned long physaddr)
 */
_GLOBAL(__flush_dcache_icache_phys)
BEGIN_FTR_SECTION
	blr				/* for 601, do nothing */
END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)
	mfmsr	r10
	rlwinm	r0,r10,0,28,26		/* clear DR */
	mtmsr	r0
	isync
	rlwinm	r3,r3,0,0,19		/* Get page base address */
	li	r4,4096/L1_CACHE_BYTES	/* Number of lines in a page */
	mtctr	r4
	mr	r6,r3
0:	dcbst	0,r3			/* Write line to ram */
	addi	r3,r3,L1_CACHE_BYTES
	bdnz	0b
	sync
	mtctr	r4
1:	icbi	0,r6
	addi	r6,r6,L1_CACHE_BYTES
	bdnz	1b
	sync
	mtmsr	r10			/* restore DR */
	isync
	blr
/*
 * Clear pages using the dcbz instruction, which doesn't cause any
 * memory traffic (except to write out any cache lines which get
 * displaced).  This only works on cacheable memory.
 *
 * void clear_pages(void *page, int order) ;
 */
_GLOBAL(clear_pages)
	li	r0,4096/L1_CACHE_BYTES
	slw	r0,r0,r4
	mtctr	r0
#ifdef CONFIG_8xx
	li	r4, 0
1:	stw	r4, 0(r3)
	stw	r4, 4(r3)
	stw	r4, 8(r3)
	stw	r4, 12(r3)
#else
1:	dcbz	0,r3
#endif
	addi	r3,r3,L1_CACHE_BYTES
	bdnz	1b
	blr
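
/*
 * Semantically this is memset(page, 0, 4096 << order) (a sketch, not
 * a drop-in): dcbz zeroes a whole cache line in place, so the
 * destination is never read in from memory first.
 */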
/*
 * Copy a whole page.  We use the dcbz instruction on the destination
 * to reduce memory traffic (it eliminates the unnecessary reads of
 * the destination into cache).  This requires that the destination
 * is cacheable.
 */
#define COPY_16_BYTES		\
	lwz	r6,4(r4);	\
	lwz	r7,8(r4);	\
	lwz	r8,12(r4);	\
	lwzu	r9,16(r4);	\
	stw	r6,4(r3);	\
	stw	r7,8(r3);	\
	stw	r8,12(r3);	\
	stwu	r9,16(r3)

_GLOBAL(copy_page)
	addi	r3,r3,-4
	addi	r4,r4,-4

#ifdef CONFIG_8xx
	/* don't use prefetch on 8xx */
	li	r0,4096/L1_CACHE_BYTES
	mtctr	r0
1:	COPY_16_BYTES
	bdnz	1b
	blr

#else	/* not 8xx, we can prefetch */
	li	r5,4

#if MAX_COPY_PREFETCH > 1
	li	r0,MAX_COPY_PREFETCH
	li	r11,4
	mtctr	r0
11:	dcbt	r11,r4
	addi	r11,r11,L1_CACHE_BYTES
	bdnz	11b
#else /* MAX_COPY_PREFETCH == 1 */
	dcbt	r5,r4
	li	r11,L1_CACHE_BYTES+4
#endif /* MAX_COPY_PREFETCH */
	li	r0,4096/L1_CACHE_BYTES - MAX_COPY_PREFETCH
	crclr	4*cr0+eq
2:
	mtctr	r0
1:
	dcbt	r11,r4
	dcbz	r5,r3
	COPY_16_BYTES
#if L1_CACHE_BYTES >= 32
	COPY_16_BYTES
#if L1_CACHE_BYTES >= 64
	COPY_16_BYTES
	COPY_16_BYTES
#if L1_CACHE_BYTES >= 128
	COPY_16_BYTES
	COPY_16_BYTES
	COPY_16_BYTES
	COPY_16_BYTES
#endif
#endif
#endif
	bdnz	1b
	beqlr
	crnot	4*cr0+eq,4*cr0+eq
	li	r0,MAX_COPY_PREFETCH
	li	r11,4
	b	2b
#endif	/* CONFIG_8xx */
/*
 * void atomic_clear_mask(atomic_t mask, atomic_t *addr)
 * void atomic_set_mask(atomic_t mask, atomic_t *addr);
 */
_GLOBAL(atomic_clear_mask)
10:	lwarx	r5,0,r4
	andc	r5,r5,r3
	PPC405_ERR77(0,r4)
	stwcx.	r5,0,r4
	bne-	10b
	blr
_GLOBAL(atomic_set_mask)
10:	lwarx	r5,0,r4
	or	r5,r5,r3
	PPC405_ERR77(0,r4)
	stwcx.	r5,0,r4
	bne-	10b
	blr
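
/*
 * In C terms these are atomic read-modify-write loops (a sketch):
 *
 *	do { old = *addr; } while (!cas(addr, old, old & ~mask));   clear
 *	do { old = *addr; } while (!cas(addr, old, old | mask));    set
 *
 * where the lwarx/stwcx. pair plays the role of the hypothetical cas().
 */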
/*
 * I/O string operations
 *
 * insb(port, buf, len)
 * outsb(port, buf, len)
 * insw(port, buf, len)
 * outsw(port, buf, len)
 * insl(port, buf, len)
 * outsl(port, buf, len)
 * insw_ns(port, buf, len)
 * outsw_ns(port, buf, len)
 * insl_ns(port, buf, len)
 * outsl_ns(port, buf, len)
 *
 * The *_ns versions don't do byte-swapping.
 */
_GLOBAL(_insb)
	cmpwi	0,r5,0
	mtctr	r5
	subi	r4,r4,1
	blelr-
00:	lbz	r5,0(r3)
01:	eieio
02:	stbu	r5,1(r4)
	ISYNC_8xx
	.section .fixup,"ax"
03:	blr
	.text
	.section __ex_table, "a"
	.align 2
	.long 00b, 03b
	.long 01b, 03b
	.long 02b, 03b
	.text
	bdnz	00b
	blr

_GLOBAL(_outsb)
	cmpwi	0,r5,0
	mtctr	r5
	subi	r4,r4,1
	blelr-
00:	lbzu	r5,1(r4)
01:	stb	r5,0(r3)
02:	eieio
	ISYNC_8xx
	.section .fixup,"ax"
03:	blr
	.text
	.section __ex_table, "a"
	.align 2
	.long 00b, 03b
	.long 01b, 03b
	.long 02b, 03b
	.text
	bdnz	00b
	blr

_GLOBAL(_insw_ns)
	cmpwi	0,r5,0
	mtctr	r5
	subi	r4,r4,2
	blelr-
00:	lhz	r5,0(r3)
01:	eieio
02:	sthu	r5,2(r4)
	ISYNC_8xx
	.section .fixup,"ax"
03:	blr
	.text
	.section __ex_table, "a"
	.align 2
	.long 00b, 03b
	.long 01b, 03b
	.long 02b, 03b
	.text
	bdnz	00b
	blr

_GLOBAL(_outsw_ns)
	cmpwi	0,r5,0
	mtctr	r5
	subi	r4,r4,2
	blelr-
00:	lhzu	r5,2(r4)
01:	sth	r5,0(r3)
02:	eieio
	ISYNC_8xx
	.section .fixup,"ax"
03:	blr
	.text
	.section __ex_table, "a"
	.align 2
	.long 00b, 03b
	.long 01b, 03b
	.long 02b, 03b
	.text
	bdnz	00b
	blr

_GLOBAL(_insl_ns)
	cmpwi	0,r5,0
	mtctr	r5
	subi	r4,r4,4
	blelr-
00:	lwz	r5,0(r3)
01:	eieio
02:	stwu	r5,4(r4)
	ISYNC_8xx
	.section .fixup,"ax"
03:	blr
	.text
	.section __ex_table, "a"
	.align 2
	.long 00b, 03b
	.long 01b, 03b
	.long 02b, 03b
	.text
	bdnz	00b
	blr

_GLOBAL(_outsl_ns)
	cmpwi	0,r5,0
	mtctr	r5
	subi	r4,r4,4
	blelr-
00:	lwzu	r5,4(r4)
01:	stw	r5,0(r3)
02:	eieio
	ISYNC_8xx
	.section .fixup,"ax"
03:	blr
	.text
	.section __ex_table, "a"
	.align 2
	.long 00b, 03b
	.long 01b, 03b
	.long 02b, 03b
	.text
	bdnz	00b
	blr
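
/*
 * C-side declarations as typically seen in <asm/io.h> (a sketch of
 * the usual shape, not verified against any particular tree):
 *
 *	extern void _insb(volatile u8 __iomem *port, void *buf, long count);
 *	extern void _outsb(volatile u8 __iomem *port, const void *buf, long count);
 *
 * Each access is followed by an eieio so the device sees the I/O
 * operations in program order, and the __ex_table entries make a
 * faulting access return early instead of oopsing.
 */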
/*
 * Extended precision shifts.
 *
 * Updated to be valid for shift counts from 0 to 63 inclusive.
 * -- Gabriel
 *
 * R3/R4 has 64 bit value
 * R5    has shift count
 * result in R3/R4
 *
 *  ashrdi3: arithmetic right shift (sign propagation)
 *  lshrdi3: logical right shift
 *  ashldi3: left shift
 */
_GLOBAL(__ashrdi3)
	subfic	r6,r5,32
	srw	r4,r4,r5	# LSW = count > 31 ? 0 : LSW >> count
	addi	r7,r5,32	# could be xori, or addi with -32
	slw	r6,r3,r6	# t1 = count > 31 ? 0 : MSW << (32-count)
	rlwinm	r8,r7,0,32	# t3 = (count < 32) ? 32 : 0
	sraw	r7,r3,r7	# t2 = MSW >> (count-32)
	or	r4,r4,r6	# LSW |= t1
	slw	r7,r7,r8	# t2 = (count < 32) ? 0 : t2
	sraw	r3,r3,r5	# MSW = MSW >> count
	or	r4,r4,r7	# LSW |= t2
	blr

_GLOBAL(__ashldi3)
	subfic	r6,r5,32
	slw	r3,r3,r5	# MSW = count > 31 ? 0 : MSW << count
	addi	r7,r5,32	# could be xori, or addi with -32
	srw	r6,r4,r6	# t1 = count > 31 ? 0 : LSW >> (32-count)
	slw	r7,r4,r7	# t2 = count < 32 ? 0 : LSW << (count-32)
	or	r3,r3,r6	# MSW |= t1
	slw	r4,r4,r5	# LSW = LSW << count
	or	r3,r3,r7	# MSW |= t2
	blr

_GLOBAL(__lshrdi3)
	subfic	r6,r5,32
	srw	r4,r4,r5	# LSW = count > 31 ? 0 : LSW >> count
	addi	r7,r5,32	# could be xori, or addi with -32
	slw	r6,r3,r6	# t1 = count > 31 ? 0 : MSW << (32-count)
	srw	r7,r3,r7	# t2 = count < 32 ? 0 : MSW >> (count-32)
	or	r4,r4,r6	# LSW |= t1
	srw	r3,r3,r5	# MSW = MSW >> count
	or	r4,r4,r7	# LSW |= t2
	blr
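
/*
 * These are the libgcc helpers GCC emits calls to for 64-bit shifts
 * of a "long long" on 32-bit PowerPC (a sketch of the contract: MSW
 * in r3, LSW in r4, matching the 64-bit return ABI).  Worked example:
 * __ashrdi3(0xFFFFFFFF00000000LL, 4) == 0xFFFFFFFFF0000000LL, since
 * the arithmetic shift replicates the sign bit.
 */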
_GLOBAL(abs)
	srawi	r4,r3,31
	xor	r3,r3,r4
	sub	r3,r3,r4
	blr
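
/*
 * Branchless abs: r4 = x >> 31 is 0 for x >= 0 and -1 for x < 0, so
 * (x ^ r4) - r4 yields x or (~x + 1) = -x respectively.  E.g. for
 * x = -5: r4 = -1, (x ^ -1) = 4, and 4 - (-1) = 5.
 */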
_GLOBAL(_get_SP)
	mr	r3,r1		/* Close enough */
	blr
/*
 * Create a kernel thread
 *   kernel_thread(fn, arg, flags)
 */
_GLOBAL(kernel_thread)
	stwu	r1,-16(r1)
	stw	r30,8(r1)
	stw	r31,12(r1)
	mr	r30,r3		/* function */
	mr	r31,r4		/* argument */
	ori	r3,r5,CLONE_VM	/* flags */
	oris	r3,r3,CLONE_UNTRACED>>16
	li	r4,0		/* new sp (unused) */
	li	r0,__NR_clone
	sc
	cmpwi	0,r3,0		/* parent or child? */
	bne	1f		/* return if parent */
	li	r0,0		/* make top-level stack frame */
	stwu	r0,-16(r1)
	mtlr	r30		/* fn addr in lr */
	mr	r3,r31		/* load arg and call fn */
	PPC440EP_ERR42
	blrl
	li	r0,__NR_exit	/* exit if function returns */
	li	r3,0
	sc
1:	lwz	r30,8(r1)
	lwz	r31,12(r1)
	addi	r1,r1,16
	blr
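
/*
 * Usage sketch (fn and arg are hypothetical): in the parent,
 * kernel_thread(fn, arg, 0) returns the child's pid from the clone
 * syscall; the child calls fn(arg) and exits with status 0 if fn
 * ever returns.
 */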
_GLOBAL(kernel_execve)
	li	r0,__NR_execve
	sc
	bnslr
	neg	r3,r3
	blr

/*
 * This routine is just here to keep GCC happy - sigh...
 */
_GLOBAL(__main)
	blr