
/*
 * This file contains miscellaneous low-level functions.
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
 * and Paul Mackerras.
 *
 * kexec bits:
 * Copyright (C) 2002-2003 Eric Biederman <ebiederm@xmission.com>
 * GameCube/ppc32 port Copyright (C) 2004 Albert Herranz
 * PPC44x port. Copyright (C) 2011, IBM Corporation
 *		Author: Suzuki Poulose <suzuki@in.ibm.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#include <linux/sys.h>
#include <asm/unistd.h>
#include <asm/errno.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/cache.h>
#include <asm/cputable.h>
#include <asm/mmu.h>
#include <asm/ppc_asm.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/processor.h>
#include <asm/kexec.h>
#include <asm/bug.h>
#include <asm/ptrace.h>

	.text
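
/*
 * Run __do_softirq() on the stack passed in r3 (assumed to be the
 * base of the softirq stack set up by the irq code): save LR in the
 * caller's frame, plant a back chain on the new stack with stwu,
 * switch r1 over, call out, then unwind through that back chain.
 */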
_GLOBAL(call_do_softirq)
	mflr	r0
	stw	r0,4(r1)
	stwu	r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r3)
	mr	r1,r3
	bl	__do_softirq
	lwz	r1,0(r1)
	lwz	r0,4(r1)
	mtlr	r0
	blr
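
/*
 * Call the handler in r6 on the IRQ stack whose base is in r5; r3/r4
 * are left untouched so they arrive as the handler's first two
 * arguments.  Same stack-switch dance as call_do_softirq above.
 */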
_GLOBAL(call_handle_irq)
	mflr	r0
	stw	r0,4(r1)
	mtctr	r6
	stwu	r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r5)
	mr	r1,r5
	bctrl
	lwz	r1,0(r1)
	lwz	r0,4(r1)
	mtlr	r0
	blr

/*
 * This returns the high 64 bits of the product of two 64-bit numbers.
 */
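/*
 * Per the 32-bit ABI each u64 operand arrives as a register pair,
 * most significant word first: A in r3/r4, B in r5/r6; the result is
 * returned in r3/r4.  The product is built from 32x32->64 partial
 * products (mullw/mulhwu), skipping terms whose inputs are zero.
 */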
_GLOBAL(mulhdu)
	cmpwi	r6,0
	cmpwi	cr1,r3,0
	mr	r10,r4
	mulhwu	r4,r4,r5
	beq	1f
	mulhwu	r0,r10,r6
	mullw	r7,r10,r5
	addc	r7,r0,r7
	addze	r4,r4
1:	beqlr	cr1		/* all done if high part of A is 0 */
	mr	r10,r3
	mullw	r9,r3,r5
	mulhwu	r3,r3,r5
	beq	2f
	mullw	r0,r10,r6
	mulhwu	r8,r10,r6
	addc	r7,r0,r7
	adde	r4,r4,r8
	addze	r3,r3
2:	addc	r4,r4,r9
	addze	r3,r3
	blr

/*
 * sub_reloc_offset(x) returns x - reloc_offset().
 */
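/*
 * The bl/mflr pair below discovers our run-time address: mflr picks
 * up the actual address of label 1, while lis/addi form its link-time
 * address.  Their difference is the relocation offset, which is then
 * subtracted from the argument.
 */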
_GLOBAL(sub_reloc_offset)
	mflr	r0
	bl	1f
1:	mflr	r5
	lis	r4,1b@ha
	addi	r4,r4,1b@l
	subf	r5,r4,r5
	subf	r3,r5,r3
	mtlr	r0
	blr

/*
 * reloc_got2 runs through the .got2 section adding an offset
 * (passed in r3) to each entry.
 */
_GLOBAL(reloc_got2)
	mflr	r11
	lis	r7,__got2_start@ha
	addi	r7,r7,__got2_start@l
	lis	r8,__got2_end@ha
	addi	r8,r8,__got2_end@l
	subf	r8,r7,r8
	srwi.	r8,r8,2
	beqlr
	mtctr	r8
	bl	1f
1:	mflr	r0
	lis	r4,1b@ha
	addi	r4,r4,1b@l
	subf	r0,r4,r0
	add	r7,r0,r7
2:	lwz	r0,0(r7)
	add	r0,r0,r3
	stw	r0,0(r7)
	addi	r7,r7,4
	bdnz	2b
	mtlr	r11
	blr

/*
 * call_setup_cpu - call the setup_cpu function for this cpu
 * r3 = data offset, r24 = cpu number
 *
 * Setup function is called with:
 * r3 = data offset
 * r4 = ptr to CPU spec (relocated)
 */
_GLOBAL(call_setup_cpu)
	addis	r4,r3,cur_cpu_spec@ha
	addi	r4,r4,cur_cpu_spec@l
	lwz	r4,0(r4)
	add	r4,r4,r3
	lwz	r5,CPU_SPEC_SETUP(r4)
	cmpwi	0,r5,0
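	/* The add below is not a record form and leaves CR0 alone, so
	 * the beqlr still tests the unrelocated pointer loaded above. */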
	add	r5,r5,r3
	beqlr
	mtctr	r5
	bctr

#if defined(CONFIG_CPU_FREQ_PMAC) && defined(CONFIG_6xx)

/* This gets called by via-pmu.c to switch the PLL selection
 * on 750fx CPU. This function should really be moved to some
 * other place (as should most of the cpufreq code in via-pmu.c).
 */
_GLOBAL(low_choose_750fx_pll)
	/* Clear MSR:EE */
	mfmsr	r7
	rlwinm	r0,r7,0,17,15
	mtmsr	r0

	/* If switching to PLL1, disable HID0:BTIC */
	cmplwi	cr0,r3,0
	beq	1f
	mfspr	r5,SPRN_HID0
	rlwinm	r5,r5,0,27,25
	sync
	mtspr	SPRN_HID0,r5
	isync
	sync

1:
	/* Calc new HID1 value */
	mfspr	r4,SPRN_HID1	/* Read current HID1 */
	rlwinm	r5,r3,16,15,15	/* Build a HID1:PS bit from parameter */
	rlwinm	r4,r4,0,16,14	/* Clear out HID1:PS from value read */
				/* (could rlwimi have been used here?) */
	or	r4,r4,r5
	mtspr	SPRN_HID1,r4

	/* Store new HID1 image */
	CURRENT_THREAD_INFO(r6, r1)
	lwz	r6,TI_CPU(r6)
	slwi	r6,r6,2
	addis	r6,r6,nap_save_hid1@ha
	stw	r4,nap_save_hid1@l(r6)

	/* If switching to PLL0, enable HID0:BTIC */
	cmplwi	cr0,r3,0
	bne	1f
	mfspr	r5,SPRN_HID0
	ori	r5,r5,HID0_BTIC
	sync
	mtspr	SPRN_HID0,r5
	isync
	sync

1:
	/* Return */
	mtmsr	r7
	blr

_GLOBAL(low_choose_7447a_dfs)
	/* Clear MSR:EE */
	mfmsr	r7
	rlwinm	r0,r7,0,17,15
	mtmsr	r0

	/* Calc new HID1 value */
	mfspr	r4,SPRN_HID1
	insrwi	r4,r3,1,9	/* insert parameter into bit 9 */
	sync
	mtspr	SPRN_HID1,r4
	sync
	isync

	/* Return */
	mtmsr	r7
	blr

#endif /* CONFIG_CPU_FREQ_PMAC && CONFIG_6xx */

/*
 * complement mask on the msr then "or" some values on.
 *     _nmask_and_or_msr(nmask, value_to_or)
 */
_GLOBAL(_nmask_and_or_msr)
	mfmsr	r0		/* Get current msr */
	andc	r0,r0,r3	/* And off the bits set in r3 (first parm) */
	or	r0,r0,r4	/* Or on the bits in r4 (second parm) */
	SYNC			/* Some chip revs have problems here... */
	mtmsr	r0		/* Update machine state */
	isync
	blr			/* Done */

#ifdef CONFIG_40x

/*
 * Do an IO access in real mode
 */
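/*
 * The ori/xori pair first sets and then flips MSR_DR, i.e. it clears
 * data translation whatever its prior state while leaving the rest of
 * the MSR (including instruction translation) untouched.
 */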
_GLOBAL(real_readb)
	mfmsr	r7
	ori	r0,r7,MSR_DR
	xori	r0,r0,MSR_DR
	sync
	mtmsr	r0
	sync
	isync
	lbz	r3,0(r3)
	sync
	mtmsr	r7
	sync
	isync
	blr

/*
 * Do an IO access in real mode
 */
_GLOBAL(real_writeb)
	mfmsr	r7
	ori	r0,r7,MSR_DR
	xori	r0,r0,MSR_DR
	sync
	mtmsr	r0
	sync
	isync
	stb	r3,0(r4)
	sync
	mtmsr	r7
	sync
	isync
	blr

#endif /* CONFIG_40x */

/*
 * Flush instruction cache.
 * This is a no-op on the 601.
 */
_GLOBAL(flush_instruction_cache)
#if defined(CONFIG_8xx)
	isync
	lis	r5, IDC_INVALL@h
	mtspr	SPRN_IC_CST, r5
#elif defined(CONFIG_4xx)
#ifdef CONFIG_403GCX
	li	r3, 512
	mtctr	r3
	lis	r4, KERNELBASE@h
1:	iccci	0, r4
	addi	r4, r4, 16
	bdnz	1b
#else
	lis	r3, KERNELBASE@h
	iccci	0,r3
#endif
#elif defined(CONFIG_FSL_BOOKE)
BEGIN_FTR_SECTION
	mfspr	r3,SPRN_L1CSR0
	ori	r3,r3,L1CSR0_CFI|L1CSR0_CLFC
	/* msync; isync recommended here */
	mtspr	SPRN_L1CSR0,r3
	isync
	blr
END_FTR_SECTION_IFSET(CPU_FTR_UNIFIED_ID_CACHE)
	mfspr	r3,SPRN_L1CSR1
	ori	r3,r3,L1CSR1_ICFI|L1CSR1_ICLFR
	mtspr	SPRN_L1CSR1,r3
#else
	mfspr	r3,SPRN_PVR
	rlwinm	r3,r3,16,16,31
	cmpwi	0,r3,1
	beqlr			/* for 601, do nothing */
	/* 603/604 processor - use invalidate-all bit in HID0 */
	mfspr	r3,SPRN_HID0
	ori	r3,r3,HID0_ICFI
	mtspr	SPRN_HID0,r3
#endif /* CONFIG_8xx/4xx */
	isync
	blr

/*
 * Write any modified data cache blocks out to memory
 * and invalidate the corresponding instruction cache blocks.
 * This is a no-op on the 601.
 *
 * flush_icache_range(unsigned long start, unsigned long stop)
 */
_KPROBE(flush_icache_range)
BEGIN_FTR_SECTION
	isync
	blr				/* for 601, do nothing */
END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
	li	r5,L1_CACHE_BYTES-1
	andc	r3,r3,r5
	subf	r4,r3,r4
	add	r4,r4,r5
	srwi.	r4,r4,L1_CACHE_SHIFT
	beqlr
	mtctr	r4
	mr	r6,r3
1:	dcbst	0,r3
	addi	r3,r3,L1_CACHE_BYTES
	bdnz	1b
	sync				/* wait for dcbst's to get to ram */
#ifndef CONFIG_44x
	mtctr	r4
2:	icbi	0,r6
	addi	r6,r6,L1_CACHE_BYTES
	bdnz	2b
#else
	/* Flash invalidate on 44x because we are passed kmapped addresses and
	   this doesn't work for userspace pages due to the virtually tagged
	   icache.  Sigh. */
	iccci	0, r0
#endif
	sync				/* additional sync needed on g4 */
	isync
	blr

/*
 * Write any modified data cache blocks out to memory.
 * Does not invalidate the corresponding cache lines (especially for
 * any corresponding instruction cache).
 *
 * clean_dcache_range(unsigned long start, unsigned long stop)
 */
_GLOBAL(clean_dcache_range)
	li	r5,L1_CACHE_BYTES-1
	andc	r3,r3,r5
	subf	r4,r3,r4
	add	r4,r4,r5
	srwi.	r4,r4,L1_CACHE_SHIFT
	beqlr
	mtctr	r4

1:	dcbst	0,r3
	addi	r3,r3,L1_CACHE_BYTES
	bdnz	1b
	sync				/* wait for dcbst's to get to ram */
	blr

/*
 * Write any modified data cache blocks out to memory and invalidate them.
 * Does not invalidate the corresponding instruction cache blocks.
 *
 * flush_dcache_range(unsigned long start, unsigned long stop)
 */
_GLOBAL(flush_dcache_range)
	li	r5,L1_CACHE_BYTES-1
	andc	r3,r3,r5
	subf	r4,r3,r4
	add	r4,r4,r5
	srwi.	r4,r4,L1_CACHE_SHIFT
	beqlr
	mtctr	r4

1:	dcbf	0,r3
	addi	r3,r3,L1_CACHE_BYTES
	bdnz	1b
	sync				/* wait for dcbf's to get to ram */
	blr

/*
 * Like above, but invalidate the D-cache.  This is used by the 8xx
 * to invalidate the cache so the PPC core doesn't get stale data
 * from the CPM (no cache snooping here :-).
 *
 * invalidate_dcache_range(unsigned long start, unsigned long stop)
 */
_GLOBAL(invalidate_dcache_range)
	li	r5,L1_CACHE_BYTES-1
	andc	r3,r3,r5
	subf	r4,r3,r4
	add	r4,r4,r5
	srwi.	r4,r4,L1_CACHE_SHIFT
	beqlr
	mtctr	r4

1:	dcbi	0,r3
	addi	r3,r3,L1_CACHE_BYTES
	bdnz	1b
	sync				/* wait for dcbi's to complete */
	blr

/*
 * Flush a particular page from the data cache to RAM.
 * Note: this is necessary because the instruction cache does *not*
 * snoop from the data cache.
 * This is a no-op on the 601 which has a unified cache.
 *
 *	void __flush_dcache_icache(void *page)
 */
_GLOBAL(__flush_dcache_icache)
BEGIN_FTR_SECTION
	blr
END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
	rlwinm	r3,r3,0,0,31-PAGE_SHIFT		/* Get page base address */
	li	r4,PAGE_SIZE/L1_CACHE_BYTES	/* Number of lines in a page */
	mtctr	r4
	mr	r6,r3
0:	dcbst	0,r3				/* Write line to ram */
	addi	r3,r3,L1_CACHE_BYTES
	bdnz	0b
	sync
#ifdef CONFIG_44x
	/* We don't flush the icache on 44x. Those have a virtual icache
	 * and we don't have access to the virtual address here (it's
	 * not the page vaddr but where it's mapped in user space). The
	 * flushing of the icache on these is handled elsewhere, when
	 * a change in the address space occurs, before returning to
	 * user space
	 */
BEGIN_MMU_FTR_SECTION
	blr
END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_44x)
#endif /* CONFIG_44x */
	mtctr	r4
1:	icbi	0,r6
	addi	r6,r6,L1_CACHE_BYTES
	bdnz	1b
	sync
	isync
	blr

#ifndef CONFIG_BOOKE
/*
 * Flush a particular page from the data cache to RAM, identified
 * by its physical address.  We turn off the MMU so we can just use
 * the physical address (this may be a highmem page without a kernel
 * mapping).
 *
 *	void __flush_dcache_icache_phys(unsigned long physaddr)
 */
_GLOBAL(__flush_dcache_icache_phys)
BEGIN_FTR_SECTION
	blr					/* for 601, do nothing */
END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
	mfmsr	r10
	rlwinm	r0,r10,0,28,26			/* clear DR */
	mtmsr	r0
	isync
	rlwinm	r3,r3,0,0,31-PAGE_SHIFT		/* Get page base address */
	li	r4,PAGE_SIZE/L1_CACHE_BYTES	/* Number of lines in a page */
	mtctr	r4
	mr	r6,r3
0:	dcbst	0,r3				/* Write line to ram */
	addi	r3,r3,L1_CACHE_BYTES
	bdnz	0b
	sync
	mtctr	r4
1:	icbi	0,r6
	addi	r6,r6,L1_CACHE_BYTES
	bdnz	1b
	sync
	mtmsr	r10				/* restore DR */
	isync
	blr
#endif /* !CONFIG_BOOKE */

/*
 * Clear pages using the dcbz instruction, which doesn't cause any
 * memory traffic (except to write out any cache lines which get
 * displaced).  This only works on cacheable memory.
 *
 * void clear_pages(void *page, int order);
 */
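/*
 * dcbz zeroes a whole cache line in one go without fetching it from
 * memory first; the loop count is the number of lines in a page
 * (PAGE_SIZE/L1_CACHE_BYTES) shifted left by the allocation order.
 */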
_GLOBAL(clear_pages)
	li	r0,PAGE_SIZE/L1_CACHE_BYTES
	slw	r0,r0,r4
	mtctr	r0
1:	dcbz	0,r3
	addi	r3,r3,L1_CACHE_BYTES
	bdnz	1b
	blr

/*
 * Copy a whole page.  We use the dcbz instruction on the destination
 * to reduce memory traffic (it eliminates the unnecessary reads of
 * the destination into cache).  This requires that the destination
 * is cacheable.
 */
#define COPY_16_BYTES		\
	lwz	r6,4(r4);	\
	lwz	r7,8(r4);	\
	lwz	r8,12(r4);	\
	lwzu	r9,16(r4);	\
	stw	r6,4(r3);	\
	stw	r7,8(r3);	\
	stw	r8,12(r3);	\
	stwu	r9,16(r3)
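
/*
 * The copy below runs in two passes, distinguished by cr0.eq: the
 * first pass (eq clear) copies all but the last MAX_COPY_PREFETCH
 * cache lines while dcbt prefetches the source that far ahead; the
 * crnot then sets eq and the loop is re-entered to copy the remaining
 * MAX_COPY_PREFETCH lines, after which beqlr returns.  dcbz allocates
 * each destination line so it is never read from memory.
 */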
_GLOBAL(copy_page)
	addi	r3,r3,-4
	addi	r4,r4,-4

	li	r5,4

#if MAX_COPY_PREFETCH > 1
	li	r0,MAX_COPY_PREFETCH
	li	r11,4
	mtctr	r0
11:	dcbt	r11,r4
	addi	r11,r11,L1_CACHE_BYTES
	bdnz	11b
#else /* MAX_COPY_PREFETCH == 1 */
	dcbt	r5,r4
	li	r11,L1_CACHE_BYTES+4
#endif /* MAX_COPY_PREFETCH */
	li	r0,PAGE_SIZE/L1_CACHE_BYTES - MAX_COPY_PREFETCH
	crclr	4*cr0+eq
2:
	mtctr	r0
1:
	dcbt	r11,r4
	dcbz	r5,r3
	COPY_16_BYTES
#if L1_CACHE_BYTES >= 32
	COPY_16_BYTES
#if L1_CACHE_BYTES >= 64
	COPY_16_BYTES
	COPY_16_BYTES
#if L1_CACHE_BYTES >= 128
	COPY_16_BYTES
	COPY_16_BYTES
	COPY_16_BYTES
	COPY_16_BYTES
#endif
#endif
#endif
	bdnz	1b
	beqlr
	crnot	4*cr0+eq,4*cr0+eq
	li	r0,MAX_COPY_PREFETCH
	li	r11,4
	b	2b

/*
 * void atomic_clear_mask(atomic_t mask, atomic_t *addr)
 * void atomic_set_mask(atomic_t mask, atomic_t *addr);
 */
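/*
 * Classic lwarx/stwcx. retry loop: lwarx sets a reservation on the
 * word, stwcx. only succeeds (cr0.eq set) if the reservation is still
 * intact, and bne- retries otherwise.  PPC405_ERR77 expands to a dcbt
 * of the target line on 405 parts affected by erratum #77 and to
 * nothing elsewhere (see <asm/ppc_asm.h>).
 */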
_GLOBAL(atomic_clear_mask)
10:	lwarx	r5,0,r4
	andc	r5,r5,r3
	PPC405_ERR77(0,r4)
	stwcx.	r5,0,r4
	bne-	10b
	blr
_GLOBAL(atomic_set_mask)
10:	lwarx	r5,0,r4
	or	r5,r5,r3
	PPC405_ERR77(0,r4)
	stwcx.	r5,0,r4
	bne-	10b
	blr

/*
 * Extended precision shifts.
 *
 * Updated to be valid for shift counts from 0 to 63 inclusive.
 * -- Gabriel
 *
 * R3/R4 has 64 bit value
 * R5    has shift count
 * result in R3/R4
 *
 *  ashrdi3: arithmetic right shift (sign propagation)
 *  lshrdi3: logical right shift
 *  ashldi3: left shift
 */
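/*
 * Worked example for __ashldi3: for count < 32 the result is
 * MSW = (MSW << count) | (LSW >> (32-count)), LSW = LSW << count;
 * for count in 32..63 it is MSW = LSW << (count-32), LSW = 0.  The
 * code is branchless because slw/srw yield 0 (and sraw the sign fill)
 * whenever the register shift count is 32 or more, so the terms for
 * the "wrong" regime vanish on their own.
 */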
_GLOBAL(__ashrdi3)
	subfic	r6,r5,32
	srw	r4,r4,r5	# LSW = count > 31 ? 0 : LSW >> count
	addi	r7,r5,32	# could be xori, or addi with -32
	slw	r6,r3,r6	# t1 = count > 31 ? 0 : MSW << (32-count)
	rlwinm	r8,r7,0,32	# t3 = (count < 32) ? 32 : 0
	sraw	r7,r3,r7	# t2 = MSW >> (count-32)
	or	r4,r4,r6	# LSW |= t1
	slw	r7,r7,r8	# t2 = (count < 32) ? 0 : t2
	sraw	r3,r3,r5	# MSW = MSW >> count
	or	r4,r4,r7	# LSW |= t2
	blr

_GLOBAL(__ashldi3)
	subfic	r6,r5,32
	slw	r3,r3,r5	# MSW = count > 31 ? 0 : MSW << count
	addi	r7,r5,32	# could be xori, or addi with -32
	srw	r6,r4,r6	# t1 = count > 31 ? 0 : LSW >> (32-count)
	slw	r7,r4,r7	# t2 = count < 32 ? 0 : LSW << (count-32)
	or	r3,r3,r6	# MSW |= t1
	slw	r4,r4,r5	# LSW = LSW << count
	or	r3,r3,r7	# MSW |= t2
	blr

_GLOBAL(__lshrdi3)
	subfic	r6,r5,32
	srw	r4,r4,r5	# LSW = count > 31 ? 0 : LSW >> count
	addi	r7,r5,32	# could be xori, or addi with -32
	slw	r6,r3,r6	# t1 = count > 31 ? 0 : MSW << (32-count)
	srw	r7,r3,r7	# t2 = count < 32 ? 0 : MSW >> (count-32)
	or	r4,r4,r6	# LSW |= t1
	srw	r3,r3,r5	# MSW = MSW >> count
	or	r4,r4,r7	# LSW |= t2
	blr

/*
 * 64-bit comparison: __ucmpdi2(u64 a, u64 b)
 * Returns 0 if a < b, 1 if a == b, 2 if a > b.
 */
_GLOBAL(__ucmpdi2)
	cmplw	r3,r5
	li	r3,1
	bne	1f
	cmplw	r4,r6
	beqlr
1:	li	r3,0
	bltlr
	li	r3,2
	blr
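
/*
 * 64-bit byte swap: __bswapdi2(u64 x), value in r3/r4, result in
 * r3/r4.  Each word is byte-reversed with the usual rotlwi/rlwimi
 * idiom and the two halves are exchanged.
 */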
_GLOBAL(__bswapdi2)
	rotlwi	r9,r4,8
	rotlwi	r10,r3,8
	rlwimi	r9,r4,24,0,7
	rlwimi	r10,r3,24,0,7
	rlwimi	r9,r4,24,16,23
	rlwimi	r10,r3,24,16,23
	mr	r3,r9
	mr	r4,r10
	blr
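
/*
 * Branchless abs(): r4 = x >> 31 is 0 or ~0 depending on the sign,
 * and (x ^ r4) - r4 negates x exactly when it was negative.
 */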
_GLOBAL(abs)
	srawi	r4,r3,31
	xor	r3,r3,r4
	sub	r3,r3,r4
	blr

#ifdef CONFIG_SMP
_GLOBAL(start_secondary_resume)
	/* Reset stack */
	CURRENT_THREAD_INFO(r1, r1)
	addi	r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
	li	r3,0
	stw	r3,0(r1)	/* Zero the stack frame pointer */
	bl	start_secondary
	b	.
#endif /* CONFIG_SMP */

/*
 * This routine is just here to keep GCC happy - sigh...
 */
_GLOBAL(__main)
	blr

#ifdef CONFIG_KEXEC
	/*
	 * Must be relocatable PIC code callable as a C function.
	 */
	.globl relocate_new_kernel
relocate_new_kernel:
	/* r3 = page_list   */
	/* r4 = reboot_code_buffer */
	/* r5 = start_address      */

#ifdef CONFIG_FSL_BOOKE

	mr	r29, r3
	mr	r30, r4
	mr	r31, r5

#define ENTRY_MAPPING_KEXEC_SETUP
#include "fsl_booke_entry_mapping.S"
#undef ENTRY_MAPPING_KEXEC_SETUP

	mr	r3, r29
	mr	r4, r30
	mr	r5, r31

	li	r0, 0
#elif defined(CONFIG_44x)

	/* Save our parameters */
	mr	r29, r3
	mr	r30, r4
	mr	r31, r5

#ifdef CONFIG_PPC_47x
	/* Check for 47x cores */
	mfspr	r3,SPRN_PVR
	srwi	r3,r3,16
	cmplwi	cr0,r3,PVR_476@h
	beq	setup_map_47x
	cmplwi	cr0,r3,PVR_476_ISS@h
	beq	setup_map_47x
#endif /* CONFIG_PPC_47x */

/*
 * Code for setting up 1:1 mapping for PPC440x for KEXEC
 *
 * We cannot switch off the MMU on PPC44x.
 * So we:
 * 1) Invalidate all the mappings except the one we are running from.
 * 2) Create a tmp mapping for our code in the other address space (TS) and
 *    jump to it. Invalidate the entry we started in.
 * 3) Create a 1:1 mapping for 0-2GiB in chunks of 256M in the original TS.
 * 4) Jump to the 1:1 mapping in the original TS.
 * 5) Invalidate the tmp mapping.
 *
 * - Based on the kexec support code for FSL BookE
 *
 */

	/*
	 * Load the PID with kernel PID (0).
	 * Also load our MSR_IS and TID to MMUCR for TLB search.
	 */
	li	r3, 0
	mtspr	SPRN_PID, r3
	mfmsr	r4
	andi.	r4,r4,MSR_IS@l
	beq	wmmucr
	oris	r3,r3,PPC44x_MMUCR_STS@h
wmmucr:
	mtspr	SPRN_MMUCR,r3
	sync

	/*
	 * Invalidate all the TLB entries except the current entry
	 * where we are running from
	 */
	bl	0f			/* Find our address */
0:	mflr	r5			/* Make it accessible */
	tlbsx	r23,0,r5		/* Find entry we are in */
	li	r4,0			/* Start at TLB entry 0 */
	li	r3,0			/* Set PAGEID inval value */
1:	cmpw	r23,r4			/* Is this our entry? */
	beq	skip			/* If so, skip the inval */
	tlbwe	r3,r4,PPC44x_TLB_PAGEID	/* If not, inval the entry */
skip:
	addi	r4,r4,1			/* Increment */
	cmpwi	r4,64			/* Are we done? */
	bne	1b			/* If not, repeat */
	isync

	/* Create a temp mapping and jump to it */
	andi.	r6, r23, 1		/* Find the index to use */
	addi	r24, r6, 1		/* r24 will contain 1 or 2 */

	mfmsr	r9			/* get the MSR */
	rlwinm	r5, r9, 27, 31, 31	/* Extract the MSR[IS] */
	xori	r7, r5, 1		/* Use the other address space */

	/* Read the current mapping entries */
	tlbre	r3, r23, PPC44x_TLB_PAGEID
	tlbre	r4, r23, PPC44x_TLB_XLAT
	tlbre	r5, r23, PPC44x_TLB_ATTRIB

	/* Save our current XLAT entry */
	mr	r25, r4

	/* Extract the TLB PageSize */
	li	r10, 1			/* r10 will hold PageSize */
	rlwinm	r11, r3, 0, 24, 27	/* bits 24-27 */

	/* XXX: As of now we use 256M, 4K pages */
	cmpwi	r11, PPC44x_TLB_256M
	bne	tlb_4k
	rotlwi	r10, r10, 28		/* r10 = 256M */
	b	write_out
tlb_4k:
	cmpwi	r11, PPC44x_TLB_4K
	bne	default
	rotlwi	r10, r10, 12		/* r10 = 4K */
	b	write_out
default:
	rotlwi	r10, r10, 10		/* r10 = 1K */
write_out:
	/*
	 * Write out the tmp 1:1 mapping for this code in the other address space
	 * Fixup EPN = RPN, TS = other address space
	 */
	insrwi	r3, r7, 1, 23		/* Bit 23 is TS for PAGEID field */

	/* Write out the tmp mapping entries */
	tlbwe	r3, r24, PPC44x_TLB_PAGEID
	tlbwe	r4, r24, PPC44x_TLB_XLAT
	tlbwe	r5, r24, PPC44x_TLB_ATTRIB

	subi	r11, r10, 1		/* PageOffset Mask = PageSize - 1 */
	not	r10, r11		/* Mask for PageNum */

	/* Switch to other address space in MSR */
	insrwi	r9, r7, 1, 26		/* Set MSR[IS] = r7 */

	bl	1f
1:	mflr	r8
	addi	r8, r8, (2f-1b)		/* Find the target offset */

	/* Jump to the tmp mapping */
	mtspr	SPRN_SRR0, r8
	mtspr	SPRN_SRR1, r9
	rfi

2:
	/* Invalidate the entry we were executing from */
	li	r3, 0
	tlbwe	r3, r23, PPC44x_TLB_PAGEID

	/* attribute fields. rwx for SUPERVISOR mode */
	li	r5, 0
	ori	r5, r5, (PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_SX | PPC44x_TLB_G)

	/* Create 1:1 mapping in 256M pages */
	xori	r7, r7, 1		/* Revert back to Original TS */

	li	r8, 0			/* PageNumber */
	li	r6, 3			/* TLB Index, start at 3 */

next_tlb:
	rotlwi	r3, r8, 28		/* Create EPN (bits 0-3) */
	mr	r4, r3			/* RPN = EPN */
	ori	r3, r3, (PPC44x_TLB_VALID | PPC44x_TLB_256M) /* SIZE = 256M, Valid */
	insrwi	r3, r7, 1, 23		/* Set TS from r7 */

	tlbwe	r3, r6, PPC44x_TLB_PAGEID	/* PageID field : EPN, V, SIZE */
	tlbwe	r4, r6, PPC44x_TLB_XLAT		/* Address translation : RPN */
	tlbwe	r5, r6, PPC44x_TLB_ATTRIB	/* Attributes */

	addi	r8, r8, 1		/* Increment PN */
	addi	r6, r6, 1		/* Increment TLB Index */
	cmpwi	r8, 8			/* Are we done? */
	bne	next_tlb
	isync

	/* Jump to the new mapping 1:1 */
	li	r9,0
	insrwi	r9, r7, 1, 26		/* Set MSR[IS] = r7 */

	bl	1f
1:	mflr	r8
	and	r8, r8, r11		/* Get our offset within page */
	addi	r8, r8, (2f-1b)

	and	r5, r25, r10		/* Get our target PageNum */
	or	r8, r8, r5		/* Target jump address */
	mtspr	SPRN_SRR0, r8
	mtspr	SPRN_SRR1, r9
	rfi
2:
	/* Invalidate the tmp entry we used */
	li	r3, 0
	tlbwe	r3, r24, PPC44x_TLB_PAGEID
	sync
	b	ppc44x_map_done

#ifdef CONFIG_PPC_47x

	/* 1:1 mapping for 47x */

setup_map_47x:

	/*
	 * Load the kernel pid (0) to PID and also to MMUCR[TID].
	 * Also set the MSR IS->MMUCR STS
	 */
	li	r3, 0
	mtspr	SPRN_PID, r3		/* Set PID */
	mfmsr	r4			/* Get MSR */
	andi.	r4, r4, MSR_IS@l	/* TS=1? */
	beq	1f			/* If not, leave STS=0 */
	oris	r3, r3, PPC47x_MMUCR_STS@h /* Set STS=1 */
1:	mtspr	SPRN_MMUCR, r3		/* Put MMUCR */
	sync

	/* Find the entry we are running from */
	bl	2f
2:	mflr	r23
	tlbsx	r23, 0, r23
	tlbre	r24, r23, 0		/* TLB Word 0 */
	tlbre	r25, r23, 1		/* TLB Word 1 */
	tlbre	r26, r23, 2		/* TLB Word 2 */

	/*
	 * Invalidates all the tlb entries by writing to 256 RPNs (r4)
	 * of 4k page size in all 4 ways (0-3 in r3).
	 * This would invalidate the entire UTLB including the one we are
	 * running from. However the shadow TLB entries would help us
	 * to continue the execution, until we flush them (rfi/isync).
	 */
	addis	r3, 0, 0x8000		/* specify the way */
	addi	r4, 0, 0		/* TLB Word0 = (EPN=0, VALID = 0) */
	addi	r5, 0, 0
	b	clear_utlb_entry

	/* Align the loop to speed things up. from head_44x.S */
	.align	6

clear_utlb_entry:

	tlbwe	r4, r3, 0
	tlbwe	r5, r3, 1
	tlbwe	r5, r3, 2
	addis	r3, r3, 0x2000		/* Increment the way */
	cmpwi	r3, 0
	bne	clear_utlb_entry
	addis	r3, 0, 0x8000
	addis	r4, r4, 0x100		/* Increment the EPN */
	cmpwi	r4, 0
	bne	clear_utlb_entry

	/* Create the entries in the other address space */
	mfmsr	r5
	rlwinm	r7, r5, 27, 31, 31	/* Get the TS (Bit 26) from MSR */
	xori	r7, r7, 1		/* r7 = !TS */

	insrwi	r24, r7, 1, 21		/* Change the TS in the saved TLB word 0 */

	/*
	 * write out the TLB entries for the tmp mapping
	 * Use way '0' so that we could easily invalidate it later.
	 */
	lis	r3, 0x8000		/* Way '0' */

	tlbwe	r24, r3, 0
	tlbwe	r25, r3, 1
	tlbwe	r26, r3, 2

	/* Update the msr to the new TS */
	insrwi	r5, r7, 1, 26

	bl	1f
1:	mflr	r6
	addi	r6, r6, (2f-1b)

	mtspr	SPRN_SRR0, r6
	mtspr	SPRN_SRR1, r5
	rfi

	/*
	 * Now we are in the tmp address space.
	 * Create a 1:1 mapping for 0-2GiB in the original TS.
	 */
2:
	li	r3, 0
	li	r4, 0			/* TLB Word 0 */
	li	r5, 0			/* TLB Word 1 */
	li	r6, 0
	ori	r6, r6, PPC47x_TLB2_S_RWX /* TLB word 2 */

	li	r8, 0			/* PageIndex */

	xori	r7, r7, 1		/* revert back to original TS */

write_utlb:
	rotlwi	r5, r8, 28		/* RPN = PageIndex * 256M */
					/* ERPN = 0 as we don't use memory above 2G */

	mr	r4, r5			/* EPN = RPN */
	ori	r4, r4, (PPC47x_TLB0_VALID | PPC47x_TLB0_256M)
	insrwi	r4, r7, 1, 21		/* Insert the TS to Word 0 */

	tlbwe	r4, r3, 0		/* Write out the entries */
	tlbwe	r5, r3, 1
	tlbwe	r6, r3, 2

	addi	r8, r8, 1
	cmpwi	r8, 8			/* Have we completed? */
	bne	write_utlb

	/* make sure we complete the TLB write up */
	isync

	/*
	 * Prepare to jump to the 1:1 mapping.
	 * 1) Extract page size of the tmp mapping
	 *    DSIZ = TLB_Word0[22:27]
	 * 2) Calculate the physical address of the address
	 *    to jump to.
	 */
	rlwinm	r10, r24, 0, 22, 27

	cmpwi	r10, PPC47x_TLB0_4K
	bne	0f

	li	r10, 0x1000		/* r10 = 4k */
	bl	1f

0:
	/* Defaults to 256M */
	lis	r10, 0x1000

	bl	1f
1:	mflr	r4
	addi	r4, r4, (2f-1b)		/* virtual address of 2f */

	subi	r11, r10, 1		/* offsetmask = PageSize - 1 */
	not	r10, r11		/* Pagemask = ~(offsetmask) */

	and	r5, r25, r10		/* Physical page */
	and	r6, r4, r11		/* offset within the current page */

	or	r5, r5, r6		/* Physical address for 2f */

	/* Switch the TS in MSR to the original one */
	mfmsr	r8
	insrwi	r8, r7, 1, 26

	mtspr	SPRN_SRR1, r8
	mtspr	SPRN_SRR0, r5
	rfi

2:
	/* Invalidate the tmp mapping */
	lis	r3, 0x8000		/* Way '0' */

	clrrwi	r24, r24, 12		/* Clear the valid bit */
	tlbwe	r24, r3, 0
	tlbwe	r25, r3, 1
	tlbwe	r26, r3, 2

	/* Make sure we complete the TLB write and flush the shadow TLB */
	isync

#endif /* CONFIG_PPC_47x */

ppc44x_map_done:

	/* Restore the parameters */
	mr	r3, r29
	mr	r4, r30
	mr	r5, r31

	li	r0, 0
#else
	li	r0, 0

	/*
	 * Set Machine Status Register to a known status,
	 * switch the MMU off and jump to 1: in a single step.
	 */

	mr	r8, r0
	ori	r8, r8, MSR_RI|MSR_ME
	mtspr	SPRN_SRR1, r8
	addi	r8, r4, 1f - relocate_new_kernel
	mtspr	SPRN_SRR0, r8
	sync
	rfi

1:
#endif
	/* from this point address translation is turned off */
	/* and interrupts are disabled */

	/* set a new stack at the bottom of our page... */
	/* (not really needed now) */
	addi	r1, r4, KEXEC_CONTROL_PAGE_SIZE - 8 /* for LR Save+Back Chain */
	stw	r0, 0(r1)

	/* Do the copies */
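	/*
	 * r3 walks the kimage entry list: each word is a page address
	 * with IND_* flag bits (IND_DESTINATION, IND_INDIRECTION,
	 * IND_DONE, IND_SOURCE - see include/linux/kexec.h) in the low
	 * bits.  r8 tracks the current destination page, r9 the source
	 * page, and r6 accumulates an XOR checksum of everything copied.
	 */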
	li	r6, 0		/* checksum */
	mr	r0, r3
	b	1f

0:	/* top, read another word for the indirection page */
	lwzu	r0, 4(r3)

1:
	/* is it a destination page? (r8) */
	rlwinm.	r7, r0, 0, 31, 31	/* IND_DESTINATION (1<<0) */
	beq	2f

	rlwinm	r8, r0, 0, 0, 19	/* clear kexec flags, page align */
	b	0b

2:	/* is it an indirection page? (r3) */
	rlwinm.	r7, r0, 0, 30, 30	/* IND_INDIRECTION (1<<1) */
	beq	2f

	rlwinm	r3, r0, 0, 0, 19	/* clear kexec flags, page align */
	subi	r3, r3, 4
	b	0b

2:	/* are we done? */
	rlwinm.	r7, r0, 0, 29, 29	/* IND_DONE (1<<2) */
	beq	2f
	b	3f

2:	/* is it a source page? (r9) */
	rlwinm.	r7, r0, 0, 28, 28	/* IND_SOURCE (1<<3) */
	beq	0b

	rlwinm	r9, r0, 0, 0, 19	/* clear kexec flags, page align */

	li	r7, PAGE_SIZE / 4
	mtctr	r7
	subi	r9, r9, 4
	subi	r8, r8, 4

9:	lwzu	r0, 4(r9)	/* do the copy */
	xor	r6, r6, r0
	stwu	r0, 4(r8)
	dcbst	0, r8
	sync
	icbi	0, r8
	bdnz	9b

	addi	r9, r9, 4
	addi	r8, r8, 4
	b	0b
3:
	/* To be certain of avoiding problems with self-modifying code,
	 * execute a serializing instruction here.
	 */
	isync
	sync

	mfspr	r3, SPRN_PIR		/* current core we are running on */
	mr	r4, r5			/* load physical address of chunk called */

	/* jump to the entry point, usually the setup routine */
	mtlr	r5
	blrl

1:	b	1b

relocate_new_kernel_end:

	.globl relocate_new_kernel_size
relocate_new_kernel_size:
	.long relocate_new_kernel_end - relocate_new_kernel
#endif