/*
 * arch/sh/mm/cache-sh4.c
 *
 * Copyright (C) 1999, 2000, 2002  Niibe Yutaka
 * Copyright (C) 2001 - 2007  Paul Mundt
 * Copyright (C) 2003  Richard Curnow
 * Copyright (c) 2007 STMicroelectronics (R&D) Ltd.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/mutex.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

/*
 * The maximum number of pages we support when doing ranged dcache
 * flushing. Anything exceeding this will simply flush the dcache in its
 * entirety.
 */
#define MAX_DCACHE_PAGES	64	/* XXX: Tune for ways */
#define MAX_ICACHE_PAGES	32

static void __flush_dcache_segment_1way(unsigned long start,
					unsigned long extent);
static void __flush_dcache_segment_2way(unsigned long start,
					unsigned long extent);
static void __flush_dcache_segment_4way(unsigned long start,
					unsigned long extent);
static void __flush_cache_4096(unsigned long addr, unsigned long phys,
			       unsigned long exec_offset);

/*
 * This is initialised here to ensure that it is not placed in the BSS.  If
 * that were to happen, note that cache_init gets called before the BSS is
 * cleared, so this would get nulled out which would be hopeless.
 */
static void (*__flush_dcache_segment_fn)(unsigned long, unsigned long) =
	(void (*)(unsigned long, unsigned long))0xdeadbeef;

static void compute_alias(struct cache_info *c)
{
	c->alias_mask = ((c->sets - 1) << c->entry_shift) & ~(PAGE_SIZE - 1);
	c->n_aliases = c->alias_mask ? (c->alias_mask >> PAGE_SHIFT) + 1 : 0;
}
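
/*
 * Worked example (assumed geometry, for illustration only): with 512
 * sets of 32-byte lines per way (16 KB/way) and 4 KB pages,
 * alias_mask = (511 << 5) & ~0xfff = 0x3000 and n_aliases = 4, i.e. a
 * physical page may be cached at any of four virtual "colours".
 */
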
static void __init emit_cache_params(void)
{
	printk("PVR=%08x CVR=%08x PRR=%08x\n",
		ctrl_inl(CCN_PVR),
		ctrl_inl(CCN_CVR),
		ctrl_inl(CCN_PRR));
	printk("I-cache : n_ways=%d n_sets=%d way_incr=%d\n",
		boot_cpu_data.icache.ways,
		boot_cpu_data.icache.sets,
		boot_cpu_data.icache.way_incr);
	printk("I-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
		boot_cpu_data.icache.entry_mask,
		boot_cpu_data.icache.alias_mask,
		boot_cpu_data.icache.n_aliases);
	printk("D-cache : n_ways=%d n_sets=%d way_incr=%d\n",
		boot_cpu_data.dcache.ways,
		boot_cpu_data.dcache.sets,
		boot_cpu_data.dcache.way_incr);
	printk("D-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
		boot_cpu_data.dcache.entry_mask,
		boot_cpu_data.dcache.alias_mask,
		boot_cpu_data.dcache.n_aliases);

	/*
	 * Emit Secondary Cache parameters if the CPU has a probed L2.
	 */
	if (boot_cpu_data.flags & CPU_HAS_L2_CACHE) {
		printk("S-cache : n_ways=%d n_sets=%d way_incr=%d\n",
			boot_cpu_data.scache.ways,
			boot_cpu_data.scache.sets,
			boot_cpu_data.scache.way_incr);
		printk("S-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
			boot_cpu_data.scache.entry_mask,
			boot_cpu_data.scache.alias_mask,
			boot_cpu_data.scache.n_aliases);
	}

	if (!__flush_dcache_segment_fn)
		panic("unknown number of cache ways\n");
}

/*
 * SH-4 has virtually indexed and physically tagged cache.
 */
void __init p3_cache_init(void)
{
	compute_alias(&boot_cpu_data.icache);
	compute_alias(&boot_cpu_data.dcache);
	compute_alias(&boot_cpu_data.scache);

	switch (boot_cpu_data.dcache.ways) {
	case 1:
		__flush_dcache_segment_fn = __flush_dcache_segment_1way;
		break;
	case 2:
		__flush_dcache_segment_fn = __flush_dcache_segment_2way;
		break;
	case 4:
		__flush_dcache_segment_fn = __flush_dcache_segment_4way;
		break;
	default:
		__flush_dcache_segment_fn = NULL;
		break;
	}

	emit_cache_params();
}

/*
 * Write back the dirty D-cache lines, but do not invalidate them.
 *
 * START: Virtual Address (U0, P1, or P3)
 * SIZE: Size of the region.
 */
void __flush_wback_region(void *start, int size)
{
	unsigned long v;
	unsigned long begin, end;

	begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
	end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
		& ~(L1_CACHE_BYTES-1);
	for (v = begin; v < end; v += L1_CACHE_BYTES) {
		asm volatile("ocbwb %0"
			     : /* no output */
			     : "m" (__m(v)));
	}
}

/*
 * Write back the dirty D-cache lines and invalidate them.
 *
 * START: Virtual Address (U0, P1, or P3)
 * SIZE: Size of the region.
 */
void __flush_purge_region(void *start, int size)
{
	unsigned long v;
	unsigned long begin, end;

	begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
	end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
		& ~(L1_CACHE_BYTES-1);
	for (v = begin; v < end; v += L1_CACHE_BYTES) {
		asm volatile("ocbp %0"
			     : /* no output */
			     : "m" (__m(v)));
	}
}

/*
 * Invalidate the D-cache lines in the region without writing them back;
 * any dirty data in the range is simply discarded.
 */
void __flush_invalidate_region(void *start, int size)
{
	unsigned long v;
	unsigned long begin, end;

	begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
	end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
		& ~(L1_CACHE_BYTES-1);
	for (v = begin; v < end; v += L1_CACHE_BYTES) {
		asm volatile("ocbi %0"
			     : /* no output */
			     : "m" (__m(v)));
	}
}
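
/*
 * Typical usage of the three region primitives above (a sketch, not
 * mandated by this file): write back (ocbwb) before a device reads a
 * buffer, invalidate (ocbi) before the CPU reads data a device has
 * written, and purge (ocbp) when both directions apply.
 */
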
/*
 * Write back the range of D-cache, and purge the I-cache.
 *
 * Called from kernel/module.c:sys_init_module and routine for a.out format,
 * signal handler code and kprobes code
 */
void flush_icache_range(unsigned long start, unsigned long end)
{
	unsigned long icacheaddr;
	unsigned long flags, v;
	int i;

	/* If there are too many pages then just blow the caches */
	if (((end - start) >> PAGE_SHIFT) >= MAX_ICACHE_PAGES) {
		flush_cache_all();
	} else {
		/* selectively flush d-cache then invalidate the i-cache */
		/* this is inefficient, so only use for small ranges */
		start &= ~(L1_CACHE_BYTES-1);
		end += L1_CACHE_BYTES-1;
		end &= ~(L1_CACHE_BYTES-1);

		local_irq_save(flags);
		jump_to_uncached();

		for (v = start; v < end; v += L1_CACHE_BYTES) {
			asm volatile("ocbwb %0"
				     : /* no output */
				     : "m" (__m(v)));

			icacheaddr = CACHE_IC_ADDRESS_ARRAY |
					(v & cpu_data->icache.entry_mask);
			for (i = 0; i < cpu_data->icache.ways;
			     i++, icacheaddr += cpu_data->icache.way_incr)
				/* Clear i-cache line valid-bit */
				ctrl_outl(0, icacheaddr);
		}

		back_to_cached();
		local_irq_restore(flags);
	}
}
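
/*
 * Note on the above (editorial, per the SH-4 memory-mapped cache
 * interface as used here): a store to CACHE_IC_ADDRESS_ARRAY writes the
 * tag/valid word for one index in one way, so writing 0 clears the V
 * bit and invalidates that line. The CPU must not be fetching
 * instructions through the I-cache while this happens, hence the
 * jump_to_uncached()/back_to_cached() bracket with interrupts disabled.
 */
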
static inline void flush_cache_4096(unsigned long start,
				    unsigned long phys)
{
	unsigned long flags, exec_offset = 0;

	/*
	 * All types of SH-4 require PC to be in P2 to operate on the I-cache.
	 * Some types of SH-4 require PC to be in P2 to operate on the D-cache.
	 */
	if ((boot_cpu_data.flags & CPU_HAS_P2_FLUSH_BUG) ||
	    (start < CACHE_OC_ADDRESS_ARRAY))
		exec_offset = 0x20000000;

	local_irq_save(flags);
	__flush_cache_4096(start | SH_CACHE_ASSOC,
			   P1SEGADDR(phys), exec_offset);
	local_irq_restore(flags);
}

/*
 * Write back & invalidate the D-cache of the page.
 * (To avoid "alias" issues)
 */
void flush_dcache_page(struct page *page)
{
	if (test_bit(PG_mapped, &page->flags)) {
		unsigned long phys = PHYSADDR(page_address(page));
		unsigned long addr = CACHE_OC_ADDRESS_ARRAY;
		int i, n;

		/* Loop all the D-cache */
		n = boot_cpu_data.dcache.n_aliases;
		for (i = 0; i < n; i++, addr += 4096)
			flush_cache_4096(addr, phys);
	}

	wmb();
}
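
/*
 * For instance (assuming n_aliases == 4, as in the worked example near
 * compute_alias()), the loop above issues four flush_cache_4096() calls
 * at OC address array offsets 0x0000, 0x1000, 0x2000 and 0x3000, so the
 * page is purged from every virtual colour it could occupy.
 */
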
/* TODO: Selective icache invalidation through IC address array.. */
static void __uses_jump_to_uncached flush_icache_all(void)
{
	unsigned long flags, ccr;

	local_irq_save(flags);
	jump_to_uncached();

	/* Flush I-cache */
	ccr = ctrl_inl(CCR);
	ccr |= CCR_CACHE_ICI;
	ctrl_outl(ccr, CCR);

	/*
	 * back_to_cached() will take care of the barrier for us, don't add
	 * another one!
	 */
	back_to_cached();
	local_irq_restore(flags);
}

void flush_dcache_all(void)
{
	(*__flush_dcache_segment_fn)(0UL, boot_cpu_data.dcache.way_size);
	wmb();
}

void flush_cache_all(void)
{
	flush_dcache_all();
	flush_icache_all();
}

static void __flush_cache_mm(struct mm_struct *mm, unsigned long start,
			     unsigned long end)
{
	unsigned long d = 0, p = start & PAGE_MASK;
	unsigned long alias_mask = boot_cpu_data.dcache.alias_mask;
	unsigned long n_aliases = boot_cpu_data.dcache.n_aliases;
	unsigned long select_bit;
	unsigned long all_aliases_mask;
	unsigned long addr_offset;
	pgd_t *dir;
	pmd_t *pmd;
	pud_t *pud;
	pte_t *pte;
	int i;

	dir = pgd_offset(mm, p);
	pud = pud_offset(dir, p);
	pmd = pmd_offset(pud, p);
	end = PAGE_ALIGN(end);

	all_aliases_mask = (1 << n_aliases) - 1;

	do {
		if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd))) {
			p &= PMD_MASK;
			p += PMD_SIZE;
			pmd++;

			continue;
		}

		pte = pte_offset_kernel(pmd, p);

		do {
			unsigned long phys;
			pte_t entry = *pte;

			if (!(pte_val(entry) & _PAGE_PRESENT)) {
				pte++;
				p += PAGE_SIZE;
				continue;
			}

			phys = pte_val(entry) & PTE_PHYS_MASK;

			if ((p ^ phys) & alias_mask) {
				d |= 1 << ((p & alias_mask) >> PAGE_SHIFT);
				d |= 1 << ((phys & alias_mask) >> PAGE_SHIFT);

				if (d == all_aliases_mask)
					goto loop_exit;
			}

			pte++;
			p += PAGE_SIZE;
		} while (p < end && ((unsigned long)pte & ~PAGE_MASK));
		pmd++;
	} while (p < end);

loop_exit:
	addr_offset = 0;
	select_bit = 1;

	for (i = 0; i < n_aliases; i++) {
		if (d & select_bit) {
			(*__flush_dcache_segment_fn)(addr_offset, PAGE_SIZE);
			wmb();
		}

		select_bit <<= 1;
		addr_offset += PAGE_SIZE;
	}
}
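
/*
 * Illustration of the colour bitmap 'd' above (editorial): with
 * n_aliases == 4, if the page-table walk finds aliasing PTEs only in
 * colours 1 and 3, d ends up as 0b1010 and the final loop flushes just
 * the two PAGE_SIZE extents at offsets 0x1000 and 0x3000 in each way,
 * rather than the whole D-cache.
 */
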
/*
 * Note : (RPC) since the caches are physically tagged, the only point
 * of flush_cache_mm for SH-4 is to get rid of aliases from the
 * D-cache. The assumption elsewhere, e.g. flush_cache_range, is that
 * lines can stay resident so long as the virtual address they were
 * accessed with (hence cache set) is in accord with the physical
 * address (i.e. tag). It's no different here. So I reckon we don't
 * need to flush the I-cache, since aliases don't matter for that. We
 * should try that.
 *
 * Caller takes mm->mmap_sem.
 */
void flush_cache_mm(struct mm_struct *mm)
{
	/*
	 * If cache is only 4k-per-way, there are never any 'aliases'. Since
	 * the cache is physically tagged, the data can just be left in there.
	 */
	if (boot_cpu_data.dcache.n_aliases == 0)
		return;

	/*
	 * Don't bother groveling around the dcache for the VMA ranges
	 * if there are too many PTEs to make it worthwhile.
	 */
	if (mm->nr_ptes >= MAX_DCACHE_PAGES)
		flush_dcache_all();
	else {
		struct vm_area_struct *vma;

		/*
		 * In this case there are reasonably sized ranges to flush,
		 * iterate through the VMA list and take care of any aliases.
		 */
		for (vma = mm->mmap; vma; vma = vma->vm_next)
			__flush_cache_mm(mm, vma->vm_start, vma->vm_end);
	}

	/* Only touch the icache if one of the VMAs has VM_EXEC set. */
	if (mm->exec_vm)
		flush_icache_all();
}

/*
 * Write back and invalidate I/D-caches for the page.
 *
 * ADDR: Virtual Address (U0 address)
 * PFN: Physical page number
 */
void flush_cache_page(struct vm_area_struct *vma, unsigned long address,
		      unsigned long pfn)
{
	unsigned long phys = pfn << PAGE_SHIFT;
	unsigned int alias_mask;

	alias_mask = boot_cpu_data.dcache.alias_mask;

	/* We only need to flush the D-cache when we have an alias */
	if ((address ^ phys) & alias_mask) {
		/* Loop 4K of the D-cache */
		flush_cache_4096(
			CACHE_OC_ADDRESS_ARRAY | (address & alias_mask),
			phys);
		/* Loop another 4K of the D-cache */
		flush_cache_4096(
			CACHE_OC_ADDRESS_ARRAY | (phys & alias_mask),
			phys);
	}

	alias_mask = boot_cpu_data.icache.alias_mask;
	if (vma->vm_flags & VM_EXEC) {
		/*
		 * Evict entries from the portion of the cache from which code
		 * may have been executed at this address (virtual).  There's
		 * no need to evict from the portion corresponding to the
		 * physical address as for the D-cache, because we know the
		 * kernel has never executed the code through its identity
		 * translation.
		 */
		flush_cache_4096(
			CACHE_IC_ADDRESS_ARRAY | (address & alias_mask),
			phys);
	}
}
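
/*
 * Numeric sketch of the alias check above (assumed values, matching the
 * earlier alias_mask == 0x3000 example): a user mapping at 0x00402000
 * of a physical page at 0x0c001000 gives (address ^ phys) & 0x3000 ==
 * 0x3000, so both the virtual colour (0x2000) and the physical colour
 * (0x1000) get flushed.
 */
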
/*
 * Write back and invalidate D-caches.
 *
 * START, END: Virtual Address (U0 address)
 *
 * NOTE: We need to flush the _physical_ page entry.
 * Flushing the cache lines for U0 only isn't enough.
 * We need to flush for P1 too, which may contain aliases.
 */
void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end)
{
	/*
	 * If cache is only 4k-per-way, there are never any 'aliases'. Since
	 * the cache is physically tagged, the data can just be left in there.
	 */
	if (boot_cpu_data.dcache.n_aliases == 0)
		return;

	/*
	 * Don't bother with the lookup and alias check if we have a
	 * wide range to cover, just blow away the dcache in its
	 * entirety instead. -- PFM.
	 */
	if (((end - start) >> PAGE_SHIFT) >= MAX_DCACHE_PAGES)
		flush_dcache_all();
	else
		__flush_cache_mm(vma->vm_mm, start, end);

	if (vma->vm_flags & VM_EXEC) {
		/*
		 * TODO: Is this required???  Need to look at how I-cache
		 * coherency is assured when new programs are loaded to see if
		 * this matters.
		 */
		flush_icache_all();
	}
}

/*
 * flush_icache_user_range
 * @vma: VMA of the process
 * @page: page
 * @addr: U0 address
 * @len: length of the range (< page size)
 */
void flush_icache_user_range(struct vm_area_struct *vma,
			     struct page *page, unsigned long addr, int len)
{
	flush_cache_page(vma, addr, page_to_pfn(page));
	mb();
}

/**
 * __flush_cache_4096
 *
 * @addr:  address in memory mapped cache array
 * @phys:  P1 address to flush (has to match tags if addr has 'A' bit
 *         set i.e. associative write)
 * @exec_offset: set to 0x20000000 if flush has to be executed from P2
 *               region else 0x0
 *
 * The offset into the cache array implied by 'addr' selects the
 * 'colour' of the virtual address range that will be flushed.  The
 * operation (purge/write-back) is selected by the lower 2 bits of
 * 'phys'.
 */
static void __flush_cache_4096(unsigned long addr, unsigned long phys,
			       unsigned long exec_offset)
{
	int way_count;
	unsigned long base_addr = addr;
	struct cache_info *dcache;
	unsigned long way_incr;
	unsigned long a, ea, p;
	unsigned long temp_pc;

	dcache = &boot_cpu_data.dcache;
	/* Write this way for better assembly. */
	way_count = dcache->ways;
	way_incr = dcache->way_incr;

	/*
	 * Apply exec_offset (i.e. branch to P2 if required.).
	 *
	 * FIXME:
	 *
	 *	If I write "=r" for the (temp_pc), it puts this in r6 hence
	 *	trashing exec_offset before it's been added on - why?  Hence
	 *	"=&r" as a 'workaround'
	 */
	asm volatile("mov.l 1f, %0\n\t"
		     "add   %1, %0\n\t"
		     "jmp   @%0\n\t"
		     "nop\n\t"
		     ".balign 4\n\t"
		     "1:  .long 2f\n\t"
		     "2:\n" : "=&r" (temp_pc) : "r" (exec_offset));

	/*
	 * We know there will be >=1 iteration, so write as do-while to avoid
	 * pointless head-of-loop check for 0 iterations.
	 */
	do {
		ea = base_addr + PAGE_SIZE;
		a = base_addr;
		p = phys;

		do {
			*(volatile unsigned long *)a = p;
			/*
			 * Next line: intentionally not p+32, saves an add, p
			 * will do since only the cache tag bits need to
			 * match.
			 */
			*(volatile unsigned long *)(a+32) = p;
			a += 64;
			p += 64;
		} while (a < ea);

		base_addr += way_incr;
	} while (--way_count != 0);
}
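
/*
 * Loop arithmetic for the function above (editorial note): each inner
 * iteration stores to two 32-byte lines (a and a+32) and advances by 64
 * bytes, so one 4096-byte page takes 64 iterations per way; the outer
 * loop then repeats the sweep once per way.
 */
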
/*
 * Break the 1, 2 and 4 way variants of this out into separate functions to
 * avoid nearly all the overhead of having the conditional stuff in the
 * function bodies (+ the 1 and 2 way cases avoid saving any registers too).
 */
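
/*
 * How the segment flushes below work (editorial summary): movca.l
 * allocates a cache line for the target address without fetching its
 * old contents from memory, evicting (and writing back, if dirty)
 * whatever line previously occupied that slot; the following ocbi then
 * discards the junk line just allocated.  The addresses are based on
 * empty_zero_page, so no useful data is disturbed.  SR.BL (bit 28) is
 * set around each movca.l/ocbi group so no exception can intervene
 * between the allocate and the invalidate.
 */
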
static void __flush_dcache_segment_1way(unsigned long start,
					unsigned long extent_per_way)
{
	unsigned long orig_sr, sr_with_bl;
	unsigned long base_addr;
	unsigned long way_incr, linesz, way_size;
	struct cache_info *dcache;
	register unsigned long a0, a0e;

	asm volatile("stc sr, %0" : "=r" (orig_sr));
	sr_with_bl = orig_sr | (1<<28);
	base_addr = ((unsigned long)&empty_zero_page[0]);

	/*
	 * The previous code aligned base_addr to 16k, i.e. the way_size of all
	 * existing SH-4 D-caches.  Whilst I don't see a need to have this
	 * aligned to any better than the cache line size (which it will be
	 * anyway by construction), let's align it to at least the way_size of
	 * any existing or conceivable SH-4 D-cache.  -- RPC
	 */
	base_addr = ((base_addr >> 16) << 16);

	base_addr |= start;

	dcache = &boot_cpu_data.dcache;
	linesz = dcache->linesz;
	way_incr = dcache->way_incr;
	way_size = dcache->way_size;

	a0 = base_addr;
	a0e = base_addr + extent_per_way;
	do {
		asm volatile("ldc %0, sr" : : "r" (sr_with_bl));
		asm volatile("movca.l r0, @%0\n\t"
			     "ocbi @%0" : : "r" (a0));
		a0 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "ocbi @%0" : : "r" (a0));
		a0 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "ocbi @%0" : : "r" (a0));
		a0 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "ocbi @%0" : : "r" (a0));
		asm volatile("ldc %0, sr" : : "r" (orig_sr));
		a0 += linesz;
	} while (a0 < a0e);
}

static void __flush_dcache_segment_2way(unsigned long start,
					unsigned long extent_per_way)
{
	unsigned long orig_sr, sr_with_bl;
	unsigned long base_addr;
	unsigned long way_incr, linesz, way_size;
	struct cache_info *dcache;
	register unsigned long a0, a1, a0e;

	asm volatile("stc sr, %0" : "=r" (orig_sr));
	sr_with_bl = orig_sr | (1<<28);
	base_addr = ((unsigned long)&empty_zero_page[0]);

	/* See comment under 1-way above */
	base_addr = ((base_addr >> 16) << 16);

	base_addr |= start;

	dcache = &boot_cpu_data.dcache;
	linesz = dcache->linesz;
	way_incr = dcache->way_incr;
	way_size = dcache->way_size;

	a0 = base_addr;
	a1 = a0 + way_incr;
	a0e = base_addr + extent_per_way;
	do {
		asm volatile("ldc %0, sr" : : "r" (sr_with_bl));
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1" : :
			     "r" (a0), "r" (a1));
		a0 += linesz;
		a1 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1" : :
			     "r" (a0), "r" (a1));
		a0 += linesz;
		a1 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1" : :
			     "r" (a0), "r" (a1));
		a0 += linesz;
		a1 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1" : :
			     "r" (a0), "r" (a1));
		asm volatile("ldc %0, sr" : : "r" (orig_sr));
		a0 += linesz;
		a1 += linesz;
	} while (a0 < a0e);
}

static void __flush_dcache_segment_4way(unsigned long start,
					unsigned long extent_per_way)
{
	unsigned long orig_sr, sr_with_bl;
	unsigned long base_addr;
	unsigned long way_incr, linesz, way_size;
	struct cache_info *dcache;
	register unsigned long a0, a1, a2, a3, a0e;

	asm volatile("stc sr, %0" : "=r" (orig_sr));
	sr_with_bl = orig_sr | (1<<28);
	base_addr = ((unsigned long)&empty_zero_page[0]);

	/* See comment under 1-way above */
	base_addr = ((base_addr >> 16) << 16);

	base_addr |= start;

	dcache = &boot_cpu_data.dcache;
	linesz = dcache->linesz;
	way_incr = dcache->way_incr;
	way_size = dcache->way_size;

	a0 = base_addr;
	a1 = a0 + way_incr;
	a2 = a1 + way_incr;
	a3 = a2 + way_incr;
	a0e = base_addr + extent_per_way;
	do {
		asm volatile("ldc %0, sr" : : "r" (sr_with_bl));
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "movca.l r0, @%2\n\t"
			     "movca.l r0, @%3\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1\n\t"
			     "ocbi @%2\n\t"
			     "ocbi @%3\n\t" : :
			     "r" (a0), "r" (a1), "r" (a2), "r" (a3));
		a0 += linesz;
		a1 += linesz;
		a2 += linesz;
		a3 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "movca.l r0, @%2\n\t"
			     "movca.l r0, @%3\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1\n\t"
			     "ocbi @%2\n\t"
			     "ocbi @%3\n\t" : :
			     "r" (a0), "r" (a1), "r" (a2), "r" (a3));
		a0 += linesz;
		a1 += linesz;
		a2 += linesz;
		a3 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "movca.l r0, @%2\n\t"
			     "movca.l r0, @%3\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1\n\t"
			     "ocbi @%2\n\t"
			     "ocbi @%3\n\t" : :
			     "r" (a0), "r" (a1), "r" (a2), "r" (a3));
		a0 += linesz;
		a1 += linesz;
		a2 += linesz;
		a3 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "movca.l r0, @%2\n\t"
			     "movca.l r0, @%3\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1\n\t"
			     "ocbi @%2\n\t"
			     "ocbi @%3\n\t" : :
			     "r" (a0), "r" (a1), "r" (a2), "r" (a3));
		asm volatile("ldc %0, sr" : : "r" (orig_sr));
		a0 += linesz;
		a1 += linesz;
		a2 += linesz;
		a3 += linesz;
	} while (a0 < a0e);
}