/*
 * arch/sh/mm/cache-sh4.c
 *
 * Copyright (C) 1999, 2000, 2002 Niibe Yutaka
 * Copyright (C) 2001 - 2007 Paul Mundt
 * Copyright (C) 2003 Richard Curnow
 * Copyright (c) 2007 STMicroelectronics (R&D) Ltd.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/mutex.h>
#include <linux/fs.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
/*
 * The maximum number of pages we support when doing ranged dcache
 * flushing.  Anything exceeding this will simply flush the dcache in its
 * entirety.
 */
#define MAX_DCACHE_PAGES	64	/* XXX: Tune for ways */
#define MAX_ICACHE_PAGES	32
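/*
 * Rough rationale for the cut-overs (illustrative, assuming 4KB pages):
 * 64 pages is 256KB of ranged flushing, i.e. many times the size of a
 * typical 16KB-per-way SH-4 D-cache, at which point a full
 * flush_dcache_all() is expected to be cheaper than walking the range.
 */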
static void __flush_dcache_segment_1way(unsigned long start,
					unsigned long extent);
static void __flush_dcache_segment_2way(unsigned long start,
					unsigned long extent);
static void __flush_dcache_segment_4way(unsigned long start,
					unsigned long extent);
static void __flush_cache_4096(unsigned long addr, unsigned long phys,
			       unsigned long exec_offset);

/*
 * This is initialised here to ensure that it is not placed in the BSS.  If
 * that were to happen, note that cache_init gets called before the BSS is
 * cleared, so this would get nulled out which would be hopeless.
 */
static void (*__flush_dcache_segment_fn)(unsigned long, unsigned long) =
	(void (*)(unsigned long, unsigned long))0xdeadbeef;
static void compute_alias(struct cache_info *c)
{
	c->alias_mask = ((c->sets - 1) << c->entry_shift) & ~(PAGE_SIZE - 1);
	c->n_aliases = c->alias_mask ? (c->alias_mask >> PAGE_SHIFT) + 1 : 0;
}
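/*
 * Worked example with assumed (not probed) geometry: a 16KB one-way
 * D-cache with 512 sets of 32-byte lines has entry_shift = 5, so
 * alias_mask = (511 << 5) & ~0xfff = 0x3000 with 4KB pages, and
 * n_aliases = (0x3000 >> 12) + 1 = 4 cache colours per physical page.
 */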
static void __init emit_cache_params(void)
{
	printk("PVR=%08x CVR=%08x PRR=%08x\n",
		ctrl_inl(CCN_PVR),
		ctrl_inl(CCN_CVR),
		ctrl_inl(CCN_PRR));
	printk("I-cache : n_ways=%d n_sets=%d way_incr=%d\n",
		boot_cpu_data.icache.ways,
		boot_cpu_data.icache.sets,
		boot_cpu_data.icache.way_incr);
	printk("I-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
		boot_cpu_data.icache.entry_mask,
		boot_cpu_data.icache.alias_mask,
		boot_cpu_data.icache.n_aliases);
	printk("D-cache : n_ways=%d n_sets=%d way_incr=%d\n",
		boot_cpu_data.dcache.ways,
		boot_cpu_data.dcache.sets,
		boot_cpu_data.dcache.way_incr);
	printk("D-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
		boot_cpu_data.dcache.entry_mask,
		boot_cpu_data.dcache.alias_mask,
		boot_cpu_data.dcache.n_aliases);

	/*
	 * Emit Secondary Cache parameters if the CPU has a probed L2.
	 */
	if (boot_cpu_data.flags & CPU_HAS_L2_CACHE) {
		printk("S-cache : n_ways=%d n_sets=%d way_incr=%d\n",
			boot_cpu_data.scache.ways,
			boot_cpu_data.scache.sets,
			boot_cpu_data.scache.way_incr);
		printk("S-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
			boot_cpu_data.scache.entry_mask,
			boot_cpu_data.scache.alias_mask,
			boot_cpu_data.scache.n_aliases);
	}

	if (!__flush_dcache_segment_fn)
		panic("unknown number of cache ways\n");
}
/*
 * SH-4 has a virtually indexed and physically tagged cache.
 */
void __init sh4_cache_init(void)
{
	compute_alias(&boot_cpu_data.icache);
	compute_alias(&boot_cpu_data.dcache);
	compute_alias(&boot_cpu_data.scache);

	switch (boot_cpu_data.dcache.ways) {
	case 1:
		__flush_dcache_segment_fn = __flush_dcache_segment_1way;
		break;
	case 2:
		__flush_dcache_segment_fn = __flush_dcache_segment_2way;
		break;
	case 4:
		__flush_dcache_segment_fn = __flush_dcache_segment_4way;
		break;
	default:
		__flush_dcache_segment_fn = NULL;
		break;
	}

	emit_cache_params();
}
/*
 * Write back the range of D-cache, and purge the I-cache.
 *
 * Called from kernel/module.c:sys_init_module, the a.out format routines,
 * signal handler code and kprobes code.
 */
void flush_icache_range(unsigned long start, unsigned long end)
{
	unsigned long icacheaddr;
	unsigned long flags, v;
	int i;

	/* If there are too many pages then just blow the caches */
	if (((end - start) >> PAGE_SHIFT) >= MAX_ICACHE_PAGES) {
		flush_cache_all();
	} else {
		/* selectively flush d-cache then invalidate the i-cache */
		/* this is inefficient, so only use for small ranges */
		start &= ~(L1_CACHE_BYTES - 1);
		end += L1_CACHE_BYTES - 1;
		end &= ~(L1_CACHE_BYTES - 1);

		local_irq_save(flags);
		jump_to_uncached();

		for (v = start; v < end; v += L1_CACHE_BYTES) {
			asm volatile("ocbwb %0"
				     : /* no output */
				     : "m" (__m(v)));
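			/*
			 * Illustrative note (entry_mask value assumed, not
			 * probed): with a typical SH-4 I-cache entry_mask of
			 * 0x1fe0, VA bits [12:5] select the address-array
			 * entry; writing 0 to that entry in every way clears
			 * its valid bit no matter which way holds the line.
			 */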
			icacheaddr = CACHE_IC_ADDRESS_ARRAY |
					(v & cpu_data->icache.entry_mask);
			for (i = 0; i < cpu_data->icache.ways;
			     i++, icacheaddr += cpu_data->icache.way_incr)
				/* Clear i-cache line valid-bit */
				ctrl_outl(0, icacheaddr);
		}

		back_to_cached();
		local_irq_restore(flags);
	}
}
static inline void flush_cache_4096(unsigned long start,
				    unsigned long phys)
{
	unsigned long flags, exec_offset = 0;

	/*
	 * All types of SH-4 require PC to be in P2 to operate on the I-cache.
	 * Some types of SH-4 require PC to be in P2 to operate on the D-cache.
	 */
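	/*
	 * 0x20000000 is the distance from the cached P1 segment
	 * (0x80000000-) to its uncached P2 alias (0xA0000000-);
	 * __flush_cache_4096() adds it to the PC so that the flush loop
	 * itself executes uncached.
	 */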
	if ((boot_cpu_data.flags & CPU_HAS_P2_FLUSH_BUG) ||
	    (start < CACHE_OC_ADDRESS_ARRAY))
		exec_offset = 0x20000000;

	local_irq_save(flags);
	__flush_cache_4096(start | SH_CACHE_ASSOC,
			   P1SEGADDR(phys), exec_offset);
	local_irq_restore(flags);
}
/*
 * Write back & invalidate the D-cache of the page.
 * (To avoid "alias" issues)
 */
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);

#ifndef CONFIG_SMP
	if (mapping && !mapping_mapped(mapping))
		set_bit(PG_dcache_dirty, &page->flags);
	else
#endif
	{
		unsigned long phys = PHYSADDR(page_address(page));
		unsigned long addr = CACHE_OC_ADDRESS_ARRAY;
		int i, n;

		/* Loop all the D-cache */
		n = boot_cpu_data.dcache.n_aliases;
		for (i = 0; i < n; i++, addr += 4096)
			flush_cache_4096(addr, phys);
	}

	wmb();
}
/* TODO: Selective icache invalidation through IC address array.. */
static void __uses_jump_to_uncached flush_icache_all(void)
{
	unsigned long flags, ccr;

	local_irq_save(flags);
	jump_to_uncached();

	/* Flush I-cache */
	ccr = ctrl_inl(CCR);
	ccr |= CCR_CACHE_ICI;
	ctrl_outl(ccr, CCR);

	/*
	 * back_to_cached() will take care of the barrier for us, don't add
	 * another one!
	 */
	back_to_cached();
	local_irq_restore(flags);
}
void flush_dcache_all(void)
{
	(*__flush_dcache_segment_fn)(0UL, boot_cpu_data.dcache.way_size);
	wmb();
}

void flush_cache_all(void)
{
	flush_dcache_all();
	flush_icache_all();
}
static void __flush_cache_mm(struct mm_struct *mm, unsigned long start,
			     unsigned long end)
{
	unsigned long d = 0, p = start & PAGE_MASK;
	unsigned long alias_mask = boot_cpu_data.dcache.alias_mask;
	unsigned long n_aliases = boot_cpu_data.dcache.n_aliases;
	unsigned long select_bit;
	unsigned long all_aliases_mask;
	unsigned long addr_offset;
	pgd_t *dir;
	pmd_t *pmd;
	pud_t *pud;
	pte_t *pte;
	int i;

	dir = pgd_offset(mm, p);
	pud = pud_offset(dir, p);
	pmd = pmd_offset(pud, p);
	end = PAGE_ALIGN(end);

	all_aliases_mask = (1 << n_aliases) - 1;
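	/*
	 * 'd' accumulates one bit per D-cache colour that needs flushing.
	 * With, say, n_aliases = 4, all_aliases_mask = 0xf, so the page
	 * table walk below can bail out early once every colour has been
	 * implicated.
	 */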
	do {
		if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd))) {
			p &= PMD_MASK;
			p += PMD_SIZE;
			pmd++;

			continue;
		}

		pte = pte_offset_kernel(pmd, p);

		do {
			unsigned long phys;
			pte_t entry = *pte;

			if (!(pte_val(entry) & _PAGE_PRESENT)) {
				pte++;
				p += PAGE_SIZE;
				continue;
			}

			phys = pte_val(entry) & PTE_PHYS_MASK;

			if ((p ^ phys) & alias_mask) {
				d |= 1 << ((p & alias_mask) >> PAGE_SHIFT);
				d |= 1 << ((phys & alias_mask) >> PAGE_SHIFT);

				if (d == all_aliases_mask)
					goto loop_exit;
			}

			pte++;
			p += PAGE_SIZE;
		} while (p < end && ((unsigned long)pte & ~PAGE_MASK));
		pmd++;
	} while (p < end);

loop_exit:
	addr_offset = 0;
	select_bit = 1;

	for (i = 0; i < n_aliases; i++) {
		if (d & select_bit) {
			(*__flush_dcache_segment_fn)(addr_offset, PAGE_SIZE);
			wmb();
		}

		select_bit <<= 1;
		addr_offset += PAGE_SIZE;
	}
}
/*
 * Note : (RPC) since the caches are physically tagged, the only point
 * of flush_cache_mm for SH-4 is to get rid of aliases from the
 * D-cache.  The assumption elsewhere, e.g. flush_cache_range, is that
 * lines can stay resident so long as the virtual address they were
 * accessed with (hence cache set) is in accord with the physical
 * address (i.e. tag).  It's no different here.  So I reckon we don't
 * need to flush the I-cache, since aliases don't matter for that.  We
 * should try that.
 *
 * Caller takes mm->mmap_sem.
 */
void flush_cache_mm(struct mm_struct *mm)
{
	if (cpu_context(smp_processor_id(), mm) == NO_CONTEXT)
		return;

	/*
	 * If cache is only 4k-per-way, there are never any 'aliases'.  Since
	 * the cache is physically tagged, the data can just be left in there.
	 */
	if (boot_cpu_data.dcache.n_aliases == 0)
		return;

	/*
	 * Don't bother groveling around the dcache for the VMA ranges
	 * if there are too many PTEs to make it worthwhile.
	 */
	if (mm->nr_ptes >= MAX_DCACHE_PAGES)
		flush_dcache_all();
	else {
		struct vm_area_struct *vma;

		/*
		 * In this case there are reasonably sized ranges to flush,
		 * iterate through the VMA list and take care of any aliases.
		 */
		for (vma = mm->mmap; vma; vma = vma->vm_next)
			__flush_cache_mm(mm, vma->vm_start, vma->vm_end);
	}

	/* Only touch the icache if one of the VMAs has VM_EXEC set. */
	if (mm->exec_vm)
		flush_icache_all();
}
/*
 * Write back and invalidate I/D-caches for the page.
 *
 * ADDR: Virtual Address (U0 address)
 * PFN: Physical page number
 */
void flush_cache_page(struct vm_area_struct *vma, unsigned long address,
		      unsigned long pfn)
{
	unsigned long phys = pfn << PAGE_SHIFT;
	unsigned int alias_mask;

	if (cpu_context(smp_processor_id(), vma->vm_mm) == NO_CONTEXT)
		return;

	alias_mask = boot_cpu_data.dcache.alias_mask;

	/* We only need to flush the D-cache when there is an alias */
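	/*
	 * Example with assumed values: alias_mask = 0x3000, address =
	 * 0x20005000, phys = 0x01402000.  (address ^ phys) & alias_mask
	 * = 0x3000, i.e. the user mapping and the kernel's identity
	 * mapping land in different cache colours, so both colours must
	 * be flushed below.
	 */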
	if ((address ^ phys) & alias_mask) {
		/* Loop 4K of the D-cache */
		flush_cache_4096(
			CACHE_OC_ADDRESS_ARRAY | (address & alias_mask),
			phys);
		/* Loop another 4K of the D-cache */
		flush_cache_4096(
			CACHE_OC_ADDRESS_ARRAY | (phys & alias_mask),
			phys);
	}

	alias_mask = boot_cpu_data.icache.alias_mask;
	if (vma->vm_flags & VM_EXEC) {
		/*
		 * Evict entries from the portion of the cache from which code
		 * may have been executed at this address (virtual).  There's
		 * no need to evict from the portion corresponding to the
		 * physical address as for the D-cache, because we know the
		 * kernel has never executed the code through its identity
		 * translation.
		 */
		flush_cache_4096(
			CACHE_IC_ADDRESS_ARRAY | (address & alias_mask),
			phys);
	}
}
/*
 * Write back and invalidate D-caches.
 *
 * START, END: Virtual Address (U0 address)
 *
 * NOTE: We need to flush the _physical_ page entry.
 * Flushing the cache lines for U0 only isn't enough.
 * We need to flush for P1 too, which may contain aliases.
 */
void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end)
{
	if (cpu_context(smp_processor_id(), vma->vm_mm) == NO_CONTEXT)
		return;

	/*
	 * If cache is only 4k-per-way, there are never any 'aliases'.  Since
	 * the cache is physically tagged, the data can just be left in there.
	 */
	if (boot_cpu_data.dcache.n_aliases == 0)
		return;

	/*
	 * Don't bother with the lookup and alias check if we have a
	 * wide range to cover, just blow away the dcache in its
	 * entirety instead. -- PFM.
	 */
	if (((end - start) >> PAGE_SHIFT) >= MAX_DCACHE_PAGES)
		flush_dcache_all();
	else
		__flush_cache_mm(vma->vm_mm, start, end);

	if (vma->vm_flags & VM_EXEC) {
		/*
		 * TODO: Is this required???  Need to look at how I-cache
		 * coherency is assured when new programs are loaded to see if
		 * this matters.
		 */
		flush_icache_all();
	}
}
/*
 * flush_icache_user_range
 * @vma: VMA of the process
 * @page: page
 * @addr: U0 address
 * @len: length of the range (< page size)
 */
void flush_icache_user_range(struct vm_area_struct *vma,
			     struct page *page, unsigned long addr, int len)
{
	flush_cache_page(vma, addr, page_to_pfn(page));
	mb();
}
/**
 * __flush_cache_4096
 *
 * @addr:  address in memory mapped cache array
 * @phys:  P1 address to flush (has to match tags if addr has 'A' bit
 *         set i.e. associative write)
 * @exec_offset: set to 0x20000000 if flush has to be executed from P2
 *               region else 0x0
 *
 * The offset into the cache array implied by 'addr' selects the
 * 'colour' of the virtual address range that will be flushed.  The
 * operation (purge/write-back) is selected by the lower 2 bits of
 * 'phys'.
 */
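/*
 * The callers here pass P1SEGADDR(phys) with the low two bits clear;
 * per the SH-4 address-array data format (bit 0 = V, bit 1 = U), an
 * associative write with U=V=0 should write back a dirty matching line
 * and then invalidate it -- consult the CPU manual for the
 * authoritative encoding.
 */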
static void __flush_cache_4096(unsigned long addr, unsigned long phys,
			       unsigned long exec_offset)
{
	int way_count;
	unsigned long base_addr = addr;
	struct cache_info *dcache;
	unsigned long way_incr;
	unsigned long a, ea, p;
	unsigned long temp_pc;

	dcache = &boot_cpu_data.dcache;
	/* Write this way for better assembly. */
	way_count = dcache->ways;
	way_incr = dcache->way_incr;

	/*
	 * Apply exec_offset (i.e. branch to P2 if required.).
	 *
	 * FIXME:
	 *
	 *	If I write "=r" for the (temp_pc), it puts this in r6 hence
	 *	trashing exec_offset before it's been added on - why?  Hence
	 *	"=&r" as a 'workaround'
	 */
	asm volatile("mov.l 1f, %0\n\t"
		     "add   %1, %0\n\t"
		     "jmp   @%0\n\t"
		     "nop\n\t"
		     ".balign 4\n\t"
		     "1:  .long 2f\n\t"
		     "2:\n" : "=&r" (temp_pc) : "r" (exec_offset));
	/*
	 * We know there will be >=1 iteration, so write as do-while to avoid
	 * pointless head-of-loop check for 0 iterations.
	 */
	do {
		ea = base_addr + PAGE_SIZE;
		a = base_addr;
		p = phys;

		do {
			*(volatile unsigned long *)a = p;
			/*
			 * Next line: intentionally not p+32, saves an add, p
			 * will do since only the cache tag bits need to
			 * match.
			 */
			*(volatile unsigned long *)(a + 32) = p;
			a += 64;
			p += 64;
		} while (a < ea);

		base_addr += way_incr;
	} while (--way_count != 0);
}
/*
 * Break the 1, 2 and 4 way variants of this out into separate functions to
 * avoid nearly all the overhead of having the conditional stuff in the
 * function bodies (+ the 1 and 2 way cases avoid saving any registers too).
 */
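/*
 * A note on the idiom below: movca.l r0, @Rn allocates the cache line
 * for Rn without fetching its old contents from memory, which evicts
 * (writing back if dirty) whatever line previously occupied that slot;
 * the ocbi that follows invalidates the freshly allocated line so the
 * bogus r0 data is never written back.  SR.BL (bit 28) is set around
 * each group so no exception can fire between a movca.l and its ocbi.
 */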
static void __flush_dcache_segment_1way(unsigned long start,
					unsigned long extent_per_way)
{
	unsigned long orig_sr, sr_with_bl;
	unsigned long base_addr;
	unsigned long way_incr, linesz, way_size;
	struct cache_info *dcache;
	register unsigned long a0, a0e;

	asm volatile("stc sr, %0" : "=r" (orig_sr));
	sr_with_bl = orig_sr | (1<<28);
	base_addr = ((unsigned long)&empty_zero_page[0]);

	/*
	 * The previous code aligned base_addr to 16k, i.e. the way_size of all
	 * existing SH-4 D-caches.  Whilst I don't see a need to have this
	 * aligned to any better than the cache line size (which it will be
	 * anyway by construction), let's align it to at least the way_size of
	 * any existing or conceivable SH-4 D-cache. -- RPC
	 */
	base_addr = ((base_addr >> 16) << 16);
	base_addr |= start;

	dcache = &boot_cpu_data.dcache;
	linesz = dcache->linesz;
	way_incr = dcache->way_incr;
	way_size = dcache->way_size;

	a0 = base_addr;
	a0e = base_addr + extent_per_way;
	do {
		asm volatile("ldc %0, sr" : : "r" (sr_with_bl));
		asm volatile("movca.l r0, @%0\n\t"
			     "ocbi @%0" : : "r" (a0));
		a0 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "ocbi @%0" : : "r" (a0));
		a0 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "ocbi @%0" : : "r" (a0));
		a0 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "ocbi @%0" : : "r" (a0));
		asm volatile("ldc %0, sr" : : "r" (orig_sr));
		a0 += linesz;
	} while (a0 < a0e);
}
static void __flush_dcache_segment_2way(unsigned long start,
					unsigned long extent_per_way)
{
	unsigned long orig_sr, sr_with_bl;
	unsigned long base_addr;
	unsigned long way_incr, linesz, way_size;
	struct cache_info *dcache;
	register unsigned long a0, a1, a0e;

	asm volatile("stc sr, %0" : "=r" (orig_sr));
	sr_with_bl = orig_sr | (1<<28);
	base_addr = ((unsigned long)&empty_zero_page[0]);

	/* See comment under 1-way above */
	base_addr = ((base_addr >> 16) << 16);
	base_addr |= start;

	dcache = &boot_cpu_data.dcache;
	linesz = dcache->linesz;
	way_incr = dcache->way_incr;
	way_size = dcache->way_size;

	a0 = base_addr;
	a1 = a0 + way_incr;
	a0e = base_addr + extent_per_way;
	do {
		asm volatile("ldc %0, sr" : : "r" (sr_with_bl));
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1" : :
			     "r" (a0), "r" (a1));
		a0 += linesz;
		a1 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1" : :
			     "r" (a0), "r" (a1));
		a0 += linesz;
		a1 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1" : :
			     "r" (a0), "r" (a1));
		a0 += linesz;
		a1 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1" : :
			     "r" (a0), "r" (a1));
		asm volatile("ldc %0, sr" : : "r" (orig_sr));
		a0 += linesz;
		a1 += linesz;
	} while (a0 < a0e);
}
static void __flush_dcache_segment_4way(unsigned long start,
					unsigned long extent_per_way)
{
	unsigned long orig_sr, sr_with_bl;
	unsigned long base_addr;
	unsigned long way_incr, linesz, way_size;
	struct cache_info *dcache;
	register unsigned long a0, a1, a2, a3, a0e;

	asm volatile("stc sr, %0" : "=r" (orig_sr));
	sr_with_bl = orig_sr | (1<<28);
	base_addr = ((unsigned long)&empty_zero_page[0]);

	/* See comment under 1-way above */
	base_addr = ((base_addr >> 16) << 16);
	base_addr |= start;

	dcache = &boot_cpu_data.dcache;
	linesz = dcache->linesz;
	way_incr = dcache->way_incr;
	way_size = dcache->way_size;

	a0 = base_addr;
	a1 = a0 + way_incr;
	a2 = a1 + way_incr;
	a3 = a2 + way_incr;
	a0e = base_addr + extent_per_way;
	do {
		asm volatile("ldc %0, sr" : : "r" (sr_with_bl));
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "movca.l r0, @%2\n\t"
			     "movca.l r0, @%3\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1\n\t"
			     "ocbi @%2\n\t"
			     "ocbi @%3\n\t" : :
			     "r" (a0), "r" (a1), "r" (a2), "r" (a3));
		a0 += linesz;
		a1 += linesz;
		a2 += linesz;
		a3 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "movca.l r0, @%2\n\t"
			     "movca.l r0, @%3\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1\n\t"
			     "ocbi @%2\n\t"
			     "ocbi @%3\n\t" : :
			     "r" (a0), "r" (a1), "r" (a2), "r" (a3));
		a0 += linesz;
		a1 += linesz;
		a2 += linesz;
		a3 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "movca.l r0, @%2\n\t"
			     "movca.l r0, @%3\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1\n\t"
			     "ocbi @%2\n\t"
			     "ocbi @%3\n\t" : :
			     "r" (a0), "r" (a1), "r" (a2), "r" (a3));
		a0 += linesz;
		a1 += linesz;
		a2 += linesz;
		a3 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "movca.l r0, @%2\n\t"
			     "movca.l r0, @%3\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1\n\t"
			     "ocbi @%2\n\t"
			     "ocbi @%3\n\t" : :
			     "r" (a0), "r" (a1), "r" (a2), "r" (a3));
		asm volatile("ldc %0, sr" : : "r" (orig_sr));
		a0 += linesz;
		a1 += linesz;
		a2 += linesz;
		a3 += linesz;
	} while (a0 < a0e);
}