/*
 * arch/sh/mm/cache-sh4.c
 *
 * Copyright (C) 1999, 2000, 2002 Niibe Yutaka
 * Copyright (C) 2001 - 2007 Paul Mundt
 * Copyright (C) 2003 Richard Curnow
 * Copyright (c) 2007 STMicroelectronics (R&D) Ltd.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/mutex.h>
#include <linux/fs.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

/*
 * The maximum number of pages we support when doing ranged dcache
 * flushing. Anything exceeding this will simply flush the dcache in its
 * entirety.
 */
#define MAX_DCACHE_PAGES 64 /* XXX: Tune for ways */
#define MAX_ICACHE_PAGES 32

static void __flush_cache_4096(unsigned long addr, unsigned long phys,
                               unsigned long exec_offset);

/*
 * This is initialised here to ensure that it is not placed in the BSS. If
 * that were to happen, note that cache_init gets called before the BSS is
 * cleared, so this would get nulled out which would be hopeless.
 */
static void (*__flush_dcache_segment_fn)(unsigned long, unsigned long) =
        (void (*)(unsigned long, unsigned long))0xdeadbeef;

/*
 * Write back the range of D-cache, and purge the I-cache.
 *
 * Called from kernel/module.c:sys_init_module and the a.out loader, as
 * well as from signal handler and kprobes code.
 */
static void sh4_flush_icache_range(void *args)
{
        struct flusher_data *data = args;
        unsigned long icacheaddr;
        unsigned long start, end;
        unsigned long flags, v;
        int i;

        start = data->addr1;
        end = data->addr2;

        /* If there are too many pages then just blow the caches */
        if (((end - start) >> PAGE_SHIFT) >= MAX_ICACHE_PAGES) {
                local_flush_cache_all(args);
        } else {
                /* selectively flush d-cache then invalidate the i-cache */
                /* this is inefficient, so only use for small ranges */
                start &= ~(L1_CACHE_BYTES - 1);
                end += L1_CACHE_BYTES - 1;
                end &= ~(L1_CACHE_BYTES - 1);

                local_irq_save(flags);
                jump_to_uncached();
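
                /*
                 * The memory-mapped I-cache address array may only be
                 * written while executing from the uncached P2 segment,
                 * which is why we branch to uncached code around this loop.
                 */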
                for (v = start; v < end; v += L1_CACHE_BYTES) {
                        asm volatile("ocbwb %0"
                                     : /* no output */
                                     : "m" (__m(v)));

                        icacheaddr = CACHE_IC_ADDRESS_ARRAY |
                                        (v & cpu_data->icache.entry_mask);

                        for (i = 0; i < cpu_data->icache.ways;
                             i++, icacheaddr += cpu_data->icache.way_incr)
                                /* Clear i-cache line valid-bit */
                                ctrl_outl(0, icacheaddr);
                }

                back_to_cached();
                local_irq_restore(flags);
        }
}

static inline void flush_cache_4096(unsigned long start,
                                    unsigned long phys)
{
        unsigned long flags, exec_offset = 0;

        /*
         * All types of SH-4 require PC to be in P2 to operate on the I-cache.
         * Some types of SH-4 require PC to be in P2 to operate on the D-cache.
         */
        if ((boot_cpu_data.flags & CPU_HAS_P2_FLUSH_BUG) ||
            (start < CACHE_OC_ADDRESS_ARRAY))
                exec_offset = 0x20000000;

        local_irq_save(flags);
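        /*
         * SH_CACHE_ASSOC sets the associative bit in the address-array
         * write, so a line is only written back/purged when its tag
         * matches the physical address passed down.
         */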
        __flush_cache_4096(start | SH_CACHE_ASSOC,
                           P1SEGADDR(phys), exec_offset);
        local_irq_restore(flags);
}

/*
 * Write back & invalidate the D-cache of the page.
 * (To avoid "alias" issues)
 */
static void sh4_flush_dcache_page(void *arg)
{
        struct page *page = arg;
#ifndef CONFIG_SMP
        struct address_space *mapping = page_mapping(page);
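
        /*
         * A page-cache page with no user mappings cannot currently be
         * aliased in user space, so the flush can be deferred: mark the
         * page dirty and leave it to be written back before any user
         * mapping is set up.
         */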
        if (mapping && !mapping_mapped(mapping))
                set_bit(PG_dcache_dirty, &page->flags);
        else
#endif
        {
                unsigned long phys = PHYSADDR(page_address(page));
                unsigned long addr = CACHE_OC_ADDRESS_ARRAY;
                int i, n;

                /* Loop over every alias colour of the D-cache */
                n = boot_cpu_data.dcache.n_aliases;
                for (i = 0; i < n; i++, addr += 4096)
                        flush_cache_4096(addr, phys);
        }

        wmb();
}

/* TODO: Selective icache invalidation through IC address array.. */
static void __uses_jump_to_uncached flush_icache_all(void)
{
        unsigned long flags, ccr;

        local_irq_save(flags);
        jump_to_uncached();
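
        /*
         * CCR may only be written while executing from the uncached P2
         * segment; setting the ICI bit invalidates the entire I-cache
         * in one operation.
         */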
        /* Flush I-cache */
        ccr = ctrl_inl(CCR);
        ccr |= CCR_CACHE_ICI;
        ctrl_outl(ccr, CCR);

        /*
         * back_to_cached() will take care of the barrier for us, don't add
         * another one!
         */
        back_to_cached();
        local_irq_restore(flags);
}

static inline void flush_dcache_all(void)
{
        (*__flush_dcache_segment_fn)(0UL, boot_cpu_data.dcache.way_size);
        wmb();
}

static void sh4_flush_cache_all(void *unused)
{
        flush_dcache_all();
        flush_icache_all();
}

static void __flush_cache_mm(struct mm_struct *mm, unsigned long start,
                             unsigned long end)
{
        unsigned long d = 0, p = start & PAGE_MASK;
        unsigned long alias_mask = boot_cpu_data.dcache.alias_mask;
        unsigned long n_aliases = boot_cpu_data.dcache.n_aliases;
        unsigned long select_bit;
        unsigned long all_aliases_mask;
        unsigned long addr_offset;
        pgd_t *dir;
        pmd_t *pmd;
        pud_t *pud;
        pte_t *pte;
        int i;

        dir = pgd_offset(mm, p);
        pud = pud_offset(dir, p);
        pmd = pmd_offset(pud, p);
        end = PAGE_ALIGN(end);

        all_aliases_mask = (1 << n_aliases) - 1;
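
        /*
         * Walk the page tables and record in the bitmask 'd' every alias
         * colour (of either the virtual or the physical address) that may
         * hold stale lines. Once every colour is marked there is nothing
         * more to learn, so bail out of the walk early.
         */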
        do {
                if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd))) {
                        p &= PMD_MASK;
                        p += PMD_SIZE;
                        pmd++;
                        continue;
                }

                pte = pte_offset_kernel(pmd, p);

                do {
                        unsigned long phys;
                        pte_t entry = *pte;

                        if (!(pte_val(entry) & _PAGE_PRESENT)) {
                                pte++;
                                p += PAGE_SIZE;
                                continue;
                        }

                        phys = pte_val(entry) & PTE_PHYS_MASK;

                        if ((p ^ phys) & alias_mask) {
                                d |= 1 << ((p & alias_mask) >> PAGE_SHIFT);
                                d |= 1 << ((phys & alias_mask) >> PAGE_SHIFT);

                                if (d == all_aliases_mask)
                                        goto loop_exit;
                        }

                        pte++;
                        p += PAGE_SIZE;
                } while (p < end && ((unsigned long)pte & ~PAGE_MASK));
                pmd++;
        } while (p < end);

loop_exit:
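        /*
         * Flush one page-sized slice of each way for every colour that
         * was marked above.
         */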
        addr_offset = 0;
        select_bit = 1;

        for (i = 0; i < n_aliases; i++) {
                if (d & select_bit) {
                        (*__flush_dcache_segment_fn)(addr_offset, PAGE_SIZE);
                        wmb();
                }

                select_bit <<= 1;
                addr_offset += PAGE_SIZE;
        }
}

/*
 * Note: (RPC) since the caches are physically tagged, the only point
 * of flush_cache_mm for SH-4 is to get rid of aliases from the
 * D-cache. The assumption elsewhere, e.g. flush_cache_range, is that
 * lines can stay resident so long as the virtual address they were
 * accessed with (hence cache set) is in accord with the physical
 * address (i.e. tag). It's no different here. So I reckon we don't
 * need to flush the I-cache, since aliases don't matter for that. We
 * should try that.
 *
 * Caller takes mm->mmap_sem.
 */
static void sh4_flush_cache_mm(void *arg)
{
        struct mm_struct *mm = arg;

        if (cpu_context(smp_processor_id(), mm) == NO_CONTEXT)
                return;

        /*
         * If cache is only 4k-per-way, there are never any 'aliases'. Since
         * the cache is physically tagged, the data can just be left in there.
         */
        if (boot_cpu_data.dcache.n_aliases == 0)
                return;

        /*
         * Don't bother groveling around the dcache for the VMA ranges
         * if there are too many PTEs to make it worthwhile.
         */
        if (mm->nr_ptes >= MAX_DCACHE_PAGES)
                flush_dcache_all();
        else {
                struct vm_area_struct *vma;

                /*
                 * In this case there are reasonably sized ranges to flush;
                 * iterate through the VMA list and take care of any aliases.
                 */
                for (vma = mm->mmap; vma; vma = vma->vm_next)
                        __flush_cache_mm(mm, vma->vm_start, vma->vm_end);
        }

        /* Only touch the icache if one of the VMAs has VM_EXEC set. */
        if (mm->exec_vm)
                flush_icache_all();
}

/*
 * Write back and invalidate I/D-caches for the page.
 *
 * ADDR: Virtual Address (U0 address)
 * PFN: Physical page number
 */
static void sh4_flush_cache_page(void *args)
{
        struct flusher_data *data = args;
        struct vm_area_struct *vma;
        unsigned long address, pfn, phys;
        unsigned int alias_mask;

        vma = data->vma;
        address = data->addr1;
        pfn = data->addr2;
        phys = pfn << PAGE_SHIFT;

        if (cpu_context(smp_processor_id(), vma->vm_mm) == NO_CONTEXT)
                return;

        alias_mask = boot_cpu_data.dcache.alias_mask;
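
        /*
         * The data may sit in two different colours: the one selected by
         * the user virtual address and the one selected by the kernel's
         * P1 mapping of the same physical page, so both slices get an
         * associative flush.
         */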
        /* We only need to flush the D-cache when we have an alias */
        if ((address ^ phys) & alias_mask) {
                /* Flush the 4K slice selected by the user address */
                flush_cache_4096(
                        CACHE_OC_ADDRESS_ARRAY | (address & alias_mask),
                        phys);
                /* Flush the 4K slice selected by the physical address */
                flush_cache_4096(
                        CACHE_OC_ADDRESS_ARRAY | (phys & alias_mask),
                        phys);
        }

        alias_mask = boot_cpu_data.icache.alias_mask;
        if (vma->vm_flags & VM_EXEC) {
                /*
                 * Evict entries from the portion of the cache from which code
                 * may have been executed at this address (virtual). There's
                 * no need to evict from the portion corresponding to the
                 * physical address as for the D-cache, because we know the
                 * kernel has never executed the code through its identity
                 * translation.
                 */
                flush_cache_4096(
                        CACHE_IC_ADDRESS_ARRAY | (address & alias_mask),
                        phys);
        }
}

/*
 * Write back and invalidate D-caches.
 *
 * START, END: Virtual Address (U0 address)
 *
 * NOTE: We need to flush the _physical_ page entry.
 * Flushing the cache lines for U0 only isn't enough.
 * We need to flush for P1 too, which may contain aliases.
 */
static void sh4_flush_cache_range(void *args)
{
        struct flusher_data *data = args;
        struct vm_area_struct *vma;
        unsigned long start, end;

        vma = data->vma;
        start = data->addr1;
        end = data->addr2;

        if (cpu_context(smp_processor_id(), vma->vm_mm) == NO_CONTEXT)
                return;

        /*
         * If cache is only 4k-per-way, there are never any 'aliases'. Since
         * the cache is physically tagged, the data can just be left in there.
         */
        if (boot_cpu_data.dcache.n_aliases == 0)
                return;

        /*
         * Don't bother with the lookup and alias check if we have a
         * wide range to cover, just blow away the dcache in its
         * entirety instead. -- PFM.
         */
        if (((end - start) >> PAGE_SHIFT) >= MAX_DCACHE_PAGES)
                flush_dcache_all();
        else
                __flush_cache_mm(vma->vm_mm, start, end);

        if (vma->vm_flags & VM_EXEC) {
                /*
                 * TODO: Is this required??? Need to look at how I-cache
                 * coherency is assured when new programs are loaded to see if
                 * this matters.
                 */
                flush_icache_all();
        }
}

/**
 * __flush_cache_4096
 *
 * @addr: address in memory mapped cache array
 * @phys: P1 address to flush (has to match tags if addr has 'A' bit
 *        set i.e. associative write)
 * @exec_offset: set to 0x20000000 if flush has to be executed from P2
 *               region else 0x0
 *
 * The offset into the cache array implied by 'addr' selects the
 * 'colour' of the virtual address range that will be flushed. The
 * operation (purge/write-back) is selected by the lower 2 bits of
 * 'phys'.
 */
static void __flush_cache_4096(unsigned long addr, unsigned long phys,
                               unsigned long exec_offset)
{
        int way_count;
        unsigned long base_addr = addr;
        struct cache_info *dcache;
        unsigned long way_incr;
        unsigned long a, ea, p;
        unsigned long temp_pc;

        dcache = &boot_cpu_data.dcache;
        /* Write this way for better assembly. */
        way_count = dcache->ways;
        way_incr = dcache->way_incr;

        /*
         * Apply exec_offset (i.e. branch to P2 if required.).
         *
         * FIXME:
         *
         *      If I write "=r" for the (temp_pc), it puts this in r6 hence
         *      trashing exec_offset before it's been added on - why? Hence
         *      "=&r" as a 'workaround'
         */
        asm volatile("mov.l 1f, %0\n\t"
                     "add   %1, %0\n\t"
                     "jmp   @%0\n\t"
                     "nop\n\t"
                     ".balign 4\n\t"
                     "1:  .long 2f\n\t"
                     "2:\n" : "=&r" (temp_pc) : "r" (exec_offset));
        /*
         * We know there will be >=1 iteration, so write as do-while to avoid
         * pointless head-of-loop check for 0 iterations.
         */
        do {
                ea = base_addr + PAGE_SIZE;
                a = base_addr;
                p = phys;

                do {
                        *(volatile unsigned long *)a = p;
                        /*
                         * Next line: intentionally not p+32, saves an add, p
                         * will do since only the cache tag bits need to
                         * match.
                         */
                        *(volatile unsigned long *)(a + 32) = p;
                        a += 64;
                        p += 64;
                } while (a < ea);

                base_addr += way_incr;
        } while (--way_count != 0);
}

/*
 * Break the 1, 2 and 4 way variants of this out into separate functions to
 * avoid nearly all the overhead of having the conditional stuff in the
 * function bodies (+ the 1 and 2 way cases avoid saving any registers too).
 *
 * We want to eliminate unnecessary bus transactions, so this code uses
 * a non-obvious technique.
 *
 * Loop over a cache way sized block of memory, one cache line at a time.
 * For each line, use movca.l to cause the current cache line contents to
 * be written back, but without reading anything from main memory. However
 * this has the side effect that the cache is now caching that memory
 * location. So follow this with a cache invalidate to mark the cache line
 * invalid. And do all this with interrupts disabled, to avoid the cache
 * line being accidentally evicted while it is holding garbage.
 *
 * This also breaks in a number of circumstances:
 * - if there are modifications to the region of memory just above
 *   empty_zero_page (for example because a breakpoint has been placed
 *   there), then these can be lost.
 *
 *   This is because the memory address which the cache temporarily
 *   caches in the above description is empty_zero_page. So the
 *   movca.l hits the cache (it is assumed that it misses, or at least
 *   isn't dirty), modifies the line and then invalidates it, losing the
 *   required change.
 *
 * - If caches are disabled or configured in write-through mode, then
 *   the movca.l writes garbage directly into memory.
 */

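/*
 * In write-through mode nothing in the D-cache is ever dirty, so it is
 * enough to clear the valid bit of every line by writing zero through
 * the OC address array; movca.l must not be used here at all.
 */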
static void __flush_dcache_segment_writethrough(unsigned long start,
                                                unsigned long extent_per_way)
{
        unsigned long addr;
        int i;

        addr = CACHE_OC_ADDRESS_ARRAY | (start & cpu_data->dcache.entry_mask);

        while (extent_per_way) {
                for (i = 0; i < cpu_data->dcache.ways; i++)
                        __raw_writel(0, addr + cpu_data->dcache.way_incr * i);

                addr += cpu_data->dcache.linesz;
                extent_per_way -= cpu_data->dcache.linesz;
        }
}

static void __flush_dcache_segment_1way(unsigned long start,
                                        unsigned long extent_per_way)
{
        unsigned long orig_sr, sr_with_bl;
        unsigned long base_addr;
        unsigned long way_incr, linesz, way_size;
        struct cache_info *dcache;
        register unsigned long a0, a0e;

        asm volatile("stc sr, %0" : "=r" (orig_sr));
        sr_with_bl = orig_sr | (1<<28);
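
        /*
         * Bit 28 of SR is the BL (block) bit: raising it blocks
         * interrupts and exceptions around each movca.l/ocbi pair so the
         * temporarily-garbage line cannot be observed or evicted.
         */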
        base_addr = ((unsigned long)&empty_zero_page[0]);

        /*
         * The previous code aligned base_addr to 16k, i.e. the way_size of all
         * existing SH-4 D-caches. Whilst I don't see a need to have this
         * aligned to any better than the cache line size (which it will be
         * anyway by construction), let's align it to at least the way_size of
         * any existing or conceivable SH-4 D-cache. -- RPC
         */
        base_addr = ((base_addr >> 16) << 16);

        base_addr |= start;

        dcache = &boot_cpu_data.dcache;
        linesz = dcache->linesz;
        way_incr = dcache->way_incr;
        way_size = dcache->way_size;

        a0 = base_addr;
        a0e = base_addr + extent_per_way;

        do {
                asm volatile("ldc %0, sr" : : "r" (sr_with_bl));
                asm volatile("movca.l r0, @%0\n\t"
                             "ocbi @%0" : : "r" (a0));
                a0 += linesz;
                asm volatile("movca.l r0, @%0\n\t"
                             "ocbi @%0" : : "r" (a0));
                a0 += linesz;
                asm volatile("movca.l r0, @%0\n\t"
                             "ocbi @%0" : : "r" (a0));
                a0 += linesz;
                asm volatile("movca.l r0, @%0\n\t"
                             "ocbi @%0" : : "r" (a0));
                asm volatile("ldc %0, sr" : : "r" (orig_sr));
                a0 += linesz;
        } while (a0 < a0e);
}

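/*
 * The 2- and 4-way variants below unroll exactly the same movca.l/ocbi
 * pattern across two and four ways per iteration.
 */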
static void __flush_dcache_segment_2way(unsigned long start,
                                        unsigned long extent_per_way)
{
        unsigned long orig_sr, sr_with_bl;
        unsigned long base_addr;
        unsigned long way_incr, linesz, way_size;
        struct cache_info *dcache;
        register unsigned long a0, a1, a0e;

        asm volatile("stc sr, %0" : "=r" (orig_sr));
        sr_with_bl = orig_sr | (1<<28);
        base_addr = ((unsigned long)&empty_zero_page[0]);

        /* See comment under 1-way above */
        base_addr = ((base_addr >> 16) << 16);

        base_addr |= start;

        dcache = &boot_cpu_data.dcache;
        linesz = dcache->linesz;
        way_incr = dcache->way_incr;
        way_size = dcache->way_size;

        a0 = base_addr;
        a1 = a0 + way_incr;
        a0e = base_addr + extent_per_way;

        do {
                asm volatile("ldc %0, sr" : : "r" (sr_with_bl));
                asm volatile("movca.l r0, @%0\n\t"
                             "movca.l r0, @%1\n\t"
                             "ocbi @%0\n\t"
                             "ocbi @%1" : :
                             "r" (a0), "r" (a1));
                a0 += linesz;
                a1 += linesz;
                asm volatile("movca.l r0, @%0\n\t"
                             "movca.l r0, @%1\n\t"
                             "ocbi @%0\n\t"
                             "ocbi @%1" : :
                             "r" (a0), "r" (a1));
                a0 += linesz;
                a1 += linesz;
                asm volatile("movca.l r0, @%0\n\t"
                             "movca.l r0, @%1\n\t"
                             "ocbi @%0\n\t"
                             "ocbi @%1" : :
                             "r" (a0), "r" (a1));
                a0 += linesz;
                a1 += linesz;
                asm volatile("movca.l r0, @%0\n\t"
                             "movca.l r0, @%1\n\t"
                             "ocbi @%0\n\t"
                             "ocbi @%1" : :
                             "r" (a0), "r" (a1));
                asm volatile("ldc %0, sr" : : "r" (orig_sr));
                a0 += linesz;
                a1 += linesz;
        } while (a0 < a0e);
}

static void __flush_dcache_segment_4way(unsigned long start,
                                        unsigned long extent_per_way)
{
        unsigned long orig_sr, sr_with_bl;
        unsigned long base_addr;
        unsigned long way_incr, linesz, way_size;
        struct cache_info *dcache;
        register unsigned long a0, a1, a2, a3, a0e;

        asm volatile("stc sr, %0" : "=r" (orig_sr));
        sr_with_bl = orig_sr | (1<<28);
        base_addr = ((unsigned long)&empty_zero_page[0]);

        /* See comment under 1-way above */
        base_addr = ((base_addr >> 16) << 16);

        base_addr |= start;

        dcache = &boot_cpu_data.dcache;
        linesz = dcache->linesz;
        way_incr = dcache->way_incr;
        way_size = dcache->way_size;

        a0 = base_addr;
        a1 = a0 + way_incr;
        a2 = a1 + way_incr;
        a3 = a2 + way_incr;
        a0e = base_addr + extent_per_way;

        do {
                asm volatile("ldc %0, sr" : : "r" (sr_with_bl));
                asm volatile("movca.l r0, @%0\n\t"
                             "movca.l r0, @%1\n\t"
                             "movca.l r0, @%2\n\t"
                             "movca.l r0, @%3\n\t"
                             "ocbi @%0\n\t"
                             "ocbi @%1\n\t"
                             "ocbi @%2\n\t"
                             "ocbi @%3\n\t" : :
                             "r" (a0), "r" (a1), "r" (a2), "r" (a3));
                a0 += linesz;
                a1 += linesz;
                a2 += linesz;
                a3 += linesz;
                asm volatile("movca.l r0, @%0\n\t"
                             "movca.l r0, @%1\n\t"
                             "movca.l r0, @%2\n\t"
                             "movca.l r0, @%3\n\t"
                             "ocbi @%0\n\t"
                             "ocbi @%1\n\t"
                             "ocbi @%2\n\t"
                             "ocbi @%3\n\t" : :
                             "r" (a0), "r" (a1), "r" (a2), "r" (a3));
                a0 += linesz;
                a1 += linesz;
                a2 += linesz;
                a3 += linesz;
                asm volatile("movca.l r0, @%0\n\t"
                             "movca.l r0, @%1\n\t"
                             "movca.l r0, @%2\n\t"
                             "movca.l r0, @%3\n\t"
                             "ocbi @%0\n\t"
                             "ocbi @%1\n\t"
                             "ocbi @%2\n\t"
                             "ocbi @%3\n\t" : :
                             "r" (a0), "r" (a1), "r" (a2), "r" (a3));
                a0 += linesz;
                a1 += linesz;
                a2 += linesz;
                a3 += linesz;
                asm volatile("movca.l r0, @%0\n\t"
                             "movca.l r0, @%1\n\t"
                             "movca.l r0, @%2\n\t"
                             "movca.l r0, @%3\n\t"
                             "ocbi @%0\n\t"
                             "ocbi @%1\n\t"
                             "ocbi @%2\n\t"
                             "ocbi @%3\n\t" : :
                             "r" (a0), "r" (a1), "r" (a2), "r" (a3));
                asm volatile("ldc %0, sr" : : "r" (orig_sr));
                a0 += linesz;
                a1 += linesz;
                a2 += linesz;
                a3 += linesz;
        } while (a0 < a0e);
}

extern void __weak sh4__flush_region_init(void);

/*
 * SH-4 has virtually indexed and physically tagged cache.
 */
void __init sh4_cache_init(void)
{
        unsigned int wt_enabled = !!(__raw_readl(CCR) & CCR_CACHE_WT);
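
        /*
         * If the operand cache is running write-through, the movca.l
         * based flushers would corrupt memory (see the comment above the
         * segment flushers), so pick the address-array variant instead.
         */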
        printk("PVR=%08x CVR=%08x PRR=%08x\n",
                ctrl_inl(CCN_PVR),
                ctrl_inl(CCN_CVR),
                ctrl_inl(CCN_PRR));

        if (wt_enabled)
                __flush_dcache_segment_fn = __flush_dcache_segment_writethrough;
        else {
                switch (boot_cpu_data.dcache.ways) {
                case 1:
                        __flush_dcache_segment_fn = __flush_dcache_segment_1way;
                        break;
                case 2:
                        __flush_dcache_segment_fn = __flush_dcache_segment_2way;
                        break;
                case 4:
                        __flush_dcache_segment_fn = __flush_dcache_segment_4way;
                        break;
                default:
                        panic("unknown number of cache ways\n");
                        break;
                }
        }

        local_flush_icache_range = sh4_flush_icache_range;
        local_flush_dcache_page = sh4_flush_dcache_page;
        local_flush_cache_all = sh4_flush_cache_all;
        local_flush_cache_mm = sh4_flush_cache_mm;
        local_flush_cache_dup_mm = sh4_flush_cache_mm;
        local_flush_cache_page = sh4_flush_cache_page;
        local_flush_cache_range = sh4_flush_cache_range;

        sh4__flush_region_init();
}