cache-sh4.c

/*
 * arch/sh/mm/cache-sh4.c
 *
 * Copyright (C) 1999, 2000, 2002  Niibe Yutaka
 * Copyright (C) 2001 - 2007  Paul Mundt
 * Copyright (C) 2003  Richard Curnow
 * Copyright (c) 2007 STMicroelectronics (R&D) Ltd.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/mutex.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

/*
 * The maximum number of pages we support when doing ranged dcache
 * flushing. Anything exceeding this will simply flush the dcache in its
 * entirety.
 */
#define MAX_DCACHE_PAGES	64	/* XXX: Tune for ways */
#define MAX_ICACHE_PAGES	32

static void __flush_dcache_segment_writethrough(unsigned long start,
						unsigned long extent);
static void __flush_dcache_segment_1way(unsigned long start,
					unsigned long extent);
static void __flush_dcache_segment_2way(unsigned long start,
					unsigned long extent);
static void __flush_dcache_segment_4way(unsigned long start,
					unsigned long extent);
static void __flush_cache_4096(unsigned long addr, unsigned long phys,
			       unsigned long exec_offset);

/*
 * This is initialised here to ensure that it is not placed in the BSS.  If
 * that were to happen, note that cache_init gets called before the BSS is
 * cleared, so this would get nulled out which would be hopeless.
 */
static void (*__flush_dcache_segment_fn)(unsigned long, unsigned long) =
	(void (*)(unsigned long, unsigned long))0xdeadbeef;
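
/*
 * alias_mask covers the cache index bits that lie above PAGE_SHIFT,
 * i.e. the bits of a virtual address that select a cache set but are
 * not fixed by the in-page offset.  n_aliases is the resulting number
 * of page 'colours': how many distinct cache positions a single
 * physical page can occupy depending on the virtual address it is
 * accessed through.
 */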
static void compute_alias(struct cache_info *c)
{
	c->alias_mask = ((c->sets - 1) << c->entry_shift) & ~(PAGE_SIZE - 1);
	c->n_aliases = c->alias_mask ? (c->alias_mask >> PAGE_SHIFT) + 1 : 0;
}

static void __init emit_cache_params(void)
{
	printk("PVR=%08x CVR=%08x PRR=%08x\n",
		ctrl_inl(CCN_PVR),
		ctrl_inl(CCN_CVR),
		ctrl_inl(CCN_PRR));
	printk("I-cache : n_ways=%d n_sets=%d way_incr=%d\n",
		boot_cpu_data.icache.ways,
		boot_cpu_data.icache.sets,
		boot_cpu_data.icache.way_incr);
	printk("I-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
		boot_cpu_data.icache.entry_mask,
		boot_cpu_data.icache.alias_mask,
		boot_cpu_data.icache.n_aliases);
	printk("D-cache : n_ways=%d n_sets=%d way_incr=%d\n",
		boot_cpu_data.dcache.ways,
		boot_cpu_data.dcache.sets,
		boot_cpu_data.dcache.way_incr);
	printk("D-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
		boot_cpu_data.dcache.entry_mask,
		boot_cpu_data.dcache.alias_mask,
		boot_cpu_data.dcache.n_aliases);

	/*
	 * Emit Secondary Cache parameters if the CPU has a probed L2.
	 */
	if (boot_cpu_data.flags & CPU_HAS_L2_CACHE) {
		printk("S-cache : n_ways=%d n_sets=%d way_incr=%d\n",
			boot_cpu_data.scache.ways,
			boot_cpu_data.scache.sets,
			boot_cpu_data.scache.way_incr);
		printk("S-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
			boot_cpu_data.scache.entry_mask,
			boot_cpu_data.scache.alias_mask,
			boot_cpu_data.scache.n_aliases);
	}

	if (!__flush_dcache_segment_fn)
		panic("unknown number of cache ways\n");
}

/*
 * SH-4 has a virtually indexed, physically tagged cache.
 */
void __init p3_cache_init(void)
{
	unsigned int wt_enabled = !!(__raw_readl(CCR) & CCR_CACHE_WT);

	compute_alias(&boot_cpu_data.icache);
	compute_alias(&boot_cpu_data.dcache);
	compute_alias(&boot_cpu_data.scache);

	if (wt_enabled) {
		__flush_dcache_segment_fn = __flush_dcache_segment_writethrough;
		goto out;
	}

	switch (boot_cpu_data.dcache.ways) {
	case 1:
		__flush_dcache_segment_fn = __flush_dcache_segment_1way;
		break;
	case 2:
		__flush_dcache_segment_fn = __flush_dcache_segment_2way;
		break;
	case 4:
		__flush_dcache_segment_fn = __flush_dcache_segment_4way;
		break;
	default:
		__flush_dcache_segment_fn = NULL;
		break;
	}

out:
	emit_cache_params();
}
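
/*
 * The three region primitives below differ only in the SH-4 cache op
 * applied to each line: ocbwb writes a dirty line back and leaves it
 * valid, ocbp writes back and invalidates (purge), and ocbi
 * invalidates without writing back.
 */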
/*
 * Write back the dirty D-caches, but do not invalidate them.
 *
 * START: Virtual Address (U0, P1, or P3)
 * SIZE: Size of the region.
 */
void __flush_wback_region(void *start, int size)
{
	unsigned long v;
	unsigned long begin, end;

	begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
	end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
		& ~(L1_CACHE_BYTES-1);
	for (v = begin; v < end; v += L1_CACHE_BYTES) {
		asm volatile("ocbwb	%0"
			     : /* no output */
			     : "m" (__m(v)));
	}
}

/*
 * Write back the dirty D-caches and invalidate them.
 *
 * START: Virtual Address (U0, P1, or P3)
 * SIZE: Size of the region.
 */
void __flush_purge_region(void *start, int size)
{
	unsigned long v;
	unsigned long begin, end;

	begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
	end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
		& ~(L1_CACHE_BYTES-1);
	for (v = begin; v < end; v += L1_CACHE_BYTES) {
		asm volatile("ocbp	%0"
			     : /* no output */
			     : "m" (__m(v)));
	}
}

/*
 * Invalidate the region, no write back please.
 */
void __flush_invalidate_region(void *start, int size)
{
	unsigned long v;
	unsigned long begin, end;

	begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
	end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
		& ~(L1_CACHE_BYTES-1);
	for (v = begin; v < end; v += L1_CACHE_BYTES) {
		asm volatile("ocbi	%0"
			     : /* no output */
			     : "m" (__m(v)));
	}
}

/*
 * Write back the range of D-cache, and purge the I-cache.
 *
 * Called from kernel/module.c:sys_init_module, the a.out loader,
 * signal handler setup, and kprobes code.
 */
void flush_icache_range(unsigned long start, unsigned long end)
{
	int icacheaddr;
	unsigned long flags, v;
	int i;

	/* If there are too many pages then just blow the caches */
	if (((end - start) >> PAGE_SHIFT) >= MAX_ICACHE_PAGES) {
		flush_cache_all();
	} else {
		/*
		 * Selectively flush the d-cache, then invalidate the
		 * i-cache.  This is inefficient, so only use it for
		 * small ranges.
		 */
		start &= ~(L1_CACHE_BYTES-1);
		end += L1_CACHE_BYTES-1;
		end &= ~(L1_CACHE_BYTES-1);

		local_irq_save(flags);
		jump_to_uncached();
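
		/*
		 * Note: each address-array write below clears the valid
		 * bit of the indexed I-cache line in one way regardless
		 * of whether its tag matches 'v', so looping over every
		 * way over-invalidates slightly but is always safe.
		 */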
		for (v = start; v < end; v += L1_CACHE_BYTES) {
			asm volatile("ocbwb	%0"
				     : /* no output */
				     : "m" (__m(v)));

			icacheaddr = CACHE_IC_ADDRESS_ARRAY | (
					v & cpu_data->icache.entry_mask);

			for (i = 0; i < cpu_data->icache.ways;
			     i++, icacheaddr += cpu_data->icache.way_incr)
				/* Clear i-cache line valid-bit */
				ctrl_outl(0, icacheaddr);
		}

		back_to_cached();
		local_irq_restore(flags);
	}
}
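
/*
 * An exec_offset of 0x20000000 relocates the flushing code from the
 * cached P1 segment into its uncached P2 alias (P2 = P1 + 0x20000000)
 * for the duration of the address-array accesses.
 */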
static inline void flush_cache_4096(unsigned long start,
				    unsigned long phys)
{
	unsigned long flags, exec_offset = 0;

	/*
	 * All types of SH-4 require PC to be in P2 to operate on the I-cache.
	 * Some types of SH-4 require PC to be in P2 to operate on the D-cache.
	 */
	if ((boot_cpu_data.flags & CPU_HAS_P2_FLUSH_BUG) ||
	    (start < CACHE_OC_ADDRESS_ARRAY))
		exec_offset = 0x20000000;

	local_irq_save(flags);
	__flush_cache_4096(start | SH_CACHE_ASSOC,
			   P1SEGADDR(phys), exec_offset);
	local_irq_restore(flags);
}

/*
 * Write back & invalidate the D-cache of the page.
 * (To avoid "alias" issues)
 */
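/*
 * PG_mapped is an sh-specific page flag (an alias of PG_arch_1), set
 * once a page has been mapped into user space; a page that was never
 * mapped cannot have user-side aliases, so it needs no flushing here.
 */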
void flush_dcache_page(struct page *page)
{
	if (test_bit(PG_mapped, &page->flags)) {
		unsigned long phys = PHYSADDR(page_address(page));
		unsigned long addr = CACHE_OC_ADDRESS_ARRAY;
		int i, n;

		/* Loop all the D-cache */
		n = boot_cpu_data.dcache.n_aliases;
		for (i = 0; i < n; i++, addr += 4096)
			flush_cache_4096(addr, phys);
	}

	wmb();
}

/* TODO: Selective icache invalidation through IC address array.. */
static void __uses_jump_to_uncached flush_icache_all(void)
{
	unsigned long flags, ccr;

	local_irq_save(flags);
	jump_to_uncached();

	/* Flush I-cache */
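	/*
	 * Setting CCR_CACHE_ICI invalidates the entire I-cache in one
	 * shot; CCR may only be written while executing from P2, hence
	 * the jump_to_uncached() above.
	 */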
	ccr = ctrl_inl(CCR);
	ccr |= CCR_CACHE_ICI;
	ctrl_outl(ccr, CCR);

	/*
	 * back_to_cached() will take care of the barrier for us, don't add
	 * another one!
	 */
	back_to_cached();
	local_irq_restore(flags);
}

void flush_dcache_all(void)
{
	(*__flush_dcache_segment_fn)(0UL, boot_cpu_data.dcache.way_size);
	wmb();
}

void flush_cache_all(void)
{
	flush_dcache_all();
	flush_icache_all();
}
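
/*
 * Walk the page tables for [start, end) building a bitmap 'd' of the
 * dcache colours that may hold aliased data: for every present page
 * whose virtual and physical colours differ, both colours are marked.
 * The walk bails out early once every colour is marked, since nothing
 * more can be learned.  Afterwards only the marked colour-sized
 * segments are flushed.
 */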
static void __flush_cache_mm(struct mm_struct *mm, unsigned long start,
			     unsigned long end)
{
	unsigned long d = 0, p = start & PAGE_MASK;
	unsigned long alias_mask = boot_cpu_data.dcache.alias_mask;
	unsigned long n_aliases = boot_cpu_data.dcache.n_aliases;
	unsigned long select_bit;
	unsigned long all_aliases_mask;
	unsigned long addr_offset;
	pgd_t *dir;
	pmd_t *pmd;
	pud_t *pud;
	pte_t *pte;
	int i;

	dir = pgd_offset(mm, p);
	pud = pud_offset(dir, p);
	pmd = pmd_offset(pud, p);
	end = PAGE_ALIGN(end);

	all_aliases_mask = (1 << n_aliases) - 1;

	do {
		if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd))) {
			p &= PMD_MASK;
			p += PMD_SIZE;
			pmd++;

			continue;
		}

		pte = pte_offset_kernel(pmd, p);

		do {
			unsigned long phys;
			pte_t entry = *pte;

			if (!(pte_val(entry) & _PAGE_PRESENT)) {
				pte++;
				p += PAGE_SIZE;
				continue;
			}

			phys = pte_val(entry) & PTE_PHYS_MASK;

			if ((p ^ phys) & alias_mask) {
				d |= 1 << ((p & alias_mask) >> PAGE_SHIFT);
				d |= 1 << ((phys & alias_mask) >> PAGE_SHIFT);

				if (d == all_aliases_mask)
					goto loop_exit;
			}

			pte++;
			p += PAGE_SIZE;
		} while (p < end && ((unsigned long)pte & ~PAGE_MASK));
		pmd++;
	} while (p < end);

loop_exit:
	addr_offset = 0;
	select_bit = 1;

	for (i = 0; i < n_aliases; i++) {
		if (d & select_bit) {
			(*__flush_dcache_segment_fn)(addr_offset, PAGE_SIZE);
			wmb();
		}

		select_bit <<= 1;
		addr_offset += PAGE_SIZE;
	}
}

/*
 * Note : (RPC) since the caches are physically tagged, the only point
 * of flush_cache_mm for SH-4 is to get rid of aliases from the
 * D-cache.  The assumption elsewhere, e.g. flush_cache_range, is that
 * lines can stay resident so long as the virtual address they were
 * accessed with (hence cache set) is in accord with the physical
 * address (i.e. tag).  It's no different here.  So I reckon we don't
 * need to flush the I-cache, since aliases don't matter for that.  We
 * should try that.
 *
 * Caller takes mm->mmap_sem.
 */
void flush_cache_mm(struct mm_struct *mm)
{
	/*
	 * If cache is only 4k-per-way, there are never any 'aliases'.  Since
	 * the cache is physically tagged, the data can just be left in there.
	 */
	if (boot_cpu_data.dcache.n_aliases == 0)
		return;

	/*
	 * Don't bother groveling around the dcache for the VMA ranges
	 * if there are too many PTEs to make it worthwhile.
	 */
	if (mm->nr_ptes >= MAX_DCACHE_PAGES)
		flush_dcache_all();
	else {
		struct vm_area_struct *vma;

		/*
		 * In this case there are reasonably sized ranges to flush,
		 * iterate through the VMA list and take care of any aliases.
		 */
		for (vma = mm->mmap; vma; vma = vma->vm_next)
			__flush_cache_mm(mm, vma->vm_start, vma->vm_end);
	}

	/* Only touch the icache if one of the VMAs has VM_EXEC set. */
	if (mm->exec_vm)
		flush_icache_all();
}

/*
 * Write back and invalidate I/D-caches for the page.
 *
 * ADDR: Virtual Address (U0 address)
 * PFN: Physical page number
 */
void flush_cache_page(struct vm_area_struct *vma, unsigned long address,
		      unsigned long pfn)
{
	unsigned long phys = pfn << PAGE_SHIFT;
	unsigned int alias_mask;

	alias_mask = boot_cpu_data.dcache.alias_mask;

	/* We only need to flush the D-cache when we have aliases */
	if ((address ^ phys) & alias_mask) {
		/* Loop 4K of the D-cache */
		flush_cache_4096(
			CACHE_OC_ADDRESS_ARRAY | (address & alias_mask),
			phys);
		/* Loop another 4K of the D-cache */
		flush_cache_4096(
			CACHE_OC_ADDRESS_ARRAY | (phys & alias_mask),
			phys);
	}

	alias_mask = boot_cpu_data.icache.alias_mask;
	if (vma->vm_flags & VM_EXEC) {
		/*
		 * Evict entries from the portion of the cache from which code
		 * may have been executed at this address (virtual).  There's
		 * no need to evict from the portion corresponding to the
		 * physical address as for the D-cache, because we know the
		 * kernel has never executed the code through its identity
		 * translation.
		 */
		flush_cache_4096(
			CACHE_IC_ADDRESS_ARRAY | (address & alias_mask),
			phys);
	}
}
/*
 * Write back and invalidate D-caches.
 *
 * START, END: Virtual Address (U0 address)
 *
 * NOTE: We need to flush the _physical_ page entry.
 * Flushing the cache lines for U0 only isn't enough.
 * We need to flush for P1 too, which may contain aliases.
 */
void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end)
{
	/*
	 * If cache is only 4k-per-way, there are never any 'aliases'.  Since
	 * the cache is physically tagged, the data can just be left in there.
	 */
	if (boot_cpu_data.dcache.n_aliases == 0)
		return;

	/*
	 * Don't bother with the lookup and alias check if we have a
	 * wide range to cover, just blow away the dcache in its
	 * entirety instead. -- PFM.
	 */
	if (((end - start) >> PAGE_SHIFT) >= MAX_DCACHE_PAGES)
		flush_dcache_all();
	else
		__flush_cache_mm(vma->vm_mm, start, end);

	if (vma->vm_flags & VM_EXEC) {
		/*
		 * TODO: Is this required???  Need to look at how I-cache
		 * coherency is assured when new programs are loaded to see if
		 * this matters.
		 */
		flush_icache_all();
	}
}

/*
 * flush_icache_user_range
 * @vma: VMA of the process
 * @page: page
 * @addr: U0 address
 * @len: length of the range (< page size)
 */
void flush_icache_user_range(struct vm_area_struct *vma,
			     struct page *page, unsigned long addr, int len)
{
	flush_cache_page(vma, addr, page_to_pfn(page));
	mb();
}
/**
 * __flush_cache_4096
 *
 * @addr:  address in memory mapped cache array
 * @phys:  P1 address to flush (has to match tags if addr has 'A' bit
 *         set i.e. associative write)
 * @exec_offset: set to 0x20000000 if flush has to be executed from P2
 *               region, else 0x0
 *
 * The offset into the cache array implied by 'addr' selects the
 * 'colour' of the virtual address range that will be flushed.  The
 * operation (purge/write-back) is selected by the lower 2 bits of
 * 'phys'.
 */
static void __flush_cache_4096(unsigned long addr, unsigned long phys,
			       unsigned long exec_offset)
{
	int way_count;
	unsigned long base_addr = addr;
	struct cache_info *dcache;
	unsigned long way_incr;
	unsigned long a, ea, p;
	unsigned long temp_pc;

	dcache = &boot_cpu_data.dcache;
	/* Write this way for better assembly. */
	way_count = dcache->ways;
	way_incr = dcache->way_incr;

	/*
	 * Apply exec_offset (i.e. branch to P2 if required).
	 *
	 * FIXME:
	 *
	 * If I write "=r" for the (temp_pc), it puts this in r6 hence
	 * trashing exec_offset before it's been added on - why?  Hence
	 * "=&r" as a 'workaround'.
	 */
	asm volatile("mov.l 1f, %0\n\t"
		     "add   %1, %0\n\t"
		     "jmp   @%0\n\t"
		     "nop\n\t"
		     ".balign 4\n\t"
		     "1:  .long 2f\n\t"
		     "2:\n" : "=&r" (temp_pc) : "r" (exec_offset));
	/*
	 * We know there will be >=1 iteration, so write as do-while to avoid
	 * a pointless head-of-loop check for 0 iterations.
	 */
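	/*
	 * With the 'A' bit set in 'addr' (as flush_cache_4096 arranges),
	 * each store below is an associative write: the hardware compares
	 * the tag in 'p' against the line's tag and, on a match, performs
	 * the write-back/invalidate selected by p's low bits.
	 */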
	do {
		ea = base_addr + PAGE_SIZE;
		a = base_addr;
		p = phys;

		do {
			*(volatile unsigned long *)a = p;
			/*
			 * Next line: intentionally not p+32, saves an add, p
			 * will do since only the cache tag bits need to
			 * match.
			 */
			*(volatile unsigned long *)(a+32) = p;
			a += 64;
			p += 64;
		} while (a < ea);

		base_addr += way_incr;
	} while (--way_count != 0);
}
/*
 * Break the 1, 2 and 4 way variants of this out into separate functions to
 * avoid nearly all the overhead of having the conditional stuff in the function
 * bodies (+ the 1 and 2 way cases avoid saving any registers too).
 *
 * We want to eliminate unnecessary bus transactions, so this code uses
 * a non-obvious technique.
 *
 * Loop over a cache way sized block of memory, one cache line at a time.
 * For each line, use movca.l to cause the current cache line contents to
 * be written back, but without reading anything from main memory.  However
 * this has the side effect that the cache is now caching that memory
 * location.  So follow this with a cache invalidate to mark the cache line
 * invalid.  And do all this with interrupts disabled, to avoid the cache
 * line being accidentally evicted while it is holding garbage.
 *
 * This also breaks in a number of circumstances:
 * - if there are modifications to the region of memory just above
 *   empty_zero_page (for example because a breakpoint has been placed
 *   there), then these can be lost.
 *
 *   This is because the memory address which the cache temporarily
 *   caches in the above description is empty_zero_page.  So the
 *   movca.l hits the cache (it is assumed that it misses, or at least
 *   isn't dirty), modifies the line and then invalidates it, losing the
 *   required change.
 *
 * - If caches are disabled or configured in write-through mode, then
 *   the movca.l writes garbage directly into memory.
 */
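
/*
 * In write-through mode there are never any dirty lines, so the movca.l
 * trick above is unnecessary (and, as noted, harmful): simply clearing
 * the valid bits through the memory-mapped address array is enough.
 */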
static void __flush_dcache_segment_writethrough(unsigned long start,
						unsigned long extent_per_way)
{
	unsigned long addr;
	int i;

	addr = CACHE_OC_ADDRESS_ARRAY | (start & cpu_data->dcache.entry_mask);

	while (extent_per_way) {
		for (i = 0; i < cpu_data->dcache.ways; i++)
			__raw_writel(0, addr + cpu_data->dcache.way_incr * i);

		addr += cpu_data->dcache.linesz;
		extent_per_way -= cpu_data->dcache.linesz;
	}
}
static void __flush_dcache_segment_1way(unsigned long start,
					unsigned long extent_per_way)
{
	unsigned long orig_sr, sr_with_bl;
	unsigned long base_addr;
	unsigned long way_incr, linesz, way_size;
	struct cache_info *dcache;
	register unsigned long a0, a0e;
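
	/*
	 * SR bit 28 is the BL bit: while it is set, interrupts and
	 * exceptions are blocked, so nothing can touch a cache line
	 * between the movca.l and the ocbi below.
	 */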
	asm volatile("stc sr, %0" : "=r" (orig_sr));
	sr_with_bl = orig_sr | (1<<28);
	base_addr = ((unsigned long)&empty_zero_page[0]);

	/*
	 * The previous code aligned base_addr to 16k, i.e. the way_size of all
	 * existing SH-4 D-caches.  Whilst I don't see a need to have this
	 * aligned to any better than the cache line size (which it will be
	 * anyway by construction), let's align it to at least the way_size of
	 * any existing or conceivable SH-4 D-cache. -- RPC
	 */
	base_addr = ((base_addr >> 16) << 16);
	base_addr |= start;

	dcache = &boot_cpu_data.dcache;
	linesz = dcache->linesz;
	way_incr = dcache->way_incr;
	way_size = dcache->way_size;

	a0 = base_addr;
	a0e = base_addr + extent_per_way;
	do {
		asm volatile("ldc %0, sr" : : "r" (sr_with_bl));
		asm volatile("movca.l r0, @%0\n\t"
			     "ocbi @%0" : : "r" (a0));
		a0 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "ocbi @%0" : : "r" (a0));
		a0 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "ocbi @%0" : : "r" (a0));
		a0 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "ocbi @%0" : : "r" (a0));
		asm volatile("ldc %0, sr" : : "r" (orig_sr));
		a0 += linesz;
	} while (a0 < a0e);
}
static void __flush_dcache_segment_2way(unsigned long start,
					unsigned long extent_per_way)
{
	unsigned long orig_sr, sr_with_bl;
	unsigned long base_addr;
	unsigned long way_incr, linesz, way_size;
	struct cache_info *dcache;
	register unsigned long a0, a1, a0e;

	asm volatile("stc sr, %0" : "=r" (orig_sr));
	sr_with_bl = orig_sr | (1<<28);
	base_addr = ((unsigned long)&empty_zero_page[0]);

	/* See comment under 1-way above */
	base_addr = ((base_addr >> 16) << 16);
	base_addr |= start;

	dcache = &boot_cpu_data.dcache;
	linesz = dcache->linesz;
	way_incr = dcache->way_incr;
	way_size = dcache->way_size;

	a0 = base_addr;
	a1 = a0 + way_incr;
	a0e = base_addr + extent_per_way;
	do {
		asm volatile("ldc %0, sr" : : "r" (sr_with_bl));
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1" : :
			     "r" (a0), "r" (a1));
		a0 += linesz;
		a1 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1" : :
			     "r" (a0), "r" (a1));
		a0 += linesz;
		a1 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1" : :
			     "r" (a0), "r" (a1));
		a0 += linesz;
		a1 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1" : :
			     "r" (a0), "r" (a1));
		asm volatile("ldc %0, sr" : : "r" (orig_sr));
		a0 += linesz;
		a1 += linesz;
	} while (a0 < a0e);
}
static void __flush_dcache_segment_4way(unsigned long start,
					unsigned long extent_per_way)
{
	unsigned long orig_sr, sr_with_bl;
	unsigned long base_addr;
	unsigned long way_incr, linesz, way_size;
	struct cache_info *dcache;
	register unsigned long a0, a1, a2, a3, a0e;

	asm volatile("stc sr, %0" : "=r" (orig_sr));
	sr_with_bl = orig_sr | (1<<28);
	base_addr = ((unsigned long)&empty_zero_page[0]);

	/* See comment under 1-way above */
	base_addr = ((base_addr >> 16) << 16);
	base_addr |= start;

	dcache = &boot_cpu_data.dcache;
	linesz = dcache->linesz;
	way_incr = dcache->way_incr;
	way_size = dcache->way_size;

	a0 = base_addr;
	a1 = a0 + way_incr;
	a2 = a1 + way_incr;
	a3 = a2 + way_incr;
	a0e = base_addr + extent_per_way;
	do {
		asm volatile("ldc %0, sr" : : "r" (sr_with_bl));
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "movca.l r0, @%2\n\t"
			     "movca.l r0, @%3\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1\n\t"
			     "ocbi @%2\n\t"
			     "ocbi @%3\n\t" : :
			     "r" (a0), "r" (a1), "r" (a2), "r" (a3));
		a0 += linesz;
		a1 += linesz;
		a2 += linesz;
		a3 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "movca.l r0, @%2\n\t"
			     "movca.l r0, @%3\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1\n\t"
			     "ocbi @%2\n\t"
			     "ocbi @%3\n\t" : :
			     "r" (a0), "r" (a1), "r" (a2), "r" (a3));
		a0 += linesz;
		a1 += linesz;
		a2 += linesz;
		a3 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "movca.l r0, @%2\n\t"
			     "movca.l r0, @%3\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1\n\t"
			     "ocbi @%2\n\t"
			     "ocbi @%3\n\t" : :
			     "r" (a0), "r" (a1), "r" (a2), "r" (a3));
		a0 += linesz;
		a1 += linesz;
		a2 += linesz;
		a3 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "movca.l r0, @%2\n\t"
			     "movca.l r0, @%3\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1\n\t"
			     "ocbi @%2\n\t"
			     "ocbi @%3\n\t" : :
			     "r" (a0), "r" (a1), "r" (a2), "r" (a3));
		asm volatile("ldc %0, sr" : : "r" (orig_sr));
		a0 += linesz;
		a1 += linesz;
		a2 += linesz;
		a3 += linesz;
	} while (a0 < a0e);
}