/*
 * arch/sh/mm/cache-sh4.c
 *
 * Copyright (C) 1999, 2000, 2002  Niibe Yutaka
 * Copyright (C) 2001 - 2006  Paul Mundt
 * Copyright (C) 2003  Richard Curnow
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/mutex.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

/*
 * The maximum number of pages we support when doing ranged dcache
 * flushing. Anything exceeding this will simply flush the dcache in its
 * entirety.
 */
#define MAX_DCACHE_PAGES	64	/* XXX: Tune for ways */

static void __flush_dcache_segment_1way(unsigned long start,
					unsigned long extent);
static void __flush_dcache_segment_2way(unsigned long start,
					unsigned long extent);
static void __flush_dcache_segment_4way(unsigned long start,
					unsigned long extent);
static void __flush_cache_4096(unsigned long addr, unsigned long phys,
			       unsigned long exec_offset);

/*
 * This is initialised here to ensure that it is not placed in the BSS. If
 * that were to happen, note that cache_init gets called before the BSS is
 * cleared, so this would get nulled out which would be hopeless.
 */
static void (*__flush_dcache_segment_fn)(unsigned long, unsigned long) =
	(void (*)(unsigned long, unsigned long))0xdeadbeef;

static void compute_alias(struct cache_info *c)
{
	c->alias_mask = ((c->sets - 1) << c->entry_shift) & ~(PAGE_SIZE - 1);
	c->n_aliases = (c->alias_mask >> PAGE_SHIFT) + 1;
}
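
/*
 * Worked example (illustrative parameters): on a 16KB direct-mapped
 * D-cache with 512 sets and 32-byte lines (entry_shift == 5) and 4KB
 * pages, alias_mask = ((512 - 1) << 5) & ~0xfff == 0x3000, so
 * n_aliases == 4, i.e. four page "colours" that can alias one another.
 */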

static void __init emit_cache_params(void)
{
	printk("PVR=%08x CVR=%08x PRR=%08x\n",
		ctrl_inl(CCN_PVR),
		ctrl_inl(CCN_CVR),
		ctrl_inl(CCN_PRR));
	printk("I-cache : n_ways=%d n_sets=%d way_incr=%d\n",
		current_cpu_data.icache.ways,
		current_cpu_data.icache.sets,
		current_cpu_data.icache.way_incr);
	printk("I-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
		current_cpu_data.icache.entry_mask,
		current_cpu_data.icache.alias_mask,
		current_cpu_data.icache.n_aliases);
	printk("D-cache : n_ways=%d n_sets=%d way_incr=%d\n",
		current_cpu_data.dcache.ways,
		current_cpu_data.dcache.sets,
		current_cpu_data.dcache.way_incr);
	printk("D-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
		current_cpu_data.dcache.entry_mask,
		current_cpu_data.dcache.alias_mask,
		current_cpu_data.dcache.n_aliases);

	if (!__flush_dcache_segment_fn)
		panic("unknown number of cache ways\n");
}

/*
 * SH-4 has a virtually indexed and physically tagged cache.
 */

/* Worst case assumed to be 64k cache, direct-mapped i.e. 4 synonym bits. */
#define MAX_P3_MUTEXES 16

struct mutex p3map_mutex[MAX_P3_MUTEXES];

void __init p3_cache_init(void)
{
	int i;

	compute_alias(&current_cpu_data.icache);
	compute_alias(&current_cpu_data.dcache);

	switch (current_cpu_data.dcache.ways) {
	case 1:
		__flush_dcache_segment_fn = __flush_dcache_segment_1way;
		break;
	case 2:
		__flush_dcache_segment_fn = __flush_dcache_segment_2way;
		break;
	case 4:
		__flush_dcache_segment_fn = __flush_dcache_segment_4way;
		break;
	default:
		__flush_dcache_segment_fn = NULL;
		break;
	}

	emit_cache_params();

	if (ioremap_page_range(P3SEG, P3SEG + (PAGE_SIZE * 4), 0, PAGE_KERNEL))
		panic("%s failed.", __FUNCTION__);

	for (i = 0; i < current_cpu_data.dcache.n_aliases; i++)
		mutex_init(&p3map_mutex[i]);
}
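
/*
 * The pages mapped at P3SEG above, together with the per-colour
 * p3map_mutex[] locks, give the kernel a window through which a page can
 * be temporarily mapped at a chosen cache colour (e.g. the SH-4
 * user-page copy/clear paths take one of these locks per colour).
 */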

/*
 * Write back the dirty D-caches, but do not invalidate them.
 *
 * START: Virtual Address (U0, P1, or P3)
 * SIZE: Size of the region.
 */
void __flush_wback_region(void *start, int size)
{
	unsigned long v;
	unsigned long begin, end;

	begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
	end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
		& ~(L1_CACHE_BYTES-1);
	for (v = begin; v < end; v+=L1_CACHE_BYTES) {
		asm volatile("ocbwb	%0"
			     : /* no output */
			     : "m" (__m(v)));
	}
}
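
/*
 * Illustrative use: a driver handing a buffer to a DMA engine would do
 *
 *	__flush_wback_region(buf, len);
 *
 * before starting the transfer; buf and len need not be cache-line
 * aligned, since the region is rounded out to L1_CACHE_BYTES here.
 */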

/*
 * Write back the dirty D-caches and invalidate them.
 *
 * START: Virtual Address (U0, P1, or P3)
 * SIZE: Size of the region.
 */
void __flush_purge_region(void *start, int size)
{
	unsigned long v;
	unsigned long begin, end;

	begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
	end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
		& ~(L1_CACHE_BYTES-1);
	for (v = begin; v < end; v+=L1_CACHE_BYTES) {
		asm volatile("ocbp	%0"
			     : /* no output */
			     : "m" (__m(v)));
	}
}

/*
 * Invalidate the D-cache lines for the region without writing them back;
 * any dirty data is discarded.
 */
void __flush_invalidate_region(void *start, int size)
{
	unsigned long v;
	unsigned long begin, end;

	begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
	end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
		& ~(L1_CACHE_BYTES-1);
	for (v = begin; v < end; v+=L1_CACHE_BYTES) {
		asm volatile("ocbi	%0"
			     : /* no output */
			     : "m" (__m(v)));
	}
}

/*
 * Write back the range of D-cache, and purge the I-cache.
 *
 * Called from kernel/module.c:sys_init_module and from the a.out loader.
 */
void flush_icache_range(unsigned long start, unsigned long end)
{
	flush_cache_all();
}

/*
 * Write back the D-cache and purge the I-cache for the signal trampoline,
 * which happens to be the same behaviour as flush_icache_range().
 * So we simply flush out a line.
 */
void flush_cache_sigtramp(unsigned long addr)
{
	unsigned long v, index;
	unsigned long flags;
	int i;

	v = addr & ~(L1_CACHE_BYTES-1);
	asm volatile("ocbwb	%0"
		     : /* no output */
		     : "m" (__m(v)));

	index = CACHE_IC_ADDRESS_ARRAY |
			(v & current_cpu_data.icache.entry_mask);

	local_irq_save(flags);
	jump_to_P2();

	for (i = 0; i < current_cpu_data.icache.ways;
	     i++, index += current_cpu_data.icache.way_incr)
		ctrl_outl(0, index);	/* Clear out Valid-bit */

	back_to_P1();
	wmb();
	local_irq_restore(flags);
}

static inline void flush_cache_4096(unsigned long start,
				    unsigned long phys)
{
	unsigned long flags, exec_offset = 0;

	/*
	 * All types of SH-4 require PC to be in P2 to operate on the I-cache.
	 * Some types of SH-4 require PC to be in P2 to operate on the D-cache.
	 */
	if ((current_cpu_data.flags & CPU_HAS_P2_FLUSH_BUG) ||
	    (start < CACHE_OC_ADDRESS_ARRAY))
		exec_offset = 0x20000000;

	local_irq_save(flags);
	__flush_cache_4096(start | SH_CACHE_ASSOC,
			   P1SEGADDR(phys), exec_offset);
	local_irq_restore(flags);
}
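
/*
 * ORing SH_CACHE_ASSOC into the address-array address above makes the
 * stores in __flush_cache_4096() associative: a cache line is only
 * affected if its tag matches P1SEGADDR(phys), so unrelated data sharing
 * the same set is left alone (see the SH-4 manual's description of the
 * memory-mapped cache address arrays for the exact semantics).
 */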

/*
 * Write back & invalidate the D-cache of the page.
 * (To avoid "alias" issues)
 *
 * This uses a lazy write-back on UP, which is explicitly
 * disabled on SMP.
 */
void flush_dcache_page(struct page *page)
{
#ifndef CONFIG_SMP
	struct address_space *mapping = page_mapping(page);

	if (mapping && !mapping_mapped(mapping))
		set_bit(PG_dcache_dirty, &page->flags);
	else
#endif
	{
		unsigned long phys = PHYSADDR(page_address(page));
		unsigned long addr = CACHE_OC_ADDRESS_ARRAY;
		int i, n;

		/* Flush each alias colour of the page out of the D-cache */
		n = current_cpu_data.dcache.n_aliases;
		for (i = 0; i < n; i++, addr += 4096)
			flush_cache_4096(addr, phys);
	}

	wmb();
}
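
/*
 * On UP, pages whose mapping isn't currently mapped anywhere just get
 * PG_dcache_dirty set above; the actual flush is deferred until the page
 * is next mapped in (the MMU-update path checks this bit), which avoids
 * flushing pages nobody can be viewing through an aliasing address.
 */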

/* TODO: Selective icache invalidation through IC address array.. */
static inline void flush_icache_all(void)
{
	unsigned long flags, ccr;

	local_irq_save(flags);
	jump_to_P2();

	/* Flush I-cache */
	ccr = ctrl_inl(CCR);
	ccr |= CCR_CACHE_ICI;
	ctrl_outl(ccr, CCR);

	/*
	 * back_to_P1() will take care of the barrier for us, don't add
	 * another one!
	 */
	back_to_P1();
	local_irq_restore(flags);
}

void flush_dcache_all(void)
{
	(*__flush_dcache_segment_fn)(0UL, current_cpu_data.dcache.way_size);
	wmb();
}

void flush_cache_all(void)
{
	flush_dcache_all();
	flush_icache_all();
}

static void __flush_cache_mm(struct mm_struct *mm, unsigned long start,
			     unsigned long end)
{
	unsigned long d = 0, p = start & PAGE_MASK;
	unsigned long alias_mask = current_cpu_data.dcache.alias_mask;
	unsigned long n_aliases = current_cpu_data.dcache.n_aliases;
	unsigned long select_bit;
	unsigned long all_aliases_mask;
	unsigned long addr_offset;
	pgd_t *dir;
	pmd_t *pmd;
	pud_t *pud;
	pte_t *pte;
	int i;

	dir = pgd_offset(mm, p);
	pud = pud_offset(dir, p);
	pmd = pmd_offset(pud, p);
	end = PAGE_ALIGN(end);

	all_aliases_mask = (1 << n_aliases) - 1;
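	/*
	 * 'd' accumulates one bit per D-cache colour that has been seen,
	 * either as the virtual or the physical colour of an aliasing
	 * (mismatching) PTE in the range. Once every colour is marked
	 * (d == all_aliases_mask) there is nothing more to learn, so the
	 * walk bails out early; the marked colours are flushed below.
	 */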
	do {
		if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd))) {
			p &= PMD_MASK;
			p += PMD_SIZE;
			pmd++;
			continue;
		}

		pte = pte_offset_kernel(pmd, p);

		do {
			unsigned long phys;
			pte_t entry = *pte;

			if (!(pte_val(entry) & _PAGE_PRESENT)) {
				pte++;
				p += PAGE_SIZE;
				continue;
			}

			phys = pte_val(entry) & PTE_PHYS_MASK;

			if ((p ^ phys) & alias_mask) {
				d |= 1 << ((p & alias_mask) >> PAGE_SHIFT);
				d |= 1 << ((phys & alias_mask) >> PAGE_SHIFT);

				if (d == all_aliases_mask)
					goto loop_exit;
			}

			pte++;
			p += PAGE_SIZE;
		} while (p < end && ((unsigned long)pte & ~PAGE_MASK));
		pmd++;
	} while (p < end);

loop_exit:
	addr_offset = 0;
	select_bit = 1;

	for (i = 0; i < n_aliases; i++) {
		if (d & select_bit) {
			(*__flush_dcache_segment_fn)(addr_offset, PAGE_SIZE);
			wmb();
		}

		select_bit <<= 1;
		addr_offset += PAGE_SIZE;
	}
}

/*
 * Note : (RPC) since the caches are physically tagged, the only point
 * of flush_cache_mm for SH-4 is to get rid of aliases from the
 * D-cache. The assumption elsewhere, e.g. flush_cache_range, is that
 * lines can stay resident so long as the virtual address they were
 * accessed with (hence cache set) is in accord with the physical
 * address (i.e. tag). It's no different here. So I reckon we don't
 * need to flush the I-cache, since aliases don't matter for that. We
 * should try that.
 *
 * Caller takes mm->mmap_sem.
 */
void flush_cache_mm(struct mm_struct *mm)
{
	/*
	 * If cache is only 4k-per-way, there are never any 'aliases'. Since
	 * the cache is physically tagged, the data can just be left in there.
	 */
	if (current_cpu_data.dcache.n_aliases == 0)
		return;

	/*
	 * Don't bother groveling around the dcache for the VMA ranges
	 * if there are too many PTEs to make it worthwhile.
	 */
	if (mm->nr_ptes >= MAX_DCACHE_PAGES)
		flush_dcache_all();
	else {
		struct vm_area_struct *vma;

		/*
		 * In this case there are reasonably sized ranges to flush,
		 * iterate through the VMA list and take care of any aliases.
		 */
		for (vma = mm->mmap; vma; vma = vma->vm_next)
			__flush_cache_mm(mm, vma->vm_start, vma->vm_end);
	}

	/* Only touch the icache if one of the VMAs has VM_EXEC set. */
	if (mm->exec_vm)
		flush_icache_all();
}

/*
 * Write back and invalidate I/D-caches for the page.
 *
 * ADDR: Virtual Address (U0 address)
 * PFN: Physical page number
 */
void flush_cache_page(struct vm_area_struct *vma, unsigned long address,
		      unsigned long pfn)
{
	unsigned long phys = pfn << PAGE_SHIFT;
	unsigned int alias_mask;

	alias_mask = current_cpu_data.dcache.alias_mask;

	/* We only need to flush the D-cache when we have an alias */
	if ((address ^ phys) & alias_mask) {
		/* Loop 4K of the D-cache */
		flush_cache_4096(
			CACHE_OC_ADDRESS_ARRAY | (address & alias_mask),
			phys);
		/* Loop another 4K of the D-cache */
		flush_cache_4096(
			CACHE_OC_ADDRESS_ARRAY | (phys & alias_mask),
			phys);
	}

	alias_mask = current_cpu_data.icache.alias_mask;
	if (vma->vm_flags & VM_EXEC) {
		/*
		 * Evict entries from the portion of the cache from which code
		 * may have been executed at this address (virtual). There's
		 * no need to evict from the portion corresponding to the
		 * physical address as for the D-cache, because we know the
		 * kernel has never executed the code through its identity
		 * translation.
		 */
		flush_cache_4096(
			CACHE_IC_ADDRESS_ARRAY | (address & alias_mask),
			phys);
	}
}

/*
 * Write back and invalidate D-caches.
 *
 * START, END: Virtual Address (U0 address)
 *
 * NOTE: We need to flush the _physical_ page entry.
 * Flushing the cache lines for U0 only isn't enough.
 * We need to flush for P1 too, which may contain aliases.
 */
void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end)
{
	/*
	 * If cache is only 4k-per-way, there are never any 'aliases'. Since
	 * the cache is physically tagged, the data can just be left in there.
	 */
	if (current_cpu_data.dcache.n_aliases == 0)
		return;

	/*
	 * Don't bother with the lookup and alias check if we have a
	 * wide range to cover, just blow away the dcache in its
	 * entirety instead. -- PFM.
	 */
	if (((end - start) >> PAGE_SHIFT) >= MAX_DCACHE_PAGES)
		flush_dcache_all();
	else
		__flush_cache_mm(vma->vm_mm, start, end);

	if (vma->vm_flags & VM_EXEC) {
		/*
		 * TODO: Is this required???  Need to look at how I-cache
		 * coherency is assured when new programs are loaded to see if
		 * this matters.
		 */
		flush_icache_all();
	}
}

/*
 * flush_icache_user_range
 * @vma: VMA of the process
 * @page: page
 * @addr: U0 address
 * @len: length of the range (< page size)
 */
void flush_icache_user_range(struct vm_area_struct *vma,
			     struct page *page, unsigned long addr, int len)
{
	flush_cache_page(vma, addr, page_to_pfn(page));
	mb();
}

/**
 * __flush_cache_4096
 *
 * @addr:  address in memory mapped cache array
 * @phys:  P1 address to flush (has to match tags if addr has 'A' bit
 *         set i.e. associative write)
 * @exec_offset: set to 0x20000000 if flush has to be executed from P2
 *               region else 0x0
 *
 * The offset into the cache array implied by 'addr' selects the
 * 'colour' of the virtual address range that will be flushed. The
 * operation (purge/write-back) is selected by the lower 2 bits of
 * 'phys'.
 */
static void __flush_cache_4096(unsigned long addr, unsigned long phys,
			       unsigned long exec_offset)
{
	int way_count;
	unsigned long base_addr = addr;
	struct cache_info *dcache;
	unsigned long way_incr;
	unsigned long a, ea, p;
	unsigned long temp_pc;

	dcache = &current_cpu_data.dcache;
	/* Write this way for better assembly. */
	way_count = dcache->ways;
	way_incr = dcache->way_incr;

	/*
	 * Apply exec_offset (i.e. branch to P2 if required.).
	 *
	 * FIXME:
	 *
	 *	If I write "=r" for the (temp_pc), it puts this in r6 hence
	 *	trashing exec_offset before it's been added on - why? Hence
	 *	"=&r" as a 'workaround'
	 */
	asm volatile("mov.l 1f, %0\n\t"
		     "add   %1, %0\n\t"
		     "jmp   @%0\n\t"
		     "nop\n\t"
		     ".balign 4\n\t"
		     "1:  .long 2f\n\t"
		     "2:\n" : "=&r" (temp_pc) : "r" (exec_offset));

	/*
	 * We know there will be >=1 iteration, so write as do-while to avoid
	 * a pointless head-of-loop check for 0 iterations.
	 */
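	/*
	 * Outer loop: one pass per cache way; inner loop: walk the 4KB
	 * colour window 64 bytes (two 32-byte lines) at a time, storing
	 * 'phys' into the address array to purge any matching line.
	 */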
	do {
		ea = base_addr + PAGE_SIZE;
		a = base_addr;
		p = phys;

		do {
			*(volatile unsigned long *)a = p;
			/*
			 * Next line: intentionally not p+32, saves an add, p
			 * will do since only the cache tag bits need to
			 * match.
			 */
			*(volatile unsigned long *)(a+32) = p;
			a += 64;
			p += 64;
		} while (a < ea);

		base_addr += way_incr;
	} while (--way_count != 0);
}

/*
 * Break the 1, 2 and 4 way variants of this out into separate functions to
 * avoid nearly all the overhead of having the conditional stuff in the
 * function bodies (+ the 1 and 2 way cases avoid saving any registers too).
 */
static void __flush_dcache_segment_1way(unsigned long start,
					unsigned long extent_per_way)
{
	unsigned long orig_sr, sr_with_bl;
	unsigned long base_addr;
	unsigned long way_incr, linesz, way_size;
	struct cache_info *dcache;
	register unsigned long a0, a0e;

	asm volatile("stc sr, %0" : "=r" (orig_sr));
	sr_with_bl = orig_sr | (1<<28);
	base_addr = ((unsigned long)&empty_zero_page[0]);

	/*
	 * The previous code aligned base_addr to 16k, i.e. the way_size of all
	 * existing SH-4 D-caches. Whilst I don't see a need to have this
	 * aligned to any better than the cache line size (which it will be
	 * anyway by construction), let's align it to at least the way_size of
	 * any existing or conceivable SH-4 D-cache. -- RPC
	 */
	base_addr = ((base_addr >> 16) << 16);

	base_addr |= start;

	dcache = &current_cpu_data.dcache;
	linesz = dcache->linesz;
	way_incr = dcache->way_incr;
	way_size = dcache->way_size;

	a0 = base_addr;
	a0e = base_addr + extent_per_way;
	do {
		asm volatile("ldc %0, sr" : : "r" (sr_with_bl));
		asm volatile("movca.l r0, @%0\n\t"
			     "ocbi @%0" : : "r" (a0));
		a0 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "ocbi @%0" : : "r" (a0));
		a0 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "ocbi @%0" : : "r" (a0));
		a0 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "ocbi @%0" : : "r" (a0));
		asm volatile("ldc %0, sr" : : "r" (orig_sr));
		a0 += linesz;
	} while (a0 < a0e);
}
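
/*
 * The movca.l/ocbi pairing above is the standard SH-4 trick for evicting
 * a D-cache line without a corresponding memory read: movca.l allocates
 * the line for a harmless empty_zero_page-based address, displacing (and
 * writing back, if dirty) whatever previously occupied it, and ocbi then
 * throws the freshly allocated line away. SR.BL is held set around each
 * group so no interrupt can touch the cache between allocate and
 * invalidate.
 */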

static void __flush_dcache_segment_2way(unsigned long start,
					unsigned long extent_per_way)
{
	unsigned long orig_sr, sr_with_bl;
	unsigned long base_addr;
	unsigned long way_incr, linesz, way_size;
	struct cache_info *dcache;
	register unsigned long a0, a1, a0e;

	asm volatile("stc sr, %0" : "=r" (orig_sr));
	sr_with_bl = orig_sr | (1<<28);
	base_addr = ((unsigned long)&empty_zero_page[0]);

	/* See comment under 1-way above */
	base_addr = ((base_addr >> 16) << 16);

	base_addr |= start;

	dcache = &current_cpu_data.dcache;
	linesz = dcache->linesz;
	way_incr = dcache->way_incr;
	way_size = dcache->way_size;

	a0 = base_addr;
	a1 = a0 + way_incr;
	a0e = base_addr + extent_per_way;
	do {
		asm volatile("ldc %0, sr" : : "r" (sr_with_bl));
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1" : :
			     "r" (a0), "r" (a1));
		a0 += linesz;
		a1 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1" : :
			     "r" (a0), "r" (a1));
		a0 += linesz;
		a1 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1" : :
			     "r" (a0), "r" (a1));
		a0 += linesz;
		a1 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1" : :
			     "r" (a0), "r" (a1));
		asm volatile("ldc %0, sr" : : "r" (orig_sr));
		a0 += linesz;
		a1 += linesz;
	} while (a0 < a0e);
}

static void __flush_dcache_segment_4way(unsigned long start,
					unsigned long extent_per_way)
{
	unsigned long orig_sr, sr_with_bl;
	unsigned long base_addr;
	unsigned long way_incr, linesz, way_size;
	struct cache_info *dcache;
	register unsigned long a0, a1, a2, a3, a0e;

	asm volatile("stc sr, %0" : "=r" (orig_sr));
	sr_with_bl = orig_sr | (1<<28);
	base_addr = ((unsigned long)&empty_zero_page[0]);

	/* See comment under 1-way above */
	base_addr = ((base_addr >> 16) << 16);

	base_addr |= start;

	dcache = &current_cpu_data.dcache;
	linesz = dcache->linesz;
	way_incr = dcache->way_incr;
	way_size = dcache->way_size;

	a0 = base_addr;
	a1 = a0 + way_incr;
	a2 = a1 + way_incr;
	a3 = a2 + way_incr;
	a0e = base_addr + extent_per_way;
	do {
		asm volatile("ldc %0, sr" : : "r" (sr_with_bl));
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "movca.l r0, @%2\n\t"
			     "movca.l r0, @%3\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1\n\t"
			     "ocbi @%2\n\t"
			     "ocbi @%3\n\t" : :
			     "r" (a0), "r" (a1), "r" (a2), "r" (a3));
		a0 += linesz;
		a1 += linesz;
		a2 += linesz;
		a3 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "movca.l r0, @%2\n\t"
			     "movca.l r0, @%3\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1\n\t"
			     "ocbi @%2\n\t"
			     "ocbi @%3\n\t" : :
			     "r" (a0), "r" (a1), "r" (a2), "r" (a3));
		a0 += linesz;
		a1 += linesz;
		a2 += linesz;
		a3 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "movca.l r0, @%2\n\t"
			     "movca.l r0, @%3\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1\n\t"
			     "ocbi @%2\n\t"
			     "ocbi @%3\n\t" : :
			     "r" (a0), "r" (a1), "r" (a2), "r" (a3));
		a0 += linesz;
		a1 += linesz;
		a2 += linesz;
		a3 += linesz;
		asm volatile("movca.l r0, @%0\n\t"
			     "movca.l r0, @%1\n\t"
			     "movca.l r0, @%2\n\t"
			     "movca.l r0, @%3\n\t"
			     "ocbi @%0\n\t"
			     "ocbi @%1\n\t"
			     "ocbi @%2\n\t"
			     "ocbi @%3\n\t" : :
			     "r" (a0), "r" (a1), "r" (a2), "r" (a3));
		asm volatile("ldc %0, sr" : : "r" (orig_sr));
		a0 += linesz;
		a1 += linesz;
		a2 += linesz;
		a3 += linesz;
	} while (a0 < a0e);
}