/*
 * arch/sh/mm/cache-sh4.c
 *
 * Copyright (C) 1999, 2000, 2002 Niibe Yutaka
 * Copyright (C) 2001 - 2007 Paul Mundt
 * Copyright (C) 2003 Richard Curnow
 * Copyright (c) 2007 STMicroelectronics (R&D) Ltd.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/mutex.h>
#include <linux/fs.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

/*
 * The maximum number of pages we support when doing ranged dcache
 * flushing. Anything exceeding this will simply flush the dcache in its
 * entirety.
 */
#define MAX_DCACHE_PAGES        64      /* XXX: Tune for ways */
#define MAX_ICACHE_PAGES        32
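
/*
 * With the usual 4 KiB sh PAGE_SIZE these thresholds cap ranged flushing
 * at 256 KiB for the D-cache and 128 KiB for the I-cache; beyond that a
 * full flush is assumed to be cheaper than walking the range line by line.
 */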

static void __flush_cache_4096(unsigned long addr, unsigned long phys,
                               unsigned long exec_offset);

/*
 * Write back the range of D-cache, and purge the I-cache.
 *
 * Called from kernel/module.c:sys_init_module and from the a.out loader,
 * signal handler setup code, and kprobes code.
 */
static void sh4_flush_icache_range(void *args)
{
        struct flusher_data *data = args;
        unsigned long start, end;
        unsigned long flags, v;
        int i;

        start = data->addr1;
        end = data->addr2;

        /* If there are too many pages then just blow away the caches */
        if (((end - start) >> PAGE_SHIFT) >= MAX_ICACHE_PAGES) {
                local_flush_cache_all(NULL);
                return;
        }

        /*
         * Selectively flush d-cache then invalidate the i-cache.
         * This is inefficient, so only use this for small ranges.
         */
        start &= ~(L1_CACHE_BYTES-1);
        end += L1_CACHE_BYTES-1;
        end &= ~(L1_CACHE_BYTES-1);

        local_irq_save(flags);
        jump_to_uncached();

        for (v = start; v < end; v += L1_CACHE_BYTES) {
                unsigned long icacheaddr;

                __ocbwb(v);

                icacheaddr = CACHE_IC_ADDRESS_ARRAY | (v &
                                cpu_data->icache.entry_mask);

                /* Clear i-cache line valid-bit */
                for (i = 0; i < cpu_data->icache.ways; i++) {
                        __raw_writel(0, icacheaddr);
                        icacheaddr += cpu_data->icache.way_incr;
                }
        }

        back_to_cached();
        local_irq_restore(flags);
}
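
/*
 * Worked example (assuming an SH7750-style 8 KiB, 1-way I-cache with
 * 32-byte lines, i.e. entry_mask == 0x1fe0): for v == 0x8c001234,
 *
 *      icacheaddr = CACHE_IC_ADDRESS_ARRAY | (0x8c001234 & 0x1fe0)
 *                 = 0xf0000000 | 0x1220 = 0xf0001220
 *
 * and writing 0 to that address-array entry clears the line's valid bit.
 * With more ways, the same index is cleared once per way at way_incr
 * strides, since any way may hold the address.
 */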

static inline void flush_cache_4096(unsigned long start,
                                    unsigned long phys)
{
        unsigned long flags, exec_offset = 0;

        /*
         * All types of SH-4 require PC to be in P2 to operate on the I-cache.
         * Some types of SH-4 require PC to be in P2 to operate on the D-cache.
         */
        if ((boot_cpu_data.flags & CPU_HAS_P2_FLUSH_BUG) ||
            (start < CACHE_OC_ADDRESS_ARRAY))
                exec_offset = 0x20000000;

        local_irq_save(flags);
        __flush_cache_4096(start | SH_CACHE_ASSOC,
                           P1SEGADDR(phys), exec_offset);
        local_irq_restore(flags);
}
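
/*
 * ORing SH_CACHE_ASSOC into 'start' sets the 'A' bit of the memory-mapped
 * array address, turning the write into an associative one: the hardware
 * compares the stored tag against the P1 address passed down and only acts
 * on a match. The 0x20000000 offset is the distance from the cached P1
 * segment (0x80000000) to its uncached P2 alias (0xa0000000), so adding it
 * to the PC re-executes the flush code uncached where the CPU requires it.
 */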

/*
 * Write back & invalidate the D-cache of the page.
 * (To avoid "alias" issues)
 */
static void sh4_flush_dcache_page(void *arg)
{
        struct page *page = arg;
#ifndef CONFIG_SMP
        struct address_space *mapping = page_mapping(page);

        if (mapping && !mapping_mapped(mapping))
                set_bit(PG_dcache_dirty, &page->flags);
        else
#endif
        {
                unsigned long phys = page_to_phys(page);
                unsigned long addr = CACHE_OC_ADDRESS_ARRAY;
                int i, n;

                /* Loop through all colours of the D-cache */
                n = boot_cpu_data.dcache.way_incr >> 12;
                for (i = 0; i < n; i++, addr += 4096)
                        flush_cache_4096(addr, phys);
        }

        wmb();
}
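
/*
 * Example of the colour walk above (assuming a 16 KiB, 1-way D-cache):
 * way_incr == 0x4000, so n == 4 and flush_cache_4096() is called for the
 * array slices at 0xf4000000, 0xf4001000, 0xf4002000 and 0xf4003000, once
 * per possible page colour, since any of the four could alias the page.
 */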

/* TODO: Selective icache invalidation through IC address array.. */
static void __uses_jump_to_uncached flush_icache_all(void)
{
        unsigned long flags, ccr;

        local_irq_save(flags);
        jump_to_uncached();

        /* Flush I-cache */
        ccr = ctrl_inl(CCR);
        ccr |= CCR_CACHE_ICI;
        ctrl_outl(ccr, CCR);

        /*
         * back_to_cached() will take care of the barrier for us, don't add
         * another one!
         */
        back_to_cached();
        local_irq_restore(flags);
}

static void flush_dcache_all(void)
{
        unsigned long addr, end_addr, entry_offset;

        end_addr = CACHE_OC_ADDRESS_ARRAY +
                (current_cpu_data.dcache.sets <<
                 current_cpu_data.dcache.entry_shift) *
                        current_cpu_data.dcache.ways;

        entry_offset = 1 << current_cpu_data.dcache.entry_shift;

        for (addr = CACHE_OC_ADDRESS_ARRAY; addr < end_addr; ) {
                __raw_writel(0, addr); addr += entry_offset;
                __raw_writel(0, addr); addr += entry_offset;
                __raw_writel(0, addr); addr += entry_offset;
                __raw_writel(0, addr); addr += entry_offset;
                __raw_writel(0, addr); addr += entry_offset;
                __raw_writel(0, addr); addr += entry_offset;
                __raw_writel(0, addr); addr += entry_offset;
                __raw_writel(0, addr); addr += entry_offset;
        }
}
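
/*
 * The loop body above is unrolled 8 times by hand. With a 16 KiB, 1-way
 * D-cache and 32-byte lines (entry_shift == 5, 512 sets, assumed here for
 * illustration) that is 512 address-array writes in 64 iterations. On
 * SH-4, clearing a dirty entry's U bit through the address array writes
 * the line back first, which is what makes this a flush rather than a
 * plain invalidate.
 */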

static void sh4_flush_cache_all(void *unused)
{
        flush_dcache_all();
        flush_icache_all();
}

/*
 * Note: (RPC) since the caches are physically tagged, the only point
 * of flush_cache_mm for SH-4 is to get rid of aliases from the
 * D-cache. The assumption elsewhere, e.g. flush_cache_range, is that
 * lines can stay resident so long as the virtual address they were
 * accessed with (hence cache set) is in accord with the physical
 * address (i.e. tag). It's no different here.
 *
 * Caller takes mm->mmap_sem.
 */
static void sh4_flush_cache_mm(void *arg)
{
        struct mm_struct *mm = arg;

        if (cpu_context(smp_processor_id(), mm) == NO_CONTEXT)
                return;

        flush_dcache_all();
}

/*
 * Write back and invalidate I/D-caches for the page.
 *
 * ADDR: Virtual Address (U0 address)
 * PFN: Physical page number
 */
static void sh4_flush_cache_page(void *args)
{
        struct flusher_data *data = args;
        struct vm_area_struct *vma;
        unsigned long address, pfn, phys;
        unsigned int alias_mask;

        vma = data->vma;
        address = data->addr1;
        pfn = data->addr2;
        phys = pfn << PAGE_SHIFT;

        if (cpu_context(smp_processor_id(), vma->vm_mm) == NO_CONTEXT)
                return;

        alias_mask = boot_cpu_data.dcache.alias_mask;

        /* We only need to flush the D-cache when we have an alias */
        if ((address ^ phys) & alias_mask) {
                /* Loop 4K of the D-cache */
                flush_cache_4096(
                        CACHE_OC_ADDRESS_ARRAY | (address & alias_mask),
                        phys);
                /* Loop another 4K of the D-cache */
                flush_cache_4096(
                        CACHE_OC_ADDRESS_ARRAY | (phys & alias_mask),
                        phys);
        }

        alias_mask = boot_cpu_data.icache.alias_mask;
        if (vma->vm_flags & VM_EXEC) {
                /*
                 * Evict entries from the portion of the cache from which code
                 * may have been executed at this address (virtual). There's
                 * no need to evict from the portion corresponding to the
                 * physical address as for the D-cache, because we know the
                 * kernel has never executed the code through its identity
                 * translation.
                 */
                flush_cache_4096(
                        CACHE_IC_ADDRESS_ARRAY | (address & alias_mask),
                        phys);
        }
}
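
/*
 * Alias check example (assuming four page colours, alias_mask == 0x3000):
 * for address == 0x10003000 and phys == 0x0c012000,
 * (address ^ phys) & 0x3000 == 0x1000, so the page's data may sit under
 * two different cache indices, and both the user-colour slice and the
 * kernel/physical-colour slice have to be flushed.
 */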

/*
 * Write back and invalidate D-caches.
 *
 * START, END: Virtual Address (U0 address)
 *
 * NOTE: We need to flush the _physical_ page entry.
 * Flushing the cache lines for U0 only isn't enough.
 * We need to flush for P1 too, which may contain aliases.
 */
static void sh4_flush_cache_range(void *args)
{
        struct flusher_data *data = args;
        struct vm_area_struct *vma;
        unsigned long start, end;

        vma = data->vma;
        start = data->addr1;
        end = data->addr2;

        if (cpu_context(smp_processor_id(), vma->vm_mm) == NO_CONTEXT)
                return;

        /*
         * If cache is only 4k-per-way, there are never any 'aliases'. Since
         * the cache is physically tagged, the data can just be left in there.
         */
        if (boot_cpu_data.dcache.n_aliases == 0)
                return;

        flush_dcache_all();

        if (vma->vm_flags & VM_EXEC)
                flush_icache_all();
}
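
/*
 * The n_aliases == 0 early-out covers parts whose way size is at most one
 * page: with, say, a 2-way 8 KiB D-cache, each way is 4 KiB, the index
 * bits never exceed the page offset, virtual and physical indexing agree,
 * and physically tagged lines can safely stay resident.
 */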

/**
 * __flush_cache_4096
 *
 * @addr: address in memory mapped cache array
 * @phys: P1 address to flush (has to match tags if addr has 'A' bit
 *        set i.e. associative write)
 * @exec_offset: set to 0x20000000 if flush has to be executed from P2
 *               region, else 0x0
 *
 * The offset into the cache array implied by 'addr' selects the
 * 'colour' of the virtual address range that will be flushed. The
 * operation (purge/write-back) is selected by the lower 2 bits of
 * 'phys'.
 */
static void __flush_cache_4096(unsigned long addr, unsigned long phys,
                               unsigned long exec_offset)
{
        int way_count;
        unsigned long base_addr = addr;
        struct cache_info *dcache;
        unsigned long way_incr;
        unsigned long a, ea, p;
        unsigned long temp_pc;

        dcache = &boot_cpu_data.dcache;
        /* Write this way for better assembly. */
        way_count = dcache->ways;
        way_incr = dcache->way_incr;

        /*
         * Apply exec_offset (i.e. branch to P2 if required).
         *
         * FIXME:
         *
         * If I write "=r" for the (temp_pc), it puts this in r6 hence
         * trashing exec_offset before it's been added on - why? Hence
         * "=&r" as a 'workaround'
         */
        asm volatile("mov.l 1f, %0\n\t"
                     "add   %1, %0\n\t"
                     "jmp   @%0\n\t"
                     "nop\n\t"
                     ".balign 4\n\t"
                     "1:  .long 2f\n\t"
                     "2:\n" : "=&r" (temp_pc) : "r" (exec_offset));

        /*
         * We know there will be >=1 iteration, so write as do-while to avoid
         * pointless head-of-loop check for 0 iterations.
         */
        do {
                ea = base_addr + PAGE_SIZE;
                a = base_addr;
                p = phys;

                do {
                        *(volatile unsigned long *)a = p;
                        /*
                         * Next line: intentionally not p+32, saves an add, p
                         * will do since only the cache tag bits need to
                         * match.
                         */
                        *(volatile unsigned long *)(a+32) = p;
                        a += 64;
                        p += 64;
                } while (a < ea);

                base_addr += way_incr;
        } while (--way_count != 0);
}
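
/*
 * Inner-loop arithmetic (assuming 4 KiB pages and 32-byte lines): each
 * iteration stores to two array entries, 'a' and 'a+32', and advances by
 * 64 bytes, so one way's worth of a page (128 lines) is covered in 64
 * iterations. Storing 'p' rather than 'p+32' to the second entry is safe
 * because the two values differ only in bit 5, which lies below the tag;
 * only the tag bits (plus the low U/V control bits) of the value matter.
 */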

extern void __weak sh4__flush_region_init(void);

/*
 * SH-4 has virtually indexed and physically tagged cache.
 */
void __init sh4_cache_init(void)
{
        printk("PVR=%08x CVR=%08x PRR=%08x\n",
                ctrl_inl(CCN_PVR),
                ctrl_inl(CCN_CVR),
                ctrl_inl(CCN_PRR));

        local_flush_icache_range = sh4_flush_icache_range;
        local_flush_dcache_page = sh4_flush_dcache_page;
        local_flush_cache_all = sh4_flush_cache_all;
        local_flush_cache_mm = sh4_flush_cache_mm;
        local_flush_cache_dup_mm = sh4_flush_cache_mm;
        local_flush_cache_page = sh4_flush_cache_page;
        local_flush_cache_range = sh4_flush_cache_range;

        sh4__flush_region_init();
}
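
/*
 * These local_* hooks are not called directly; in this kernel generation
 * the generic SH cache code dispatches through them (via a cross-call
 * wrapper on SMP) whenever flush_dcache_page(), flush_icache_range() and
 * friends are invoked, so sh4_cache_init() only fills in the pointers.
 */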