  1. /*
  2. * arch/sh/mm/cache-sh4.c
  3. *
  4. * Copyright (C) 1999, 2000, 2002 Niibe Yutaka
  5. * Copyright (C) 2001, 2002, 2003, 2004 Paul Mundt
  6. * Copyright (C) 2003 Richard Curnow
  7. *
  8. * This file is subject to the terms and conditions of the GNU General Public
  9. * License. See the file "COPYING" in the main directory of this archive
  10. * for more details.
  11. */
  12. #include <linux/config.h>
  13. #include <linux/init.h>
  14. #include <linux/mman.h>
  15. #include <linux/mm.h>
  16. #include <linux/threads.h>
  17. #include <asm/addrspace.h>
  18. #include <asm/page.h>
  19. #include <asm/pgtable.h>
  20. #include <asm/processor.h>
  21. #include <asm/cache.h>
  22. #include <asm/io.h>
  23. #include <asm/uaccess.h>
  24. #include <asm/pgalloc.h>
  25. #include <asm/mmu_context.h>
  26. #include <asm/cacheflush.h>
  27. extern void __flush_cache_4096_all(unsigned long start);
  28. static void __flush_cache_4096_all_ex(unsigned long start);
  29. extern void __flush_dcache_all(void);
  30. static void __flush_dcache_all_ex(void);
  31. /*
  32. * SH-4 has virtually indexed and physically tagged cache.
  33. */
  34. struct semaphore p3map_sem[4];
  35. void __init p3_cache_init(void)
  36. {
  37. if (remap_area_pages(P3SEG, 0, PAGE_SIZE*4, _PAGE_CACHABLE))
  38. panic("%s failed.", __FUNCTION__);
  39. sema_init (&p3map_sem[0], 1);
  40. sema_init (&p3map_sem[1], 1);
  41. sema_init (&p3map_sem[2], 1);
  42. sema_init (&p3map_sem[3], 1);
  43. }
  44. /*
  45. * Write back the dirty D-caches, but not invalidate them.
  46. *
  47. * START: Virtual Address (U0, P1, or P3)
  48. * SIZE: Size of the region.
  49. */
  50. void __flush_wback_region(void *start, int size)
  51. {
  52. unsigned long v;
  53. unsigned long begin, end;
  54. begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
  55. end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
  56. & ~(L1_CACHE_BYTES-1);
  57. for (v = begin; v < end; v+=L1_CACHE_BYTES) {
  58. asm volatile("ocbwb %0"
  59. : /* no output */
  60. : "m" (__m(v)));
  61. }
  62. }
  63. /*
  64. * Write back the dirty D-caches and invalidate them.
  65. *
  66. * START: Virtual Address (U0, P1, or P3)
  67. * SIZE: Size of the region.
  68. */
  69. void __flush_purge_region(void *start, int size)
  70. {
  71. unsigned long v;
  72. unsigned long begin, end;
  73. begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
  74. end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
  75. & ~(L1_CACHE_BYTES-1);
  76. for (v = begin; v < end; v+=L1_CACHE_BYTES) {
  77. asm volatile("ocbp %0"
  78. : /* no output */
  79. : "m" (__m(v)));
  80. }
  81. }
  82. /*
  83. * No write back please
  84. */
  85. void __flush_invalidate_region(void *start, int size)
  86. {
  87. unsigned long v;
  88. unsigned long begin, end;
  89. begin = (unsigned long)start & ~(L1_CACHE_BYTES-1);
  90. end = ((unsigned long)start + size + L1_CACHE_BYTES-1)
  91. & ~(L1_CACHE_BYTES-1);
  92. for (v = begin; v < end; v+=L1_CACHE_BYTES) {
  93. asm volatile("ocbi %0"
  94. : /* no output */
  95. : "m" (__m(v)));
  96. }
  97. }
  98. static void __flush_dcache_all_ex(void)
  99. {
  100. unsigned long addr, end_addr, entry_offset;
  101. end_addr = CACHE_OC_ADDRESS_ARRAY + (cpu_data->dcache.sets << cpu_data->dcache.entry_shift) * cpu_data->dcache.ways;
  102. entry_offset = 1 << cpu_data->dcache.entry_shift;
  103. for (addr = CACHE_OC_ADDRESS_ARRAY; addr < end_addr; addr += entry_offset) {
  104. ctrl_outl(0, addr);
  105. }
  106. }
  107. static void __flush_cache_4096_all_ex(unsigned long start)
  108. {
  109. unsigned long addr, entry_offset;
  110. int i;
  111. entry_offset = 1 << cpu_data->dcache.entry_shift;
  112. for (i = 0; i < cpu_data->dcache.ways; i++, start += cpu_data->dcache.way_incr) {
  113. for (addr = CACHE_OC_ADDRESS_ARRAY + start;
  114. addr < CACHE_OC_ADDRESS_ARRAY + 4096 + start;
  115. addr += entry_offset) {
  116. ctrl_outl(0, addr);
  117. }
  118. }
  119. }
  120. void flush_cache_4096_all(unsigned long start)
  121. {
  122. if (cpu_data->dcache.ways == 1)
  123. __flush_cache_4096_all(start);
  124. else
  125. __flush_cache_4096_all_ex(start);
  126. }
/*
 * Write back the range of D-cache, and purge the I-cache.
 *
 * Called from kernel/module.c:sys_init_module and routine for a.out format.
 */
void flush_icache_range(unsigned long start, unsigned long end)
{
	/*
	 * No ranged flush is attempted here: the whole D-cache is written
	 * back and the entire I-cache invalidated regardless of start/end.
	 */
	flush_cache_all();
}
/*
 * Write back the D-cache and purge the I-cache for signal trampoline.
 * .. which happens to be the same behavior as flush_icache_range().
 * So, we simply flush out a line.
 */
void flush_cache_sigtramp(unsigned long addr)
{
	unsigned long v, index;
	unsigned long flags;
	int i;

	/* Write the trampoline's D-cache line back to memory. */
	v = addr & ~(L1_CACHE_BYTES-1);
	asm volatile("ocbwb %0"
		     : /* no output */
		     : "m" (__m(v)));

	/* I-cache address-array entry for this line (way 0). */
	index = CACHE_IC_ADDRESS_ARRAY | (v & cpu_data->icache.entry_mask);

	/*
	 * The address-array writes are done from P2 with interrupts off;
	 * step through every way, clearing the line's valid bit in each.
	 */
	local_irq_save(flags);
	jump_to_P2();
	for(i = 0; i < cpu_data->icache.ways; i++, index += cpu_data->icache.way_incr)
		ctrl_outl(0, index);	/* Clear out Valid-bit */
	back_to_P1();
	local_irq_restore(flags);
}
/*
 * Flush one 4K slice of a cache through its address array using the
 * assembly helper __flush_cache_4096 in associative mode.
 *
 * START: address-array base (OC or IC, possibly offset by colour bits)
 * PHYS:  physical address of the page being flushed
 */
static inline void flush_cache_4096(unsigned long start,
	unsigned long phys)
{
	unsigned long flags;
	extern void __flush_cache_4096(unsigned long addr, unsigned long phys, unsigned long exec_offset);

	/*
	 * SH7751, SH7751R, and ST40 have no restriction to handle cache.
	 * (While SH7750 must do that at P2 area.)
	 */
	if ((cpu_data->flags & CPU_HAS_P2_FLUSH_BUG)
	    || start < CACHE_OC_ADDRESS_ARRAY) {
		/*
		 * NOTE(review): the 0x20000000 exec_offset presumably rebases
		 * the flush code into the uncached P2 segment — confirm
		 * against the assembly implementation of __flush_cache_4096.
		 */
		local_irq_save(flags);
		__flush_cache_4096(start | SH_CACHE_ASSOC, P1SEGADDR(phys), 0x20000000);
		local_irq_restore(flags);
	} else {
		__flush_cache_4096(start | SH_CACHE_ASSOC, P1SEGADDR(phys), 0);
	}
}
  176. /*
  177. * Write back & invalidate the D-cache of the page.
  178. * (To avoid "alias" issues)
  179. */
  180. void flush_dcache_page(struct page *page)
  181. {
  182. if (test_bit(PG_mapped, &page->flags)) {
  183. unsigned long phys = PHYSADDR(page_address(page));
  184. /* Loop all the D-cache */
  185. flush_cache_4096(CACHE_OC_ADDRESS_ARRAY, phys);
  186. flush_cache_4096(CACHE_OC_ADDRESS_ARRAY | 0x1000, phys);
  187. flush_cache_4096(CACHE_OC_ADDRESS_ARRAY | 0x2000, phys);
  188. flush_cache_4096(CACHE_OC_ADDRESS_ARRAY | 0x3000, phys);
  189. }
  190. }
/*
 * Invalidate the entire I-cache by setting the ICI bit in the cache
 * control register (CCR).  The read-modify-write of CCR is performed
 * from P2 with interrupts disabled.
 */
static inline void flush_icache_all(void)
{
	unsigned long flags, ccr;

	local_irq_save(flags);
	jump_to_P2();

	/* Flush I-cache */
	ccr = ctrl_inl(CCR);
	ccr |= CCR_CACHE_ICI;
	ctrl_outl(ccr, CCR);

	back_to_P1();
	local_irq_restore(flags);
}
  203. void flush_cache_all(void)
  204. {
  205. if (cpu_data->dcache.ways == 1)
  206. __flush_dcache_all();
  207. else
  208. __flush_dcache_all_ex();
  209. flush_icache_all();
  210. }
/*
 * Flush all cached state for an address space; mm itself is unused —
 * everything is flushed regardless.
 */
void flush_cache_mm(struct mm_struct *mm)
{
	/* Is there any good way? */
	/* XXX: possibly call flush_cache_range for each vm area */

	/*
	 * FIXME: Really, the optimal solution here would be able to flush out
	 * individual lines created by the specified context, but this isn't
	 * feasible for a number of architectures (such as MIPS, and some
	 * SPARC) .. is this possible for SuperH?
	 *
	 * In the meantime, we'll just flush all of the caches.. this
	 * seems to be the simplest way to avoid at least a few wasted
	 * cache flushes. -Lethal
	 */
	flush_cache_all();
}
  227. /*
  228. * Write back and invalidate I/D-caches for the page.
  229. *
  230. * ADDR: Virtual Address (U0 address)
  231. * PFN: Physical page number
  232. */
  233. void flush_cache_page(struct vm_area_struct *vma, unsigned long address, unsigned long pfn)
  234. {
  235. unsigned long phys = pfn << PAGE_SHIFT;
  236. /* We only need to flush D-cache when we have alias */
  237. if ((address^phys) & CACHE_ALIAS) {
  238. /* Loop 4K of the D-cache */
  239. flush_cache_4096(
  240. CACHE_OC_ADDRESS_ARRAY | (address & CACHE_ALIAS),
  241. phys);
  242. /* Loop another 4K of the D-cache */
  243. flush_cache_4096(
  244. CACHE_OC_ADDRESS_ARRAY | (phys & CACHE_ALIAS),
  245. phys);
  246. }
  247. if (vma->vm_flags & VM_EXEC)
  248. /* Loop 4K (half) of the I-cache */
  249. flush_cache_4096(
  250. CACHE_IC_ADDRESS_ARRAY | (address & 0x1000),
  251. phys);
  252. }
/*
 * Write back and invalidate D-caches.
 *
 * START, END: Virtual Address (U0 address)
 *
 * NOTE: We need to flush the _physical_ page entry.
 * Flushing the cache lines for U0 only isn't enough.
 * We need to flush for P1 too, which may contain aliases.
 */
void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
	unsigned long end)
{
	unsigned long p = start & PAGE_MASK;
	pgd_t *dir;
	pmd_t *pmd;
	pte_t *pte;
	pte_t entry;
	unsigned long phys;
	unsigned long d = 0;	/* bitmask of 4K colour slices that need flushing */

	dir = pgd_offset(vma->vm_mm, p);
	pmd = pmd_offset(dir, p);

	/* Walk the page tables over [start, end), collecting aliased colours. */
	do {
		if (pmd_none(*pmd) || pmd_bad(*pmd)) {
			/* No table here: skip ahead to the next PMD boundary. */
			p &= ~((1 << PMD_SHIFT) -1);
			p += (1 << PMD_SHIFT);
			pmd++;
			continue;
		}
		pte = pte_offset_kernel(pmd, p);
		do {
			entry = *pte;
			if ((pte_val(entry) & _PAGE_PRESENT)) {
				phys = pte_val(entry)&PTE_PHYS_MASK;
				/* Only pages whose virt/phys colours differ can alias. */
				if ((p^phys) & CACHE_ALIAS) {
					d |= 1 << ((p & CACHE_ALIAS)>>12);
					d |= 1 << ((phys & CACHE_ALIAS)>>12);
					/* All four colours marked — no point scanning on. */
					if (d == 0x0f)
						goto loop_exit;
				}
			}
			pte++;
			p += PAGE_SIZE;
		/* Inner loop ends when the PTE pointer crosses a page boundary. */
		} while (p < end && ((unsigned long)pte & ~PAGE_MASK));
		pmd++;
	} while (p < end);
 loop_exit:
	/* Flush each colour slice that appeared in an aliased mapping. */
	if (d & 1)
		flush_cache_4096_all(0);
	if (d & 2)
		flush_cache_4096_all(0x1000);
	if (d & 4)
		flush_cache_4096_all(0x2000);
	if (d & 8)
		flush_cache_4096_all(0x3000);
	/* Executable ranges also require the I-cache to be invalidated. */
	if (vma->vm_flags & VM_EXEC)
		flush_icache_all();
}
/*
 * flush_icache_user_range
 * @vma: VMA of the process
 * @page: page
 * @addr: U0 address
 * @len: length of the range (< page size)
 *
 * Thin wrapper: len is ignored and the whole page containing the
 * range is flushed via flush_cache_page().
 */
void flush_icache_user_range(struct vm_area_struct *vma,
	struct page *page, unsigned long addr, int len)
{
	flush_cache_page(vma, addr, page_to_pfn(page));
}