/*
 * arch/sh/mm/cache.c
 *
 * Copyright (C) 1999, 2000, 2002  Niibe Yutaka
 * Copyright (C) 2002 - 2009  Paul Mundt
 *
 * Released under the terms of the GNU GPL v2.0.
 */
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
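
/*
 * The cache maintenance primitives below are function pointers so that
 * a single kernel image can support multiple SH CPU families. They all
 * start out as no-ops and are rewired with real implementations by the
 * family-specific *_cache_init() routines called from cpu_cache_init()
 * at the bottom of this file.
 */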
void (*local_flush_cache_all)(void *args) = cache_noop;
void (*local_flush_cache_mm)(void *args) = cache_noop;
void (*local_flush_cache_dup_mm)(void *args) = cache_noop;
void (*local_flush_cache_page)(void *args) = cache_noop;
void (*local_flush_cache_range)(void *args) = cache_noop;
void (*local_flush_dcache_page)(void *args) = cache_noop;
void (*local_flush_icache_range)(void *args) = cache_noop;
void (*local_flush_icache_page)(void *args) = cache_noop;
void (*local_flush_cache_sigtramp)(void *args) = cache_noop;

void (*__flush_wback_region)(void *start, int size);
EXPORT_SYMBOL(__flush_wback_region);
void (*__flush_purge_region)(void *start, int size);
EXPORT_SYMBOL(__flush_purge_region);
void (*__flush_invalidate_region)(void *start, int size);
EXPORT_SYMBOL(__flush_invalidate_region);
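
/*
 * Semantics of the three region ops, a sketch based on the conventional
 * writeback/purge/invalidate split used by the SH cache code (the
 * per-family implementations live elsewhere):
 *
 *   __flush_wback_region()      - write dirty lines back to memory,
 *                                 leaving them resident in the cache.
 *   __flush_purge_region()      - write dirty lines back, then
 *                                 invalidate them.
 *   __flush_invalidate_region() - drop lines without writing them back.
 *
 * An illustrative (hypothetical) caller pushing a freshly written
 * buffer out to memory before a non-coherent consumer reads it:
 *
 *	__flush_wback_region(buf, len);
 */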

static inline void noop__flush_region(void *start, int size)
{
}
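
/*
 * Run a cache operation on all online CPUs: broadcast it to the other
 * CPUs via smp_call_function() and then run it locally, with preemption
 * disabled so we cannot migrate between the two steps.
 */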
static inline void cacheop_on_each_cpu(void (*func)(void *info), void *info,
				       int wait)
{
	preempt_disable();
	smp_call_function(func, info, wait);
	func(info);
	preempt_enable();
}
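
/*
 * Write into a user page through a kernel-side mapping. On VIPT
 * aliasing D-caches, writing through an arbitrary kernel alias could
 * leave stale lines in the user's cache colour, so when the page is
 * mapped and clean we copy through a colour-matched kmap_coherent()
 * mapping instead; otherwise we do a plain copy and mark the page
 * dirty so the alias is resolved later, when the PTE is installed.
 */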
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long vaddr, void *dst, const void *src,
		       unsigned long len)
{
	if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
	    !test_bit(PG_dcache_dirty, &page->flags)) {
		void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(vto, src, len);
		kunmap_coherent(vto);
	} else {
		memcpy(dst, src, len);
		if (boot_cpu_data.dcache.n_aliases)
			set_bit(PG_dcache_dirty, &page->flags);
	}

	if (vma->vm_flags & VM_EXEC)
		flush_cache_page(vma, vaddr, page_to_pfn(page));
}
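
/*
 * Read from a user page, taking the same aliasing precaution on the
 * read side: go through a colour-matched mapping when the page is
 * mapped and clean, so we observe the user's most recent data.
 */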
void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
			 unsigned long vaddr, void *dst, const void *src,
			 unsigned long len)
{
	if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
	    !test_bit(PG_dcache_dirty, &page->flags)) {
		void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(dst, vfrom, len);
		kunmap_coherent(vfrom);
	} else {
		/*
		 * A plain read leaves the page's cache state untouched,
		 * so unlike copy_to_user_page() there is nothing to mark
		 * dirty here.
		 */
		memcpy(dst, src, len);
	}
}
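
/*
 * Copy a page destined for a user mapping at @vaddr (e.g. for a
 * copy-on-write fault). The source is read through a colour-matched
 * mapping when that is safe, and the destination's kernel alias is
 * purged if it could alias the user address.
 */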
void copy_user_highpage(struct page *to, struct page *from,
			unsigned long vaddr, struct vm_area_struct *vma)
{
	void *vfrom, *vto;

	vto = kmap_atomic(to, KM_USER1);

	if (boot_cpu_data.dcache.n_aliases && page_mapped(from) &&
	    !test_bit(PG_dcache_dirty, &from->flags)) {
		vfrom = kmap_coherent(from, vaddr);
		copy_page(vto, vfrom);
		kunmap_coherent(vfrom);
	} else {
		vfrom = kmap_atomic(from, KM_USER0);
		copy_page(vto, vfrom);
		kunmap_atomic(vfrom, KM_USER0);
	}

	if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
		__flush_purge_region(vto, PAGE_SIZE);

	kunmap_atomic(vto, KM_USER1);

	/* Make sure this page is cleared on other CPU's too before using it */
	smp_wmb();
}
EXPORT_SYMBOL(copy_user_highpage);

void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *kaddr = kmap_atomic(page, KM_USER0);

	clear_page(kaddr);

	if (pages_do_alias((unsigned long)kaddr, vaddr & PAGE_MASK))
		__flush_purge_region(kaddr, PAGE_SIZE);

	kunmap_atomic(kaddr, KM_USER0);
}
EXPORT_SYMBOL(clear_user_highpage);
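
/*
 * Called when a PTE is installed: if the page was written through a
 * mismatched kernel alias earlier (PG_dcache_dirty), purge the kernel
 * mapping now so the user mapping starts out coherent.
 */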
void __update_cache(struct vm_area_struct *vma,
		    unsigned long address, pte_t pte)
{
	struct page *page;
	unsigned long pfn = pte_pfn(pte);

	if (!boot_cpu_data.dcache.n_aliases)
		return;

	page = pfn_to_page(pfn);
	if (pfn_valid(pfn)) {
		int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags);
		if (dirty)
			__flush_purge_region(page_address(page), PAGE_SIZE);
	}
}
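
/*
 * Flush an anonymous page whose kernel address may alias @vmaddr. When
 * the page is mapped and clean, kunmap_coherent() currently takes care
 * of the purge; otherwise the kernel alias is purged directly.
 */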
void __flush_anon_page(struct page *page, unsigned long vmaddr)
{
	unsigned long addr = (unsigned long) page_address(page);

	if (pages_do_alias(addr, vmaddr)) {
		if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
		    !test_bit(PG_dcache_dirty, &page->flags)) {
			void *kaddr;

			kaddr = kmap_coherent(page, vmaddr);
			/* XXX.. For now kunmap_coherent() does a purge */
			/* __flush_purge_region((void *)kaddr, PAGE_SIZE); */
			kunmap_coherent(kaddr);
		} else
			__flush_purge_region((void *)addr, PAGE_SIZE);
	}
}
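
/*
 * The remaining flush_*() entry points simply fan the corresponding
 * local_flush_*() operation out to all CPUs via cacheop_on_each_cpu(),
 * packing multi-argument calls into a struct flusher_data.
 */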
void flush_cache_all(void)
{
	cacheop_on_each_cpu(local_flush_cache_all, NULL, 1);
}
EXPORT_SYMBOL(flush_cache_all);

void flush_cache_mm(struct mm_struct *mm)
{
	if (boot_cpu_data.dcache.n_aliases == 0)
		return;

	cacheop_on_each_cpu(local_flush_cache_mm, mm, 1);
}

void flush_cache_dup_mm(struct mm_struct *mm)
{
	if (boot_cpu_data.dcache.n_aliases == 0)
		return;

	cacheop_on_each_cpu(local_flush_cache_dup_mm, mm, 1);
}

void flush_cache_page(struct vm_area_struct *vma, unsigned long addr,
		      unsigned long pfn)
{
	struct flusher_data data;

	data.vma = vma;
	data.addr1 = addr;
	data.addr2 = pfn;

	cacheop_on_each_cpu(local_flush_cache_page, (void *)&data, 1);
}

void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end)
{
	struct flusher_data data;

	data.vma = vma;
	data.addr1 = start;
	data.addr2 = end;

	cacheop_on_each_cpu(local_flush_cache_range, (void *)&data, 1);
}
EXPORT_SYMBOL(flush_cache_range);

void flush_dcache_page(struct page *page)
{
	cacheop_on_each_cpu(local_flush_dcache_page, page, 1);
}
EXPORT_SYMBOL(flush_dcache_page);

void flush_icache_range(unsigned long start, unsigned long end)
{
	struct flusher_data data;

	data.vma = NULL;
	data.addr1 = start;
	data.addr2 = end;

	cacheop_on_each_cpu(local_flush_icache_range, (void *)&data, 1);
}

void flush_icache_page(struct vm_area_struct *vma, struct page *page)
{
	/* Nothing uses the VMA, so just pass the struct page along */
	cacheop_on_each_cpu(local_flush_icache_page, page, 1);
}

void flush_cache_sigtramp(unsigned long address)
{
	cacheop_on_each_cpu(local_flush_cache_sigtramp, (void *)address, 1);
}
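
/*
 * Derive the alias parameters for one cache. A cache can alias when a
 * single way spans more than one page: the virtual address bits that
 * select the set then exceed PAGE_SIZE, so one physical page can live
 * in several cache colours.
 *
 * Worked example (figures are illustrative, not from a probed CPU): a
 * direct-mapped 16KB cache with 32-byte lines and 4KB pages has
 * sets = 512 and entry_shift = 5, giving
 *
 *	alias_mask = ((512 - 1) << 5) & ~(4096 - 1) = 0x3000
 *	n_aliases  = (0x3000 >> 12) + 1             = 4
 *
 * i.e. four possible colours for any given page.
 */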
static void compute_alias(struct cache_info *c)
{
	c->alias_mask = ((c->sets - 1) << c->entry_shift) & ~(PAGE_SIZE - 1);
	c->n_aliases = c->alias_mask ? (c->alias_mask >> PAGE_SHIFT) + 1 : 0;
}

static void __init emit_cache_params(void)
{
	printk(KERN_NOTICE "I-cache : n_ways=%d n_sets=%d way_incr=%d\n",
		boot_cpu_data.icache.ways,
		boot_cpu_data.icache.sets,
		boot_cpu_data.icache.way_incr);
	printk(KERN_NOTICE "I-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
		boot_cpu_data.icache.entry_mask,
		boot_cpu_data.icache.alias_mask,
		boot_cpu_data.icache.n_aliases);
	printk(KERN_NOTICE "D-cache : n_ways=%d n_sets=%d way_incr=%d\n",
		boot_cpu_data.dcache.ways,
		boot_cpu_data.dcache.sets,
		boot_cpu_data.dcache.way_incr);
	printk(KERN_NOTICE "D-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
		boot_cpu_data.dcache.entry_mask,
		boot_cpu_data.dcache.alias_mask,
		boot_cpu_data.dcache.n_aliases);

	/*
	 * Emit Secondary Cache parameters if the CPU has a probed L2.
	 */
	if (boot_cpu_data.flags & CPU_HAS_L2_CACHE) {
		printk(KERN_NOTICE "S-cache : n_ways=%d n_sets=%d way_incr=%d\n",
			boot_cpu_data.scache.ways,
			boot_cpu_data.scache.sets,
			boot_cpu_data.scache.way_incr);
		printk(KERN_NOTICE "S-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
			boot_cpu_data.scache.entry_mask,
			boot_cpu_data.scache.alias_mask,
			boot_cpu_data.scache.n_aliases);
	}
}
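
/*
 * Boot-time cache setup: compute the alias parameters, install no-op
 * region ops, and, unless the cache is disabled in CCR, hand off to the
 * matching CPU family's cache_init() to install the real
 * implementations before dumping the probed parameters.
 */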
void __init cpu_cache_init(void)
{
	unsigned int cache_disabled = 0;

#ifdef CCR
	cache_disabled = !(__raw_readl(CCR) & CCR_CACHE_ENABLE);
#endif

	compute_alias(&boot_cpu_data.icache);
	compute_alias(&boot_cpu_data.dcache);
	compute_alias(&boot_cpu_data.scache);

	__flush_wback_region		= noop__flush_region;
	__flush_purge_region		= noop__flush_region;
	__flush_invalidate_region	= noop__flush_region;

	/*
	 * No flushing is necessary in the disabled cache case so we can
	 * just keep the noop functions in local_flush_..() and __flush_..()
	 */
	if (unlikely(cache_disabled))
		goto skip;

	if (boot_cpu_data.family == CPU_FAMILY_SH2) {
		extern void __weak sh2_cache_init(void);

		sh2_cache_init();
	}

	if (boot_cpu_data.family == CPU_FAMILY_SH2A) {
		extern void __weak sh2a_cache_init(void);

		sh2a_cache_init();
	}

	if (boot_cpu_data.family == CPU_FAMILY_SH3) {
		extern void __weak sh3_cache_init(void);

		sh3_cache_init();

		if ((boot_cpu_data.type == CPU_SH7705) &&
		    (boot_cpu_data.dcache.sets == 512)) {
			extern void __weak sh7705_cache_init(void);

			sh7705_cache_init();
		}
	}

	if ((boot_cpu_data.family == CPU_FAMILY_SH4) ||
	    (boot_cpu_data.family == CPU_FAMILY_SH4A) ||
	    (boot_cpu_data.family == CPU_FAMILY_SH4AL_DSP)) {
		extern void __weak sh4_cache_init(void);

		sh4_cache_init();
	}

	if (boot_cpu_data.family == CPU_FAMILY_SH5) {
		extern void __weak sh5_cache_init(void);

		sh5_cache_init();
	}

skip:
	emit_cache_params();
}