/*
 * arch/sh/mm/cache.c
 *
 * Copyright (C) 1999, 2000, 2002 Niibe Yutaka
 * Copyright (C) 2002 - 2009 Paul Mundt
 *
 * Released under the terms of the GNU GPL v2.0.
 */
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
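
/*
 * Each of these pointers is filled in by the CPU family specific cache
 * initializer; until then they point at cache_noop, so the flush
 * interfaces are safe to call even on cache-less parts.
 */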
void (*local_flush_cache_all)(void *args) = cache_noop;
void (*local_flush_cache_mm)(void *args) = cache_noop;
void (*local_flush_cache_dup_mm)(void *args) = cache_noop;
void (*local_flush_cache_page)(void *args) = cache_noop;
void (*local_flush_cache_range)(void *args) = cache_noop;
void (*local_flush_dcache_page)(void *args) = cache_noop;
void (*local_flush_icache_range)(void *args) = cache_noop;
void (*local_flush_icache_page)(void *args) = cache_noop;
void (*local_flush_cache_sigtramp)(void *args) = cache_noop;

void (*__flush_wback_region)(void *start, int size);
void (*__flush_purge_region)(void *start, int size);
void (*__flush_invalidate_region)(void *start, int size);
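
/*
 * Default no-op for the __flush_*_region hooks above; installed by
 * cpu_cache_init() and overridden by the CPU family specific setup.
 */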
static inline void noop__flush_region(void *start, int size)
{
}
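
/*
 * Write into a possibly user-mapped page. With an aliasing D-cache the
 * write goes through a kernel mapping congruent with the user address
 * (kmap_coherent()); otherwise the page is flagged PG_dcache_dirty so
 * the alias is resolved later in __update_cache().
 */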
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long vaddr, void *dst, const void *src,
		       unsigned long len)
{
	if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
	    !test_bit(PG_dcache_dirty, &page->flags)) {
		void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(vto, src, len);
		kunmap_coherent();
	} else {
		memcpy(dst, src, len);
		if (boot_cpu_data.dcache.n_aliases)
			set_bit(PG_dcache_dirty, &page->flags);
	}

	if (vma->vm_flags & VM_EXEC)
		flush_cache_page(vma, vaddr, page_to_pfn(page));
}
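
/*
 * Read from a possibly user-mapped page. As above, a coherent kernel
 * mapping is used when the page is mapped and clean, so the read sees
 * data cached under the user's virtual address.
 */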
void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
			 unsigned long vaddr, void *dst, const void *src,
			 unsigned long len)
{
	if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
	    !test_bit(PG_dcache_dirty, &page->flags)) {
		void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
		memcpy(dst, vfrom, len);
		kunmap_coherent();
	} else {
		/*
		 * Reading does not dirty the kernel mapping, so there
		 * is no need to set PG_dcache_dirty here.
		 */
		memcpy(dst, src, len);
	}
}
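
/*
 * Copy a user highmem page, reading the source through a coherent
 * mapping when it may also be live in the user's D-cache lines, and
 * writing back the destination if the kernel mapping aliases the
 * user address.
 */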
void copy_user_highpage(struct page *to, struct page *from,
			unsigned long vaddr, struct vm_area_struct *vma)
{
	void *vfrom, *vto;

	vto = kmap_atomic(to, KM_USER1);

	if (boot_cpu_data.dcache.n_aliases && page_mapped(from) &&
	    !test_bit(PG_dcache_dirty, &from->flags)) {
		vfrom = kmap_coherent(from, vaddr);
		copy_page(vto, vfrom);
		kunmap_coherent();
	} else {
		vfrom = kmap_atomic(from, KM_USER0);
		copy_page(vto, vfrom);
		kunmap_atomic(vfrom, KM_USER0);
	}

	if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
		__flush_wback_region(vto, PAGE_SIZE);

	kunmap_atomic(vto, KM_USER1);

	/* Make sure this page is copied on other CPUs too before using it */
	smp_wmb();
}
EXPORT_SYMBOL(copy_user_highpage);

void clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *kaddr = kmap_atomic(page, KM_USER0);

	clear_page(kaddr);

	if (pages_do_alias((unsigned long)kaddr, vaddr & PAGE_MASK))
		__flush_wback_region(kaddr, PAGE_SIZE);

	kunmap_atomic(kaddr, KM_USER0);
}
EXPORT_SYMBOL(clear_user_highpage);
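
/*
 * Called on a PTE update; if a deferred write left the page flagged
 * PG_dcache_dirty and the kernel mapping aliases the new user address,
 * write the kernel-side cache lines back now.
 */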
void __update_cache(struct vm_area_struct *vma,
		    unsigned long address, pte_t pte)
{
	struct page *page;
	unsigned long pfn = pte_pfn(pte);

	if (!boot_cpu_data.dcache.n_aliases)
		return;

	page = pfn_to_page(pfn);
	if (pfn_valid(pfn) && page_mapping(page)) {
		int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags);
		if (dirty) {
			unsigned long addr = (unsigned long)page_address(page);

			if (pages_do_alias(addr, address & PAGE_MASK))
				__flush_wback_region((void *)addr, PAGE_SIZE);
		}
	}
}
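
/* Flush an anonymous page whose user mapping aliases its kernel address. */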
void __flush_anon_page(struct page *page, unsigned long vmaddr)
{
	unsigned long addr = (unsigned long) page_address(page);

	if (pages_do_alias(addr, vmaddr)) {
		if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
		    !test_bit(PG_dcache_dirty, &page->flags)) {
			void *kaddr;

			kaddr = kmap_coherent(page, vmaddr);
			__flush_wback_region((void *)kaddr, PAGE_SIZE);
			kunmap_coherent();
		} else
			__flush_wback_region((void *)addr, PAGE_SIZE);
	}
}
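
/*
 * The remaining entry points fan out to every CPU via on_each_cpu(),
 * invoking whichever family-specific local_* handler cpu_cache_init()
 * installed.
 */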
void flush_cache_all(void)
{
	on_each_cpu(local_flush_cache_all, NULL, 1);
}

void flush_cache_mm(struct mm_struct *mm)
{
	on_each_cpu(local_flush_cache_mm, mm, 1);
}

void flush_cache_dup_mm(struct mm_struct *mm)
{
	on_each_cpu(local_flush_cache_dup_mm, mm, 1);
}
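
/*
 * on_each_cpu() passes a single pointer, so calls that take more than
 * one argument marshal them through a struct flusher_data on the stack.
 */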
void flush_cache_page(struct vm_area_struct *vma, unsigned long addr,
		      unsigned long pfn)
{
	struct flusher_data data;

	data.vma = vma;
	data.addr1 = addr;
	data.addr2 = pfn;

	on_each_cpu(local_flush_cache_page, (void *)&data, 1);
}

void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end)
{
	struct flusher_data data;

	data.vma = vma;
	data.addr1 = start;
	data.addr2 = end;

	on_each_cpu(local_flush_cache_range, (void *)&data, 1);
}

void flush_dcache_page(struct page *page)
{
	on_each_cpu(local_flush_dcache_page, page, 1);
}

void flush_icache_range(unsigned long start, unsigned long end)
{
	struct flusher_data data;

	data.vma = NULL;
	data.addr1 = start;
	data.addr2 = end;

	on_each_cpu(local_flush_icache_range, (void *)&data, 1);
}

void flush_icache_page(struct vm_area_struct *vma, struct page *page)
{
	/* Nothing uses the VMA, so just pass the struct page along */
	on_each_cpu(local_flush_icache_page, page, 1);
}

void flush_cache_sigtramp(unsigned long address)
{
	on_each_cpu(local_flush_cache_sigtramp, (void *)address, 1);
}
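
/*
 * Derive the cache alias parameters: alias_mask selects the virtual
 * address bits above PAGE_SHIFT that index the cache, and n_aliases is
 * the number of distinct page colours. For example, a 16KB direct-mapped
 * cache with 32-byte lines (512 sets, entry_shift of 5) against 4KB
 * pages gives alias_mask = 0x3000 and n_aliases = 4.
 */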
static void compute_alias(struct cache_info *c)
{
	c->alias_mask = ((c->sets - 1) << c->entry_shift) & ~(PAGE_SIZE - 1);
	c->n_aliases = c->alias_mask ? (c->alias_mask >> PAGE_SHIFT) + 1 : 0;
}

static void __init emit_cache_params(void)
{
	printk(KERN_NOTICE "I-cache : n_ways=%d n_sets=%d way_incr=%d\n",
		boot_cpu_data.icache.ways,
		boot_cpu_data.icache.sets,
		boot_cpu_data.icache.way_incr);
	printk(KERN_NOTICE "I-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
		boot_cpu_data.icache.entry_mask,
		boot_cpu_data.icache.alias_mask,
		boot_cpu_data.icache.n_aliases);
	printk(KERN_NOTICE "D-cache : n_ways=%d n_sets=%d way_incr=%d\n",
		boot_cpu_data.dcache.ways,
		boot_cpu_data.dcache.sets,
		boot_cpu_data.dcache.way_incr);
	printk(KERN_NOTICE "D-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
		boot_cpu_data.dcache.entry_mask,
		boot_cpu_data.dcache.alias_mask,
		boot_cpu_data.dcache.n_aliases);

	/*
	 * Emit Secondary Cache parameters if the CPU has a probed L2.
	 */
	if (boot_cpu_data.flags & CPU_HAS_L2_CACHE) {
		printk(KERN_NOTICE "S-cache : n_ways=%d n_sets=%d way_incr=%d\n",
			boot_cpu_data.scache.ways,
			boot_cpu_data.scache.sets,
			boot_cpu_data.scache.way_incr);
		printk(KERN_NOTICE "S-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
			boot_cpu_data.scache.entry_mask,
			boot_cpu_data.scache.alias_mask,
			boot_cpu_data.scache.n_aliases);
	}
}
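
/*
 * Boot-time cache setup: compute the alias parameters for each cache,
 * default the region flush hooks to no-ops, then hand off to the
 * matching CPU family initializer (declared __weak so the kernel still
 * links when a given family's cache code is not built in).
 */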
void __init cpu_cache_init(void)
{
	compute_alias(&boot_cpu_data.icache);
	compute_alias(&boot_cpu_data.dcache);
	compute_alias(&boot_cpu_data.scache);

	__flush_wback_region		= noop__flush_region;
	__flush_purge_region		= noop__flush_region;
	__flush_invalidate_region	= noop__flush_region;

	if (boot_cpu_data.family == CPU_FAMILY_SH2) {
		extern void __weak sh2_cache_init(void);

		sh2_cache_init();
	}

	if (boot_cpu_data.family == CPU_FAMILY_SH2A) {
		extern void __weak sh2a_cache_init(void);

		sh2a_cache_init();
	}

	if (boot_cpu_data.family == CPU_FAMILY_SH3) {
		extern void __weak sh3_cache_init(void);

		sh3_cache_init();

		if ((boot_cpu_data.type == CPU_SH7705) &&
		    (boot_cpu_data.dcache.sets == 512)) {
			extern void __weak sh7705_cache_init(void);

			sh7705_cache_init();
		}
	}

	if ((boot_cpu_data.family == CPU_FAMILY_SH4) ||
	    (boot_cpu_data.family == CPU_FAMILY_SH4A) ||
	    (boot_cpu_data.family == CPU_FAMILY_SH4AL_DSP)) {
		extern void __weak sh4_cache_init(void);

		sh4_cache_init();
	}

	if (boot_cpu_data.family == CPU_FAMILY_SH5) {
		extern void __weak sh5_cache_init(void);

		sh5_cache_init();
	}

	emit_cache_params();
}