#ifndef _M68K_CACHEFLUSH_H
#define _M68K_CACHEFLUSH_H

#include <linux/mm.h>
#ifdef CONFIG_COLDFIRE
#include <asm/mcfsim.h>
#endif

/* cache code */
#define FLUSH_I_AND_D	(0x00000808)
#define FLUSH_I		(0x00000008)

#ifndef ICACHE_MAX_ADDR
#define ICACHE_MAX_ADDR	0
#define ICACHE_SET_MASK	0
#define DCACHE_MAX_ADDR	0
#define DCACHE_SET_MASK	0
#endif
#ifndef CACHE_MODE
#define CACHE_MODE	0
#define CACR_ICINVA	0
#define CACR_DCINVA	0
#define CACR_BCINVA	0
#endif
/*
 * The ColdFire architecture has no way to clear individual cache lines, so we
 * are stuck invalidating all the cache entries when we want a clear operation.
 */
static inline void clear_cf_icache(unsigned long start, unsigned long end)
{
	__asm__ __volatile__ (
		"movec %0,%%cacr\n\t"
		"nop"
		:
		: "r" (CACHE_MODE | CACR_ICINVA | CACR_BCINVA));
}

static inline void clear_cf_dcache(unsigned long start, unsigned long end)
{
	__asm__ __volatile__ (
		"movec %0,%%cacr\n\t"
		"nop"
		:
		: "r" (CACHE_MODE | CACR_DCINVA));
}

static inline void clear_cf_bcache(unsigned long start, unsigned long end)
{
	__asm__ __volatile__ (
		"movec %0,%%cacr\n\t"
		"nop"
		:
		: "r" (CACHE_MODE | CACR_ICINVA | CACR_BCINVA | CACR_DCINVA));
}
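/*
 * Illustrative sketch (hypothetical helper, for clarity only): because the
 * clear_cf_*() helpers above ignore their start/end arguments, clearing
 * even a single page still invalidates the entire d-cache, so narrowing
 * the range buys nothing on ColdFire.
 */
static inline void clear_cf_dcache_page(unsigned long vaddr)
{
	/* start/end are accepted but unused; the whole d-cache is dropped */
	clear_cf_dcache(vaddr, vaddr + PAGE_SIZE - 1);
}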
/*
 * Use the ColdFire cpushl instruction to push (and invalidate) cache lines.
 * The start and end addresses are cache line numbers not memory addresses.
 */
static inline void flush_cf_icache(unsigned long start, unsigned long end)
{
	unsigned long set;

	/*
	 * Each iteration issues four cpushl ops at consecutive line
	 * numbers; the inline asm leaves %0 advanced by 3, so stepping
	 * by (0x10 - 3) gives a net advance of 0x10 per iteration.
	 */
	for (set = start; set <= end; set += (0x10 - 3)) {
		__asm__ __volatile__ (
			"cpushl %%ic,(%0)\n\t"
			"addq%.l #1,%0\n\t"
			"cpushl %%ic,(%0)\n\t"
			"addq%.l #1,%0\n\t"
			"cpushl %%ic,(%0)\n\t"
			"addq%.l #1,%0\n\t"
			"cpushl %%ic,(%0)"
			: "=a" (set)
			: "a" (set));
	}
}

static inline void flush_cf_dcache(unsigned long start, unsigned long end)
{
	unsigned long set;

	for (set = start; set <= end; set += (0x10 - 3)) {
		__asm__ __volatile__ (
			"cpushl %%dc,(%0)\n\t"
			"addq%.l #1,%0\n\t"
			"cpushl %%dc,(%0)\n\t"
			"addq%.l #1,%0\n\t"
			"cpushl %%dc,(%0)\n\t"
			"addq%.l #1,%0\n\t"
			"cpushl %%dc,(%0)"
			: "=a" (set)
			: "a" (set));
	}
}

static inline void flush_cf_bcache(unsigned long start, unsigned long end)
{
	unsigned long set;

	for (set = start; set <= end; set += (0x10 - 3)) {
		__asm__ __volatile__ (
			"cpushl %%bc,(%0)\n\t"
			"addq%.l #1,%0\n\t"
			"cpushl %%bc,(%0)\n\t"
			"addq%.l #1,%0\n\t"
			"cpushl %%bc,(%0)\n\t"
			"addq%.l #1,%0\n\t"
			"cpushl %%bc,(%0)"
			: "=a" (set)
			: "a" (set));
	}
}
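/*
 * Illustrative sketch (hypothetical helper): mapping a virtual address
 * range onto the cache line numbers that flush_cf_*() expect. Masking
 * with ICACHE_SET_MASK can wrap, in which case the range is flushed in
 * two pieces; __flush_page_to_ram() below uses the same pattern.
 */
static inline void flush_cf_icache_range(unsigned long addr, unsigned long len)
{
	unsigned long start = addr & ICACHE_SET_MASK;
	unsigned long end = (addr + len - 1) & ICACHE_SET_MASK;

	if (start > end) {
		flush_cf_icache(0, end);
		end = ICACHE_MAX_ADDR;
	}
	flush_cf_icache(start, end);
}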
/*
 * Cache handling functions
 */

static inline void flush_icache(void)
{
	if (CPU_IS_COLDFIRE) {
		flush_cf_icache(0, ICACHE_MAX_ADDR);
	} else if (CPU_IS_040_OR_060) {
		asm volatile (	"nop\n"
			"	.chip 68040\n"
			"	cpusha %bc\n"
			"	.chip 68k");
	} else {
		unsigned long tmp;
		asm volatile (	"movec %%cacr,%0\n"
			"	or.w %1,%0\n"
			"	movec %0,%%cacr"
			: "=&d" (tmp)
			: "id" (FLUSH_I));
	}
}
/*
 * Invalidate the cache for the specified memory range.
 * The range starts at the given physical address and
 * covers the given number of bytes.
 */
extern void cache_clear(unsigned long paddr, int len);

/*
 * Push any dirty cache in the specified memory range.
 * The range starts at the given physical address and
 * covers the given number of bytes.
 */
extern void cache_push(unsigned long paddr, int len);

/*
 * Push and invalidate pages in the specified user virtual
 * memory range.
 */
extern void cache_push_v(unsigned long vaddr, int len);
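/*
 * Illustrative sketch of the intended calling pattern around DMA
 * (function names here are hypothetical): push dirty lines before a
 * device reads the buffer, and invalidate before the CPU reads data
 * the device wrote.
 */
static inline void example_dma_to_device(unsigned long paddr, int len)
{
	cache_push(paddr, len);		/* write back dirty cache lines */
	/* ... start the DMA transfer here ... */
}

static inline void example_dma_from_device(unsigned long paddr, int len)
{
	/* ... wait for the DMA transfer to complete ... */
	cache_clear(paddr, len);	/* drop now-stale cache lines */
}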
/* This is needed whenever the virtual mapping of the current
   process changes.  */
#define __flush_cache_all()					\
({								\
	if (CPU_IS_COLDFIRE) {					\
		flush_cf_dcache(0, DCACHE_MAX_ADDR);		\
	} else if (CPU_IS_040_OR_060) {				\
		__asm__ __volatile__("nop\n\t"			\
				     ".chip 68040\n\t"		\
				     "cpusha %dc\n\t"		\
				     ".chip 68k");		\
	} else {						\
		unsigned long _tmp;				\
		__asm__ __volatile__("movec %%cacr,%0\n\t"	\
				     "orw %1,%0\n\t"		\
				     "movec %0,%%cacr"		\
				     : "=&d" (_tmp)		\
				     : "di" (FLUSH_I_AND_D));	\
	}							\
})
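/*
 * Illustrative sketch (hypothetical caller): per the comment above, the
 * flush is meant to run whenever the current task's virtual mapping is
 * about to change, e.g. just before new page tables are installed.
 */
static inline void example_prepare_mapping_change(void)
{
	__flush_cache_all();	/* push lines tied to the old mapping */
}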
#define __flush_cache_030()					\
({								\
	if (CPU_IS_020_OR_030) {				\
		unsigned long _tmp;				\
		__asm__ __volatile__("movec %%cacr,%0\n\t"	\
				     "orw %1,%0\n\t"		\
				     "movec %0,%%cacr"		\
				     : "=&d" (_tmp)		\
				     : "di" (FLUSH_I_AND_D));	\
	}							\
})
#define flush_cache_all() __flush_cache_all()

#define flush_cache_vmap(start, end)	flush_cache_all()
#define flush_cache_vunmap(start, end)	flush_cache_all()

static inline void flush_cache_mm(struct mm_struct *mm)
{
	if (mm == current->mm)
		__flush_cache_030();
}

#define flush_cache_dup_mm(mm)		flush_cache_mm(mm)

/* flush_cache_range/flush_cache_page once had to be macros to avoid
   a circular dependency on linux/mm.h, which includes this file... */
static inline void flush_cache_range(struct vm_area_struct *vma,
				     unsigned long start,
				     unsigned long end)
{
	if (vma->vm_mm == current->mm)
		__flush_cache_030();
}

static inline void flush_cache_page(struct vm_area_struct *vma,
				    unsigned long vmaddr, unsigned long pfn)
{
	if (vma->vm_mm == current->mm)
		__flush_cache_030();
}
/* Push the page at kernel virtual address and clear the icache */
/* RZ: use cpush %bc instead of cpush %dc, cinv %ic */
static inline void __flush_page_to_ram(void *vaddr)
{
	if (CPU_IS_COLDFIRE) {
		unsigned long addr, start, end;
		addr = ((unsigned long) vaddr) & ~(PAGE_SIZE - 1);
		start = addr & ICACHE_SET_MASK;
		end = (addr + PAGE_SIZE - 1) & ICACHE_SET_MASK;
		if (start > end) {
			/* set numbers wrapped; flush the range in two pieces */
			flush_cf_bcache(0, end);
			end = ICACHE_MAX_ADDR;
		}
		flush_cf_bcache(start, end);
	} else if (CPU_IS_040_OR_060) {
		__asm__ __volatile__("nop\n\t"
				     ".chip 68040\n\t"
				     "cpushp %%bc,(%0)\n\t"
				     ".chip 68k"
				     : : "a" (__pa(vaddr)));
	} else {
		unsigned long _tmp;
		__asm__ __volatile__("movec %%cacr,%0\n\t"
				     "orw %1,%0\n\t"
				     "movec %0,%%cacr"
				     : "=&d" (_tmp)
				     : "di" (FLUSH_I));
	}
}
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE	1
#define flush_dcache_page(page)		__flush_page_to_ram(page_address(page))
#define flush_dcache_mmap_lock(mapping)		do { } while (0)
#define flush_dcache_mmap_unlock(mapping)	do { } while (0)
#define flush_icache_page(vma, page)	__flush_page_to_ram(page_address(page))
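/*
 * Illustrative sketch (hypothetical helper): code that fills a page
 * cache page through its kernel mapping calls flush_dcache_page()
 * before user space can observe the data.
 */
static inline void example_fill_page(struct page *page, const void *src)
{
	memcpy(page_address(page), src, PAGE_SIZE);
	flush_dcache_page(page);
}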
extern void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
				    unsigned long addr, int len);
extern void flush_icache_range(unsigned long address, unsigned long endaddr);

static inline void copy_to_user_page(struct vm_area_struct *vma,
				     struct page *page, unsigned long vaddr,
				     void *dst, void *src, int len)
{
	flush_cache_page(vma, vaddr, page_to_pfn(page));
	memcpy(dst, src, len);
	flush_icache_user_range(vma, page, vaddr, len);
}

static inline void copy_from_user_page(struct vm_area_struct *vma,
				       struct page *page, unsigned long vaddr,
				       void *dst, void *src, int len)
{
	flush_cache_page(vma, vaddr, page_to_pfn(page));
	memcpy(dst, src, len);
}
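/*
 * Illustrative sketch (hypothetical helper): ptrace-style code that
 * patches another task's text page through a kernel mapping relies on
 * copy_to_user_page() to keep the icache coherent with the new bytes.
 */
static inline void example_poke_text(struct vm_area_struct *vma,
				     struct page *page, unsigned long vaddr,
				     void *insn, int len)
{
	void *kaddr = page_address(page) + (vaddr & ~PAGE_MASK);

	copy_to_user_page(vma, page, vaddr, kaddr, insn, len);
}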
#endif	/* _M68K_CACHEFLUSH_H */