/*
 *  arch/arm/include/asm/cacheflush.h
 *
 *  Copyright (C) 1999-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _ASMARM_CACHEFLUSH_H
#define _ASMARM_CACHEFLUSH_H

#include <linux/mm.h>

#include <asm/glue.h>
#include <asm/shmparam.h>
#include <asm/cachetype.h>
#define CACHE_COLOUR(vaddr)	(((vaddr) & (SHMLBA - 1)) >> PAGE_SHIFT)
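
/*
 * Worked example (illustrative): on an aliasing VIPT cache with
 * SHMLBA == 4 * PAGE_SIZE (16KB with 4KB pages, PAGE_SHIFT == 12),
 * there are four page colours.  For a virtual address 0x40003000:
 *
 *	CACHE_COLOUR(0x40003000) == ((0x40003000 & 0x3fff) >> 12) == 3
 *
 * Two virtual mappings of the same physical page can alias in the
 * cache only when their colours differ, which is why several of the
 * flushing decisions below depend on the cache type.
 */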
/*
 *	Cache Model
 *	===========
 */
#undef _CACHE
#undef MULTI_CACHE

#if defined(CONFIG_CPU_CACHE_V3)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE v3
# endif
#endif

#if defined(CONFIG_CPU_CACHE_V4)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE v4
# endif
#endif

#if defined(CONFIG_CPU_ARM920T) || defined(CONFIG_CPU_ARM922T) || \
    defined(CONFIG_CPU_ARM925T) || defined(CONFIG_CPU_ARM1020)
# define MULTI_CACHE 1
#endif

#if defined(CONFIG_CPU_FA526)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE fa
# endif
#endif

#if defined(CONFIG_CPU_ARM926T)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE arm926
# endif
#endif

#if defined(CONFIG_CPU_ARM940T)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE arm940
# endif
#endif

#if defined(CONFIG_CPU_ARM946E)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE arm946
# endif
#endif

#if defined(CONFIG_CPU_CACHE_V4WB)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE v4wb
# endif
#endif

#if defined(CONFIG_CPU_XSCALE)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE xscale
# endif
#endif

#if defined(CONFIG_CPU_XSC3)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE xsc3
# endif
#endif

#if defined(CONFIG_CPU_MOHAWK)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE mohawk
# endif
#endif

#if defined(CONFIG_CPU_FEROCEON)
# define MULTI_CACHE 1
#endif

#if defined(CONFIG_CPU_V6)
//# ifdef _CACHE
# define MULTI_CACHE 1
//# else
//#  define _CACHE v6
//# endif
#endif

#if defined(CONFIG_CPU_V7)
//# ifdef _CACHE
# define MULTI_CACHE 1
//# else
//#  define _CACHE v7
//# endif
#endif

#if !defined(_CACHE) && !defined(MULTI_CACHE)
#error Unknown cache maintenance model
#endif
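
/*
 * Example (illustrative): with only CONFIG_CPU_ARM926T selected, the
 * block above leaves _CACHE defined as arm926 and MULTI_CACHE
 * undefined, so every cache operation below binds at compile time,
 * e.g.:
 *
 *	__cpuc_flush_kern_all  ->  arm926_flush_kern_cache_all
 *
 * If a second cache type is also selected (say CONFIG_CPU_XSCALE),
 * MULTI_CACHE is defined instead and each operation becomes an
 * indirect call through the cpu_cache function table filled in at
 * boot for the CPU actually detected.
 */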
/*
 * This flag is used to indicate that the page pointed to by a pte
 * is dirty and requires cleaning before returning it to the user.
 */
#define PG_dcache_dirty PG_arch_1

/*
 *	MM Cache Management
 *	===================
 *
 *	The arch/arm/mm/cache-*.S and arch/arm/mm/proc-*.S files
 *	implement these methods.
 *
 *	Start addresses are inclusive and end addresses are exclusive;
 *	start addresses should be rounded down, end addresses up.
 *
 *	See Documentation/cachetlb.txt for more information.
 *	Please note that the implementation of these, and the required
 *	effects, are cache-type (VIVT/VIPT/PIPT) specific.
 *
 *	flush_cache_kern_all()
 *
 *		Unconditionally clean and invalidate the entire cache.
 *
 *	flush_cache_user_mm(mm)
 *
 *		Clean and invalidate all user space cache entries
 *		before a change of page tables.
 *
 *	flush_cache_user_range(start, end, flags)
 *
 *		Clean and invalidate a range of cache entries in the
 *		specified address space before a change of page tables.
 *		- start - user start address (inclusive, page aligned)
 *		- end   - user end address   (exclusive, page aligned)
 *		- flags - vma->vm_flags field
 *
 *	coherent_kern_range(start, end)
 *
 *		Ensure coherency between the I-cache and the D-cache in the
 *		region described by start, end.  If you have non-snooping
 *		Harvard caches, you need to implement this function.
 *		- start - virtual start address
 *		- end   - virtual end address
 *
 *	DMA Cache Coherency
 *	===================
 *
 *	dma_flush_range(start, end)
 *
 *		Clean and invalidate the specified virtual address range.
 *		- start - virtual start address
 *		- end   - virtual end address
 */
struct cpu_cache_fns {
	void (*flush_kern_all)(void);
	void (*flush_user_all)(void);
	void (*flush_user_range)(unsigned long, unsigned long, unsigned int);

	void (*coherent_kern_range)(unsigned long, unsigned long);
	void (*coherent_user_range)(unsigned long, unsigned long);
	void (*flush_kern_dcache_area)(void *, size_t);

	void (*dma_map_area)(const void *, size_t, int);
	void (*dma_unmap_area)(const void *, size_t, int);
	void (*dma_flush_range)(const void *, const void *);
};

struct outer_cache_fns {
	void (*inv_range)(unsigned long, unsigned long);
	void (*clean_range)(unsigned long, unsigned long);
	void (*flush_range)(unsigned long, unsigned long);
};
/*
 * Select the calling method
 */
#ifdef MULTI_CACHE

extern struct cpu_cache_fns cpu_cache;

#define __cpuc_flush_kern_all		cpu_cache.flush_kern_all
#define __cpuc_flush_user_all		cpu_cache.flush_user_all
#define __cpuc_flush_user_range		cpu_cache.flush_user_range
#define __cpuc_coherent_kern_range	cpu_cache.coherent_kern_range
#define __cpuc_coherent_user_range	cpu_cache.coherent_user_range
#define __cpuc_flush_dcache_area	cpu_cache.flush_kern_dcache_area

/*
 * These are private to the dma-mapping API.  Do not use directly.
 * Their sole purpose is to ensure that data held in the cache
 * is visible to DMA, or data written by DMA to system memory is
 * visible to the CPU.
 */
#define dmac_map_area			cpu_cache.dma_map_area
#define dmac_unmap_area			cpu_cache.dma_unmap_area
#define dmac_flush_range		cpu_cache.dma_flush_range

#else

#define __cpuc_flush_kern_all		__glue(_CACHE,_flush_kern_cache_all)
#define __cpuc_flush_user_all		__glue(_CACHE,_flush_user_cache_all)
#define __cpuc_flush_user_range		__glue(_CACHE,_flush_user_cache_range)
#define __cpuc_coherent_kern_range	__glue(_CACHE,_coherent_kern_range)
#define __cpuc_coherent_user_range	__glue(_CACHE,_coherent_user_range)
#define __cpuc_flush_dcache_area	__glue(_CACHE,_flush_kern_dcache_area)

extern void __cpuc_flush_kern_all(void);
extern void __cpuc_flush_user_all(void);
extern void __cpuc_flush_user_range(unsigned long, unsigned long, unsigned int);
extern void __cpuc_coherent_kern_range(unsigned long, unsigned long);
extern void __cpuc_coherent_user_range(unsigned long, unsigned long);
extern void __cpuc_flush_dcache_area(void *, size_t);

/*
 * These are private to the dma-mapping API.  Do not use directly.
 * Their sole purpose is to ensure that data held in the cache
 * is visible to DMA, or data written by DMA to system memory is
 * visible to the CPU.
 */
#define dmac_map_area			__glue(_CACHE,_dma_map_area)
#define dmac_unmap_area			__glue(_CACHE,_dma_unmap_area)
#define dmac_flush_range		__glue(_CACHE,_dma_flush_range)

extern void dmac_map_area(const void *, size_t, int);
extern void dmac_unmap_area(const void *, size_t, int);
extern void dmac_flush_range(const void *, const void *);

#endif
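
/*
 * Sketch of how the glue works (asm/glue.h defines __glue as a
 * token-pasting macro, along the lines of):
 *
 *	#define ____glue(name,fn)	name##fn
 *	#define __glue(name,fn)		____glue(name,fn)
 *
 * so in a single-cache build with _CACHE defined as v4wb, a call such
 * as
 *
 *	__cpuc_coherent_kern_range(start, end);
 *
 * compiles directly into v4wb_coherent_kern_range(start, end) with no
 * indirection; the MULTI_CACHE build pays one pointer load through
 * cpu_cache instead.
 */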
#ifdef CONFIG_OUTER_CACHE

extern struct outer_cache_fns outer_cache;

static inline void outer_inv_range(unsigned long start, unsigned long end)
{
	if (outer_cache.inv_range)
		outer_cache.inv_range(start, end);
}
static inline void outer_clean_range(unsigned long start, unsigned long end)
{
	if (outer_cache.clean_range)
		outer_cache.clean_range(start, end);
}
static inline void outer_flush_range(unsigned long start, unsigned long end)
{
	if (outer_cache.flush_range)
		outer_cache.flush_range(start, end);
}

#else

static inline void outer_inv_range(unsigned long start, unsigned long end)
{ }
static inline void outer_clean_range(unsigned long start, unsigned long end)
{ }
static inline void outer_flush_range(unsigned long start, unsigned long end)
{ }

#endif
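
/*
 * Illustrative sketch (not part of this header): how inner and outer
 * maintenance combine for streaming DMA.  The real sequencing lives in
 * arch/arm/mm/dma-mapping.c; vaddr/paddr/size here are hypothetical.
 * Note that dmac_* operate on virtual addresses while the outer_*
 * helpers take physical addresses.
 *
 *	CPU -> device: push dirty lines out, inner cache before outer:
 *		dmac_map_area(vaddr, size, DMA_TO_DEVICE);
 *		outer_clean_range(paddr, paddr + size);
 *
 *	Device -> CPU: discard stale lines, outer cache before inner:
 *		outer_inv_range(paddr, paddr + size);
 *		dmac_unmap_area(vaddr, size, DMA_FROM_DEVICE);
 */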
/*
 * Copy user data from/to a page which is mapped into a different
 * process's address space.  Really, we want to allow our "user
 * space" model to handle this.
 */
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
	do {							\
		memcpy(dst, src, len);				\
		flush_ptrace_access(vma, page, vaddr, dst, len, 1);\
	} while (0)

#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
	do {							\
		memcpy(dst, src, len);				\
	} while (0)
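
/*
 * These are used by access_process_vm() and friends, e.g. ptrace
 * poking a breakpoint into another task's text.  Only the write
 * direction needs cache maintenance: the new instructions must be
 * pushed out of the D-cache and stale I-cache lines discarded, which
 * is what flush_ptrace_access() does; the read direction needs none.
 */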
/*
 * Convert calls to our calling convention.
 */
#define flush_cache_all()		__cpuc_flush_kern_all()

static inline void vivt_flush_cache_mm(struct mm_struct *mm)
{
	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
		__cpuc_flush_user_all();
}

static inline void
vivt_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)))
		__cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
					vma->vm_flags);
}

static inline void
vivt_flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
{
	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
		unsigned long addr = user_addr & PAGE_MASK;
		__cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
	}
}

static inline void
vivt_flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
			 unsigned long uaddr, void *kaddr,
			 unsigned long len, int write)
{
	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
		unsigned long addr = (unsigned long)kaddr;
		__cpuc_coherent_kern_range(addr, addr + len);
	}
}

#ifndef CONFIG_CPU_CACHE_VIPT
#define flush_cache_mm(mm) \
		vivt_flush_cache_mm(mm)
#define flush_cache_range(vma,start,end) \
		vivt_flush_cache_range(vma,start,end)
#define flush_cache_page(vma,addr,pfn) \
		vivt_flush_cache_page(vma,addr,pfn)
#define flush_ptrace_access(vma,page,ua,ka,len,write) \
		vivt_flush_ptrace_access(vma,page,ua,ka,len,write)
#else
extern void flush_cache_mm(struct mm_struct *mm);
extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn);
extern void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
				unsigned long uaddr, void *kaddr,
				unsigned long len, int write);
#endif
#define flush_cache_dup_mm(mm) flush_cache_mm(mm)

/*
 * flush_cache_user_range is used when we want to ensure that the
 * Harvard caches are synchronised for the user space address range.
 * This is used for the ARM private sys_cacheflush system call.
 */
#define flush_cache_user_range(vma,start,end) \
	__cpuc_coherent_user_range((start) & PAGE_MASK, PAGE_ALIGN(end))
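
/*
 * Illustrative userspace counterpart (not part of this header): a JIT
 * that writes instructions and then jumps to them must issue the
 * private ARM cacheflush syscall, which ends up in the macro above.
 * A minimal sketch, assuming a code buffer buf of len bytes:
 *
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *	#include <asm/unistd.h>		(for __ARM_NR_cacheflush)
 *
 *	syscall(__ARM_NR_cacheflush, buf, (char *)buf + len, 0);
 *
 * Without it, the new code may still sit in the D-cache while the CPU
 * fetches stale bytes through the I-cache.
 */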
/*
 * Perform necessary cache operations to ensure that data previously
 * stored within this range of addresses can be executed by the CPU.
 */
#define flush_icache_range(s,e)		__cpuc_coherent_kern_range(s,e)

/*
 * Perform necessary cache operations to ensure that the TLB will
 * see data written in the specified area.
 */
#define clean_dcache_area(start,size)	cpu_dcache_clean_area(start, size)

/*
 * flush_dcache_page is used when the kernel has written to the page
 * cache page at virtual address page->virtual.
 *
 * If this page isn't mapped (ie, page_mapping == NULL), or it might
 * have userspace mappings, then we _must_ always clean + invalidate
 * the dcache entries associated with the kernel mapping.
 *
 * Otherwise we can defer the operation, and clean the cache when we are
 * about to change to user space.  This is the same method as used on SPARC64.
 * See update_mmu_cache for the user space part.
 */
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page *);
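
/*
 * Sketch of the deferral described above (illustrative; the real logic
 * is in arch/arm/mm/flush.c and arch/arm/mm/fault-armv.c):
 *
 *	void flush_dcache_page(struct page *page)
 *	{
 *		struct address_space *mapping = page_mapping(page);
 *
 *		if (mapping && !mapping_mapped(mapping))
 *			set_bit(PG_dcache_dirty, &page->flags);
 *		else
 *			... clean + invalidate the kernel alias now ...
 *	}
 *
 * update_mmu_cache() later tests and clears PG_dcache_dirty when the
 * page is mapped into user space, completing the deferred flush.
 */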
static inline void __flush_icache_all(void)
{
#ifdef CONFIG_ARM_ERRATA_411920
	extern void v6_icache_inval_all(void);
	v6_icache_inval_all();
#else
	asm("mcr	p15, 0, %0, c7, c5, 0	@ invalidate I-cache\n"
	    :
	    : "r" (0));
#endif
}

#define ARCH_HAS_FLUSH_ANON_PAGE
static inline void flush_anon_page(struct vm_area_struct *vma,
			 struct page *page, unsigned long vmaddr)
{
	extern void __flush_anon_page(struct vm_area_struct *vma,
				struct page *, unsigned long);
	if (PageAnon(page))
		__flush_anon_page(vma, page, vmaddr);
}

#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
static inline void flush_kernel_dcache_page(struct page *page)
{
	/* highmem pages are always flushed upon kunmap already */
	if ((cache_is_vivt() || cache_is_vipt_aliasing()) && !PageHighMem(page))
		__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
}
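
/*
 * Typical caller pattern (illustrative; data and len are hypothetical):
 * a driver that writes to a page-cache page through the kernel mapping
 * must flush the kernel alias afterwards so that user mappings on an
 * aliasing cache see the new data:
 *
 *	void *kaddr = kmap_atomic(page, KM_USER0);
 *	memcpy(kaddr, data, len);
 *	kunmap_atomic(kaddr, KM_USER0);
 *	flush_kernel_dcache_page(page);
 */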
#define flush_dcache_mmap_lock(mapping) \
	spin_lock_irq(&(mapping)->tree_lock)
#define flush_dcache_mmap_unlock(mapping) \
	spin_unlock_irq(&(mapping)->tree_lock)

#define flush_icache_user_range(vma,page,addr,len) \
	flush_dcache_page(page)

/*
 * We don't appear to need to do anything here.  In fact, if we did, we'd
 * duplicate cache flushing elsewhere performed by flush_dcache_page().
 */
#define flush_icache_page(vma,page)	do { } while (0)

/*
 * flush_cache_vmap() is used when creating mappings (eg, via vmap,
 * vmalloc, ioremap etc) in kernel space for pages.  On non-VIPT
 * caches, since the direct-mappings of these pages may contain cached
 * data, we need to do a full cache flush to ensure that writebacks
 * don't corrupt data placed into these pages via the new mappings.
 */
static inline void flush_cache_vmap(unsigned long start, unsigned long end)
{
	if (!cache_is_vipt_nonaliasing())
		flush_cache_all();
	else
		/*
		 * set_pte_at() called from vmap_pte_range() does not
		 * have a DSB after cleaning the cache line.
		 */
		dsb();
}

static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
{
	if (!cache_is_vipt_nonaliasing())
		flush_cache_all();
}

#endif