/*
 *  arch/arm/include/asm/cacheflush.h
 *
 *  Copyright (C) 1999-2002 Russell King
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2 as
 *  published by the Free Software Foundation.
 */
#ifndef _ASMARM_CACHEFLUSH_H
#define _ASMARM_CACHEFLUSH_H

#include <linux/sched.h>
#include <linux/mm.h>

#include <asm/glue.h>
#include <asm/shmparam.h>
#include <asm/cachetype.h>

#define CACHE_COLOUR(vaddr)	((vaddr & (SHMLBA - 1)) >> PAGE_SHIFT)
/*
 *	Cache Model
 *	===========
 */
#undef _CACHE
#undef MULTI_CACHE
#if defined(CONFIG_CPU_CACHE_V3)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE v3
# endif
#endif

#if defined(CONFIG_CPU_CACHE_V4)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE v4
# endif
#endif

#if defined(CONFIG_CPU_ARM920T) || defined(CONFIG_CPU_ARM922T) || \
    defined(CONFIG_CPU_ARM925T) || defined(CONFIG_CPU_ARM1020)
# define MULTI_CACHE 1
#endif

#if defined(CONFIG_CPU_ARM926T)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE arm926
# endif
#endif

#if defined(CONFIG_CPU_ARM940T)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE arm940
# endif
#endif

#if defined(CONFIG_CPU_ARM946E)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE arm946
# endif
#endif

#if defined(CONFIG_CPU_CACHE_V4WB)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE v4wb
# endif
#endif

#if defined(CONFIG_CPU_XSCALE)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE xscale
# endif
#endif

#if defined(CONFIG_CPU_XSC3)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE xsc3
# endif
#endif

#if defined(CONFIG_CPU_FEROCEON)
# define MULTI_CACHE 1
#endif

#if defined(CONFIG_CPU_V6)
//# ifdef _CACHE
# define MULTI_CACHE 1
//# else
//#  define _CACHE v6
//# endif
#endif

#if defined(CONFIG_CPU_V7)
//# ifdef _CACHE
# define MULTI_CACHE 1
//# else
//#  define _CACHE v7
//# endif
#endif
#if !defined(_CACHE) && !defined(MULTI_CACHE)
#error Unknown cache maintenance model
#endif
/*
 * This flag is used to indicate that the page pointed to by a pte
 * is dirty and requires cleaning before returning it to the user.
 */
#define PG_dcache_dirty PG_arch_1
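
/*
 * An illustrative sketch of the deferred-flush scheme (assumed from the
 * arch/arm/mm implementation, not part of this header): flush_dcache_page()
 * may set the flag instead of flushing immediately, and update_mmu_cache()
 * tests and clears it when the page is mapped into user space, roughly:
 *
 *	if (mapping && !mapping_mapped(mapping))
 *		set_bit(PG_dcache_dirty, &page->flags);
 *
 *	...
 *
 *	if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
 *		__flush_dcache_page(mapping, page);
 */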
/*
 *	MM Cache Management
 *	===================
 *
 *	The arch/arm/mm/cache-*.S and arch/arm/mm/proc-*.S files
 *	implement these methods.
 *
 *	Start addresses are inclusive and end addresses are exclusive;
 *	start addresses should be rounded down, end addresses up.
 *
 *	See Documentation/cachetlb.txt for more information.
 *	Please note that the implementation of these, and the required
 *	effects, are cache-type (VIVT/VIPT/PIPT) specific.
 *
 *	flush_cache_kern_all()
 *
 *		Unconditionally clean and invalidate the entire cache.
 *
 *	flush_cache_user_mm(mm)
 *
 *		Clean and invalidate all user space cache entries
 *		before a change of page tables.
 *
 *	flush_cache_user_range(start, end, flags)
 *
 *		Clean and invalidate a range of cache entries in the
 *		specified address space before a change of page tables.
 *		- start - user start address (inclusive, page aligned)
 *		- end   - user end address   (exclusive, page aligned)
 *		- flags - vma->vm_flags field
 *
 *	coherent_kern_range(start, end)
 *
 *		Ensure coherency between the Icache and the Dcache in the
 *		region described by start, end.  If you have non-snooping
 *		Harvard caches, you need to implement this function.
 *		- start  - virtual start address
 *		- end    - virtual end address
 *
 *	DMA Cache Coherency
 *	===================
 *
 *	dma_inv_range(start, end)
 *
 *		Invalidate (discard) the specified virtual address range.
 *		May not write back any entries.  If 'start' or 'end'
 *		are not cache line aligned, those lines must be written
 *		back.
 *		- start  - virtual start address
 *		- end    - virtual end address
 *
 *	dma_clean_range(start, end)
 *
 *		Clean (write back) the specified virtual address range.
 *		- start  - virtual start address
 *		- end    - virtual end address
 *
 *	dma_flush_range(start, end)
 *
 *		Clean and invalidate the specified virtual address range.
 *		- start  - virtual start address
 *		- end    - virtual end address
 */
struct cpu_cache_fns {
	void (*flush_kern_all)(void);
	void (*flush_user_all)(void);
	void (*flush_user_range)(unsigned long, unsigned long, unsigned int);

	void (*coherent_kern_range)(unsigned long, unsigned long);
	void (*coherent_user_range)(unsigned long, unsigned long);
	void (*flush_kern_dcache_page)(void *);

	void (*dma_inv_range)(const void *, const void *);
	void (*dma_clean_range)(const void *, const void *);
	void (*dma_flush_range)(const void *, const void *);
};
struct outer_cache_fns {
	void (*inv_range)(unsigned long, unsigned long);
	void (*clean_range)(unsigned long, unsigned long);
	void (*flush_range)(unsigned long, unsigned long);
};
/*
 * Select the calling method
 */
#ifdef MULTI_CACHE

extern struct cpu_cache_fns cpu_cache;

#define __cpuc_flush_kern_all		cpu_cache.flush_kern_all
#define __cpuc_flush_user_all		cpu_cache.flush_user_all
#define __cpuc_flush_user_range		cpu_cache.flush_user_range
#define __cpuc_coherent_kern_range	cpu_cache.coherent_kern_range
#define __cpuc_coherent_user_range	cpu_cache.coherent_user_range
#define __cpuc_flush_dcache_page	cpu_cache.flush_kern_dcache_page

/*
 * These are private to the dma-mapping API.  Do not use directly.
 * Their sole purpose is to ensure that data held in the cache
 * is visible to DMA, or data written by DMA to system memory is
 * visible to the CPU.
 */
#define dmac_inv_range			cpu_cache.dma_inv_range
#define dmac_clean_range		cpu_cache.dma_clean_range
#define dmac_flush_range		cpu_cache.dma_flush_range

#else

#define __cpuc_flush_kern_all		__glue(_CACHE,_flush_kern_cache_all)
#define __cpuc_flush_user_all		__glue(_CACHE,_flush_user_cache_all)
#define __cpuc_flush_user_range		__glue(_CACHE,_flush_user_cache_range)
#define __cpuc_coherent_kern_range	__glue(_CACHE,_coherent_kern_range)
#define __cpuc_coherent_user_range	__glue(_CACHE,_coherent_user_range)
#define __cpuc_flush_dcache_page	__glue(_CACHE,_flush_kern_dcache_page)

extern void __cpuc_flush_kern_all(void);
extern void __cpuc_flush_user_all(void);
extern void __cpuc_flush_user_range(unsigned long, unsigned long, unsigned int);
extern void __cpuc_coherent_kern_range(unsigned long, unsigned long);
extern void __cpuc_coherent_user_range(unsigned long, unsigned long);
extern void __cpuc_flush_dcache_page(void *);

/*
 * These are private to the dma-mapping API.  Do not use directly.
 * Their sole purpose is to ensure that data held in the cache
 * is visible to DMA, or data written by DMA to system memory is
 * visible to the CPU.
 */
#define dmac_inv_range			__glue(_CACHE,_dma_inv_range)
#define dmac_clean_range		__glue(_CACHE,_dma_clean_range)
#define dmac_flush_range		__glue(_CACHE,_dma_flush_range)

extern void dmac_inv_range(const void *, const void *);
extern void dmac_clean_range(const void *, const void *);
extern void dmac_flush_range(const void *, const void *);

#endif
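
/*
 * Example of the single-cache case above: with _CACHE defined to v4wb,
 * __glue() token-pastes the two arguments, so a call such as
 *
 *	__cpuc_flush_kern_all();
 *
 * compiles to a direct call to
 *
 *	v4wb_flush_kern_cache_all();
 *
 * implemented in arch/arm/mm/cache-v4wb.S.  With MULTI_CACHE, the same
 * name instead dispatches through the cpu_cache function table.
 */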
#ifdef CONFIG_OUTER_CACHE

extern struct outer_cache_fns outer_cache;

static inline void outer_inv_range(unsigned long start, unsigned long end)
{
	if (outer_cache.inv_range)
		outer_cache.inv_range(start, end);
}
static inline void outer_clean_range(unsigned long start, unsigned long end)
{
	if (outer_cache.clean_range)
		outer_cache.clean_range(start, end);
}
static inline void outer_flush_range(unsigned long start, unsigned long end)
{
	if (outer_cache.flush_range)
		outer_cache.flush_range(start, end);
}

#else

static inline void outer_inv_range(unsigned long start, unsigned long end)
{ }
static inline void outer_clean_range(unsigned long start, unsigned long end)
{ }
static inline void outer_flush_range(unsigned long start, unsigned long end)
{ }

#endif
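
/*
 * Illustrative sketch (hypothetical driver, not part of this header):
 * an outer (e.g. L2) cache driver fills in outer_cache at boot, after
 * which the outer_*_range() helpers above dispatch through it:
 *
 *	static void my_l2_inv_range(unsigned long start, unsigned long end);
 *	static void my_l2_clean_range(unsigned long start, unsigned long end);
 *	static void my_l2_flush_range(unsigned long start, unsigned long end);
 *
 *	static int __init my_l2_init(void)
 *	{
 *		outer_cache.inv_range	= my_l2_inv_range;
 *		outer_cache.clean_range	= my_l2_clean_range;
 *		outer_cache.flush_range	= my_l2_flush_range;
 *		return 0;
 *	}
 */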
/*
 * Copy user data from/to a page which is mapped into a different
 * process's address space.  Really, we want to allow our "user
 * space" model to handle this.
 */
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
	do {							\
		memcpy(dst, src, len);				\
		flush_ptrace_access(vma, page, vaddr, dst, len, 1);\
	} while (0)

#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
	do {							\
		memcpy(dst, src, len);				\
	} while (0)
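
/*
 * Illustrative sketch (assumed caller, not part of this header): the
 * generic ptrace/access_process_vm() path uses copy_to_user_page() when
 * poking another task's text, e.g. to plant a breakpoint, roughly:
 *
 *	maddr = kmap(page);
 *	copy_to_user_page(vma, page, addr, maddr + offset, buf, bytes);
 *	kunmap(page);
 *
 * The flush_ptrace_access() call inside copy_to_user_page() makes the
 * new instructions visible to the target's instruction stream.
 */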
/*
 * Convert calls to our calling convention.
 */
#define flush_cache_all()		__cpuc_flush_kern_all()

#ifndef CONFIG_CPU_CACHE_VIPT
static inline void flush_cache_mm(struct mm_struct *mm)
{
	if (cpu_isset(smp_processor_id(), mm->cpu_vm_mask))
		__cpuc_flush_user_all();
}

static inline void
flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask))
		__cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
					vma->vm_flags);
}

static inline void
flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
{
	if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) {
		unsigned long addr = user_addr & PAGE_MASK;
		__cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
	}
}

static inline void
flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
		    unsigned long uaddr, void *kaddr,
		    unsigned long len, int write)
{
	if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) {
		unsigned long addr = (unsigned long)kaddr;
		__cpuc_coherent_kern_range(addr, addr + len);
	}
}
#else
extern void flush_cache_mm(struct mm_struct *mm);
extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn);
extern void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
				unsigned long uaddr, void *kaddr,
				unsigned long len, int write);
#endif
#define flush_cache_dup_mm(mm) flush_cache_mm(mm)

/*
 * flush_cache_user_range is used when we want to ensure that the
 * Harvard caches are synchronised for the user space address range.
 * This is used for the ARM private sys_cacheflush system call.
 */
#define flush_cache_user_range(vma,start,end) \
	__cpuc_coherent_user_range((start) & PAGE_MASK, PAGE_ALIGN(end))
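
/*
 * Illustrative sketch (user-space side, not part of this header): a JIT
 * that has written instructions into a buffer invokes the ARM private
 * cacheflush system call, which lands here, roughly:
 *
 *	syscall(__ARM_NR_cacheflush, (long)buf, (long)buf + len, 0);
 */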
/*
 * Perform necessary cache operations to ensure that data previously
 * stored within this range of addresses can be executed by the CPU.
 */
#define flush_icache_range(s,e)		__cpuc_coherent_kern_range(s,e)
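
/*
 * Illustrative sketch (not part of this header): kernel code that
 * copies or generates instructions must make them visible to the
 * I-cache before executing them:
 *
 *	memcpy(dst, insns, len);
 *	flush_icache_range((unsigned long)dst, (unsigned long)dst + len);
 */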
/*
 * Perform necessary cache operations to ensure that the TLB will
 * see data written in the specified area.
 */
#define clean_dcache_area(start,size)	cpu_dcache_clean_area(start, size)
/*
 * flush_dcache_page is used when the kernel has written to the page
 * cache page at virtual address page->virtual.
 *
 * If this page isn't mapped (ie, page_mapping == NULL), or it might
 * have userspace mappings, then we _must_ always clean + invalidate
 * the dcache entries associated with the kernel mapping.
 *
 * Otherwise we can defer the operation, and clean the cache when we are
 * about to change to user space.  This is the same method as used on SPARC64.
 * See update_mmu_cache for the user space part.
 */
extern void flush_dcache_page(struct page *);

extern void __flush_dcache_page(struct address_space *mapping, struct page *page);
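
/*
 * Illustrative sketch (not part of this header): a driver or filesystem
 * writing to a page cache page through the kernel mapping calls
 * flush_dcache_page() afterwards, as described above:
 *
 *	void *kaddr = kmap(page);
 *	memcpy(kaddr, data, len);
 *	kunmap(page);
 *	flush_dcache_page(page);
 */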
static inline void __flush_icache_all(void)
{
	asm("mcr	p15, 0, %0, c7, c5, 0	@ invalidate I-cache\n"
	    :
	    : "r" (0));
}
#define ARCH_HAS_FLUSH_ANON_PAGE
static inline void flush_anon_page(struct vm_area_struct *vma,
			 struct page *page, unsigned long vmaddr)
{
	extern void __flush_anon_page(struct vm_area_struct *vma,
				struct page *, unsigned long);
	if (PageAnon(page))
		__flush_anon_page(vma, page, vmaddr);
}
#define flush_dcache_mmap_lock(mapping) \
	spin_lock_irq(&(mapping)->tree_lock)
#define flush_dcache_mmap_unlock(mapping) \
	spin_unlock_irq(&(mapping)->tree_lock)

#define flush_icache_user_range(vma,page,addr,len) \
	flush_dcache_page(page)
/*
 * We don't appear to need to do anything here.  In fact, if we did, we'd
 * duplicate cache flushing elsewhere performed by flush_dcache_page().
 */
#define flush_icache_page(vma,page)	do { } while (0)

static inline void flush_ioremap_region(unsigned long phys, void __iomem *virt,
	unsigned offset, size_t size)
{
	const void *start = (void __force *)virt + offset;
	dmac_inv_range(start, start + size);
}
/*
 * flush_cache_vmap() is used when creating mappings (eg, via vmap,
 * vmalloc, ioremap etc) in kernel space for pages.  On non-VIPT
 * caches, since the direct-mappings of these pages may contain cached
 * data, we need to do a full cache flush to ensure that writebacks
 * don't corrupt data placed into these pages via the new mappings.
 */
static inline void flush_cache_vmap(unsigned long start, unsigned long end)
{
	if (!cache_is_vipt_nonaliasing())
		flush_cache_all();
	else
		/*
		 * set_pte_at() called from vmap_pte_range() does not
		 * have a DSB after cleaning the cache line.
		 */
		dsb();
}

static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
{
	if (!cache_is_vipt_nonaliasing())
		flush_cache_all();
}
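
/*
 * Illustrative sketch (assumed usage, not part of this header) of the
 * aliasing hazard flush_cache_vmap() guards against: the same physical
 * page becomes reachable through two virtual addresses, so on aliasing
 * caches a dirty line under the direct mapping could later be written
 * back over data accessed via the new alias:
 *
 *	page = alloc_page(GFP_KERNEL);
 *	memset(page_address(page), 0, PAGE_SIZE);	// direct mapping
 *	virt = vmap(&page, 1, VM_MAP, PAGE_KERNEL);	// new alias
 */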
#endif