/*
 *  linux/include/asm-arm/cacheflush.h
 *
 *  Copyright (C) 1999-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _ASMARM_CACHEFLUSH_H
#define _ASMARM_CACHEFLUSH_H

#include <linux/sched.h>
#include <linux/mm.h>

#include <asm/glue.h>
#include <asm/shmparam.h>
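
/*
 * CACHE_COLOUR() returns the cache "colour" of a virtual address: its
 * page index within an SHMLBA-sized window.  On ARM, SHMLBA is four
 * pages, so (for illustration, with 4KB pages) addresses 0x8000,
 * 0x9000, 0xa000 and 0xb000 have colours 0, 1, 2 and 3; an aliasing
 * VIPT cache only keeps two mappings of the same page coherent when
 * their colours match.
 */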
#define CACHE_COLOUR(vaddr)	((vaddr & (SHMLBA - 1)) >> PAGE_SHIFT)
/*
 *	Cache Model
 *	===========
 */
#undef _CACHE
#undef MULTI_CACHE

#if defined(CONFIG_CPU_CACHE_V3)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE v3
# endif
#endif

#if defined(CONFIG_CPU_CACHE_V4)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE v4
# endif
#endif

#if defined(CONFIG_CPU_ARM920T) || defined(CONFIG_CPU_ARM922T) || \
    defined(CONFIG_CPU_ARM925T) || defined(CONFIG_CPU_ARM1020)
# define MULTI_CACHE 1
#endif

#if defined(CONFIG_CPU_ARM926T)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE arm926
# endif
#endif

#if defined(CONFIG_CPU_ARM940T)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE arm940
# endif
#endif

#if defined(CONFIG_CPU_ARM946E)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE arm946
# endif
#endif

#if defined(CONFIG_CPU_CACHE_V4WB)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE v4wb
# endif
#endif

#if defined(CONFIG_CPU_XSCALE)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE xscale
# endif
#endif

#if defined(CONFIG_CPU_XSC3)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE xsc3
# endif
#endif

#if defined(CONFIG_CPU_V6)
//# ifdef _CACHE
#  define MULTI_CACHE 1
//# else
//#  define _CACHE v6
//# endif
#endif
#if !defined(_CACHE) && !defined(MULTI_CACHE)
#error Unknown cache maintenance model
#endif
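
/*
 * Net effect of the selection above: if exactly one cache model is
 * configured, _CACHE names it (e.g. v4wb) and the cache functions are
 * called directly; if two or more are configured, MULTI_CACHE is
 * defined and calls are dispatched through the cpu_cache function
 * table declared below.
 */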
/*
 * This flag is used to indicate that the page pointed to by a pte
 * is dirty and requires cleaning before returning it to the user.
 */
#define PG_dcache_dirty PG_arch_1

/*
 *	MM Cache Management
 *	===================
 *
 *	The arch/arm/mm/cache-*.S and arch/arm/mm/proc-*.S files
 *	implement these methods.
 *
 *	Start addresses are inclusive and end addresses are exclusive;
 *	start addresses should be rounded down, end addresses up.
 *
 *	See Documentation/cachetlb.txt for more information.
 *	Please note that the implementation of these, and the required
 *	effects are cache-type (VIVT/VIPT/PIPT) specific.
 *
 *	flush_cache_kern_all()
 *
 *		Unconditionally clean and invalidate the entire cache.
 *
 *	flush_cache_user_mm(mm)
 *
 *		Clean and invalidate all user space cache entries
 *		before a change of page tables.
 *
 *	flush_cache_user_range(start, end, flags)
 *
 *		Clean and invalidate a range of cache entries in the
 *		specified address space before a change of page tables.
 *		- start - user start address (inclusive, page aligned)
 *		- end   - user end address   (exclusive, page aligned)
 *		- flags - vma->vm_flags field
 *
 *	coherent_kern_range(start, end)
 *
 *		Ensure coherency between the Icache and the Dcache in the
 *		region described by start, end.  If you have non-snooping
 *		Harvard caches, you need to implement this function.
 *		- start  - virtual start address
 *		- end    - virtual end address
 *
 *	DMA Cache Coherency
 *	===================
 *
 *	dma_inv_range(start, end)
 *
 *		Invalidate (discard) the specified virtual address range.
 *		May not write back any entries.  If 'start' or 'end'
 *		are not cache line aligned, those lines must be written
 *		back.
 *		- start  - virtual start address
 *		- end    - virtual end address
 *
 *	dma_clean_range(start, end)
 *
 *		Clean (write back) the specified virtual address range.
 *		- start  - virtual start address
 *		- end    - virtual end address
 *
 *	dma_flush_range(start, end)
 *
 *		Clean and invalidate the specified virtual address range.
 *		- start  - virtual start address
 *		- end    - virtual end address
 */
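
/*
 * For illustration only: a minimal sketch of what the dma-mapping layer
 * does with the dma_clean_range()/dma_inv_range() hooks documented
 * above (the function and its direction handling are hypothetical and
 * simplified):
 *
 *	void example_map_for_dma(void *buf, size_t len, int to_device)
 *	{
 *		unsigned long start = (unsigned long)buf;
 *
 *		if (to_device)
 *			// CPU -> device: write back dirty lines first
 *			dmac_clean_range(start, start + len);
 *		else
 *			// device -> CPU: discard stale lines first
 *			dmac_inv_range(start, start + len);
 *	}
 */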
struct cpu_cache_fns {
	void (*flush_kern_all)(void);
	void (*flush_user_all)(void);
	void (*flush_user_range)(unsigned long, unsigned long, unsigned int);

	void (*coherent_kern_range)(unsigned long, unsigned long);
	void (*coherent_user_range)(unsigned long, unsigned long);
	void (*flush_kern_dcache_page)(void *);

	void (*dma_inv_range)(unsigned long, unsigned long);
	void (*dma_clean_range)(unsigned long, unsigned long);
	void (*dma_flush_range)(unsigned long, unsigned long);
};
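
/*
 * Illustrative only: under MULTI_CACHE the kernel fills in cpu_cache at
 * boot from the matched processor's table, so conceptually it ends up
 * holding something like (entry names taken from cache-v4wb.S):
 *
 *	cpu_cache = (struct cpu_cache_fns) {
 *		.flush_kern_all		= v4wb_flush_kern_cache_all,
 *		.flush_user_all		= v4wb_flush_user_cache_all,
 *		.flush_user_range	= v4wb_flush_user_cache_range,
 *		.coherent_kern_range	= v4wb_coherent_kern_range,
 *		.coherent_user_range	= v4wb_coherent_user_range,
 *		.flush_kern_dcache_page	= v4wb_flush_kern_dcache_page,
 *		.dma_inv_range		= v4wb_dma_inv_range,
 *		.dma_clean_range	= v4wb_dma_clean_range,
 *		.dma_flush_range	= v4wb_dma_flush_range,
 *	};
 */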
/*
 * Select the calling method
 */
#ifdef MULTI_CACHE

extern struct cpu_cache_fns cpu_cache;

#define __cpuc_flush_kern_all		cpu_cache.flush_kern_all
#define __cpuc_flush_user_all		cpu_cache.flush_user_all
#define __cpuc_flush_user_range		cpu_cache.flush_user_range
#define __cpuc_coherent_kern_range	cpu_cache.coherent_kern_range
#define __cpuc_coherent_user_range	cpu_cache.coherent_user_range
#define __cpuc_flush_dcache_page	cpu_cache.flush_kern_dcache_page

/*
 * These are private to the dma-mapping API.  Do not use directly.
 * Their sole purpose is to ensure that data held in the cache
 * is visible to DMA, or data written by DMA to system memory is
 * visible to the CPU.
 */
#define dmac_inv_range			cpu_cache.dma_inv_range
#define dmac_clean_range		cpu_cache.dma_clean_range
#define dmac_flush_range		cpu_cache.dma_flush_range

#else

#define __cpuc_flush_kern_all		__glue(_CACHE,_flush_kern_cache_all)
#define __cpuc_flush_user_all		__glue(_CACHE,_flush_user_cache_all)
#define __cpuc_flush_user_range		__glue(_CACHE,_flush_user_cache_range)
#define __cpuc_coherent_kern_range	__glue(_CACHE,_coherent_kern_range)
#define __cpuc_coherent_user_range	__glue(_CACHE,_coherent_user_range)
#define __cpuc_flush_dcache_page	__glue(_CACHE,_flush_kern_dcache_page)

extern void __cpuc_flush_kern_all(void);
extern void __cpuc_flush_user_all(void);
extern void __cpuc_flush_user_range(unsigned long, unsigned long, unsigned int);
extern void __cpuc_coherent_kern_range(unsigned long, unsigned long);
extern void __cpuc_coherent_user_range(unsigned long, unsigned long);
extern void __cpuc_flush_dcache_page(void *);

/*
 * These are private to the dma-mapping API.  Do not use directly.
 * Their sole purpose is to ensure that data held in the cache
 * is visible to DMA, or data written by DMA to system memory is
 * visible to the CPU.
 */
#define dmac_inv_range			__glue(_CACHE,_dma_inv_range)
#define dmac_clean_range		__glue(_CACHE,_dma_clean_range)
#define dmac_flush_range		__glue(_CACHE,_dma_flush_range)

extern void dmac_inv_range(unsigned long, unsigned long);
extern void dmac_clean_range(unsigned long, unsigned long);
extern void dmac_flush_range(unsigned long, unsigned long);

#endif
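
/*
 * __glue(a,b) (from asm/glue.h) simply pastes its arguments together,
 * so in a single-cache build with _CACHE defined as, say, v4wb:
 *
 *	__cpuc_flush_kern_all()		becomes	v4wb_flush_kern_cache_all()
 *	dmac_inv_range(start, end)	becomes	v4wb_dma_inv_range(start, end)
 *
 * i.e. direct calls into arch/arm/mm/cache-v4wb.S with no indirection.
 */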
/*
 * flush_cache_vmap() is used when creating mappings (eg, via vmap,
 * vmalloc, ioremap etc) in kernel space for pages.  Since the
 * direct-mappings of these pages may contain cached data, we need
 * to do a full cache flush to ensure that writebacks don't corrupt
 * data placed into these pages via the new mappings.
 */
#define flush_cache_vmap(start, end)	flush_cache_all()
#define flush_cache_vunmap(start, end)	flush_cache_all()
/*
 * Copy user data from/to a page which is mapped into a different
 * process's address space.  Really, we want to allow our "user
 * space" model to handle this.
 */
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
	do {							\
		memcpy(dst, src, len);				\
		flush_ptrace_access(vma, page, vaddr, dst, len, 1);\
	} while (0)

#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
	do {							\
		memcpy(dst, src, len);				\
	} while (0)
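
/*
 * Sketch of a typical caller (simplified from the generic ptrace code;
 * 'offset' and 'bytes' are illustrative names): access_process_vm()
 * kmaps the target page, then relies on copy_to_user_page() to both
 * copy the data and make it visible at the user-space alias:
 *
 *	maddr = kmap(page);
 *	copy_to_user_page(vma, page, addr, maddr + offset, buf, bytes);
 *	set_page_dirty_lock(page);
 *	kunmap(page);
 */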
/*
 * Convert calls to our calling convention.
 */
#define flush_cache_all()		__cpuc_flush_kern_all()

#ifndef CONFIG_CPU_CACHE_VIPT
static inline void flush_cache_mm(struct mm_struct *mm)
{
	if (cpu_isset(smp_processor_id(), mm->cpu_vm_mask))
		__cpuc_flush_user_all();
}

static inline void
flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask))
		__cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
					vma->vm_flags);
}

static inline void
flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
{
	if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) {
		unsigned long addr = user_addr & PAGE_MASK;
		__cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
	}
}

static inline void
flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
		    unsigned long uaddr, void *kaddr,
		    unsigned long len, int write)
{
	if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) {
		unsigned long addr = (unsigned long)kaddr;
		__cpuc_coherent_kern_range(addr, addr + len);
	}
}
#else
extern void flush_cache_mm(struct mm_struct *mm);
extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn);
extern void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
				unsigned long uaddr, void *kaddr,
				unsigned long len, int write);
#endif
/*
 * flush_cache_user_range is used when we want to ensure that the
 * Harvard caches are synchronised for the user space address range.
 * This is used for the ARM private sys_cacheflush system call.
 */
#define flush_cache_user_range(vma,start,end) \
	__cpuc_coherent_user_range((start) & PAGE_MASK, PAGE_ALIGN(end))
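
/*
 * User space reaches this via the ARM-private cacheflush syscall, e.g.
 * a JIT making freshly written code executable (illustrative sketch,
 * error handling omitted):
 *
 *	#include <unistd.h>
 *	#include <asm/unistd.h>
 *
 *	syscall(__ARM_NR_cacheflush, (long)start, (long)end, 0);
 */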
/*
 * Perform necessary cache operations to ensure that data previously
 * stored within this range of addresses can be executed by the CPU.
 */
#define flush_icache_range(s,e)		__cpuc_coherent_kern_range(s,e)

/*
 * Perform necessary cache operations to ensure that the TLB will
 * see data written in the specified area.
 */
#define clean_dcache_area(start,size)	cpu_dcache_clean_area(start, size)

/*
 * flush_dcache_page is used when the kernel has written to the page
 * cache page at virtual address page->virtual.
 *
 * If this page isn't mapped (ie, page_mapping == NULL), or it might
 * have userspace mappings, then we _must_ always clean + invalidate
 * the dcache entries associated with the kernel mapping.
 *
 * Otherwise we can defer the operation, and clean the cache when we are
 * about to change to user space.  This is the same method as used on SPARC64.
 * See update_mmu_cache for the user space part.
 */
extern void flush_dcache_page(struct page *);
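
/*
 * A simplified sketch of the deferral described above; the real logic
 * lives in arch/arm/mm/flush.c and also accounts for aliasing VIPT
 * caches:
 *
 *	void flush_dcache_page(struct page *page)
 *	{
 *		struct address_space *mapping = page_mapping(page);
 *
 *		if (mapping && !mapping_mapped(mapping))
 *			// defer: update_mmu_cache cleans before userspace sees it
 *			set_bit(PG_dcache_dirty, &page->flags);
 *		else
 *			// flush the kernel alias now
 *			__flush_dcache_page(mapping, page);
 *	}
 */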
#define flush_dcache_mmap_lock(mapping) \
	write_lock_irq(&(mapping)->tree_lock)
#define flush_dcache_mmap_unlock(mapping) \
	write_unlock_irq(&(mapping)->tree_lock)

#define flush_icache_user_range(vma,page,addr,len) \
	flush_dcache_page(page)

/*
 * We don't appear to need to do anything here.  In fact, if we did, we'd
 * duplicate cache flushing elsewhere performed by flush_dcache_page().
 */
#define flush_icache_page(vma,page)	do { } while (0)
#define __cacheid_present(val)		(val != read_cpuid(CPUID_ID))
#define __cacheid_vivt(val)		((val & (15 << 25)) != (14 << 25))
#define __cacheid_vipt(val)		((val & (15 << 25)) == (14 << 25))
#define __cacheid_vipt_nonaliasing(val)	((val & (15 << 25 | 1 << 23)) == (14 << 25))
#define __cacheid_vipt_aliasing(val)	((val & (15 << 25 | 1 << 23)) == (14 << 25 | 1 << 23))
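
/*
 * These decode the CP15 cache type register: bits 28..25 hold the cache
 * class, which the kernel treats as VIPT when it reads 14 (0b1110) and
 * VIVT otherwise, while bit 23 is understood to be the D-cache 'P' bit,
 * set when page-colouring (aliasing) restrictions apply.  So a value
 * whose class field is 14 with bit 23 clear decodes as VIPT
 * non-aliasing.  __cacheid_present() works because a CPU without a
 * cache type register returns the main ID register value for this read.
 */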
#if defined(CONFIG_CPU_CACHE_VIVT) && !defined(CONFIG_CPU_CACHE_VIPT)

#define cache_is_vivt()			1
#define cache_is_vipt()			0
#define cache_is_vipt_nonaliasing()	0
#define cache_is_vipt_aliasing()	0

#elif defined(CONFIG_CPU_CACHE_VIPT)

#define cache_is_vivt()			0
#define cache_is_vipt()			1
#define cache_is_vipt_nonaliasing()				\
	({							\
		unsigned int __val = read_cpuid(CPUID_CACHETYPE); \
		__cacheid_vipt_nonaliasing(__val);		\
	})

#define cache_is_vipt_aliasing()				\
	({							\
		unsigned int __val = read_cpuid(CPUID_CACHETYPE); \
		__cacheid_vipt_aliasing(__val);			\
	})

#else

#define cache_is_vivt()						\
	({							\
		unsigned int __val = read_cpuid(CPUID_CACHETYPE); \
		(!__cacheid_present(__val)) || __cacheid_vivt(__val); \
	})

#define cache_is_vipt()						\
	({							\
		unsigned int __val = read_cpuid(CPUID_CACHETYPE); \
		__cacheid_present(__val) && __cacheid_vipt(__val); \
	})

#define cache_is_vipt_nonaliasing()				\
	({							\
		unsigned int __val = read_cpuid(CPUID_CACHETYPE); \
		__cacheid_present(__val) &&			\
		__cacheid_vipt_nonaliasing(__val);		\
	})

#define cache_is_vipt_aliasing()				\
	({							\
		unsigned int __val = read_cpuid(CPUID_CACHETYPE); \
		__cacheid_present(__val) &&			\
		__cacheid_vipt_aliasing(__val);			\
	})

#endif

#endif