/*
 *  arch/arm/include/asm/cacheflush.h
 *
 *  Copyright (C) 1999-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifndef _ASMARM_CACHEFLUSH_H
#define _ASMARM_CACHEFLUSH_H

#include <linux/mm.h>

#include <asm/glue.h>
#include <asm/shmparam.h>
#include <asm/cachetype.h>

#define CACHE_COLOUR(vaddr)	(((vaddr) & (SHMLBA - 1)) >> PAGE_SHIFT)
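
/*
 * For illustration (assuming ARM's usual SHMLBA of 4 * PAGE_SIZE from
 * <asm/shmparam.h>, and 4 KB pages): CACHE_COLOUR() extracts bits 13:12
 * of the virtual address, yielding one of four colours, 0..3.  Two
 * virtual aliases of the same physical page can only hit the same lines
 * of an aliasing VIPT cache when their colours match.
 */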

/*
 *	Cache Model
 *	===========
 */
#undef _CACHE
#undef MULTI_CACHE

#if defined(CONFIG_CPU_CACHE_V3)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE v3
# endif
#endif

#if defined(CONFIG_CPU_CACHE_V4)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE v4
# endif
#endif

#if defined(CONFIG_CPU_ARM920T) || defined(CONFIG_CPU_ARM922T) || \
    defined(CONFIG_CPU_ARM925T) || defined(CONFIG_CPU_ARM1020) || \
    defined(CONFIG_CPU_ARM1026)
# define MULTI_CACHE 1
#endif

#if defined(CONFIG_CPU_FA526)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE fa
# endif
#endif

#if defined(CONFIG_CPU_ARM926T)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE arm926
# endif
#endif

#if defined(CONFIG_CPU_ARM940T)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE arm940
# endif
#endif

#if defined(CONFIG_CPU_ARM946E)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE arm946
# endif
#endif

#if defined(CONFIG_CPU_CACHE_V4WB)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE v4wb
# endif
#endif

#if defined(CONFIG_CPU_XSCALE)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE xscale
# endif
#endif

#if defined(CONFIG_CPU_XSC3)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE xsc3
# endif
#endif

#if defined(CONFIG_CPU_MOHAWK)
# ifdef _CACHE
#  define MULTI_CACHE 1
# else
#  define _CACHE mohawk
# endif
#endif

#if defined(CONFIG_CPU_FEROCEON)
# define MULTI_CACHE 1
#endif

#if defined(CONFIG_CPU_V6)
//# ifdef _CACHE
# define MULTI_CACHE 1
//# else
//#  define _CACHE v6
//# endif
#endif

#if defined(CONFIG_CPU_V7)
//# ifdef _CACHE
# define MULTI_CACHE 1
//# else
//#  define _CACHE v7
//# endif
#endif

#if !defined(_CACHE) && !defined(MULTI_CACHE)
#error Unknown cache maintenance model
#endif

/*
 * This flag is used to indicate that the page pointed to by a pte
 * is dirty and requires cleaning before returning it to the user.
 */
#define PG_dcache_dirty PG_arch_1
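
/*
 * Lifecycle sketch (see the flush_dcache_page() comment further down):
 * flush_dcache_page() sets PG_dcache_dirty when it defers cleaning a
 * page that has no user space mapping yet; update_mmu_cache() tests and
 * clears the bit once a user mapping is established, performing the
 * deferred clean at that point.
 */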

/*
 *	MM Cache Management
 *	===================
 *
 *	The arch/arm/mm/cache-*.S and arch/arm/mm/proc-*.S files
 *	implement these methods.
 *
 *	Start addresses are inclusive and end addresses are exclusive;
 *	start addresses should be rounded down, end addresses up.
 *
 *	See Documentation/cachetlb.txt for more information.
 *	Please note that the implementation of these, and the required
 *	effects are cache-type (VIVT/VIPT/PIPT) specific.
 *
 *	flush_kern_all()
 *
 *		Unconditionally clean and invalidate the entire cache.
 *
 *	flush_user_all()
 *
 *		Clean and invalidate all user space cache entries
 *		before a change of page tables.
 *
 *	flush_user_range(start, end, flags)
 *
 *		Clean and invalidate a range of cache entries in the
 *		specified address space before a change of page tables.
 *		- start - user start address (inclusive, page aligned)
 *		- end   - user end address   (exclusive, page aligned)
 *		- flags - vma->vm_flags field
 *
 *	coherent_kern_range(start, end)
 *
 *		Ensure coherency between the Icache and the Dcache in the
 *		region described by start, end.  If you have non-snooping
 *		Harvard caches, you need to implement this function.
 *		- start - virtual start address
 *		- end   - virtual end address
 *
 *	coherent_user_range(start, end)
 *
 *		Ensure coherency between the Icache and the Dcache in the
 *		region described by start, end.  If you have non-snooping
 *		Harvard caches, you need to implement this function.
 *		- start - virtual start address
 *		- end   - virtual end address
 *
 *	flush_kern_dcache_area(kaddr, size)
 *
 *		Ensure that the data held in the page is written back.
 *		- kaddr - page address
 *		- size  - region size
 *
 *	DMA Cache Coherency
 *	===================
 *
 *	dma_inv_range(start, end)
 *
 *		Invalidate (discard) the specified virtual address range.
 *		May not write back any entries.  If 'start' or 'end'
 *		are not cache line aligned, those lines must be written
 *		back.
 *		- start - virtual start address
 *		- end   - virtual end address
 *
 *	dma_clean_range(start, end)
 *
 *		Clean (write back) the specified virtual address range.
 *		- start - virtual start address
 *		- end   - virtual end address
 *
 *	dma_flush_range(start, end)
 *
 *		Clean and invalidate the specified virtual address range.
 *		- start - virtual start address
 *		- end   - virtual end address
 */

struct cpu_cache_fns {
	void (*flush_kern_all)(void);
	void (*flush_user_all)(void);
	void (*flush_user_range)(unsigned long, unsigned long, unsigned int);
	void (*coherent_kern_range)(unsigned long, unsigned long);
	void (*coherent_user_range)(unsigned long, unsigned long);
	void (*flush_kern_dcache_area)(void *, size_t);
	void (*dma_inv_range)(const void *, const void *);
	void (*dma_clean_range)(const void *, const void *);
	void (*dma_flush_range)(const void *, const void *);
};
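
/*
 * On a MULTI_CACHE build the implementation cannot be picked at compile
 * time, so the boot code copies the matched CPU's function table into
 * cpu_cache.  A minimal sketch of what setup_processor() in
 * arch/arm/kernel/setup.c does with the matched proc_info_list entry
 * "list":
 *
 *	cpu_cache = *list->cache;
 *
 * after which every __cpuc_* macro below becomes an indirect call
 * through this structure.
 */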

struct outer_cache_fns {
	void (*inv_range)(unsigned long, unsigned long);
	void (*clean_range)(unsigned long, unsigned long);
	void (*flush_range)(unsigned long, unsigned long);
};
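
/*
 * Outer (L2) cache drivers populate this at init time; for illustration,
 * the L2x0 driver in arch/arm/mm/cache-l2x0.c registers roughly:
 *
 *	outer_cache.inv_range = l2x0_inv_range;
 *	outer_cache.clean_range = l2x0_clean_range;
 *	outer_cache.flush_range = l2x0_flush_range;
 *
 * Unlike the inner-cache hooks, these operate on physical addresses.
 */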

/*
 * Select the calling method
 */
#ifdef MULTI_CACHE

extern struct cpu_cache_fns cpu_cache;

#define __cpuc_flush_kern_all		cpu_cache.flush_kern_all
#define __cpuc_flush_user_all		cpu_cache.flush_user_all
#define __cpuc_flush_user_range		cpu_cache.flush_user_range
#define __cpuc_coherent_kern_range	cpu_cache.coherent_kern_range
#define __cpuc_coherent_user_range	cpu_cache.coherent_user_range
#define __cpuc_flush_dcache_area	cpu_cache.flush_kern_dcache_area

/*
 * These are private to the dma-mapping API.  Do not use directly.
 * Their sole purpose is to ensure that data held in the cache
 * is visible to DMA, or data written by DMA to system memory is
 * visible to the CPU.
 */
#define dmac_inv_range			cpu_cache.dma_inv_range
#define dmac_clean_range		cpu_cache.dma_clean_range
#define dmac_flush_range		cpu_cache.dma_flush_range

#else

#define __cpuc_flush_kern_all		__glue(_CACHE,_flush_kern_cache_all)
#define __cpuc_flush_user_all		__glue(_CACHE,_flush_user_cache_all)
#define __cpuc_flush_user_range		__glue(_CACHE,_flush_user_cache_range)
#define __cpuc_coherent_kern_range	__glue(_CACHE,_coherent_kern_range)
#define __cpuc_coherent_user_range	__glue(_CACHE,_coherent_user_range)
#define __cpuc_flush_dcache_area	__glue(_CACHE,_flush_kern_dcache_area)

extern void __cpuc_flush_kern_all(void);
extern void __cpuc_flush_user_all(void);
extern void __cpuc_flush_user_range(unsigned long, unsigned long, unsigned int);
extern void __cpuc_coherent_kern_range(unsigned long, unsigned long);
extern void __cpuc_coherent_user_range(unsigned long, unsigned long);
extern void __cpuc_flush_dcache_area(void *, size_t);

/*
 * These are private to the dma-mapping API.  Do not use directly.
 * Their sole purpose is to ensure that data held in the cache
 * is visible to DMA, or data written by DMA to system memory is
 * visible to the CPU.
 */
#define dmac_inv_range			__glue(_CACHE,_dma_inv_range)
#define dmac_clean_range		__glue(_CACHE,_dma_clean_range)
#define dmac_flush_range		__glue(_CACHE,_dma_flush_range)

extern void dmac_inv_range(const void *, const void *);
extern void dmac_clean_range(const void *, const void *);
extern void dmac_flush_range(const void *, const void *);

#endif
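
/*
 * For illustration: on a single-cache build with _CACHE defined as
 * arm926, __glue() (see <asm/glue.h>) pastes the two tokens together,
 * so the macros above expand to direct calls:
 *
 *	__cpuc_flush_kern_all()  ->  arm926_flush_kern_cache_all()
 *	dmac_clean_range(s, e)   ->  arm926_dma_clean_range(s, e)
 *
 * both implemented in arch/arm/mm/proc-arm926.S.  On a MULTI_CACHE
 * build the same names resolve to indirect calls through cpu_cache
 * instead.
 */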

#ifdef CONFIG_OUTER_CACHE

extern struct outer_cache_fns outer_cache;

static inline void outer_inv_range(unsigned long start, unsigned long end)
{
	if (outer_cache.inv_range)
		outer_cache.inv_range(start, end);
}

static inline void outer_clean_range(unsigned long start, unsigned long end)
{
	if (outer_cache.clean_range)
		outer_cache.clean_range(start, end);
}

static inline void outer_flush_range(unsigned long start, unsigned long end)
{
	if (outer_cache.flush_range)
		outer_cache.flush_range(start, end);
}

#else

static inline void outer_inv_range(unsigned long start, unsigned long end)
{ }
static inline void outer_clean_range(unsigned long start, unsigned long end)
{ }
static inline void outer_flush_range(unsigned long start, unsigned long end)
{ }

#endif
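
/*
 * A minimal sketch (not part of this header; "buf" and "len" are
 * hypothetical) of how the inner and outer hooks combine when handing a
 * lowmem buffer to a device for DMA, in the style of
 * arch/arm/mm/dma-mapping.c.  The inner cache is maintained by virtual
 * address and the outer cache by physical address; the inner level is
 * cleaned first so its data reaches the L2 before the L2 is pushed out
 * to memory:
 *
 *	static void example_clean_for_device(const void *buf, size_t len)
 *	{
 *		dmac_clean_range(buf, buf + len);
 *		outer_clean_range(__pa(buf), __pa(buf + len));
 *	}
 */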

/*
 * Copy user data from/to a page which is mapped into a different
 * process's address space.  Really, we want to allow our "user
 * space" model to handle this.
 */
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
	do {							\
		memcpy(dst, src, len);				\
		flush_ptrace_access(vma, page, vaddr, dst, len, 1);\
	} while (0)

#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
	do {							\
		memcpy(dst, src, len);				\
	} while (0)
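
/*
 * These are called from access_process_vm() in mm/memory.c, e.g. when
 * ptrace writes a breakpoint into another task's text; the
 * flush_ptrace_access() in copy_to_user_page() is what keeps the
 * target's instruction cache coherent with the freshly written data.
 */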

/*
 * Convert calls to our calling convention.
 */
#define flush_cache_all()		__cpuc_flush_kern_all()

static inline void vivt_flush_cache_mm(struct mm_struct *mm)
{
	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
		__cpuc_flush_user_all();
}

static inline void
vivt_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)))
		__cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
					vma->vm_flags);
}

static inline void
vivt_flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
{
	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
		unsigned long addr = user_addr & PAGE_MASK;
		__cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
	}
}

static inline void
vivt_flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
			 unsigned long uaddr, void *kaddr,
			 unsigned long len, int write)
{
	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
		unsigned long addr = (unsigned long)kaddr;
		__cpuc_coherent_kern_range(addr, addr + len);
	}
}

#ifndef CONFIG_CPU_CACHE_VIPT
#define flush_cache_mm(mm) \
		vivt_flush_cache_mm(mm)
#define flush_cache_range(vma,start,end) \
		vivt_flush_cache_range(vma,start,end)
#define flush_cache_page(vma,addr,pfn) \
		vivt_flush_cache_page(vma,addr,pfn)
#define flush_ptrace_access(vma,page,ua,ka,len,write) \
		vivt_flush_ptrace_access(vma,page,ua,ka,len,write)
#else
extern void flush_cache_mm(struct mm_struct *mm);
extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn);
extern void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
				unsigned long uaddr, void *kaddr,
				unsigned long len, int write);
#endif

#define flush_cache_dup_mm(mm) flush_cache_mm(mm)

/*
 * flush_cache_user_range is used when we want to ensure that the
 * Harvard caches are synchronised for the user space address range.
 * This is used for the ARM private sys_cacheflush system call.
 */
#define flush_cache_user_range(vma,start,end) \
	__cpuc_coherent_user_range((start) & PAGE_MASK, PAGE_ALIGN(end))
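
/*
 * Illustrative user space counterpart (hypothetical JIT buffer "code"
 * of "len" bytes): after generating instructions, a process invokes the
 * private ARM syscall so both caches agree before jumping to the code:
 *
 *	syscall(__ARM_NR_cacheflush, code, code + len, 0);
 */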

/*
 * Perform necessary cache operations to ensure that data previously
 * stored within this range of addresses can be executed by the CPU.
 */
#define flush_icache_range(s,e)		__cpuc_coherent_kern_range(s,e)
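
/*
 * Typical callers, for illustration: anything that writes instructions
 * through the kernel mapping, e.g. the module loader after copying code
 * to its final location, or kprobes after patching an instruction,
 * calls flush_icache_range() over the affected bytes before they are
 * executed.
 */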

/*
 * Perform necessary cache operations to ensure that the TLB will
 * see data written in the specified area.
 */
#define clean_dcache_area(start,size)	cpu_dcache_clean_area(start, size)

/*
 * flush_dcache_page is used when the kernel has written to the page
 * cache page at virtual address page->virtual.
 *
 * If this page isn't mapped (ie, page_mapping == NULL), or it might
 * have userspace mappings, then we _must_ always clean + invalidate
 * the dcache entries associated with the kernel mapping.
 *
 * Otherwise we can defer the operation, and clean the cache when we are
 * about to change to user space.  This is the same method as used on
 * SPARC64.  See update_mmu_cache for the user space part.
 */
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page *);

static inline void __flush_icache_all(void)
{
#ifdef CONFIG_ARM_ERRATA_411920
	extern void v6_icache_inval_all(void);
	v6_icache_inval_all();
#else
	asm("mcr	p15, 0, %0, c7, c5, 0	@ invalidate I-cache\n"
	    :
	    : "r" (0));
#endif
}

static inline void flush_kernel_vmap_range(void *addr, int size)
{
	if (cache_is_vivt() || cache_is_vipt_aliasing())
		__cpuc_flush_dcache_area(addr, (size_t)size);
}

static inline void invalidate_kernel_vmap_range(void *addr, int size)
{
	if (cache_is_vivt() || cache_is_vipt_aliasing())
		__cpuc_flush_dcache_area(addr, (size_t)size);
}

#define ARCH_HAS_FLUSH_ANON_PAGE
static inline void flush_anon_page(struct vm_area_struct *vma,
			 struct page *page, unsigned long vmaddr)
{
	extern void __flush_anon_page(struct vm_area_struct *vma,
				struct page *, unsigned long);
	if (PageAnon(page))
		__flush_anon_page(vma, page, vmaddr);
}

#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
static inline void flush_kernel_dcache_page(struct page *page)
{
	/* highmem pages are always flushed upon kunmap already */
	if ((cache_is_vivt() || cache_is_vipt_aliasing()) && !PageHighMem(page))
		__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
}

#define flush_dcache_mmap_lock(mapping) \
	spin_lock_irq(&(mapping)->tree_lock)
#define flush_dcache_mmap_unlock(mapping) \
	spin_unlock_irq(&(mapping)->tree_lock)

#define flush_icache_user_range(vma,page,addr,len) \
	flush_dcache_page(page)

/*
 * We don't appear to need to do anything here.  In fact, if we did, we'd
 * duplicate cache flushing elsewhere performed by flush_dcache_page().
 */
#define flush_icache_page(vma,page)	do { } while (0)

/*
 * flush_cache_vmap() is used when creating mappings (eg, via vmap,
 * vmalloc, ioremap etc) in kernel space for pages.  On non-VIPT
 * caches, since the direct-mappings of these pages may contain cached
 * data, we need to do a full cache flush to ensure that writebacks
 * don't corrupt data placed into these pages via the new mappings.
 */
static inline void flush_cache_vmap(unsigned long start, unsigned long end)
{
	if (!cache_is_vipt_nonaliasing())
		flush_cache_all();
	else
		/*
		 * set_pte_at() called from vmap_pte_range() does not
		 * have a DSB after cleaning the cache line.
		 */
		dsb();
}

static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
{
	if (!cache_is_vipt_nonaliasing())
		flush_cache_all();
}

#endif	/* _ASMARM_CACHEFLUSH_H */