/*
 *  linux/arch/arm/mm/flush.c
 *
 *  Copyright (C) 1995-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/pagemap.h>

#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/highmem.h>
#include <asm/smp_plat.h>
#include <asm/system.h>
#include <asm/tlbflush.h>

#include "mm.h"

#ifdef CONFIG_CPU_CACHE_VIPT

#define ALIAS_FLUSH_START	0xffff4000
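
/*
 * Flush the user-colour alias of a page: map the physical page at a
 * kernel virtual address with the same cache colour as the user
 * address, then clean and invalidate that range with the CP15 range
 * operation and drain the write buffer.  This reaches the aliased
 * cache lines without touching the user mapping itself.
 */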
static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
{
	unsigned long to = ALIAS_FLUSH_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
	const int zero = 0;

	set_pte_ext(TOP_PTE(to), pfn_pte(pfn, PAGE_KERNEL), 0);
	flush_tlb_kernel_page(to);

	asm(	"mcrr	p15, 0, %1, %0, c14\n"
	"	mcr	p15, 0, %2, c7, c10, 4"
	    :
	    : "r" (to), "r" (to + PAGE_SIZE - L1_CACHE_BYTES), "r" (zero)
	    : "cc");
}
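
/*
 * Flush the caches for a whole address space.  VIVT caches defer to
 * the generic VIVT implementation; an aliasing VIPT D-cache can only
 * be handled here by cleaning and invalidating it in its entirety and
 * draining the write buffer, which is what the inline CP15 sequence
 * does.
 */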
void flush_cache_mm(struct mm_struct *mm)
{
	if (cache_is_vivt()) {
		vivt_flush_cache_mm(mm);
		return;
	}

	if (cache_is_vipt_aliasing()) {
		asm(	"mcr	p15, 0, %0, c7, c14, 0\n"
		"	mcr	p15, 0, %0, c7, c10, 4"
		    :
		    : "r" (0)
		    : "cc");
	}
}
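
/*
 * Flush the caches for a range of user addresses in a VMA.  The
 * D-cache handling mirrors flush_cache_mm(); in addition, the I-cache
 * is invalidated when the range is executable.
 */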
void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	if (cache_is_vivt()) {
		vivt_flush_cache_range(vma, start, end);
		return;
	}

	if (cache_is_vipt_aliasing()) {
		asm(	"mcr	p15, 0, %0, c7, c14, 0\n"
		"	mcr	p15, 0, %0, c7, c10, 4"
		    :
		    : "r" (0)
		    : "cc");
	}

	if (vma->vm_flags & VM_EXEC)
		__flush_icache_all();
}
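
/*
 * Flush a single user page.  An aliasing VIPT cache only needs the
 * alias at the user's cache colour flushed; an ASID-tagged VIVT
 * I-cache is invalidated in full for executable mappings.
 */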
void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
{
	if (cache_is_vivt()) {
		vivt_flush_cache_page(vma, user_addr, pfn);
		return;
	}

	if (cache_is_vipt_aliasing()) {
		flush_pfn_alias(pfn, user_addr);
		__flush_icache_all();
	}

	if (vma->vm_flags & VM_EXEC && icache_is_vivt_asid_tagged())
		__flush_icache_all();
}
#else
#define flush_pfn_alias(pfn,vaddr)	do { } while (0)
#endif
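
/*
 * When the kernel writes to another task's pages through its own
 * mapping (e.g. ptrace via copy_to_user_page()), the caches must be
 * brought back into line with what the target task will see.  On VIVT
 * the kernel mapping is flushed directly, on aliasing VIPT the user
 * colour alias is flushed, and on non-aliasing VIPT a coherent range
 * operation is performed for executable regions, broadcast to the
 * other CPUs via flush_ptrace_access_other() when cache operations
 * are not broadcast in hardware.
 */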
static void flush_ptrace_access_other(void *args)
{
	__flush_icache_all();
}

static void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
				unsigned long uaddr, void *kaddr, unsigned long len)
{
	if (cache_is_vivt()) {
		if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
			unsigned long addr = (unsigned long)kaddr;
			__cpuc_coherent_kern_range(addr, addr + len);
		}
		return;
	}

	if (cache_is_vipt_aliasing()) {
		flush_pfn_alias(page_to_pfn(page), uaddr);
		__flush_icache_all();
		return;
	}

	/* VIPT non-aliasing cache */
	if (vma->vm_flags & VM_EXEC) {
		unsigned long addr = (unsigned long)kaddr;
		__cpuc_coherent_kern_range(addr, addr + len);
		if (cache_ops_need_broadcast())
			smp_call_function(flush_ptrace_access_other,
					  NULL, 1);
	}
}

/*
 * Copy user data from/to a page which is mapped into a different
 * process's address space.  Really, we want to allow our "user
 * space" model to handle this.
 *
 * Note that this code needs to run on the current CPU.
 */
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long uaddr, void *dst, const void *src,
		       unsigned long len)
{
#ifdef CONFIG_SMP
	preempt_disable();
#endif
	memcpy(dst, src, len);
	flush_ptrace_access(vma, page, uaddr, dst, len);
#ifdef CONFIG_SMP
	preempt_enable();
#endif
}
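
/*
 * Write back the kernel mapping of a page so the data in the physical
 * page is up to date, and for page cache pages on an aliasing VIPT
 * cache also flush the alias at the page's user-space colour.  Highmem
 * pages are reached through an existing kmap if one is held, otherwise
 * through a temporary L1-VIPT mapping.
 */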
void __flush_dcache_page(struct address_space *mapping, struct page *page)
{
	/*
	 * Writeback any data associated with the kernel mapping of this
	 * page.  This ensures that data in the physical page is mutually
	 * coherent with the kernel's mapping.
	 */
	if (!PageHighMem(page)) {
		__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
	} else {
		void *addr = kmap_high_get(page);
		if (addr) {
			__cpuc_flush_dcache_area(addr, PAGE_SIZE);
			kunmap_high(page);
		} else if (cache_is_vipt()) {
			pte_t saved_pte;
			addr = kmap_high_l1_vipt(page, &saved_pte);
			__cpuc_flush_dcache_area(addr, PAGE_SIZE);
			kunmap_high_l1_vipt(page, saved_pte);
		}
	}

	/*
	 * If this is a page cache page, and we have an aliasing VIPT cache,
	 * we only need to do one flush - which would be at the relevant
	 * userspace colour, which is congruent with page->index.
	 */
	if (mapping && cache_is_vipt_aliasing())
		flush_pfn_alias(page_to_pfn(page),
				page->index << PAGE_CACHE_SHIFT);
}
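
/*
 * Walk the shared user mappings of a page cache page within the
 * current mm and flush each one via flush_cache_page().  This is the
 * VIVT path of flush_dcache_page(), where every user mapping is a
 * separate alias of the page.
 */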
static void __flush_dcache_aliases(struct address_space *mapping, struct page *page)
{
	struct mm_struct *mm = current->active_mm;
	struct vm_area_struct *mpnt;
	struct prio_tree_iter iter;
	pgoff_t pgoff;

	/*
	 * There are possible user space mappings of this page:
	 * - VIVT cache: we need to also write back and invalidate all user
	 *   data in the current VM view associated with this page.
	 * - aliasing VIPT: we only need to find one mapping of this page.
	 */
	pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);

	flush_dcache_mmap_lock(mapping);
	vma_prio_tree_foreach(mpnt, &iter, &mapping->i_mmap, pgoff, pgoff) {
		unsigned long offset;

		/*
		 * If this VMA is not in our MM, we can ignore it.
		 */
		if (mpnt->vm_mm != mm)
			continue;
		if (!(mpnt->vm_flags & VM_MAYSHARE))
			continue;
		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
		flush_cache_page(mpnt, mpnt->vm_start + offset, page_to_pfn(page));
	}
	flush_dcache_mmap_unlock(mapping);
}

#if __LINUX_ARM_ARCH__ >= 6
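/*
 * Make the I-cache coherent with the D-cache when a user PTE is being
 * installed on ARMv6 or later.  The D-cache side is written back first
 * if the page has not yet been marked PG_dcache_clean; the I-cache is
 * then invalidated for executable mappings.
 */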
void __sync_icache_dcache(pte_t pteval)
{
	unsigned long pfn;
	struct page *page;
	struct address_space *mapping;

	if (!pte_present_user(pteval))
		return;
	if (cache_is_vipt_nonaliasing() && !pte_exec(pteval))
		/* only flush non-aliasing VIPT caches for exec mappings */
		return;
	pfn = pte_pfn(pteval);
	if (!pfn_valid(pfn))
		return;

	page = pfn_to_page(pfn);
	if (cache_is_vipt_aliasing())
		mapping = page_mapping(page);
	else
		mapping = NULL;

	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
		__flush_dcache_page(mapping, page);
	/* pte_exec() already checked above for non-aliasing VIPT cache */
	if (cache_is_vipt_nonaliasing() || pte_exec(pteval))
		__flush_icache_all();
}
#endif

/*
 * Ensure cache coherency between kernel mapping and userspace mapping
 * of this page.
 *
 * We have three cases to consider:
 * - VIPT non-aliasing cache: fully coherent so nothing required.
 * - VIVT: fully aliasing, so we need to handle every alias in our
 *   current VM view.
 * - VIPT aliasing: need to handle one alias in our current VM view.
 *
 * If we need to handle aliasing:
 *  If the page only exists in the page cache and there are no user
 *  space mappings, we can be lazy and remember that we may have dirty
 *  kernel cache lines for later.  Otherwise, we assume we have
 *  aliasing mappings.
 *
 * Note that we disable the lazy flush for SMP.
 */
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping;

	/*
	 * The zero page is never written to, so never has any dirty
	 * cache lines, and therefore never needs to be flushed.
	 */
	if (page == ZERO_PAGE(0))
		return;

	mapping = page_mapping(page);

	if (!cache_ops_need_broadcast() &&
	    mapping && !mapping_mapped(mapping))
		clear_bit(PG_dcache_clean, &page->flags);
	else {
		__flush_dcache_page(mapping, page);
		if (mapping && cache_is_vivt())
			__flush_dcache_aliases(mapping, page);
		else if (mapping)
			__flush_icache_all();
		set_bit(PG_dcache_clean, &page->flags);
	}
}
EXPORT_SYMBOL(flush_dcache_page);

/*
 * Flush an anonymous page so that users of get_user_pages()
 * can safely access the data.  The expected sequence is:
 *
 *  get_user_pages()
 *    -> flush_anon_page
 *  memcpy() to/from page
 *  if written to page, flush_dcache_page()
 */
void __flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
	unsigned long pfn;

	/* VIPT non-aliasing caches need do nothing */
	if (cache_is_vipt_nonaliasing())
		return;

	/*
	 * Write back and invalidate userspace mapping.
	 */
	pfn = page_to_pfn(page);
	if (cache_is_vivt()) {
		flush_cache_page(vma, vmaddr, pfn);
	} else {
		/*
		 * For aliasing VIPT, we can flush an alias of the
		 * userspace address only.
		 */
		flush_pfn_alias(pfn, vmaddr);
		__flush_icache_all();
	}

	/*
	 * Invalidate kernel mapping.  No data should be contained
	 * in this mapping of the page.  FIXME: this is overkill
	 * since we actually ask for a write-back and invalidate.
	 */
	__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
}