flush.c

/*
 *  linux/arch/arm/mm/flush.c
 *
 *  Copyright (C) 1995-2002 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/pagemap.h>

#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/system.h>
#include <asm/tlbflush.h>

#include "mm.h"
#ifdef CONFIG_CPU_CACHE_VIPT

#define ALIAS_FLUSH_START 0xffff4000
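/*
 * Write back and invalidate one page of data through a temporary kernel
 * alias.  The alias is placed at ALIAS_FLUSH_START plus the cache colour
 * of the original virtual address, so on an aliasing VIPT cache it hits
 * the same cache lines as the userspace mapping of the page.
 */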
static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
{
        unsigned long to = ALIAS_FLUSH_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
        const int zero = 0;

        /* Map the page at the alias address and make the mapping visible. */
        set_pte_ext(TOP_PTE(to), pfn_pte(pfn, PAGE_KERNEL), 0);
        flush_tlb_kernel_page(to);

        /*
         * Clean and invalidate the D-cache over the aliased page (CP15
         * c14 range operation), then drain the write buffer.
         */
        asm(    "mcrr   p15, 0, %1, %0, c14\n"
        "       mcr     p15, 0, %2, c7, c10, 4"
            :
            : "r" (to), "r" (to + PAGE_SIZE - L1_CACHE_BYTES), "r" (zero)
            : "cc");
        __flush_icache_all();
}
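/*
 * On an aliasing VIPT cache we cannot pick out cache lines belonging to a
 * particular mm or address range, so flush_cache_mm() and
 * flush_cache_range() below fall back to cleaning and invalidating the
 * entire D-cache (and the I-cache) instead.
 */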
void flush_cache_mm(struct mm_struct *mm)
{
        if (cache_is_vivt()) {
                vivt_flush_cache_mm(mm);
                return;
        }

        if (cache_is_vipt_aliasing()) {
                /* Clean+invalidate the entire D-cache, then drain the write buffer. */
                asm(    "mcr    p15, 0, %0, c7, c14, 0\n"
                "       mcr     p15, 0, %0, c7, c10, 4"
                    :
                    : "r" (0)
                    : "cc");
                __flush_icache_all();
        }
}
void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
        if (cache_is_vivt()) {
                vivt_flush_cache_range(vma, start, end);
                return;
        }

        if (cache_is_vipt_aliasing()) {
                /* As in flush_cache_mm(): clean+invalidate the whole D-cache. */
                asm(    "mcr    p15, 0, %0, c7, c14, 0\n"
                "       mcr     p15, 0, %0, c7, c10, 4"
                    :
                    : "r" (0)
                    : "cc");
                __flush_icache_all();
        }
}
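/*
 * Flush the cache lines covering a single user page.  On an aliasing VIPT
 * cache this is done through a kernel alias of the same cache colour via
 * flush_pfn_alias().
 */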
void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
{
        if (cache_is_vivt()) {
                vivt_flush_cache_page(vma, user_addr, pfn);
                return;
        }

        if (cache_is_vipt_aliasing())
                flush_pfn_alias(pfn, user_addr);
}
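/*
 * Keep the caches coherent after the kernel has written into another
 * task's page (typically via ptrace/access_process_vm).  On a
 * non-aliasing VIPT cache only the kernel mapping needs to be made
 * coherent, and only when the VMA is executable.
 */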
void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
                         unsigned long uaddr, void *kaddr,
                         unsigned long len, int write)
{
        if (cache_is_vivt()) {
                vivt_flush_ptrace_access(vma, page, uaddr, kaddr, len, write);
                return;
        }

        if (cache_is_vipt_aliasing()) {
                flush_pfn_alias(page_to_pfn(page), uaddr);
                return;
        }

        /* VIPT non-aliasing cache */
        if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)) &&
            vma->vm_flags & VM_EXEC) {
                unsigned long addr = (unsigned long)kaddr;
                /* only flushing the kernel mapping on non-aliasing VIPT */
                __cpuc_coherent_kern_range(addr, addr + len);
        }
}
#else
#define flush_pfn_alias(pfn,vaddr) do { } while (0)
#endif
void __flush_dcache_page(struct address_space *mapping, struct page *page)
{
        /*
         * Writeback any data associated with the kernel mapping of this
         * page.  This ensures that data in the physical page is mutually
         * coherent with the kernel's mapping.
         */
#ifdef CONFIG_HIGHMEM
        /*
         * kmap_atomic() doesn't set the page virtual address, and
         * kunmap_atomic() takes care of cache flushing already.
         */
        if (page_address(page))
#endif
                __cpuc_flush_dcache_page(page_address(page));

        /*
         * If this is a page cache page, and we have an aliasing VIPT cache,
         * we only need to do one flush - which would be at the relevant
         * userspace colour, which is congruent with page->index.
         */
        if (mapping && cache_is_vipt_aliasing())
                flush_pfn_alias(page_to_pfn(page),
                                page->index << PAGE_CACHE_SHIFT);
}
static void __flush_dcache_aliases(struct address_space *mapping, struct page *page)
{
        struct mm_struct *mm = current->active_mm;
        struct vm_area_struct *mpnt;
        struct prio_tree_iter iter;
        pgoff_t pgoff;

        /*
         * There are possible user space mappings of this page:
         * - VIVT cache: we need to also write back and invalidate all user
         *   data in the current VM view associated with this page.
         * - aliasing VIPT: we only need to find one mapping of this page.
         */
        pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);

        flush_dcache_mmap_lock(mapping);
        vma_prio_tree_foreach(mpnt, &iter, &mapping->i_mmap, pgoff, pgoff) {
                unsigned long offset;

                /*
                 * If this VMA is not in our MM, we can ignore it.
                 */
                if (mpnt->vm_mm != mm)
                        continue;
                if (!(mpnt->vm_flags & VM_MAYSHARE))
                        continue;
                offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
                flush_cache_page(mpnt, mpnt->vm_start + offset, page_to_pfn(page));
        }
        flush_dcache_mmap_unlock(mapping);
}
/*
 * Ensure cache coherency between kernel mapping and userspace mapping
 * of this page.
 *
 * We have three cases to consider:
 *  - VIPT non-aliasing cache: fully coherent so nothing required.
 *  - VIVT: fully aliasing, so we need to handle every alias in our
 *    current VM view.
 *  - VIPT aliasing: need to handle one alias in our current VM view.
 *
 * If we need to handle aliasing:
 *  If the page only exists in the page cache and there are no user
 *  space mappings, we can be lazy and remember that we may have dirty
 *  kernel cache lines for later.  Otherwise, we assume we have
 *  aliasing mappings.
 *
 * Note that we disable the lazy flush for SMP.
 */
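/*
 * Note: when the lazy path marks PG_dcache_dirty, the deferred flush is
 * normally completed later, when the page is actually mapped into
 * userspace (the bit is tested and cleared in update_mmu_cache() in
 * arch/arm/mm/fault-armv.c).
 */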
void flush_dcache_page(struct page *page)
{
        struct address_space *mapping = page_mapping(page);

#ifndef CONFIG_SMP
        if (!PageHighMem(page) && mapping && !mapping_mapped(mapping))
                set_bit(PG_dcache_dirty, &page->flags);
        else
#endif
        {
                __flush_dcache_page(mapping, page);
                if (mapping && cache_is_vivt())
                        __flush_dcache_aliases(mapping, page);
                else if (mapping)
                        __flush_icache_all();
        }
}
EXPORT_SYMBOL(flush_dcache_page);
/*
 * Flush an anonymous page so that users of get_user_pages()
 * can safely access the data.  The expected sequence is:
 *
 *  get_user_pages()
 *    -> flush_anon_page
 *  memcpy() to/from page
 *  if written to page, flush_dcache_page()
 */
void __flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
        unsigned long pfn;

        /* VIPT non-aliasing caches need do nothing */
        if (cache_is_vipt_nonaliasing())
                return;

        /*
         * Write back and invalidate userspace mapping.
         */
        pfn = page_to_pfn(page);
        if (cache_is_vivt()) {
                flush_cache_page(vma, vmaddr, pfn);
        } else {
                /*
                 * For aliasing VIPT, we can flush an alias of the
                 * userspace address only.
                 */
                flush_pfn_alias(pfn, vmaddr);
        }

        /*
         * Invalidate kernel mapping.  No data should be contained
         * in this mapping of the page.  FIXME: this is overkill
         * since we actually ask for a write-back and invalidate.
         */
        __cpuc_flush_dcache_page(page_address(page));
}