cacheflush.h

#ifndef _PARISC_CACHEFLUSH_H
#define _PARISC_CACHEFLUSH_H

#include <linux/mm.h>
#include <asm/cache.h>  /* for flush_user_dcache_range_asm() proto */

/* The usual comment is "Caches aren't brain-dead on the <architecture>".
 * Unfortunately, that doesn't apply to PA-RISC. */

/* Cache flush operations */
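
/* flush_cache_mm() has to assume the worst.  On SMP the mm may have been
 * active on other CPUs, so every cache in the system must be flushed; on
 * UP, flushing the local caches is sufficient. */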
#ifdef CONFIG_SMP
#define flush_cache_mm(mm) flush_cache_all()
#else
#define flush_cache_mm(mm) flush_cache_all_local()
#endif
/* No trailing semicolon: the macro must expand to a plain expression so
 * that "flush_kernel_dcache_range(s, l);" works inside an if/else. */
#define flush_kernel_dcache_range(start,size) \
        flush_kernel_dcache_range_asm((start), (start)+(size))
extern void flush_cache_all_local(void);

static inline void cacheflush_h_tmp_function(void *dummy)
{
        flush_cache_all_local();
}
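
/* Flush the caches on every CPU.  The trailing two arguments to
 * on_each_cpu() are the old "retry" and "wait" flags, so flush_cache_all()
 * does not return until each CPU has finished its local flush. */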
static inline void flush_cache_all(void)
{
        on_each_cpu(cacheflush_h_tmp_function, NULL, 1, 1);
}

#define flush_cache_vmap(start, end)    flush_cache_all()
#define flush_cache_vunmap(start, end)  flush_cache_all()

extern int parisc_cache_flush_threshold;
void parisc_setup_cache_timing(void);
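
/* For small ranges, flushing line by line is cheaper than flushing the
 * whole cache; parisc_cache_flush_threshold is the break-even point,
 * measured at boot by parisc_setup_cache_timing(). */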
static inline void
flush_user_dcache_range(unsigned long start, unsigned long end)
{
        if ((end - start) < parisc_cache_flush_threshold)
                flush_user_dcache_range_asm(start, end);
        else
                flush_data_cache();
}

static inline void
flush_user_icache_range(unsigned long start, unsigned long end)
{
        if ((end - start) < parisc_cache_flush_threshold)
                flush_user_icache_range_asm(start, end);
        else
                flush_instruction_cache();
}
extern void flush_dcache_page(struct page *page);

#define flush_dcache_mmap_lock(mapping) \
        write_lock_irq(&(mapping)->tree_lock)
#define flush_dcache_mmap_unlock(mapping) \
        write_unlock_irq(&(mapping)->tree_lock)

#define flush_icache_page(vma,page)     do {            \
        flush_kernel_dcache_page(page);                 \
        flush_kernel_icache_page(page_address(page));   \
} while (0)

#define flush_icache_range(s,e)         do {            \
        flush_kernel_dcache_range_asm(s,e);             \
        flush_kernel_icache_range_asm(s,e);             \
} while (0)
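
/* Usage sketch (illustrative only): code that generates or patches
 * instructions through a kernel mapping must push the new data out of
 * the d-cache and invalidate the stale i-cache lines before executing
 * it, e.g.
 *
 *      memcpy(code_buf, insns, len);
 *      flush_icache_range((unsigned long)code_buf,
 *                         (unsigned long)code_buf + len);
 *
 * code_buf, insns and len are hypothetical names, not part of this
 * header. */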
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
do { \
        flush_cache_page(vma, vaddr, page_to_pfn(page)); \
        memcpy(dst, src, len); \
        flush_kernel_dcache_range_asm((unsigned long)dst, \
                                      (unsigned long)dst + len); \
} while (0)

#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
do { \
        flush_cache_page(vma, vaddr, page_to_pfn(page)); \
        memcpy(dst, src, len); \
} while (0)
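
/* These two are used by generic code such as access_process_vm() (the
 * ptrace path) when the kernel touches another process's page through
 * its own mapping: the target page is flushed first so PA-RISC's
 * virtually indexed caches stay coherent, and copy_to_user_page()
 * additionally writes the freshly copied data back so the user mapping
 * sees it. */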
static inline void flush_cache_range(struct vm_area_struct *vma,
                unsigned long start, unsigned long end)
{
        int sr3;

        if (!vma->vm_mm->context) {
                BUG();
                return;
        }

        sr3 = mfsp(3);
        if (vma->vm_mm->context == sr3) {
                /* The mm is live in user space register %sr3, so the
                 * cheaper user-range flushes can reach its mappings. */
                flush_user_dcache_range(start, end);
                flush_user_icache_range(start, end);
        } else {
                /* Not the current address space: flush everything. */
                flush_cache_all();
        }
}
/* Simple function to work out if we have an existing address translation
 * for a user space vma. */
static inline int translation_exists(struct vm_area_struct *vma,
                unsigned long addr, unsigned long pfn)
{
        pgd_t *pgd = pgd_offset(vma->vm_mm, addr);
        pmd_t *pmd;
        pte_t pte;

        if (pgd_none(*pgd))
                return 0;

        pmd = pmd_offset(pgd, addr);
        if (pmd_none(*pmd) || pmd_bad(*pmd))
                return 0;

        /* We cannot take the pte lock here: flush_cache_page is usually
         * called with pte lock already held.  Whereas flush_dcache_page
         * takes flush_dcache_mmap_lock, which is lower in the hierarchy:
         * the vma itself is secure, but the pte might come or go racily.
         */
        pte = *pte_offset_map(pmd, addr);
        /* But pte_unmap() does nothing on this architecture */

        /* Filter out coincidental file entries and swap entries */
        if (!(pte_val(pte) & (_PAGE_FLUSH|_PAGE_PRESENT)))
                return 0;

        return pte_pfn(pte) == pfn;
}
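
/* flush_cache_page() below only flushes when this returns true: a flush
 * of an untranslated user address would just take a non-access TLB miss
 * that the handlers have no pte to satisfy. */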
/* Private function to flush a page from the cache of a non-current
 * process.  cr25 contains the Page Directory of the current user
 * process; we're going to hijack both it and the user space %sr3 to
 * temporarily make the non-current process current.  We have to do
 * this because cache flushing may cause a non-access tlb miss which
 * the handlers have to fill in from the pgd of the non-current
 * process. */
static inline void
flush_user_cache_page_non_current(struct vm_area_struct *vma,
                unsigned long vmaddr)
{
        /* save the current process space and pgd */
        unsigned long space = mfsp(3), pgd = mfctl(25);

        /* we don't mind taking interrupts since they may not
         * do anything with user space, but we can't
         * be preempted here */
        preempt_disable();

        /* make us current */
        mtctl(__pa(vma->vm_mm->pgd), 25);
        mtsp(vma->vm_mm->context, 3);

        flush_user_dcache_page(vmaddr);
        if (vma->vm_flags & VM_EXEC)
                flush_user_icache_page(vmaddr);

        /* put the old current process back */
        mtsp(space, 3);
        mtctl(pgd, 25);
        preempt_enable();
}
static inline void
__flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
        if (likely(vma->vm_mm->context == mfsp(3))) {
                flush_user_dcache_page(vmaddr);
                if (vma->vm_flags & VM_EXEC)
                        flush_user_icache_page(vmaddr);
        } else {
                flush_user_cache_page_non_current(vma, vmaddr);
        }
}
static inline void
flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn)
{
        BUG_ON(!vma->vm_mm->context);

        if (likely(translation_exists(vma, vmaddr, pfn)))
                __flush_cache_page(vma, vmaddr);
}
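
/* Caller sketch (illustrative only): generic mm code typically flushes a
 * single user page just before rewriting its pte, e.g.
 *
 *      flush_cache_page(vma, address, pte_pfn(*ptep));
 *
 * address and ptep are hypothetical names here.  Since such callers
 * usually hold the pte lock, translation_exists() above deliberately
 * avoids taking it. */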
static inline void
flush_anon_page(struct page *page, unsigned long vmaddr)
{
        if (PageAnon(page))
                flush_user_dcache_page(vmaddr);
}
#define ARCH_HAS_FLUSH_ANON_PAGE
static inline void
flush_kernel_dcache_page(struct page *page)
{
        flush_kernel_dcache_page_asm(page_address(page));
}
#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
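
/* The ARCH_HAS_* defines above tell generic code that this architecture
 * supplies real implementations of flush_anon_page() and
 * flush_kernel_dcache_page() rather than the default no-ops.  A driver
 * filling a page through the kernel mapping while user space maps the
 * same page would use, e.g. (buf is a hypothetical source buffer):
 *
 *      memcpy(page_address(page), buf, PAGE_SIZE);
 *      flush_kernel_dcache_page(page);
 */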
#ifdef CONFIG_DEBUG_RODATA
void mark_rodata_ro(void);
#endif

#endif /* _PARISC_CACHEFLUSH_H */