cacheflush.h

#ifndef __ASM_SH64_CACHEFLUSH_H
#define __ASM_SH64_CACHEFLUSH_H

#ifndef __ASSEMBLY__

#include <asm/page.h>

struct vm_area_struct;
struct page;
struct mm_struct;

/* Cache maintenance primitives implemented by the sh64 architecture code. */
extern void flush_cache_all(void);
extern void flush_cache_mm(struct mm_struct *mm);
extern void flush_cache_sigtramp(unsigned long start, unsigned long end);
extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
			      unsigned long end);
extern void flush_cache_page(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn);
extern void flush_dcache_page(struct page *pg);
extern void flush_icache_range(unsigned long start, unsigned long end);
extern void flush_icache_user_range(struct vm_area_struct *vma,
				    struct page *page, unsigned long addr,
				    int len);

/* No-ops on this architecture. */
#define flush_dcache_mmap_lock(mapping)		do { } while (0)
#define flush_dcache_mmap_unlock(mapping)	do { } while (0)

/* Be conservative: flush everything when (un)mapping a vmalloc range. */
#define flush_cache_vmap(start, end)		flush_cache_all()
#define flush_cache_vunmap(start, end)		flush_cache_all()

#define flush_icache_page(vma, page)		do { } while (0)

/*
 * Copy data to/from a user page, flushing the caches so that the copy is
 * seen coherently (including by the instruction cache when writing, e.g.
 * for ptrace breakpoint insertion).
 */
#define copy_to_user_page(vma, page, vaddr, dst, src, len)	\
	do {							\
		flush_cache_page(vma, vaddr, page_to_pfn(page));\
		memcpy(dst, src, len);				\
		flush_icache_user_range(vma, page, vaddr, len); \
	} while (0)

#define copy_from_user_page(vma, page, vaddr, dst, src, len)	\
	do {							\
		flush_cache_page(vma, vaddr, page_to_pfn(page));\
		memcpy(dst, src, len);				\
	} while (0)

#endif /* __ASSEMBLY__ */

#endif /* __ASM_SH64_CACHEFLUSH_H */
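
A minimal, hypothetical usage sketch (not part of this header): code that writes instruction bytes into memory at runtime would flush the modified range so the instruction cache observes the new contents. The function and variable names below are illustrative only.

#include <linux/types.h>
#include <linux/string.h>
#include <asm/cacheflush.h>

/* Illustrative helper, not from the kernel tree: copy new instruction
 * bytes into place, then make the I-cache coherent with the D-cache
 * over exactly the range that was written. */
static void example_patch_text(void *dst, const void *src, size_t len)
{
	memcpy(dst, src, len);
	flush_icache_range((unsigned long)dst, (unsigned long)dst + len);
}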