#ifndef _PARISC_CACHEFLUSH_H
#define _PARISC_CACHEFLUSH_H

#include <linux/mm.h>
#include <linux/uaccess.h>

/* The usual comment is "Caches aren't brain-dead on the <architecture>".
 * Unfortunately, that doesn't apply to PA-RISC. */

/* Internal implementation */
void flush_data_cache_local(void *);	/* flushes local data-cache only */
void flush_instruction_cache_local(void *); /* flushes local code-cache only */
#ifdef CONFIG_SMP
void flush_data_cache(void);		/* flushes data-cache only (all processors) */
void flush_instruction_cache(void);	/* flushes i-cache only (all processors) */
#else
#define flush_data_cache()		flush_data_cache_local(NULL)
#define flush_instruction_cache()	flush_instruction_cache_local(NULL)
#endif
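
/* On SMP the cross-CPU variants must broadcast the flush to every
 * processor; an illustrative implementation (a sketch, not part of this
 * header; the real one lives in the arch's cache code) would be:
 *
 *	void flush_data_cache(void)
 *	{
 *		on_each_cpu(flush_data_cache_local, NULL, 1);
 *	}
 *
 * On UP builds the local flush already covers the whole machine, hence
 * the trivial #defines above.
 */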
#define flush_cache_dup_mm(mm) flush_cache_mm(mm)

void flush_user_icache_range_asm(unsigned long, unsigned long);
void flush_kernel_icache_range_asm(unsigned long, unsigned long);
void flush_user_dcache_range_asm(unsigned long, unsigned long);
void flush_kernel_dcache_range_asm(unsigned long, unsigned long);
void flush_kernel_dcache_page_asm(void *);
void flush_kernel_icache_page(void *);
void flush_user_dcache_page(unsigned long);
void flush_user_icache_page(unsigned long);
void flush_user_dcache_range(unsigned long, unsigned long);
void flush_user_icache_range(unsigned long, unsigned long);
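
/* The _asm variants are the raw assembly flush loops; the unsuffixed
 * flush_user_*_range() wrappers above are the tunable entry points.
 * A plausible sketch (assuming a parisc_cache_flush_threshold-style
 * cutoff in the matching cache code, below which a ranged flush beats
 * flushing the whole cache):
 *
 *	void flush_user_dcache_range(unsigned long start, unsigned long end)
 *	{
 *		if ((end - start) < parisc_cache_flush_threshold)
 *			flush_user_dcache_range_asm(start, end);
 *		else
 *			flush_data_cache();
 *	}
 */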
/* Cache flush operations */

void flush_cache_all_local(void);
void flush_cache_all(void);
void flush_cache_mm(struct mm_struct *mm);

#define flush_kernel_dcache_range(start, size) \
	flush_kernel_dcache_range_asm((start), (start)+(size))
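
/* Illustrative use: write a buffer back through its kernel mapping
 * before a non-coherent consumer reads it (note the macro takes a
 * start/size pair, not start/end):
 *
 *	memcpy(buf, data, len);
 *	flush_kernel_dcache_range((unsigned long)buf, len);
 */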
/* vmap range flushes and invalidates.  Architecturally, we don't need
 * the invalidate, because the CPU should refuse to speculate once an
 * area has been flushed, so invalidate is left empty. */
static inline void flush_kernel_vmap_range(void *vaddr, int size)
{
	unsigned long start = (unsigned long)vaddr;

	flush_kernel_dcache_range_asm(start, start + size);
}

static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
{
}
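
/* Illustrative use, e.g. a filesystem doing I/O on a vmap()ed buffer
 * (vaddr/len here are hypothetical):
 *
 *	flush_kernel_vmap_range(vaddr, len);	   ... before the device reads
 *	(device performs the I/O)
 *	invalidate_kernel_vmap_range(vaddr, len);  ... a no-op here, see above
 */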
#define flush_cache_vmap(start, end)		flush_cache_all()
#define flush_cache_vunmap(start, end)		flush_cache_all()

#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page *page);

#define flush_dcache_mmap_lock(mapping) \
	spin_lock_irq(&(mapping)->tree_lock)
#define flush_dcache_mmap_unlock(mapping) \
	spin_unlock_irq(&(mapping)->tree_lock)

#define flush_icache_page(vma, page)	do {		\
	flush_kernel_dcache_page(page);			\
	flush_kernel_icache_page(page_address(page));	\
} while (0)

#define flush_icache_range(s, e)	do {		\
	flush_kernel_dcache_range_asm(s, e);		\
	flush_kernel_icache_range_asm(s, e);		\
} while (0)
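
/* Illustrative use: after storing instructions to memory (module
 * loader, kprobes, and similar), write the D-cache lines back and
 * invalidate the stale I-cache lines before jumping to the new code:
 *
 *	memcpy(code, insns, len);
 *	flush_icache_range((unsigned long)code, (unsigned long)code + len);
 */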
#define copy_to_user_page(vma, page, vaddr, dst, src, len)	\
do {								\
	flush_cache_page(vma, vaddr, page_to_pfn(page));	\
	memcpy(dst, src, len);					\
	flush_kernel_dcache_range_asm((unsigned long)dst,	\
				(unsigned long)dst + len);	\
} while (0)

#define copy_from_user_page(vma, page, vaddr, dst, src, len)	\
do {								\
	flush_cache_page(vma, vaddr, page_to_pfn(page));	\
	memcpy(dst, src, len);					\
} while (0)
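
/* These two back access_process_vm() and ptrace: flushing the user
 * alias at @vaddr first keeps the kernel mapping coherent with the
 * traced process's view, and copy_to_user_page() then writes the new
 * bytes back so a following I-cache flush picks them up. */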
void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
		unsigned long pfn);
void flush_cache_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end);

#define ARCH_HAS_FLUSH_ANON_PAGE
static inline void
flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
	if (PageAnon(page))
		flush_user_dcache_page(vmaddr);
}
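
/* An anonymous page has no page-cache alias to worry about, so before
 * the kernel touches it through its own mapping (e.g. after
 * get_user_pages()) only the user alias at @vmaddr needs to be written
 * back. */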
#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
void flush_kernel_dcache_page_addr(void *addr);
static inline void flush_kernel_dcache_page(struct page *page)
{
	flush_kernel_dcache_page_addr(page_address(page));
}
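
/* Illustrative use: a driver that fills a page through its kernel
 * mapping (PIO and the like) flushes it afterwards so user mappings of
 * the same page see the new data:
 *
 *	memcpy(page_address(page), data, len);
 *	flush_kernel_dcache_page(page);
 */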
#ifdef CONFIG_DEBUG_RODATA
void mark_rodata_ro(void);
#endif
#ifdef CONFIG_PA8X00
/* Only pa8800, pa8900 need this */
#include <asm/kmap_types.h>

#define ARCH_HAS_KMAP

void kunmap_parisc(void *addr);

static inline void *kmap(struct page *page)
{
	might_sleep();
	return page_address(page);
}

#define kunmap(page)	kunmap_parisc(page_address(page))

static inline void *kmap_atomic(struct page *page, enum km_type idx)
{
	pagefault_disable();
	return page_address(page);
}

static inline void kunmap_atomic(void *addr, enum km_type idx)
{
	kunmap_parisc(addr);
	pagefault_enable();
}
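
/* kunmap_parisc() is where the pa8800/pa8900 alias handling is expected
 * to happen: flushing the kernel alias at unmap time keeps a
 * differently-coloured user mapping of the page from seeing stale data.
 * Illustrative use:
 *
 *	void *p = kmap_atomic(page, KM_USER0);
 *	memcpy(p, src, PAGE_SIZE);
 *	kunmap_atomic(p, KM_USER0);	(the flush happens in here)
 */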
#define kmap_atomic_prot(page, idx, prot)	kmap_atomic(page, idx)
#define kmap_atomic_pfn(pfn, idx)	kmap_atomic(pfn_to_page(pfn), (idx))
#define kmap_atomic_to_page(ptr)	virt_to_page(ptr)
#endif

#endif /* _PARISC_CACHEFLUSH_H */