#ifndef _PARISC_CACHEFLUSH_H
#define _PARISC_CACHEFLUSH_H

#include <linux/mm.h>
#include <linux/uaccess.h>

/* The usual comment is "Caches aren't brain-dead on the <architecture>".
 * Unfortunately, that doesn't apply to PA-RISC. */

/* Internal implementation */
void flush_data_cache_local(void *);	/* flushes local data-cache only */
void flush_instruction_cache_local(void *); /* flushes local code-cache only */
#ifdef CONFIG_SMP
void flush_data_cache(void);		/* flushes data-cache only (all processors) */
void flush_instruction_cache(void);	/* flushes i-cache only (all processors) */
#else
#define flush_data_cache()		flush_data_cache_local(NULL)
#define flush_instruction_cache()	flush_instruction_cache_local(NULL)
#endif
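
/*
 * Illustrative sketch (an assumption, not part of the original header):
 * on SMP the global flushes declared above are typically built from the
 * *_local variants by running them on every CPU, roughly:
 *
 *	void flush_data_cache(void)
 *	{
 *		on_each_cpu(flush_data_cache_local, NULL, 1);
 *	}
 */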

#define flush_cache_dup_mm(mm) flush_cache_mm(mm)

void flush_user_icache_range_asm(unsigned long, unsigned long);
void flush_kernel_icache_range_asm(unsigned long, unsigned long);
void flush_user_dcache_range_asm(unsigned long, unsigned long);
void flush_kernel_dcache_range_asm(unsigned long, unsigned long);
void flush_kernel_dcache_page_asm(void *);
void flush_kernel_icache_page(void *);
void flush_user_dcache_range(unsigned long, unsigned long);
void flush_user_icache_range(unsigned long, unsigned long);

/* Cache flush operations */

void flush_cache_all_local(void);
void flush_cache_all(void);
void flush_cache_mm(struct mm_struct *mm);

#define flush_kernel_dcache_range(start, size) \
	flush_kernel_dcache_range_asm((start), (start) + (size))

/* vmap range flushes and invalidates.  Architecturally, we don't need
 * the invalidate, because the CPU should refuse to speculate once an
 * area has been flushed, so invalidate is left empty. */
static inline void flush_kernel_vmap_range(void *vaddr, int size)
{
	unsigned long start = (unsigned long)vaddr;

	flush_kernel_dcache_range_asm(start, start + size);
}
static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
{
}
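
/*
 * Illustrative usage sketch (an assumption, not part of the original
 * header): per Documentation/cachetlb.txt, a driver doing I/O to a
 * vmap()'d buffer brackets the transfer with these two calls.  The
 * names "buf" and "len" are hypothetical:
 *
 *	flush_kernel_vmap_range(buf, len);	(before the device reads buf)
 *	... start I/O, wait for completion ...
 *	invalidate_kernel_vmap_range(buf, len);	(before the CPU reads data
 *						 the device wrote)
 */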

#define flush_cache_vmap(start, end)	flush_cache_all()
#define flush_cache_vunmap(start, end)	flush_cache_all()

#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page *page);

#define flush_dcache_mmap_lock(mapping) \
	spin_lock_irq(&(mapping)->tree_lock)
#define flush_dcache_mmap_unlock(mapping) \
	spin_unlock_irq(&(mapping)->tree_lock)

#define flush_icache_page(vma, page)	do {		\
	flush_kernel_dcache_page(page);			\
	flush_kernel_icache_page(page_address(page));	\
} while (0)

#define flush_icache_range(s, e)	do {		\
	flush_kernel_dcache_range_asm(s, e);		\
	flush_kernel_icache_range_asm(s, e);		\
} while (0)

#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
do { \
	flush_cache_page(vma, vaddr, page_to_pfn(page)); \
	memcpy(dst, src, len); \
	flush_kernel_dcache_range_asm((unsigned long)dst, (unsigned long)dst + len); \
} while (0)
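
/*
 * Illustrative sketch (an assumption, not part of the original header):
 * copy_to_user_page() is the pattern access_process_vm()-style code
 * relies on when, e.g., ptrace plants a breakpoint in another task's
 * text page; the trailing dcache flush pushes the new instructions to
 * memory so a later icache fetch sees them.  "kaddr", "offset" and
 * "insn" are hypothetical names:
 *
 *	kaddr = kmap(page);
 *	copy_to_user_page(vma, page, addr, kaddr + offset, &insn, len);
 *	kunmap(page);
 */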

#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
do { \
	flush_cache_page(vma, vaddr, page_to_pfn(page)); \
	memcpy(dst, src, len); \
} while (0)

void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn);
void flush_cache_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end);

/* defined in pacache.S, exported in cache.c, used by flush_anon_page() */
void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);

#define ARCH_HAS_FLUSH_ANON_PAGE
static inline void
flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
	if (PageAnon(page))
		flush_dcache_page_asm(page_to_phys(page), vmaddr);
}

#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
void flush_kernel_dcache_page_addr(void *addr);
static inline void flush_kernel_dcache_page(struct page *page)
{
	flush_kernel_dcache_page_addr(page_address(page));
}
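
/*
 * Illustrative usage sketch (an assumption, not part of the original
 * header): a driver that writes to a page-cache page through its kernel
 * mapping is expected to flush it afterwards so user mappings see the
 * data; "data" and "len" are hypothetical names:
 *
 *	addr = kmap_atomic(page);
 *	memcpy(addr, data, len);
 *	kunmap_atomic(addr);
 *	flush_kernel_dcache_page(page);
 */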

#ifdef CONFIG_DEBUG_RODATA
void mark_rodata_ro(void);
#endif

#ifdef CONFIG_PA8X00
/* Only pa8800, pa8900 need this */
#include <asm/kmap_types.h>

#define ARCH_HAS_KMAP

void kunmap_parisc(void *addr);

static inline void *kmap(struct page *page)
{
	might_sleep();
	return page_address(page);
}

#define kunmap(page)			kunmap_parisc(page_address(page))

static inline void *__kmap_atomic(struct page *page)
{
	pagefault_disable();
	return page_address(page);
}

static inline void __kunmap_atomic(void *addr)
{
	kunmap_parisc(addr);
	pagefault_enable();
}

#define kmap_atomic_prot(page, prot)	kmap_atomic(page)
#define kmap_atomic_pfn(pfn)		kmap_atomic(pfn_to_page(pfn))
#define kmap_atomic_to_page(ptr)	virt_to_page(ptr)
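
/*
 * Illustrative sketch (an assumption, not part of the original header):
 * the generic kmap_atomic()/kunmap_atomic() pair lands in the
 * __kmap_atomic()/__kunmap_atomic() hooks above, so on pa8800/pa8900
 * the unmap side also flushes via kunmap_parisc().  Typical caller:
 *
 *	void *vto = kmap_atomic(to);
 *	copy_page(vto, page_address(from));
 *	kunmap_atomic(vto);
 */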
#endif /* CONFIG_PA8X00 */

#endif /* _PARISC_CACHEFLUSH_H */