/* arch/x86 cache flushing and memory-attribute API declarations. */
#ifndef _ASM_X86_CACHEFLUSH_H
#define _ASM_X86_CACHEFLUSH_H

/* Keep includes the same across arches. */
#include <linux/mm.h>

/* Caches aren't brain-dead on the intel. */
  6. static inline void flush_cache_all(void) { }
  7. static inline void flush_cache_mm(struct mm_struct *mm) { }
  8. static inline void flush_cache_dup_mm(struct mm_struct *mm) { }
  9. static inline void flush_cache_range(struct vm_area_struct *vma,
  10. unsigned long start, unsigned long end) { }
  11. static inline void flush_cache_page(struct vm_area_struct *vma,
  12. unsigned long vmaddr, unsigned long pfn) { }
  13. static inline void flush_dcache_page(struct page *page) { }
  14. static inline void flush_dcache_mmap_lock(struct address_space *mapping) { }
  15. static inline void flush_dcache_mmap_unlock(struct address_space *mapping) { }
  16. static inline void flush_icache_range(unsigned long start,
  17. unsigned long end) { }
  18. static inline void flush_icache_page(struct vm_area_struct *vma,
  19. struct page *page) { }
  20. static inline void flush_icache_user_range(struct vm_area_struct *vma,
  21. struct page *page,
  22. unsigned long addr,
  23. unsigned long len) { }
  24. static inline void flush_cache_vmap(unsigned long start, unsigned long end) { }
  25. static inline void flush_cache_vunmap(unsigned long start,
  26. unsigned long end) { }
  27. static inline void copy_to_user_page(struct vm_area_struct *vma,
  28. struct page *page, unsigned long vaddr,
  29. void *dst, const void *src,
  30. unsigned long len)
  31. {
  32. memcpy(dst, src, len);
  33. }
  34. static inline void copy_from_user_page(struct vm_area_struct *vma,
  35. struct page *page, unsigned long vaddr,
  36. void *dst, const void *src,
  37. unsigned long len)
  38. {
  39. memcpy(dst, src, len);
  40. }
/*
 * Reuse the arch-private page flag to mark pages whose kernel mapping
 * has been set to a non-write-back memory type.  PAGEFLAG() presumably
 * expands to the PageNonWB/SetPageNonWB/ClearPageNonWB accessors
 * (defined in linux/page-flags.h) — confirm against that header.
 */
#define PG_non_WB PG_arch_1
PAGEFLAG(NonWB, non_WB)
/*
 * The set_memory_* API can be used to change various attributes of a virtual
 * address range. The attributes include:
 *  Cacheability  : UnCached, WriteCombining, WriteBack
 *  Executability : eXecutable, NoteXecutable
 *  Read/Write    : ReadOnly, ReadWrite
 *  Presence      : NotPresent
 *
 * Within a category, the attributes are mutually exclusive.
 *
 * The implementation of this API will take care of various aspects that
 * are associated with changing such attributes, such as:
 * - Flushing TLBs
 * - Flushing CPU caches
 * - Making sure aliases of the memory behind the mapping don't violate
 *   coherency rules as defined by the CPU in the system.
 *
 * What this API does not do:
 * - Provide exclusion between various callers - including callers that
 *   operate on other mappings of the same physical page
 * - Restore default attributes when a page is freed
 * - Guarantee that mappings other than the requested one are
 *   in any state, other than that these do not violate rules for
 *   the CPU you have. Do not depend on any effects on other mappings;
 *   CPUs other than the one you have may have more relaxed rules.
 * The caller is required to take care of these.
 */
/*
 * Low-level variants — assumed to skip some bookkeeping done by the
 * public set_memory_* wrappers below; confirm against pageattr.c.
 */
int _set_memory_uc(unsigned long addr, int numpages);
int _set_memory_wc(unsigned long addr, int numpages);
int _set_memory_wb(unsigned long addr, int numpages);

/* Cacheability: uncached / write-combining / write-back. */
int set_memory_uc(unsigned long addr, int numpages);
int set_memory_wc(unsigned long addr, int numpages);
int set_memory_wb(unsigned long addr, int numpages);

/* Executability, read/write permission, presence, and page splitting. */
int set_memory_x(unsigned long addr, int numpages);
int set_memory_nx(unsigned long addr, int numpages);
int set_memory_ro(unsigned long addr, int numpages);
int set_memory_rw(unsigned long addr, int numpages);
int set_memory_np(unsigned long addr, int numpages);
int set_memory_4k(unsigned long addr, int numpages);

/* Batch variants: operate on an array of addrinarray addresses. */
int set_memory_array_uc(unsigned long *addr, int addrinarray);
int set_memory_array_wb(unsigned long *addr, int addrinarray);
/*
 * For legacy compatibility with the old APIs, a few functions
 * are provided that work on a "struct page".
 * These functions operate ONLY on the 1:1 kernel mapping of the
 * memory that the struct page represents, and internally just
 * call the set_memory_* functions. See the description of the
 * set_memory_* functions for more details on conventions.
 *
 * These APIs should be considered *deprecated* and are likely going to
 * be removed in the future.
 * The reason for this is the implicit operation on the 1:1 mapping only,
 * making this not a generally useful API.
 *
 * Specifically, many users of the old APIs had a virtual address,
 * called virt_to_page() or vmalloc_to_page() on that address to
 * get a struct page* that the old API required.
 * To convert these cases, use set_memory_*() on the original
 * virtual address; do not use these functions.
 */
/* Deprecated struct-page variants; see the block comment above. */
int set_pages_uc(struct page *page, int numpages);
int set_pages_wb(struct page *page, int numpages);
int set_pages_x(struct page *page, int numpages);
int set_pages_nx(struct page *page, int numpages);
int set_pages_ro(struct page *page, int numpages);
int set_pages_rw(struct page *page, int numpages);

/* Flush the cache lines covering [addr, addr + size) via CLFLUSH. */
void clflush_cache_range(void *addr, unsigned int size);
#ifdef CONFIG_DEBUG_RODATA
/* Make the kernel's read-only data section actually read-only. */
void mark_rodata_ro(void);
/* Known-const datum used by rodata_test() to probe write protection. */
extern const int rodata_test_data;
#endif

#ifdef CONFIG_DEBUG_RODATA_TEST
int rodata_test(void);
#else
/* Test disabled: report success without doing anything. */
static inline int rodata_test(void)
{
	return 0;
}
#endif

#endif /* _ASM_X86_CACHEFLUSH_H */