cacheflush.h

#ifndef _ASM_X86_CACHEFLUSH_H
#define _ASM_X86_CACHEFLUSH_H

/* Keep includes the same across arches. */
#include <linux/mm.h>

/* Caches aren't brain-dead on the intel. */
static inline void flush_cache_all(void) { }
static inline void flush_cache_mm(struct mm_struct *mm) { }
static inline void flush_cache_dup_mm(struct mm_struct *mm) { }
static inline void flush_cache_range(struct vm_area_struct *vma,
                                     unsigned long start, unsigned long end) { }
static inline void flush_cache_page(struct vm_area_struct *vma,
                                    unsigned long vmaddr, unsigned long pfn) { }

#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
static inline void flush_dcache_page(struct page *page) { }
static inline void flush_dcache_mmap_lock(struct address_space *mapping) { }
static inline void flush_dcache_mmap_unlock(struct address_space *mapping) { }

static inline void flush_icache_range(unsigned long start,
                                      unsigned long end) { }
static inline void flush_icache_page(struct vm_area_struct *vma,
                                     struct page *page) { }
static inline void flush_icache_user_range(struct vm_area_struct *vma,
                                           struct page *page,
                                           unsigned long addr,
                                           unsigned long len) { }
static inline void flush_cache_vmap(unsigned long start, unsigned long end) { }
static inline void flush_cache_vunmap(unsigned long start,
                                      unsigned long end) { }

static inline void copy_to_user_page(struct vm_area_struct *vma,
                                     struct page *page, unsigned long vaddr,
                                     void *dst, const void *src,
                                     unsigned long len)
{
        memcpy(dst, src, len);
}

static inline void copy_from_user_page(struct vm_area_struct *vma,
                                       struct page *page, unsigned long vaddr,
                                       void *dst, const void *src,
                                       unsigned long len)
{
        memcpy(dst, src, len);
}
#ifdef CONFIG_X86_PAT
/*
 * X86 PAT uses the page flags PG_arch_1 (WC) and PG_uncached together to
 * track the memory type of pages that have a backing struct page. X86 PAT
 * supports three different memory types, _PAGE_CACHE_WB, _PAGE_CACHE_WC and
 * _PAGE_CACHE_UC_MINUS, plus a fourth state where the page's memory type
 * has not been changed from its default (a value of -1 denotes this).
 * Note we do not support _PAGE_CACHE_UC here.
 */
#define _PGMT_DEFAULT           0
#define _PGMT_WC                (1UL << PG_arch_1)
#define _PGMT_UC_MINUS          (1UL << PG_uncached)
#define _PGMT_WB                (1UL << PG_uncached | 1UL << PG_arch_1)
#define _PGMT_MASK              (1UL << PG_uncached | 1UL << PG_arch_1)
#define _PGMT_CLEAR_MASK        (~_PGMT_MASK)

static inline unsigned long get_page_memtype(struct page *pg)
{
        unsigned long pg_flags = pg->flags & _PGMT_MASK;

        if (pg_flags == _PGMT_DEFAULT)
                return -1;
        else if (pg_flags == _PGMT_WC)
                return _PAGE_CACHE_WC;
        else if (pg_flags == _PGMT_UC_MINUS)
                return _PAGE_CACHE_UC_MINUS;
        else
                return _PAGE_CACHE_WB;
}

static inline void set_page_memtype(struct page *pg, unsigned long memtype)
{
        unsigned long memtype_flags = _PGMT_DEFAULT;
        unsigned long old_flags;
        unsigned long new_flags;

        switch (memtype) {
        case _PAGE_CACHE_WC:
                memtype_flags = _PGMT_WC;
                break;
        case _PAGE_CACHE_UC_MINUS:
                memtype_flags = _PGMT_UC_MINUS;
                break;
        case _PAGE_CACHE_WB:
                memtype_flags = _PGMT_WB;
                break;
        }

        /* Update the two PAT flag bits atomically against concurrent flag updates. */
        do {
                old_flags = pg->flags;
                new_flags = (old_flags & _PGMT_CLEAR_MASK) | memtype_flags;
        } while (cmpxchg(&pg->flags, old_flags, new_flags) != old_flags);
}
#else
static inline unsigned long get_page_memtype(struct page *pg) { return -1; }
static inline void set_page_memtype(struct page *pg, unsigned long memtype) { }
#endif
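
/*
 * A minimal usage sketch (illustrative only, not part of this header),
 * assuming CONFIG_X86_PAT: a hypothetical helper that records a page as
 * write-combining only while its memory type is still in the default state
 * (get_page_memtype() returns -1). The name pat_claim_wc_example() is made
 * up for this illustration; it is not a kernel function.
 *
 *      static int pat_claim_wc_example(struct page *pg)
 *      {
 *              if (get_page_memtype(pg) != -1)
 *                      return -EBUSY;
 *              set_page_memtype(pg, _PAGE_CACHE_WC);
 *              return 0;
 *      }
 */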
/*
 * The set_memory_* API can be used to change various attributes of a virtual
 * address range. The attributes include:
 * Cacheability  : UnCached, WriteCombining, WriteBack
 * Executability : eXecutable, NoteXecutable
 * Read/Write    : ReadOnly, ReadWrite
 * Presence      : NotPresent
 *
 * Within a category, the attributes are mutually exclusive.
 *
 * The implementation of this API will take care of various aspects that
 * are associated with changing such attributes, such as:
 * - Flushing TLBs
 * - Flushing CPU caches
 * - Making sure aliases of the memory behind the mapping don't violate
 *   coherency rules as defined by the CPU in the system.
 *
 * What this API does not do:
 * - Provide exclusion between various callers - including callers that
 *   operate on other mappings of the same physical page
 * - Restore default attributes when a page is freed
 * - Guarantee that mappings other than the requested one are
 *   in any state, other than that these do not violate rules for
 *   the CPU you have. Do not depend on any effects on other mappings;
 *   CPUs other than the one you have may have more relaxed rules.
 * The caller is required to take care of these.
 */
int _set_memory_uc(unsigned long addr, int numpages);
int _set_memory_wc(unsigned long addr, int numpages);
int _set_memory_wb(unsigned long addr, int numpages);
int set_memory_uc(unsigned long addr, int numpages);
int set_memory_wc(unsigned long addr, int numpages);
int set_memory_wb(unsigned long addr, int numpages);
int set_memory_x(unsigned long addr, int numpages);
int set_memory_nx(unsigned long addr, int numpages);
int set_memory_ro(unsigned long addr, int numpages);
int set_memory_rw(unsigned long addr, int numpages);
int set_memory_np(unsigned long addr, int numpages);
int set_memory_4k(unsigned long addr, int numpages);

int set_memory_array_uc(unsigned long *addr, int addrinarray);
int set_memory_array_wc(unsigned long *addr, int addrinarray);
int set_memory_array_wb(unsigned long *addr, int addrinarray);

int set_pages_array_uc(struct page **pages, int addrinarray);
int set_pages_array_wc(struct page **pages, int addrinarray);
int set_pages_array_wb(struct page **pages, int addrinarray);
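
/*
 * Usage sketch (illustrative only, not part of this header): making a
 * four-page kernel buffer uncached, e.g. while a device owns it, then
 * restoring the default write-back attribute before freeing it. The
 * allocation, the hypothetical use_buffer_with_device() call, and the
 * cleanup shown here are assumptions for the example.
 *
 *      unsigned long buf = __get_free_pages(GFP_KERNEL, 2);
 *
 *      if (buf && set_memory_uc(buf, 4) == 0) {
 *              use_buffer_with_device(buf);
 *              set_memory_wb(buf, 4);
 *      }
 *      free_pages(buf, 2);
 */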
/*
 * For legacy compatibility with the old APIs, a few functions
 * are provided that work on a "struct page".
 * These functions operate ONLY on the 1:1 kernel mapping of the
 * memory that the struct page represents, and internally just
 * call the set_memory_* function. See the description of the
 * set_memory_* function for more details on conventions.
 *
 * These APIs should be considered *deprecated* and are likely going to
 * be removed in the future.
 * The reason for this is the implicit operation on the 1:1 mapping only,
 * making this not a generally useful API.
 *
 * Specifically, many users of the old APIs had a virtual address,
 * called virt_to_page() or vmalloc_to_page() on that address to
 * get a struct page * that the old API required.
 * To convert these cases, use set_memory_*() on the original
 * virtual address; do not use these functions.
 */
int set_pages_uc(struct page *page, int numpages);
int set_pages_wb(struct page *page, int numpages);
int set_pages_x(struct page *page, int numpages);
int set_pages_nx(struct page *page, int numpages);
int set_pages_ro(struct page *page, int numpages);
int set_pages_rw(struct page *page, int numpages);
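
/*
 * Conversion sketch (illustrative only): many legacy callers did a
 * virt_to_page() just to satisfy the old struct page based API. The
 * preferred form passes the original virtual address instead; addr and
 * npages here are assumed example variables.
 *
 *      old:    set_pages_ro(virt_to_page(addr), npages);
 *      new:    set_memory_ro((unsigned long)addr, npages);
 */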
void clflush_cache_range(void *addr, unsigned int size);

#ifdef CONFIG_DEBUG_RODATA
void mark_rodata_ro(void);
extern const int rodata_test_data;
extern int kernel_set_to_readonly;
void set_kernel_text_rw(void);
void set_kernel_text_ro(void);
#else
static inline void set_kernel_text_rw(void) { }
static inline void set_kernel_text_ro(void) { }
#endif

#ifdef CONFIG_DEBUG_RODATA_TEST
int rodata_test(void);
#else
static inline int rodata_test(void)
{
        return 0;
}
#endif

#endif /* _ASM_X86_CACHEFLUSH_H */