cache.c

/*
 * Cache control for MicroBlaze cache memories
 *
 * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2007-2009 PetaLogix
 * Copyright (C) 2007 John Williams <john.williams@petalogix.com>
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License. See the file COPYING in the main directory of this
 * archive for more details.
 */

#include <asm/cacheflush.h>
#include <linux/cache.h>
#include <asm/cpuinfo.h>

/* Exported functions */
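
/*
 * Enable the instruction cache: set the ICE bit in the MSR, either with
 * the msrset instruction (when MSR bit instructions are available) or by
 * a read-modify-write of rmsr through r12.
 */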
void _enable_icache(void)
{
	if (cpuinfo.use_icache) {
#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
		__asm__ __volatile__ ("			\
				msrset	r0, %0;		\
				nop; "			\
				:			\
				: "i" (MSR_ICE)		\
				: "memory");
#else
		__asm__ __volatile__ ("			\
				mfs	r12, rmsr;	\
				nop;			\
				ori	r12, r12, %0;	\
				mts	rmsr, r12;	\
				nop; "			\
				:			\
				: "i" (MSR_ICE)		\
				: "memory", "r12");
#endif
	}
}
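
/* Disable the instruction cache by clearing the ICE bit in the MSR. */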
void _disable_icache(void)
{
	if (cpuinfo.use_icache) {
#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
		__asm__ __volatile__ ("			\
				msrclr	r0, %0;		\
				nop; "			\
				:			\
				: "i" (MSR_ICE)		\
				: "memory");
#else
		__asm__ __volatile__ ("			\
				mfs	r12, rmsr;	\
				nop;			\
				andi	r12, r12, ~%0;	\
				mts	rmsr, r12;	\
				nop; "			\
				:			\
				: "i" (MSR_ICE)		\
				: "memory", "r12");
#endif
	}
}
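
/* Invalidate the single instruction cache line containing addr (wic). */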
void _invalidate_icache(unsigned int addr)
{
	if (cpuinfo.use_icache) {
		__asm__ __volatile__ ("			\
				wic	%0, r0"		\
				:			\
				: "r" (addr));
	}
}
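
/* Enable the data cache by setting the DCE bit in the MSR. */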
void _enable_dcache(void)
{
	if (cpuinfo.use_dcache) {
#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
		__asm__ __volatile__ ("			\
				msrset	r0, %0;		\
				nop; "			\
				:			\
				: "i" (MSR_DCE)		\
				: "memory");
#else
		__asm__ __volatile__ ("			\
				mfs	r12, rmsr;	\
				nop;			\
				ori	r12, r12, %0;	\
				mts	rmsr, r12;	\
				nop; "			\
				:			\
				: "i" (MSR_DCE)		\
				: "memory", "r12");
#endif
	}
}
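
/* Disable the data cache by clearing the DCE bit in the MSR. */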
void _disable_dcache(void)
{
	if (cpuinfo.use_dcache) {
#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
		__asm__ __volatile__ ("			\
				msrclr	r0, %0;		\
				nop; "			\
				:			\
				: "i" (MSR_DCE)		\
				: "memory");
#else
		__asm__ __volatile__ ("			\
				mfs	r12, rmsr;	\
				nop;			\
				andi	r12, r12, ~%0;	\
				mts	rmsr, r12;	\
				nop; "			\
				:			\
				: "i" (MSR_DCE)		\
				: "memory", "r12");
#endif
	}
}
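
/* Invalidate the single data cache line containing addr (wdc). */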
void _invalidate_dcache(unsigned int addr)
{
	if (cpuinfo.use_dcache)
		__asm__ __volatile__ ("			\
				wdc	%0, r0"		\
				:			\
				: "r" (addr));
}
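
/*
 * Invalidate the entire instruction cache, line by line, with the cache
 * disabled and interrupts off.
 */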
void __invalidate_icache_all(void)
{
	unsigned int i;
	unsigned long flags;

	if (cpuinfo.use_icache) {
		local_irq_save(flags);
		__disable_icache();
		/*
		 * Just loop through cache size and invalidate;
		 * no need to add the CACHE_BASE address.
		 */
		for (i = 0; i < cpuinfo.icache_size;
					i += cpuinfo.icache_line)
			__invalidate_icache(i);
		__enable_icache();
		local_irq_restore(flags);
	}
}
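
/*
 * Invalidate only the instruction cache lines covering [start, end),
 * clamped to at most one full cache size and aligned to cache lines.
 */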
void __invalidate_icache_range(unsigned long start, unsigned long end)
{
	unsigned int i;
	unsigned long flags;
	unsigned int align;

	if (cpuinfo.use_icache) {
		/*
		 * No need to cover the entire cache range,
		 * just cover the cache footprint.
		 */
		end = min(start + cpuinfo.icache_size, end);
		align = ~(cpuinfo.icache_line - 1);
		start &= align;	/* Make sure we are aligned */
		/* Push end up to the next cache line */
		end = ((end & align) + cpuinfo.icache_line);
		local_irq_save(flags);
		__disable_icache();
		for (i = start; i < end; i += cpuinfo.icache_line)
			__invalidate_icache(i);
		__enable_icache();
		local_irq_restore(flags);
	}
}
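
/* Page- and user-range helpers simply invalidate the whole instruction cache. */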
void __invalidate_icache_page(struct vm_area_struct *vma, struct page *page)
{
	__invalidate_icache_all();
}

void __invalidate_icache_user_range(struct vm_area_struct *vma,
				struct page *page, unsigned long adr,
				int len)
{
	__invalidate_icache_all();
}
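
/* Invalidate the icache lines covering the 8-byte signal trampoline at addr. */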
void __invalidate_cache_sigtramp(unsigned long addr)
{
	__invalidate_icache_range(addr, addr + 8);
}
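
/*
 * Invalidate the entire data cache, line by line, with the cache disabled
 * and interrupts off.
 */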
void __invalidate_dcache_all(void)
{
	unsigned int i;
	unsigned long flags;

	if (cpuinfo.use_dcache) {
		local_irq_save(flags);
		__disable_dcache();
		/*
		 * Just loop through cache size and invalidate;
		 * no need to add the CACHE_BASE address.
		 */
		for (i = 0; i < cpuinfo.dcache_size;
					i += cpuinfo.dcache_line)
			__invalidate_dcache(i);
		__enable_dcache();
		local_irq_restore(flags);
	}
}
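
/*
 * Invalidate only the data cache lines covering [start, end), clamped to
 * at most one full cache size and aligned to cache lines.
 */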
void __invalidate_dcache_range(unsigned long start, unsigned long end)
{
	unsigned int i;
	unsigned long flags;
	unsigned int align;

	if (cpuinfo.use_dcache) {
		/*
		 * No need to cover the entire cache range,
		 * just cover the cache footprint.
		 */
		end = min(start + cpuinfo.dcache_size, end);
		align = ~(cpuinfo.dcache_line - 1);
		start &= align;	/* Make sure we are aligned */
		/* Push end up to the next cache line */
		end = ((end & align) + cpuinfo.dcache_line);
		local_irq_save(flags);
		__disable_dcache();
		for (i = start; i < end; i += cpuinfo.dcache_line)
			__invalidate_dcache(i);
		__enable_dcache();
		local_irq_restore(flags);
	}
}
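
/* Page- and user-range helpers simply invalidate the whole data cache. */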
void __invalidate_dcache_page(struct vm_area_struct *vma, struct page *page)
{
	__invalidate_dcache_all();
}

void __invalidate_dcache_user_range(struct vm_area_struct *vma,
				struct page *page, unsigned long adr,
				int len)
{
	__invalidate_dcache_all();
}