/*
 * Cache control for MicroBlaze cache memories
 *
 * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2007-2009 PetaLogix
 * Copyright (C) 2007 John Williams <john.williams@petalogix.com>
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License. See the file COPYING in the main directory of this
 * archive for more details.
 */

#include <linux/cache.h>
#include <linux/irqflags.h>	/* local_irq_save/restore */
#include <linux/kernel.h>	/* min() */
#include <asm/cacheflush.h>
#include <asm/cpuinfo.h>

/* Exported functions */
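
/*
 * The enable/disable helpers below toggle the instruction and data
 * cache enable bits (MSR_ICE, MSR_DCE) in the machine status register.
 * Cores built with C_USE_MSR_INSTR provide the atomic msrset/msrclr
 * instructions (selected here by CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR);
 * other cores fall back to a read-modify-write of rmsr through r12,
 * which is why r12 appears in the clobber list of the fallback variants.
 */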

void _enable_icache(void)
{
	if (cpuinfo.use_icache) {
#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
		__asm__ __volatile__ ("			\
				msrset	r0, %0;		\
				nop; "			\
				:			\
				: "i" (MSR_ICE)		\
				: "memory");
#else
		__asm__ __volatile__ ("			\
				mfs	r12, rmsr;	\
				nop;			\
				ori	r12, r12, %0;	\
				mts	rmsr, r12;	\
				nop; "			\
				:			\
				: "i" (MSR_ICE)		\
				: "memory", "r12");
#endif
	}
}

void _disable_icache(void)
{
	if (cpuinfo.use_icache) {
#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
		__asm__ __volatile__ ("			\
				msrclr	r0, %0;		\
				nop; "			\
				:			\
				: "i" (MSR_ICE)		\
				: "memory");
#else
		__asm__ __volatile__ ("			\
				mfs	r12, rmsr;	\
				nop;			\
				andi	r12, r12, ~%0;	\
				mts	rmsr, r12;	\
				nop; "			\
				:			\
				: "i" (MSR_ICE)		\
				: "memory", "r12");
#endif
	}
}
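
/*
 * wic (write to instruction cache) invalidates the icache line the
 * given address maps to; r0 contributes zero to the effective address,
 * so %0 alone selects the line.
 */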

void _invalidate_icache(unsigned int addr)
{
	if (cpuinfo.use_icache) {
		__asm__ __volatile__ ("			\
				wic	%0, r0"		\
				:			\
				: "r" (addr));
	}
}

void _enable_dcache(void)
{
	if (cpuinfo.use_dcache) {
#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
		__asm__ __volatile__ ("			\
				msrset	r0, %0;		\
				nop; "			\
				:			\
				: "i" (MSR_DCE)		\
				: "memory");
#else
		__asm__ __volatile__ ("			\
				mfs	r12, rmsr;	\
				nop;			\
				ori	r12, r12, %0;	\
				mts	rmsr, r12;	\
				nop; "			\
				:			\
				: "i" (MSR_DCE)		\
				: "memory", "r12");
#endif
	}
}

void _disable_dcache(void)
{
	/* Guard on use_dcache for consistency with _enable_dcache */
	if (cpuinfo.use_dcache) {
#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
		__asm__ __volatile__ ("			\
				msrclr	r0, %0;		\
				nop; "			\
				:			\
				: "i" (MSR_DCE)		\
				: "memory");
#else
		__asm__ __volatile__ ("			\
				mfs	r12, rmsr;	\
				nop;			\
				andi	r12, r12, ~%0;	\
				mts	rmsr, r12;	\
				nop; "			\
				:			\
				: "i" (MSR_DCE)		\
				: "memory", "r12");
#endif
	}
}
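
/*
 * wdc (write to data cache) invalidates the dcache line the given
 * address maps to, the data-side counterpart of wic above.
 */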

void _invalidate_dcache(unsigned int addr)
{
	/* Guard on use_dcache for consistency with _invalidate_icache */
	if (cpuinfo.use_dcache) {
		__asm__ __volatile__ ("			\
				wdc	%0, r0"		\
				:			\
				: "r" (addr));
	}
}
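
/*
 * Full invalidation below walks indices 0..cache_size in line-size
 * steps.  Lines are selected by the low address bits only, so any
 * cache_size-byte window hits every line and CACHE_BASE need not be
 * added.  The cache is disabled and interrupts are off for the walk,
 * so no line can be refilled part-way through.
 */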

void __invalidate_icache_all(void)
{
	unsigned int i;
	unsigned long flags;

	if (cpuinfo.use_icache) {
		local_irq_save(flags);
		__disable_icache();

		/* Just loop through cache size and invalidate,
		 * no need to add CACHE_BASE address */
		for (i = 0; i < cpuinfo.icache_size;
					i += cpuinfo.icache_line)
			__invalidate_icache(i);

		__enable_icache();
		local_irq_restore(flags);
	}
}
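
/*
 * The range variants clamp the span to one cache footprint (anything
 * larger would revisit the same lines) and round it out to whole
 * lines.  With a 32-byte line, for example:
 *
 *	align = ~(32 - 1)		-> 0xffffffe0
 *	start = 0x1005 & align		-> 0x1000 (round down)
 *	end = (0x1043 & align) + 32	-> 0x1060 (round up)
 *
 * The round-up always adds a full line, so an already-aligned end
 * invalidates one extra line; harmless, just redundant.
 */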

void __invalidate_icache_range(unsigned long start, unsigned long end)
{
	unsigned long i;
	unsigned long flags;
	unsigned long align;

	if (cpuinfo.use_icache) {
		/*
		 * No need to cover entire cache range,
		 * just cover cache footprint
		 */
		end = min(start + cpuinfo.icache_size, end);
		align = ~(cpuinfo.icache_line - 1);
		start &= align;	/* Make sure we are aligned */
		/* Push end up to the next cache line */
		end = ((end & align) + cpuinfo.icache_line);

		local_irq_save(flags);
		__disable_icache();

		for (i = start; i < end; i += cpuinfo.icache_line)
			__invalidate_icache(i);

		__enable_icache();
		local_irq_restore(flags);
	}
}
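
/*
 * The page and user-range variants make no attempt at precision and
 * conservatively invalidate the whole icache.
 */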

void __invalidate_icache_page(struct vm_area_struct *vma, struct page *page)
{
	__invalidate_icache_all();
}

void __invalidate_icache_user_range(struct vm_area_struct *vma,
				struct page *page, unsigned long adr,
				int len)
{
	__invalidate_icache_all();
}
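
/*
 * MicroBlaze instructions are 4 bytes wide, so the 8-byte range covers
 * the two instructions of the signal return trampoline.
 */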

void __invalidate_cache_sigtramp(unsigned long addr)
{
	__invalidate_icache_range(addr, addr + 8);
}

void __invalidate_dcache_all(void)
{
	unsigned int i;
	unsigned long flags;

	if (cpuinfo.use_dcache) {
		local_irq_save(flags);
		__disable_dcache();

		/*
		 * Just loop through cache size and invalidate,
		 * no need to add CACHE_BASE address
		 */
		for (i = 0; i < cpuinfo.dcache_size;
					i += cpuinfo.dcache_line)
			__invalidate_dcache(i);

		__enable_dcache();
		local_irq_restore(flags);
	}
}
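
/* Same clamping and line rounding as __invalidate_icache_range above. */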

void __invalidate_dcache_range(unsigned long start, unsigned long end)
{
	unsigned long i;
	unsigned long flags;
	unsigned long align;

	if (cpuinfo.use_dcache) {
		/*
		 * No need to cover entire cache range,
		 * just cover cache footprint
		 */
		end = min(start + cpuinfo.dcache_size, end);
		align = ~(cpuinfo.dcache_line - 1);
		start &= align;	/* Make sure we are aligned */
		/* Push end up to the next cache line */
		end = ((end & align) + cpuinfo.dcache_line);

		local_irq_save(flags);
		__disable_dcache();

		for (i = start; i < end; i += cpuinfo.dcache_line)
			__invalidate_dcache(i);

		__enable_dcache();
		local_irq_restore(flags);
	}
}

void __invalidate_dcache_page(struct vm_area_struct *vma, struct page *page)
{
	__invalidate_dcache_all();
}

void __invalidate_dcache_user_range(struct vm_area_struct *vma,
				struct page *page, unsigned long adr,
				int len)
{
	__invalidate_dcache_all();
}