cache-tauros2.c 7.0 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296
  1. /*
  2. * arch/arm/mm/cache-tauros2.c - Tauros2 L2 cache controller support
  3. *
  4. * Copyright (C) 2008 Marvell Semiconductor
  5. *
  6. * This file is licensed under the terms of the GNU General Public
  7. * License version 2. This program is licensed "as is" without any
  8. * warranty of any kind, whether express or implied.
  9. *
  10. * References:
  11. * - PJ1 CPU Core Datasheet,
  12. * Document ID MV-S104837-01, Rev 0.7, January 24 2008.
  13. * - PJ4 CPU Core Datasheet,
  14. * Document ID MV-S105190-00, Rev 0.7, March 14 2008.
  15. */
  16. #include <linux/init.h>
  17. #include <asm/cacheflush.h>
  18. #include <asm/cp15.h>
  19. #include <asm/cputype.h>
  20. #include <asm/hardware/cache-tauros2.h>
  21. /*
  22. * When Tauros2 is used on a CPU that supports the v7 hierarchical
  23. * cache operations, the cache handling code in proc-v7.S takes care
  24. * of everything, including handling DMA coherency.
  25. *
  26. * So, we only need to register outer cache operations here if we're
  27. * being used on a pre-v7 CPU, and we only need to build support for
  28. * outer cache operations into the kernel image if the kernel has been
  29. * configured to support a pre-v7 CPU.
  30. */
  31. #if __LINUX_ARM_ARCH__ < 7
  32. /*
  33. * Low-level cache maintenance operations.
  34. */
/*
 * Clean (write back) the L2 cache line containing physical address
 * 'addr'.  CP15 c7, c11, 3 is the Tauros2 "L2 Clean by PA" operation.
 */
static inline void tauros2_clean_pa(unsigned long addr)
{
	__asm__("mcr p15, 1, %0, c7, c11, 3" : : "r" (addr));
}
/*
 * Clean and invalidate the L2 cache line containing physical address
 * 'addr'.  CP15 c7, c15, 3 is the Tauros2 "L2 Clean and Invalidate by
 * PA" operation.
 */
static inline void tauros2_clean_inv_pa(unsigned long addr)
{
	__asm__("mcr p15, 1, %0, c7, c15, 3" : : "r" (addr));
}
/*
 * Invalidate (discard without writing back) the L2 cache line
 * containing physical address 'addr'.  CP15 c7, c7, 3 is the Tauros2
 * "L2 Invalidate by PA" operation.
 */
static inline void tauros2_inv_pa(unsigned long addr)
{
	__asm__("mcr p15, 1, %0, c7, c7, 3" : : "r" (addr));
}
  47. /*
  48. * Linux primitives.
  49. *
  50. * Note that the end addresses passed to Linux primitives are
  51. * noninclusive.
  52. */
  53. #define CACHE_LINE_SIZE 32
  54. static void tauros2_inv_range(unsigned long start, unsigned long end)
  55. {
  56. /*
  57. * Clean and invalidate partial first cache line.
  58. */
  59. if (start & (CACHE_LINE_SIZE - 1)) {
  60. tauros2_clean_inv_pa(start & ~(CACHE_LINE_SIZE - 1));
  61. start = (start | (CACHE_LINE_SIZE - 1)) + 1;
  62. }
  63. /*
  64. * Clean and invalidate partial last cache line.
  65. */
  66. if (end & (CACHE_LINE_SIZE - 1)) {
  67. tauros2_clean_inv_pa(end & ~(CACHE_LINE_SIZE - 1));
  68. end &= ~(CACHE_LINE_SIZE - 1);
  69. }
  70. /*
  71. * Invalidate all full cache lines between 'start' and 'end'.
  72. */
  73. while (start < end) {
  74. tauros2_inv_pa(start);
  75. start += CACHE_LINE_SIZE;
  76. }
  77. dsb();
  78. }
  79. static void tauros2_clean_range(unsigned long start, unsigned long end)
  80. {
  81. start &= ~(CACHE_LINE_SIZE - 1);
  82. while (start < end) {
  83. tauros2_clean_pa(start);
  84. start += CACHE_LINE_SIZE;
  85. }
  86. dsb();
  87. }
  88. static void tauros2_flush_range(unsigned long start, unsigned long end)
  89. {
  90. start &= ~(CACHE_LINE_SIZE - 1);
  91. while (start < end) {
  92. tauros2_clean_inv_pa(start);
  93. start += CACHE_LINE_SIZE;
  94. }
  95. dsb();
  96. }
/*
 * Disable the L2 cache: clean the whole cache first so no dirty data
 * is lost, then clear the L2 enable bit (bit 26) in the System
 * Control Register.  The ordering (clean before disable) is required.
 */
static void tauros2_disable(void)
{
	__asm__ __volatile__ (
	"mcr p15, 1, %0, c7, c11, 0 @L2 Cache Clean All\n\t"
	"mrc p15, 0, %0, c1, c0, 0\n\t"
	"bic %0, %0, #(1 << 26)\n\t"
	"mcr p15, 0, %0, c1, c0, 0 @Disable L2 Cache\n\t"
	: : "r" (0x0));
}
/*
 * Re-enable the L2 cache after resume: invalidate the whole cache
 * first (its contents are stale after power-down), then set the L2
 * enable bit (bit 26) in the System Control Register.
 */
static void tauros2_resume(void)
{
	__asm__ __volatile__ (
	"mcr p15, 1, %0, c7, c7, 0 @L2 Cache Invalidate All\n\t"
	"mrc p15, 0, %0, c1, c0, 0\n\t"
	"orr %0, %0, #(1 << 26)\n\t"
	"mcr p15, 0, %0, c1, c0, 0 @Enable L2 Cache\n\t"
	: : "r" (0x0));
}
  115. #endif
/*
 * Read the Marvell CPU Extra Features register (CP15 c15, c1, 0),
 * which holds implementation-specific cache control bits.
 */
static inline u32 __init read_extra_features(void)
{
	u32 u;

	__asm__("mrc p15, 1, %0, c15, c1, 0" : "=r" (u));

	return u;
}
/*
 * Write the Marvell CPU Extra Features register (CP15 c15, c1, 0).
 */
static inline void __init write_extra_features(u32 u)
{
	__asm__("mcr p15, 1, %0, c15, c1, 0" : : "r" (u));
}
  126. static inline int __init cpuid_scheme(void)
  127. {
  128. return !!((processor_id & 0x000f0000) == 0x000f0000);
  129. }
/*
 * Read the Memory Model Feature Register 3 (CP15 c0, c1, 7).  Its low
 * nibble indicates whether the CPU implements the v7 hierarchical
 * cache maintenance operations.
 */
static inline u32 __init read_mmfr3(void)
{
	u32 mmfr3;

	__asm__("mrc p15, 0, %0, c0, c1, 7\n" : "=r" (mmfr3));

	return mmfr3;
}
/*
 * Read the Auxiliary Control Register (CP15 c1, c0, 1), which on
 * ARMv7 holds the Tauros2 L2 enable bit.
 */
static inline u32 __init read_actlr(void)
{
	u32 actlr;

	__asm__("mrc p15, 0, %0, c1, c0, 1\n" : "=r" (actlr));

	return actlr;
}
/*
 * Write the Auxiliary Control Register (CP15 c1, c0, 1).
 */
static inline void __init write_actlr(u32 actlr)
{
	__asm__("mcr p15, 0, %0, c1, c0, 1\n" : : "r" (actlr));
}
  146. static void enable_extra_feature(unsigned int features)
  147. {
  148. u32 u;
  149. u = read_extra_features();
  150. if (features & CACHE_TAUROS2_PREFETCH_ON)
  151. u &= ~0x01000000;
  152. else
  153. u |= 0x01000000;
  154. printk(KERN_INFO "Tauros2: %s L2 prefetch.\n",
  155. (features & CACHE_TAUROS2_PREFETCH_ON)
  156. ? "Enabling" : "Disabling");
  157. if (features & CACHE_TAUROS2_LINEFILL_BURST8)
  158. u |= 0x00100000;
  159. else
  160. u &= ~0x00100000;
  161. printk(KERN_INFO "Tauros2: %s line fill burt8.\n",
  162. (features & CACHE_TAUROS2_LINEFILL_BURST8)
  163. ? "Enabling" : "Disabling");
  164. write_extra_features(u);
  165. }
/*
 * Detect which personality (ARMv5 / ARMv6 / ARMv7) the CPU presents,
 * make sure the Tauros2 L2 cache is enabled, and — for pre-v7
 * personalities — register the outer cache maintenance callbacks.
 *
 * 'features' is a bitmask of CACHE_TAUROS2_* flags controlling
 * prefetch and line-fill behavior.
 */
void __init tauros2_init(unsigned int features)
{
	char *mode = NULL;

	enable_extra_feature(features);

#ifdef CONFIG_CPU_32v5
	/* Marvell implementer (0x56), architecture variant 5. */
	if ((processor_id & 0xff0f0000) == 0x56050000) {
		u32 feat;

		/*
		 * v5 CPUs with Tauros2 have the L2 cache enable bit
		 * located in the CPU Extra Features register.
		 */
		feat = read_extra_features();
		if (!(feat & 0x00400000)) {
			printk(KERN_INFO "Tauros2: Enabling L2 cache.\n");
			write_extra_features(feat | 0x00400000);
		}

		mode = "ARMv5";
		outer_cache.inv_range = tauros2_inv_range;
		outer_cache.clean_range = tauros2_clean_range;
		outer_cache.flush_range = tauros2_flush_range;
		outer_cache.disable = tauros2_disable;
		outer_cache.resume = tauros2_resume;
	}
#endif

#ifdef CONFIG_CPU_32v6
	/*
	 * Check whether this CPU lacks support for the v7 hierarchical
	 * cache ops.  (PJ4 is in its v6 personality mode if the MMFR3
	 * register indicates no support for the v7 hierarchical cache
	 * ops.)
	 */
	if (cpuid_scheme() && (read_mmfr3() & 0xf) == 0) {
		/*
		 * When Tauros2 is used in an ARMv6 system, the L2
		 * enable bit is in the ARMv6 ARM-mandated position
		 * (bit [26] of the System Control Register).
		 */
		if (!(get_cr() & 0x04000000)) {
			printk(KERN_INFO "Tauros2: Enabling L2 cache.\n");
			adjust_cr(0x04000000, 0x04000000);
		}

		mode = "ARMv6";
		outer_cache.inv_range = tauros2_inv_range;
		outer_cache.clean_range = tauros2_clean_range;
		outer_cache.flush_range = tauros2_flush_range;
		outer_cache.disable = tauros2_disable;
		outer_cache.resume = tauros2_resume;
	}
#endif

#ifdef CONFIG_CPU_32v7
	/*
	 * Check whether this CPU has support for the v7 hierarchical
	 * cache ops.  (PJ4 is in its v7 personality mode if the MMFR3
	 * register indicates support for the v7 hierarchical cache
	 * ops.)
	 *
	 * (Although strictly speaking there may exist CPUs that
	 * implement the v7 cache ops but are only ARMv6 CPUs (due to
	 * not complying with all of the other ARMv7 requirements),
	 * there are no real-life examples of Tauros2 being used on
	 * such CPUs as of yet.)
	 */
	if (cpuid_scheme() && (read_mmfr3() & 0xf) == 1) {
		u32 actlr;

		/*
		 * When Tauros2 is used in an ARMv7 system, the L2
		 * enable bit is located in the Auxiliary System Control
		 * Register (which is the only register allowed by the
		 * ARMv7 spec to contain fine-grained cache control bits).
		 *
		 * No outer_cache ops are registered here: with the v7
		 * hierarchical cache ops, proc-v7.S handles everything.
		 */
		actlr = read_actlr();
		if (!(actlr & 0x00000002)) {
			printk(KERN_INFO "Tauros2: Enabling L2 cache.\n");
			write_actlr(actlr | 0x00000002);
		}

		mode = "ARMv7";
	}
#endif

	if (mode == NULL) {
		printk(KERN_CRIT "Tauros2: Unable to detect CPU mode.\n");
		return;
	}

	printk(KERN_INFO "Tauros2: L2 cache support initialised "
			 "in %s mode.\n", mode);
}