/*
 * arch/arm/mm/cache-tauros2.c - Tauros2 L2 cache controller support
 *
 * Copyright (C) 2008 Marvell Semiconductor
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 *
 * References:
 * - PJ1 CPU Core Datasheet,
 *   Document ID MV-S104837-01, Rev 0.7, January 24 2008.
 * - PJ4 CPU Core Datasheet,
 *   Document ID MV-S105190-00, Rev 0.7, March 14 2008.
 */
#include <linux/init.h>
#include <asm/cacheflush.h>
#include <asm/hardware/cache-tauros2.h>

/*
 * When Tauros2 is used on a CPU that supports the v7 hierarchical
 * cache operations, the cache handling code in proc-v7.S takes care
 * of everything, including handling DMA coherency.
 *
 * So, we only need to register outer cache operations here if we're
 * being used on a pre-v7 CPU, and we only need to build support for
 * outer cache operations into the kernel image if the kernel has been
 * configured to support a pre-v7 CPU.
 */
#if __LINUX_ARM_ARCH__ < 7
/*
 * Low-level cache maintenance operations.
 */
/*
 * Clean (write back) the L2 cache line containing the physical
 * address 'addr'.  CP15 c7 operation; encoding per the PJ1/PJ4
 * datasheets referenced in the file header.
 */
static inline void tauros2_clean_pa(unsigned long addr)
{
	__asm__("mcr p15, 1, %0, c7, c11, 3" : : "r" (addr));
}
/*
 * Clean and invalidate the L2 cache line containing the physical
 * address 'addr' (CP15 c7 operation).
 */
static inline void tauros2_clean_inv_pa(unsigned long addr)
{
	__asm__("mcr p15, 1, %0, c7, c15, 3" : : "r" (addr));
}
/*
 * Invalidate (without writing back) the L2 cache line containing the
 * physical address 'addr' (CP15 c7 operation).
 */
static inline void tauros2_inv_pa(unsigned long addr)
{
	__asm__("mcr p15, 1, %0, c7, c7, 3" : : "r" (addr));
}
/*
 * Linux primitives.
 *
 * Note that the end addresses passed to Linux primitives are
 * noninclusive.
 */
#define CACHE_LINE_SIZE		32
  52. static void tauros2_inv_range(unsigned long start, unsigned long end)
  53. {
  54. /*
  55. * Clean and invalidate partial first cache line.
  56. */
  57. if (start & (CACHE_LINE_SIZE - 1)) {
  58. tauros2_clean_inv_pa(start & ~(CACHE_LINE_SIZE - 1));
  59. start = (start | (CACHE_LINE_SIZE - 1)) + 1;
  60. }
  61. /*
  62. * Clean and invalidate partial last cache line.
  63. */
  64. if (end & (CACHE_LINE_SIZE - 1)) {
  65. tauros2_clean_inv_pa(end & ~(CACHE_LINE_SIZE - 1));
  66. end &= ~(CACHE_LINE_SIZE - 1);
  67. }
  68. /*
  69. * Invalidate all full cache lines between 'start' and 'end'.
  70. */
  71. while (start < end) {
  72. tauros2_inv_pa(start);
  73. start += CACHE_LINE_SIZE;
  74. }
  75. dsb();
  76. }
  77. static void tauros2_clean_range(unsigned long start, unsigned long end)
  78. {
  79. start &= ~(CACHE_LINE_SIZE - 1);
  80. while (start < end) {
  81. tauros2_clean_pa(start);
  82. start += CACHE_LINE_SIZE;
  83. }
  84. dsb();
  85. }
  86. static void tauros2_flush_range(unsigned long start, unsigned long end)
  87. {
  88. start &= ~(CACHE_LINE_SIZE - 1);
  89. while (start < end) {
  90. tauros2_clean_inv_pa(start);
  91. start += CACHE_LINE_SIZE;
  92. }
  93. dsb();
  94. }
#endif
/*
 * Read the Marvell CPU Extra Features register (CP15 c15).
 */
static inline u32 __init read_extra_features(void)
{
	u32 u;
	__asm__("mrc p15, 1, %0, c15, c1, 0" : "=r" (u));
	return u;
}
/*
 * Write the Marvell CPU Extra Features register (CP15 c15).
 */
static inline void __init write_extra_features(u32 u)
{
	__asm__("mcr p15, 1, %0, c15, c1, 0" : : "r" (u));
}
  106. static void __init disable_l2_prefetch(void)
  107. {
  108. u32 u;
  109. /*
  110. * Read the CPU Extra Features register and verify that the
  111. * Disable L2 Prefetch bit is set.
  112. */
  113. u = read_extra_features();
  114. if (!(u & 0x01000000)) {
  115. printk(KERN_INFO "Tauros2: Disabling L2 prefetch.\n");
  116. write_extra_features(u | 0x01000000);
  117. }
  118. }
  119. static inline int __init cpuid_scheme(void)
  120. {
  121. extern int processor_id;
  122. return !!((processor_id & 0x000f0000) == 0x000f0000);
  123. }
/*
 * Read Memory Model Feature Register 3 (ID_MMFR3).  Callers below
 * inspect bits [3:0] to decide whether the v7 hierarchical cache
 * maintenance operations are supported.
 */
static inline u32 __init read_mmfr3(void)
{
	u32 mmfr3;
	__asm__("mrc p15, 0, %0, c0, c1, 7\n" : "=r" (mmfr3));
	return mmfr3;
}
/*
 * Read the Auxiliary Control Register (CP15 c1, op2=1).
 */
static inline u32 __init read_actlr(void)
{
	u32 actlr;
	__asm__("mrc p15, 0, %0, c1, c0, 1\n" : "=r" (actlr));
	return actlr;
}
/*
 * Write the Auxiliary Control Register (CP15 c1, op2=1).
 */
static inline void __init write_actlr(u32 actlr)
{
	__asm__("mcr p15, 0, %0, c1, c0, 1\n" : : "r" (actlr));
}
  140. void __init tauros2_init(void)
  141. {
  142. extern int processor_id;
  143. char *mode;
  144. disable_l2_prefetch();
  145. #ifdef CONFIG_CPU_32v5
  146. if ((processor_id & 0xff0f0000) == 0x56050000) {
  147. u32 feat;
  148. /*
  149. * v5 CPUs with Tauros2 have the L2 cache enable bit
  150. * located in the CPU Extra Features register.
  151. */
  152. feat = read_extra_features();
  153. if (!(feat & 0x00400000)) {
  154. printk(KERN_INFO "Tauros2: Enabling L2 cache.\n");
  155. write_extra_features(feat | 0x00400000);
  156. }
  157. mode = "ARMv5";
  158. outer_cache.inv_range = tauros2_inv_range;
  159. outer_cache.clean_range = tauros2_clean_range;
  160. outer_cache.flush_range = tauros2_flush_range;
  161. }
  162. #endif
  163. #ifdef CONFIG_CPU_32v6
  164. /*
  165. * Check whether this CPU lacks support for the v7 hierarchical
  166. * cache ops. (PJ4 is in its v6 personality mode if the MMFR3
  167. * register indicates no support for the v7 hierarchical cache
  168. * ops.)
  169. */
  170. if (cpuid_scheme() && (read_mmfr3() & 0xf) == 0) {
  171. /*
  172. * When Tauros2 is used in an ARMv6 system, the L2
  173. * enable bit is in the ARMv6 ARM-mandated position
  174. * (bit [26] of the System Control Register).
  175. */
  176. if (!(get_cr() & 0x04000000)) {
  177. printk(KERN_INFO "Tauros2: Enabling L2 cache.\n");
  178. adjust_cr(0x04000000, 0x04000000);
  179. }
  180. mode = "ARMv6";
  181. outer_cache.inv_range = tauros2_inv_range;
  182. outer_cache.clean_range = tauros2_clean_range;
  183. outer_cache.flush_range = tauros2_flush_range;
  184. }
  185. #endif
  186. #ifdef CONFIG_CPU_32v7
  187. /*
  188. * Check whether this CPU has support for the v7 hierarchical
  189. * cache ops. (PJ4 is in its v7 personality mode if the MMFR3
  190. * register indicates support for the v7 hierarchical cache
  191. * ops.)
  192. *
  193. * (Although strictly speaking there may exist CPUs that
  194. * implement the v7 cache ops but are only ARMv6 CPUs (due to
  195. * not complying with all of the other ARMv7 requirements),
  196. * there are no real-life examples of Tauros2 being used on
  197. * such CPUs as of yet.)
  198. */
  199. if (cpuid_scheme() && (read_mmfr3() & 0xf) == 1) {
  200. u32 actlr;
  201. /*
  202. * When Tauros2 is used in an ARMv7 system, the L2
  203. * enable bit is located in the Auxiliary System Control
  204. * Register (which is the only register allowed by the
  205. * ARMv7 spec to contain fine-grained cache control bits).
  206. */
  207. actlr = read_actlr();
  208. if (!(actlr & 0x00000002)) {
  209. printk(KERN_INFO "Tauros2: Enabling L2 cache.\n");
  210. write_actlr(actlr | 0x00000002);
  211. }
  212. mode = "ARMv7";
  213. }
  214. #endif
  215. if (mode == NULL) {
  216. printk(KERN_CRIT "Tauros2: Unable to detect CPU mode.\n");
  217. return;
  218. }
  219. printk(KERN_INFO "Tauros2: L2 cache support initialised "
  220. "in %s mode.\n", mode);
  221. }