dcscb.c

/*
 * arch/arm/mach-vexpress/dcscb.c - Dual Cluster System Configuration Block
 *
 * Created by: Nicolas Pitre, May 2012
 * Copyright:  (C) 2012-2013 Linaro Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/of_address.h>
#include <linux/vexpress.h>
#include <linux/arm-cci.h>

#include <asm/mcpm.h>
#include <asm/proc-fns.h>
#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/cp15.h>

#define RST_HOLD0	0x0
#define RST_HOLD1	0x4
#define SYS_SWRESET	0x8
#define RST_STAT0	0xc
#define RST_STAT1	0x10
#define EAG_CFG_R	0x20
#define EAG_CFG_W	0x24
#define KFC_CFG_R	0x28
#define KFC_CFG_W	0x2c
#define DCS_CFG_R	0x30
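
/*
 * Layout of the RST_HOLD0/1 registers as used by this driver: bits [3:0]
 * and [7:4] carry per-CPU reset controls (one bit per CPU in each field)
 * and bit [8] holds the whole cluster in reset.  dcscb_power_up() clears
 * the relevant per-CPU bits to release a CPU; dcscb_power_down() sets
 * them again and, for the last CPU standing, asserts the cluster-wide
 * bit as well.
 */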

/*
 * We can't use regular spinlocks. In the switcher case, it is possible
 * for an outbound CPU to call power_down() while its inbound counterpart
 * is already live using the same logical CPU number which trips lockdep
 * debugging.
 */
static arch_spinlock_t dcscb_lock = __ARCH_SPIN_LOCK_UNLOCKED;

static void __iomem *dcscb_base;
static int dcscb_use_count[4][2];
static int dcscb_allcpus_mask[2];
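
/*
 * dcscb_use_count[cpu][cluster] tracks overlapping power_up/power_down
 * requests for each CPU (see the comment in dcscb_power_up() for the
 * possible values); dcscb_allcpus_mask[cluster] is the mask of all CPUs
 * fitted in that cluster, derived from DCS_CFG_R in dcscb_init().
 */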

static int dcscb_power_up(unsigned int cpu, unsigned int cluster)
{
	unsigned int rst_hold, cpumask = (1 << cpu);
	unsigned int all_mask = dcscb_allcpus_mask[cluster];

	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
	if (cpu >= 4 || cluster >= 2)
		return -EINVAL;

	/*
	 * Since this is called with IRQs enabled, and no arch_spin_lock_irq
	 * variant exists, we need to disable IRQs manually here.
	 */
	local_irq_disable();
	arch_spin_lock(&dcscb_lock);

	dcscb_use_count[cpu][cluster]++;
	if (dcscb_use_count[cpu][cluster] == 1) {
		rst_hold = readl_relaxed(dcscb_base + RST_HOLD0 + cluster * 4);
		if (rst_hold & (1 << 8)) {
			/* remove cluster reset and add individual CPU's reset */
			rst_hold &= ~(1 << 8);
			rst_hold |= all_mask;
		}
		rst_hold &= ~(cpumask | (cpumask << 4));
		writel_relaxed(rst_hold, dcscb_base + RST_HOLD0 + cluster * 4);
	} else if (dcscb_use_count[cpu][cluster] != 2) {
		/*
		 * The only possible values are:
		 * 0 = CPU down
		 * 1 = CPU (still) up
		 * 2 = CPU requested to be up before it had a chance
		 *     to actually make itself down.
		 * Any other value is a bug.
		 */
		BUG();
	}

	arch_spin_unlock(&dcscb_lock);
	local_irq_enable();

	return 0;
}
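
/*
 * dcscb_power_down() runs on the CPU that is being shut down, with its
 * caches still enabled.  It must therefore clean those caches and drop
 * out of coherency itself before executing WFI; the __mcpm_* state
 * machine calls below keep this serialized against a concurrent
 * power_up of the same CPU or cluster.
 */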

static void dcscb_power_down(void)
{
	unsigned int mpidr, cpu, cluster, rst_hold, cpumask, all_mask;
	bool last_man = false, skip_wfi = false;

	mpidr = read_cpuid_mpidr();
	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
	cpumask = (1 << cpu);
	all_mask = dcscb_allcpus_mask[cluster];

	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
	BUG_ON(cpu >= 4 || cluster >= 2);

	__mcpm_cpu_going_down(cpu, cluster);

	arch_spin_lock(&dcscb_lock);
	BUG_ON(__mcpm_cluster_state(cluster) != CLUSTER_UP);
	dcscb_use_count[cpu][cluster]--;
	if (dcscb_use_count[cpu][cluster] == 0) {
		rst_hold = readl_relaxed(dcscb_base + RST_HOLD0 + cluster * 4);
		rst_hold |= cpumask;
		if (((rst_hold | (rst_hold >> 4)) & all_mask) == all_mask) {
			rst_hold |= (1 << 8);
			last_man = true;
		}
		writel_relaxed(rst_hold, dcscb_base + RST_HOLD0 + cluster * 4);
	} else if (dcscb_use_count[cpu][cluster] == 1) {
		/*
		 * A power_up request went ahead of us.
		 * Even if we do not want to shut this CPU down,
		 * the caller expects a certain state as if the WFI
		 * was aborted.  So let's continue with cache cleaning.
		 */
		skip_wfi = true;
	} else
		BUG();

	if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) {
		arch_spin_unlock(&dcscb_lock);

		/*
		 * Flush all cache levels for this cluster.
		 *
		 * A15/A7 can hit in the cache with SCTLR.C=0, so we don't need
		 * a preliminary flush here for those CPUs.  At least, that's
		 * the theory -- without the extra flush, Linux explodes on
		 * RTSM (to be investigated).
		 */
		flush_cache_all();
		set_cr(get_cr() & ~CR_C);
		flush_cache_all();

		/*
		 * This is a harmless no-op.  On platforms with a real
		 * outer cache this might either be needed or not,
		 * depending on where the outer cache sits.
		 */
		outer_flush_all();

		/* Disable local coherency by clearing the ACTLR "SMP" bit: */
		set_auxcr(get_auxcr() & ~(1 << 6));

		/*
		 * Disable cluster-level coherency by masking
		 * incoming snoops and DVM messages:
		 */
		cci_disable_port_by_cpu(mpidr);

		__mcpm_outbound_leave_critical(cluster, CLUSTER_DOWN);
	} else {
		arch_spin_unlock(&dcscb_lock);

		/*
		 * Flush the local CPU cache.
		 *
		 * A15/A7 can hit in the cache with SCTLR.C=0, so we don't need
		 * a preliminary flush here for those CPUs.  At least, that's
		 * the theory -- without the extra flush, Linux explodes on
		 * RTSM (to be investigated).
		 */
		flush_cache_louis();
		set_cr(get_cr() & ~CR_C);
		flush_cache_louis();

		/* Disable local coherency by clearing the ACTLR "SMP" bit: */
		set_auxcr(get_auxcr() & ~(1 << 6));
	}

	__mcpm_cpu_down(cpu, cluster);

	/* Now we are prepared for power-down, do it: */
	dsb();
	if (!skip_wfi)
		wfi();

	/* Not dead at this point?  Let our caller cope. */
}

static const struct mcpm_platform_ops dcscb_power_ops = {
	.power_up	= dcscb_power_up,
	.power_down	= dcscb_power_down,
};
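
/*
 * These methods are driven by the generic MCPM layer: bringing a CPU
 * online goes through mcpm_cpu_power_up(cpu, cluster), which ends up in
 * dcscb_power_up() above, and a CPU going offline calls
 * mcpm_cpu_power_down(), which ends up in dcscb_power_down().
 */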

static void __init dcscb_usage_count_init(void)
{
	unsigned int mpidr, cpu, cluster;

	mpidr = read_cpuid_mpidr();
	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);

	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
	BUG_ON(cpu >= 4 || cluster >= 2);
	dcscb_use_count[cpu][cluster] = 1;
}
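
/*
 * Low-level entry hook registered with mcpm_sync_init() below.  It is
 * presumably implemented in assembly (dcscb_setup.S) and is run by the
 * MCPM entry code once per affinity level when a CPU comes back up, to
 * restore coherency before the kernel proper is re-entered.
 */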
extern void dcscb_power_up_setup(unsigned int affinity_level);

static int __init dcscb_init(void)
{
	struct device_node *node;
	unsigned int cfg;
	int ret;

	if (!cci_probed())
		return -ENODEV;

	node = of_find_compatible_node(NULL, NULL, "arm,rtsm,dcscb");
	if (!node)
		return -ENODEV;
	dcscb_base = of_iomap(node, 0);
	if (!dcscb_base)
		return -EADDRNOTAVAIL;
	cfg = readl_relaxed(dcscb_base + DCS_CFG_R);
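	/*
	 * Bits [19:16] and [23:20] of DCS_CFG_R report the number of CPUs
	 * fitted in cluster 0 and cluster 1 respectively; turn each count
	 * into an "all CPUs in this cluster" bitmask.
	 */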
	dcscb_allcpus_mask[0] = (1 << (((cfg >> 16) >> (0 << 2)) & 0xf)) - 1;
	dcscb_allcpus_mask[1] = (1 << (((cfg >> 16) >> (1 << 2)) & 0xf)) - 1;
	dcscb_usage_count_init();

	ret = mcpm_platform_register(&dcscb_power_ops);
	if (!ret)
		ret = mcpm_sync_init(dcscb_power_up_setup);
	if (ret) {
		iounmap(dcscb_base);
		return ret;
	}

	pr_info("VExpress DCSCB support installed\n");

	/*
	 * Future entries into the kernel can now go
	 * through the cluster entry vectors.
	 */
	vexpress_flags_set(virt_to_phys(mcpm_entry_point));

	return 0;
}

early_initcall(dcscb_init);