/*
 * arch/arm/common/mcpm_entry.c -- entry point for multi-cluster PM
 *
 * Created by:  Nicolas Pitre, March 2012
 * Copyright:   (C) 2012-2013  Linaro Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/irqflags.h>

#include <asm/mcpm.h>
#include <asm/cacheflush.h>
#include <asm/idmap.h>
#include <asm/cputype.h>
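
/*
 * The entry vector array below is defined in the low-level entry code
 * (mcpm_head.S) and is read there with the MMU and caches disabled, hence
 * the explicit cache maintenance after each update.
 */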
extern unsigned long mcpm_entry_vectors[MAX_NR_CLUSTERS][MAX_CPUS_PER_CLUSTER];

void mcpm_set_entry_vector(unsigned cpu, unsigned cluster, void *ptr)
{
	unsigned long val = ptr ? virt_to_phys(ptr) : 0;
	mcpm_entry_vectors[cluster][cpu] = val;
	sync_cache_w(&mcpm_entry_vectors[cluster][cpu]);
}
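
/*
 * An "early poke" is a physical address/value pair that the entry code
 * writes very early during CPU wake-up, before the MMU is on; it is
 * typically used to release a platform-specific wake-up latch or mailbox.
 * The flush below pushes the pair out to the point of coherency so the
 * incoming CPU can read it with caches off.
 */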
extern unsigned long mcpm_entry_early_pokes[MAX_NR_CLUSTERS][MAX_CPUS_PER_CLUSTER][2];

void mcpm_set_early_poke(unsigned cpu, unsigned cluster,
			 unsigned long poke_phys_addr, unsigned long poke_val)
{
	unsigned long *poke = &mcpm_entry_early_pokes[cluster][cpu][0];
	poke[0] = poke_phys_addr;
	poke[1] = poke_val;
	__cpuc_flush_dcache_area((void *)poke, 8);
	outer_clean_range(__pa(poke), __pa(poke + 2));
}
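
/*
 * A platform backend registers its low-level power methods here once at
 * boot; only the first registration is accepted.  An illustrative call
 * (the platform and ops structure names are hypothetical):
 *
 *	static const struct mcpm_platform_ops my_soc_pm_ops = {
 *		.power_up		= my_soc_pm_power_up,
 *		.power_down		= my_soc_pm_power_down,
 *		.power_down_finish	= my_soc_pm_power_down_finish,
 *	};
 *
 *	if (mcpm_platform_register(&my_soc_pm_ops))
 *		pr_warn("MCPM backend already registered\n");
 */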
static const struct mcpm_platform_ops *platform_ops;

int __init mcpm_platform_register(const struct mcpm_platform_ops *ops)
{
	if (platform_ops)
		return -EBUSY;
	platform_ops = ops;
	return 0;
}
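
/*
 * Bring a CPU in the given cluster out of reset through the registered
 * backend.  The backend may sleep, so this must be called from a sleepable
 * context (the MCPM smp_ops boot path, for instance).
 */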
int mcpm_cpu_power_up(unsigned int cpu, unsigned int cluster)
{
	if (!platform_ops)
		return -EUNATCH; /* try not to shadow power_up errors */
	might_sleep();
	return platform_ops->power_up(cpu, cluster);
}
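
/*
 * Power down the calling CPU.  On success this never returns: the CPU
 * either enters a low-power state or, if a concurrent power_up raced with
 * the shutdown (see the comment below), it is reset and re-enters the
 * kernel through mcpm_entry_point as if it had just been woken up.
 * cpu_reset() runs with the MMU off, so it is entered via its physical
 * address using the identity mapping installed by setup_mm_for_reboot().
 */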
typedef void (*phys_reset_t)(unsigned long);

void mcpm_cpu_power_down(void)
{
	phys_reset_t phys_reset;

	if (WARN_ON_ONCE(!platform_ops || !platform_ops->power_down))
		return;
	BUG_ON(!irqs_disabled());

	/*
	 * Do this before calling into the power_down method,
	 * as it might not always be safe to do afterwards.
	 */
	setup_mm_for_reboot();

	platform_ops->power_down();

	/*
	 * It is possible for a power_up request to happen concurrently
	 * with a power_down request for the same CPU. In this case the
	 * power_down method might not be able to actually enter a
	 * powered down state with the WFI instruction if the power_up
	 * method has removed the required reset condition.  The
	 * power_down method is then allowed to return. We must perform
	 * a re-entry in the kernel as if the power_up method just had
	 * deasserted reset on the CPU.
	 *
	 * To simplify race issues, the platform specific implementation
	 * must accommodate the possibility of unordered calls to
	 * power_down and power_up with a usage count. Therefore, if a
	 * call to power_up is issued for a CPU that is not down, then
	 * the next call to power_down must not attempt a full shutdown
	 * but only do the minimum (normally disabling L1 cache and CPU
	 * coherency) and return just as if a concurrent power_up request
	 * had happened as described above.
	 */

	phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset);
	phys_reset(virt_to_phys(mcpm_entry_point));

	/* should never get here */
	BUG();
}
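
/*
 * Called from another CPU to wait for, and report on, the outcome of a
 * previous mcpm_cpu_power_down() on the given CPU/cluster.  A non-zero
 * return means the target could not be confirmed as powered down.
 */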
int mcpm_cpu_power_down_finish(unsigned int cpu, unsigned int cluster)
{
	int ret;

	if (WARN_ON_ONCE(!platform_ops || !platform_ops->power_down_finish))
		return -EUNATCH;

	ret = platform_ops->power_down_finish(cpu, cluster);
	if (ret)
		pr_warn("%s: cpu %u, cluster %u failed to power down (%d)\n",
			__func__, cpu, cluster, ret);

	return ret;
}
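
/*
 * Like mcpm_cpu_power_down(), but for cpuidle/suspend: the expected
 * residency hint lets the backend decide how deep a state (CPU-only or
 * whole-cluster) is worth entering.  Execution resumes through
 * mcpm_entry_point on wake-up.
 */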
void mcpm_cpu_suspend(u64 expected_residency)
{
	phys_reset_t phys_reset;

	if (WARN_ON_ONCE(!platform_ops || !platform_ops->suspend))
		return;
	BUG_ON(!irqs_disabled());

	/* Very similar to mcpm_cpu_power_down() */
	setup_mm_for_reboot();
	platform_ops->suspend(expected_residency);
	phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset);
	phys_reset(virt_to_phys(mcpm_entry_point));
	BUG();
}
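
/*
 * Called by a CPU once it is back up and fully functional, so the backend
 * can release any reset/wake-up machinery it armed on the way down.  The
 * powered_up method is optional.
 */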
int mcpm_cpu_powered_up(void)
{
	if (!platform_ops)
		return -EUNATCH;
	if (platform_ops->powered_up)
		platform_ops->powered_up();
	return 0;
}
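
/*
 * The remainder of this file implements the "first man / last man"
 * coordination between CPUs entering and leaving a cluster.  All state in
 * mcpm_sync may be accessed with caches off, so every access is paired
 * with explicit cache maintenance (sync_cache_r/sync_cache_w) and writers
 * use dsb_sev() to wake CPUs waiting in wfe().
 */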
struct sync_struct mcpm_sync;

/*
 * __mcpm_cpu_going_down: Indicates that the cpu is being torn down.
 *    This must be called at the point of committing to teardown of a CPU.
 *    The CPU cache (SCTLR.C bit) is expected to still be active.
 */
void __mcpm_cpu_going_down(unsigned int cpu, unsigned int cluster)
{
	mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_GOING_DOWN;
	sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu);
}
/*
 * __mcpm_cpu_down: Indicates that cpu teardown is complete and that the
 *    cluster can be torn down without disrupting this CPU.
 *    To avoid deadlocks, this must be called before a CPU is powered down.
 *    The CPU cache (SCTLR.C bit) is expected to be off.
 *    However L2 cache might or might not be active.
 */
void __mcpm_cpu_down(unsigned int cpu, unsigned int cluster)
{
	dmb();
	mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_DOWN;
	sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu);
	dsb_sev();
}
/*
 * __mcpm_outbound_leave_critical: Leave the cluster teardown critical section.
 * @state: the final state of the cluster:
 *     CLUSTER_UP: no destructive teardown was done and the cluster has been
 *         restored to the previous state (CPU cache still active); or
 *     CLUSTER_DOWN: the cluster has been torn-down, ready for power-off
 *         (CPU cache disabled, L2 cache either enabled or disabled).
 */
void __mcpm_outbound_leave_critical(unsigned int cluster, int state)
{
	dmb();
	mcpm_sync.clusters[cluster].cluster = state;
	sync_cache_w(&mcpm_sync.clusters[cluster].cluster);
	dsb_sev();
}
/*
 * __mcpm_outbound_enter_critical: Enter the cluster teardown critical section.
 * This function should be called by the last man, after local CPU teardown
 * is complete.  CPU cache expected to be active.
 *
 * Returns:
 *     false: the critical section was not entered because an inbound CPU was
 *         observed, or the cluster is already being set up;
 *     true: the critical section was entered: it is now safe to tear down the
 *         cluster.
 */
bool __mcpm_outbound_enter_critical(unsigned int cpu, unsigned int cluster)
{
	unsigned int i;
	struct mcpm_sync_struct *c = &mcpm_sync.clusters[cluster];

	/* Warn inbound CPUs that the cluster is being torn down: */
	c->cluster = CLUSTER_GOING_DOWN;
	sync_cache_w(&c->cluster);

	/* Back out if the inbound cluster is already in the critical region: */
	sync_cache_r(&c->inbound);
	if (c->inbound == INBOUND_COMING_UP)
		goto abort;

	/*
	 * Wait for all CPUs to get out of the GOING_DOWN state, so that local
	 * teardown is complete on each CPU before tearing down the cluster.
	 *
	 * If any CPU has been woken up again from the DOWN state, then we
	 * shouldn't be taking the cluster down at all: abort in that case.
	 */
	sync_cache_r(&c->cpus);
	for (i = 0; i < MAX_CPUS_PER_CLUSTER; i++) {
		int cpustate;

		if (i == cpu)
			continue;

		while (1) {
			cpustate = c->cpus[i].cpu;
			if (cpustate != CPU_GOING_DOWN)
				break;

			wfe();
			sync_cache_r(&c->cpus[i].cpu);
		}

		switch (cpustate) {
		case CPU_DOWN:
			continue;

		default:
			goto abort;
		}
	}

	return true;

abort:
	__mcpm_outbound_leave_critical(cluster, CLUSTER_UP);
	return false;
}
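
/*
 * Read back the current (cache-coherent) view of a cluster's state, one of
 * CLUSTER_DOWN, CLUSTER_UP or CLUSTER_GOING_DOWN.
 */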
int __mcpm_cluster_state(unsigned int cluster)
{
	sync_cache_r(&mcpm_sync.clusters[cluster].cluster);
	return mcpm_sync.clusters[cluster].cluster;
}
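
/*
 * One-time initialisation of the first-man/last-man synchronisation state
 * and of the optional power_up_setup hook, which the early entry code runs
 * (at its physical address, with caches off) for each affinity level as a
 * CPU or cluster is powered up.
 */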
extern unsigned long mcpm_power_up_setup_phys;

int __init mcpm_sync_init(
	void (*power_up_setup)(unsigned int affinity_level))
{
	unsigned int i, j, mpidr, this_cluster;

	BUILD_BUG_ON(MCPM_SYNC_CLUSTER_SIZE * MAX_NR_CLUSTERS != sizeof mcpm_sync);
	BUG_ON((unsigned long)&mcpm_sync & (__CACHE_WRITEBACK_GRANULE - 1));

	/*
	 * Set initial CPU and cluster states.
	 * Only one cluster is assumed to be active at this point.
	 */
	for (i = 0; i < MAX_NR_CLUSTERS; i++) {
		mcpm_sync.clusters[i].cluster = CLUSTER_DOWN;
		mcpm_sync.clusters[i].inbound = INBOUND_NOT_COMING_UP;
		for (j = 0; j < MAX_CPUS_PER_CLUSTER; j++)
			mcpm_sync.clusters[i].cpus[j].cpu = CPU_DOWN;
	}
	mpidr = read_cpuid_mpidr();
	this_cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
	for_each_online_cpu(i)
		mcpm_sync.clusters[this_cluster].cpus[i].cpu = CPU_UP;
	mcpm_sync.clusters[this_cluster].cluster = CLUSTER_UP;
	sync_cache_w(&mcpm_sync);

	if (power_up_setup) {
		mcpm_power_up_setup_phys = virt_to_phys(power_up_setup);
		sync_cache_w(&mcpm_power_up_setup_phys);
	}

	return 0;
}