/*
 * arch/arm/common/bL_switcher.c -- big.LITTLE cluster switcher core driver
 *
 * Created by: Nicolas Pitre, March 2012
 * Copyright:  (C) 2012-2013 Linaro Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/cpu_pm.h>
#include <linux/cpumask.h>
#include <linux/workqueue.h>
#include <linux/clockchips.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/irqchip/arm-gic.h>

#include <asm/smp_plat.h>
#include <asm/suspend.h>
#include <asm/mcpm.h>
#include <asm/bL_switcher.h>

/*
 * Use our own MPIDR accessors as the generic ones in asm/cputype.h have
 * __attribute_const__ and we don't want the compiler to assume any
 * constness here as the value _does_ change along some code paths.
 */

static int read_mpidr(void)
{
        unsigned int id;
        asm volatile ("mrc p15, 0, %0, c0, c0, 5" : "=r" (id));
        return id & MPIDR_HWID_BITMASK;
}

/*
 * bL switcher core code.
 */

static void bL_do_switch(void *_unused)
{
        unsigned mpidr, cpuid, clusterid, ob_cluster, ib_cluster;

        /*
         * We now have a piece of stack borrowed from the init task's.
         * Let's also switch to init_mm right away to match it.
         */
        cpu_switch_mm(init_mm.pgd, &init_mm);

        pr_debug("%s\n", __func__);

        mpidr = read_mpidr();
        cpuid = MPIDR_AFFINITY_LEVEL(mpidr, 0);
        clusterid = MPIDR_AFFINITY_LEVEL(mpidr, 1);
        ob_cluster = clusterid;
        ib_cluster = clusterid ^ 1;

        /*
         * Our state has been saved at this point.  Let's release our
         * inbound CPU.
         */
        mcpm_set_entry_vector(cpuid, ib_cluster, cpu_resume);
        sev();
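
        /*
         * The SEV above wakes the inbound CPU if it is waiting in WFE
         * on the (previously cleared) entry vector gate; it will find
         * cpu_resume there and restore the state we just saved.
         */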

        /*
         * From this point, we must assume that our counterpart CPU might
         * have taken over in its parallel world already, as if execution
         * just returned from cpu_suspend().  It is therefore important to
         * be very careful not to make any change our counterpart is not
         * expecting.  This is why we need stack isolation.
         *
         * Fancy undercover tasks could be performed here.  For now
         * we have none.
         */

        /* Let's put ourselves down. */
        mcpm_cpu_power_down();

        /* should never get here */
        BUG();
}

/*
 * Stack isolation.  To ensure 'current' remains valid, we just borrow
 * a slice of the init/idle task's stack, which should be fairly lightly
 * used.  The borrowed area starts just above the thread_info structure
 * located at the very bottom of the stack, aligned to a cache line.
 */

#define STACK_SIZE 256
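
/*
 * Each logical CPU gets its own STACK_SIZE slice of this area,
 * indexed by cpuid + clusterid * MAX_CPUS_PER_CLUSTER (see
 * bL_switchpoint() below).
 */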

extern void call_with_stack(void (*fn)(void *), void *arg, void *sp);

static int bL_switchpoint(unsigned long _arg)
{
        unsigned int mpidr = read_mpidr();
        unsigned int cpuid = MPIDR_AFFINITY_LEVEL(mpidr, 0);
        unsigned int clusterid = MPIDR_AFFINITY_LEVEL(mpidr, 1);
        unsigned int cpu_index = cpuid + clusterid * MAX_CPUS_PER_CLUSTER;
        void *stack = &init_thread_info + 1;
        stack = PTR_ALIGN(stack, L1_CACHE_BYTES);
        stack += cpu_index * STACK_SIZE + STACK_SIZE;
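        /*
         * The stack is full-descending on ARM, so pass the *top* of
         * this CPU's slice (hence the extra STACK_SIZE above).
         */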
        call_with_stack(bL_do_switch, (void *)_arg, stack);
        BUG();
}

/*
 * Generic switcher interface
 */

/*
 * bL_switch_to - Switch to a specific cluster for the current CPU
 * @new_cluster_id: the ID of the cluster to switch to.
 *
 * This function must be called on the CPU to be switched.
 * Returns 0 on success, else a negative status code.
 */
static int bL_switch_to(unsigned int new_cluster_id)
{
        unsigned int mpidr, cpuid, clusterid, ob_cluster, ib_cluster, this_cpu;
        struct tick_device *tdev;
        enum clock_event_mode tdev_mode;
        int ret;

        mpidr = read_mpidr();
        cpuid = MPIDR_AFFINITY_LEVEL(mpidr, 0);
        clusterid = MPIDR_AFFINITY_LEVEL(mpidr, 1);
        ob_cluster = clusterid;
        ib_cluster = clusterid ^ 1;

        if (new_cluster_id == clusterid)
                return 0;

        pr_debug("before switch: CPU %d in cluster %d\n", cpuid, clusterid);

        /* Close the gate for our entry vectors */
        mcpm_set_entry_vector(cpuid, ob_cluster, NULL);
        mcpm_set_entry_vector(cpuid, ib_cluster, NULL);

        /*
         * Let's wake up the inbound CPU now in case it requires some delay
         * to come online, but leave it gated in our entry vector code.
         */
        ret = mcpm_cpu_power_up(cpuid, ib_cluster);
        if (ret) {
                pr_err("%s: mcpm_cpu_power_up() returned %d\n", __func__, ret);
                return ret;
        }

        /*
         * From this point we are entering the switch critical zone
         * and can't take any interrupts anymore.
         */
        local_irq_disable();
        local_fiq_disable();

        this_cpu = smp_processor_id();

        /* redirect GIC's SGIs to our counterpart */
        gic_migrate_target(cpuid + ib_cluster*4);

        /*
         * Raise an SGI on the inbound CPU to make sure it doesn't stall
         * in a possible WFI, such as in mcpm_power_down().
         */
        arch_send_wakeup_ipi_mask(cpumask_of(this_cpu));
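
        /*
         * Note: gic_migrate_target() above has redirected our SGIs, so
         * an IPI nominally aimed at this_cpu is actually delivered to
         * the inbound CPU.
         */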

        tdev = tick_get_device(this_cpu);
        if (tdev && !cpumask_equal(tdev->evtdev->cpumask, cpumask_of(this_cpu)))
                tdev = NULL;
        if (tdev) {
                tdev_mode = tdev->evtdev->mode;
                clockevents_set_mode(tdev->evtdev, CLOCK_EVT_MODE_SHUTDOWN);
        }

        ret = cpu_pm_enter();

        /* we cannot tolerate errors at this point */
        if (ret)
                panic("%s: cpu_pm_enter() returned %d\n", __func__, ret);

        /* Flip the cluster in the CPU logical map for this CPU. */
        cpu_logical_map(this_cpu) ^= (1 << 8);
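        /* (bit 8 is the least significant bit of the MPIDR Aff1/cluster field) */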

        /* Let's do the actual CPU switch. */
        ret = cpu_suspend(0, bL_switchpoint);
        if (ret > 0)
                panic("%s: cpu_suspend() returned %d\n", __func__, ret);

        /* We are executing on the inbound CPU at this point */
        mpidr = read_mpidr();
        cpuid = MPIDR_AFFINITY_LEVEL(mpidr, 0);
        clusterid = MPIDR_AFFINITY_LEVEL(mpidr, 1);
        pr_debug("after switch: CPU %d in cluster %d\n", cpuid, clusterid);
        BUG_ON(clusterid != ib_cluster);

        mcpm_cpu_powered_up();

        ret = cpu_pm_exit();

        if (tdev) {
                clockevents_set_mode(tdev->evtdev, tdev_mode);
                clockevents_program_event(tdev->evtdev,
                                          tdev->evtdev->next_event, 1);
        }

        local_fiq_enable();
        local_irq_enable();

        if (ret)
                pr_err("%s exiting with error %d\n", __func__, ret);
        return ret;
}

struct switch_args {
        unsigned int cluster;
        struct work_struct work;
};

static void __bL_switch_to(struct work_struct *work)
{
        struct switch_args *args = container_of(work, struct switch_args, work);
        bL_switch_to(args->cluster);
}

/*
 * bL_switch_request - Switch to a specific cluster for the given CPU
 *
 * @cpu: the CPU to switch
 * @new_cluster_id: the ID of the cluster to switch to.
 *
 * This function causes a cluster switch on the given CPU. If the given
 * CPU is the same as the calling CPU then the switch happens right away.
 * Otherwise the request is put on a work queue to be scheduled on the
 * remote CPU.
 */
void bL_switch_request(unsigned int cpu, unsigned int new_cluster_id)
{
        unsigned int this_cpu = get_cpu();
        struct switch_args args;

        if (cpu == this_cpu) {
                bL_switch_to(new_cluster_id);
                put_cpu();
                return;
        }
        put_cpu();

        args.cluster = new_cluster_id;
        INIT_WORK_ONSTACK(&args.work, __bL_switch_to);
        schedule_work_on(cpu, &args.work);
        flush_work(&args.work);
}
EXPORT_SYMBOL_GPL(bL_switch_request);
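
/*
 * Example usage (editor's sketch, not part of the original driver):
 * a minimal late_initcall that asks the switcher to move the calling
 * CPU onto the opposite cluster.  bL_switch_demo_init() is a
 * hypothetical name; only bL_switch_request() above is the driver's
 * real entry point.  bL_switch_request() may sleep (it can flush a
 * work item), so it is called here with preemption enabled.
 */
static int __init bL_switch_demo_init(void)
{
        unsigned int mpidr, cpu, cluster;

        cpu = get_cpu();
        mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
        cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
        put_cpu();

        /* Request a switch of this CPU to the other cluster. */
        bL_switch_request(cpu, cluster ^ 1);
        return 0;
}
late_initcall(bL_switch_demo_init);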