/*
 * arch/arm/common/mcpm_entry.c -- entry point for multi-cluster PM
 *
 * Created by:  Nicolas Pitre, March 2012
 * Copyright:   (C) 2012-2013  Linaro Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/irqflags.h>

#include <asm/mcpm.h>
#include <asm/cacheflush.h>
#include <asm/idmap.h>
extern unsigned long mcpm_entry_vectors[MAX_NR_CLUSTERS][MAX_CPUS_PER_CLUSTER];

/*
 * Record the physical address that the given CPU in the given cluster
 * should jump to the next time it comes through mcpm_entry_point, and
 * clean the write so that a CPU running with its caches disabled (e.g.
 * freshly out of reset) sees the updated vector.
 */
void mcpm_set_entry_vector(unsigned cpu, unsigned cluster, void *ptr)
{
        unsigned long val = ptr ? virt_to_phys(ptr) : 0;
        mcpm_entry_vectors[cluster][cpu] = val;
        sync_cache_w(&mcpm_entry_vectors[cluster][cpu]);
}
static const struct mcpm_platform_ops *platform_ops;

/*
 * Register the platform specific back-end implementing the low-level
 * CPU/cluster power methods.  Only one back-end may be registered.
 */
int __init mcpm_platform_register(const struct mcpm_platform_ops *ops)
{
        if (platform_ops)
                return -EBUSY;
        platform_ops = ops;
        return 0;
}
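/*
 * Illustrative sketch, not part of the original file: how a platform
 * back-end might plug itself in through mcpm_platform_register().
 * The foo_pm_* names are hypothetical; the callback slots mirror the
 * calls made by this file (.power_up, .power_down, .suspend and the
 * optional .powered_up).  Possible bodies for the power_up/power_down
 * pair are sketched after mcpm_cpu_power_down() below.
 */
static int foo_pm_power_up(unsigned int cpu, unsigned int cluster);
static void foo_pm_power_down(void);
static void foo_pm_suspend(u64 expected_residency);

static const struct mcpm_platform_ops foo_pm_ops = {
        .power_up       = foo_pm_power_up,
        .power_down     = foo_pm_power_down,
        .suspend        = foo_pm_suspend,
};

static int __init foo_pm_init(void)
{
        /* the first back-end to register wins; later calls get -EBUSY */
        return mcpm_platform_register(&foo_pm_ops);
}
early_initcall(foo_pm_init);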
int mcpm_cpu_power_up(unsigned int cpu, unsigned int cluster)
{
        if (!platform_ops)
                return -EUNATCH; /* try not to shadow power_up errors */
        might_sleep();
        return platform_ops->power_up(cpu, cluster);
}
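/*
 * Illustrative sketch, not part of the original file: the caller side
 * of the two functions above, loosely modelled on an SMP secondary
 * boot path.  foo_boot_secondary() and foo_secondary_entry are made-up
 * names; a real caller would typically derive cpu/cluster from the
 * target CPU's MPIDR affinity fields.
 */
extern void foo_secondary_entry(void);  /* hypothetical assembly entry point */

static int foo_boot_secondary(unsigned int cpu, unsigned int cluster)
{
        int ret;

        /* tell MCPM where this CPU should jump once it is released */
        mcpm_set_entry_vector(cpu, cluster, foo_secondary_entry);

        /* ask the registered back-end to actually power the CPU on */
        ret = mcpm_cpu_power_up(cpu, cluster);
        if (ret)
                /* undo: a NULL pointer stores 0 back into the vector */
                mcpm_set_entry_vector(cpu, cluster, NULL);
        return ret;
}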
typedef void (*phys_reset_t)(unsigned long);

void mcpm_cpu_power_down(void)
{
        phys_reset_t phys_reset;

        BUG_ON(!platform_ops);
        BUG_ON(!irqs_disabled());

        /*
         * Do this before calling into the power_down method,
         * as it might not always be safe to do afterwards.
         */
        setup_mm_for_reboot();

        platform_ops->power_down();

        /*
         * It is possible for a power_up request to happen concurrently
         * with a power_down request for the same CPU. In this case the
         * power_down method might not be able to actually enter a
         * powered down state with the WFI instruction if the power_up
         * method has removed the required reset condition.  The
         * power_down method is then allowed to return.  We must perform
         * a re-entry in the kernel as if the power_up method just had
         * deasserted reset on the CPU.
         *
         * To simplify race issues, the platform specific implementation
         * must accommodate for the possibility of unordered calls to
         * power_down and power_up with a usage count.  Therefore, if a
         * call to power_up is issued for a CPU that is not down, then
         * the next call to power_down must not attempt a full shutdown
         * but only do the minimum (normally disabling L1 cache and CPU
         * coherency) and return just as if a concurrent power_up request
         * had happened as described above.
         */

        phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset);
        phys_reset(virt_to_phys(mcpm_entry_point));

        /* should never get here */
        BUG();
}
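/*
 * Illustrative sketch, not part of the original file: the usage count
 * scheme that the comment in mcpm_cpu_power_down() above expects from
 * the platform back-end.  All foo_* identifiers are hypothetical, the
 * hardware specifics are reduced to placeholder comments, and a real
 * back-end would pull in <asm/cputype.h> for read_cpuid_mpidr() and
 * MPIDR_AFFINITY_LEVEL().
 */
static int foo_use_count[MAX_CPUS_PER_CLUSTER][MAX_NR_CLUSTERS];
static arch_spinlock_t foo_lock = __ARCH_SPIN_LOCK_UNLOCKED;

static int foo_pm_power_up(unsigned int cpu, unsigned int cluster)
{
        local_irq_disable();
        arch_spin_lock(&foo_lock);

        foo_use_count[cpu][cluster]++;
        if (foo_use_count[cpu][cluster] == 1) {
                /* first user: power the CPU on / release it from reset here */
        }
        /* a count of 2 means we raced with power_down: nothing more to do */

        arch_spin_unlock(&foo_lock);
        local_irq_enable();
        return 0;
}

static void foo_pm_power_down(void)
{
        unsigned int mpidr = read_cpuid_mpidr();
        unsigned int cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
        unsigned int cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
        bool skip_wfi = false;

        arch_spin_lock(&foo_lock);
        foo_use_count[cpu][cluster]--;
        if (foo_use_count[cpu][cluster] == 0) {
                /* no pending power_up: arrange for a real shutdown here */
        } else {
                /*
                 * A power_up request raced with us: keep the CPU alive,
                 * but still do the minimal teardown the caller expects.
                 */
                skip_wfi = true;
        }
        arch_spin_unlock(&foo_lock);

        /* disable the local L1 cache and exit coherency here */

        if (!skip_wfi)
                wfi();
        /*
         * Returning here lets mcpm_cpu_power_down() re-enter the kernel
         * through mcpm_entry_point, as described in the comment above.
         */
}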
void mcpm_cpu_suspend(u64 expected_residency)
{
        phys_reset_t phys_reset;

        BUG_ON(!platform_ops);
        BUG_ON(!irqs_disabled());

        /* Very similar to mcpm_cpu_power_down() */
        setup_mm_for_reboot();
        platform_ops->suspend(expected_residency);
        phys_reset = (phys_reset_t)(unsigned long)virt_to_phys(cpu_reset);
        phys_reset(virt_to_phys(mcpm_entry_point));
        BUG();
}
int mcpm_cpu_powered_up(void)
{
        if (!platform_ops)
                return -EUNATCH;
        if (platform_ops->powered_up)
                platform_ops->powered_up();
        return 0;
}