mcpm.h

/*
 * arch/arm/include/asm/mcpm.h
 *
 * Created by:  Nicolas Pitre, April 2012
 * Copyright:   (C) 2012-2013  Linaro Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef MCPM_H
#define MCPM_H

/*
 * Maximum number of possible clusters / CPUs per cluster.
 *
 * This should be sufficient for quite a while, while keeping the
 * (assembly) code simpler.  When this starts to grow then we'll have
 * to consider dynamic allocation.
 */
#define MAX_CPUS_PER_CLUSTER 4
#define MAX_NR_CLUSTERS 2

#ifndef __ASSEMBLY__

/*
 * Platform specific code should use this symbol to set up secondary
 * entry location for processors to use when released from reset.
 */
extern void mcpm_entry_point(void);

/*
 * This is used to indicate where the given CPU from given cluster should
 * branch once it is ready to re-enter the kernel using ptr, or NULL if it
 * should be gated.  A gated CPU is held in a WFE loop until its vector
 * becomes non-NULL.
 */
void mcpm_set_entry_vector(unsigned cpu, unsigned cluster, void *ptr);
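
/*
 * Example (illustrative sketch, not part of the MCPM interface): platform
 * code could release CPU 1 of cluster 0 towards the kernel's secondary
 * startup code, or park it again by clearing the vector.  The name
 * "secondary_startup" stands in for whatever re-entry point the platform
 * actually uses.
 *
 *      extern void secondary_startup(void);
 *
 *      mcpm_set_entry_vector(1, 0, secondary_startup);
 *
 * and, to hold that CPU in the WFE holding pen again:
 *
 *      mcpm_set_entry_vector(1, 0, NULL);
 */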

/*
 * CPU/cluster power operations API for higher subsystems to use.
 */

/**
 * mcpm_cpu_power_up - make given CPU in given cluster runnable
 *
 * @cpu: CPU number within given cluster
 * @cluster: cluster number for the CPU
 *
 * The identified CPU is brought out of reset.  If the cluster was powered
 * down then it is brought up as well, taking care not to let the other CPUs
 * in the cluster run, and ensuring appropriate cluster setup.
 *
 * Caller must ensure the appropriate entry vector is initialized with
 * mcpm_set_entry_vector() prior to calling this.
 *
 * This must be called in a sleepable context.  However, the implementation
 * is strongly encouraged to return early and let the operation happen
 * asynchronously, especially when significant delays are expected.
 *
 * If the operation cannot be performed then an error code is returned.
 */
int mcpm_cpu_power_up(unsigned int cpu, unsigned int cluster);
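
/*
 * Example (sketch only, boot_one_cpu_example() is a hypothetical helper):
 * a CPU bring-up path installs the entry vector first, then releases the
 * CPU from reset.
 *
 *      static int boot_one_cpu_example(unsigned int cpu, unsigned int cluster)
 *      {
 *              extern void secondary_startup(void);
 *
 *              mcpm_set_entry_vector(cpu, cluster, secondary_startup);
 *              return mcpm_cpu_power_up(cpu, cluster);
 *      }
 */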

/**
 * mcpm_cpu_power_down - power the calling CPU down
 *
 * The calling CPU is powered down.
 *
 * If this CPU is found to be the "last man standing" in the cluster
 * then the cluster is prepared for power-down too.
 *
 * This must be called with interrupts disabled.
 *
 * This does not return.  Re-entry in the kernel is expected via
 * mcpm_entry_point.
 */
void mcpm_cpu_power_down(void);
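
/*
 * Example (sketch only, cpu_die_example() is a hypothetical helper): a
 * hotplug-style "die" path gates its own vector and then powers itself
 * off; execution only resumes through mcpm_entry_point once the vector
 * is set again and the CPU is powered back up.
 *
 *      static void cpu_die_example(unsigned int cpu, unsigned int cluster)
 *      {
 *              local_irq_disable();
 *              mcpm_set_entry_vector(cpu, cluster, NULL);
 *              mcpm_cpu_power_down();
 *              BUG();  // not reached: mcpm_cpu_power_down() does not return
 *      }
 */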

/**
 * mcpm_cpu_suspend - put the calling CPU into a suspended state
 *
 * @expected_residency: duration in microseconds the CPU is expected
 *                      to remain suspended, or 0 if unknown/infinity.
 *
 * The calling CPU is suspended.  The expected residency argument is used
 * as a hint by the platform specific backend to implement the appropriate
 * sleep state level according to the knowledge it has on wake-up latency
 * for the given hardware.
 *
 * If this CPU is found to be the "last man standing" in the cluster
 * then the cluster may be prepared for power-down too, if the expected
 * residency makes it worthwhile.
 *
 * This must be called with interrupts disabled.
 *
 * This does not return.  Re-entry in the kernel is expected via
 * mcpm_entry_point.
 */
void mcpm_cpu_suspend(u64 expected_residency);
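
/*
 * Example (fragment only; "cpu"/"cluster" are the calling CPU's own
 * coordinates and the 5000 microsecond figure is purely illustrative):
 * an idle path points the entry vector back at the kernel's resume code
 * and passes the expected sleep duration as a hint.
 *
 *      extern void cpu_resume(void);
 *
 *      mcpm_set_entry_vector(cpu, cluster, cpu_resume);
 *      mcpm_cpu_suspend(5000);
 */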

/**
 * mcpm_cpu_powered_up - housekeeping work after a CPU has been powered up
 *
 * This lets the platform specific backend code perform needed housekeeping
 * work.  This must be called by the newly activated CPU as soon as it is
 * fully operational in kernel space, before it enables interrupts.
 *
 * If the operation cannot be performed then an error code is returned.
 */
int mcpm_cpu_powered_up(void);
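
/*
 * Example (sketch only): the first thing a freshly woken CPU does, before
 * enabling interrupts, is let the backend finish its housekeeping.
 *
 *      if (mcpm_cpu_powered_up())
 *              pr_warn("mcpm: powered_up housekeeping failed\n");
 */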

/*
 * Platform specific methods used in the implementation of the above API.
 */
struct mcpm_platform_ops {
        int (*power_up)(unsigned int cpu, unsigned int cluster);
        void (*power_down)(void);
        void (*suspend)(u64);
        void (*powered_up)(void);
};

/**
 * mcpm_platform_register - register platform specific power methods
 *
 * @ops: mcpm_platform_ops structure to register
 *
 * An error is returned if the registration has been done previously.
 */
int __init mcpm_platform_register(const struct mcpm_platform_ops *ops);
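
/*
 * Example (sketch only, all "my_soc_*" names are hypothetical): a platform
 * backend fills in a struct mcpm_platform_ops with its low-level handlers
 * and registers it once during early init.
 *
 *      static const struct mcpm_platform_ops my_soc_pm_ops = {
 *              .power_up       = my_soc_cpu_power_up,
 *              .power_down     = my_soc_cpu_power_down,
 *              .suspend        = my_soc_cpu_suspend,
 *              .powered_up     = my_soc_cpu_powered_up,
 *      };
 *
 *      static int __init my_soc_mcpm_init(void)
 *      {
 *              return mcpm_platform_register(&my_soc_pm_ops);
 *      }
 *      early_initcall(my_soc_mcpm_init);
 */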

#endif /* ! __ASSEMBLY__ */
#endif