hotplug.c

/* linux arch/arm/mach-exynos4/hotplug.c
 *
 * Cloned from linux/arch/arm/mach-realview/hotplug.c
 *
 * Copyright (C) 2002 ARM Ltd.
 * All Rights Reserved
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/smp.h>
#include <linux/io.h>

#include <asm/cacheflush.h>
#include <asm/cp15.h>
#include <asm/smp_plat.h>

#include <mach/regs-pmu.h>
#include <plat/cpu.h>

#include "common.h"
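
/*
 * Cortex-A9 low-power entry: invalidate the I-cache (ICIALLU), issue a
 * CP15 DSB, leave SMP coherency by clearing the SMP bit (bit 6, 0x40)
 * in the Auxiliary Control Register, then disable the data cache by
 * clearing SCTLR.C.
 */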
static inline void cpu_enter_lowpower_a9(void)
{
	unsigned int v;

	asm volatile(
	"	mcr	p15, 0, %1, c7, c5, 0\n"
	"	mcr	p15, 0, %1, c7, c10, 4\n"
	/*
	 * Turn off coherency
	 */
	"	mrc	p15, 0, %0, c1, c0, 1\n"
	"	bic	%0, %0, %3\n"
	"	mcr	p15, 0, %0, c1, c0, 1\n"
	"	mrc	p15, 0, %0, c1, c0, 0\n"
	"	bic	%0, %0, %2\n"
	"	mcr	p15, 0, %0, c1, c0, 0\n"
	  : "=&r" (v)
	  : "r" (0), "Ir" (CR_C), "Ir" (0x40)
	  : "cc");
}
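
/*
 * Cortex-A15 low-power entry: disable the data cache first (clear
 * SCTLR.C), flush dirty lines out to the Level of Unification Inner
 * Shareable, and only then leave coherency by clearing the SMP bit
 * (bit 6, 0x40) in the Auxiliary Control Register; isb()/dsb() make
 * sure the changes have taken effect before the WFI that follows.
 */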
static inline void cpu_enter_lowpower_a15(void)
{
	unsigned int v;

	asm volatile(
	"	mrc	p15, 0, %0, c1, c0, 0\n"
	"	bic	%0, %0, %1\n"
	"	mcr	p15, 0, %0, c1, c0, 0\n"
	  : "=&r" (v)
	  : "Ir" (CR_C)
	  : "cc");

	flush_cache_louis();

	asm volatile(
	/*
	 * Turn off coherency
	 */
	"	mrc	p15, 0, %0, c1, c0, 1\n"
	"	bic	%0, %0, %1\n"
	"	mcr	p15, 0, %0, c1, c0, 1\n"
	  : "=&r" (v)
	  : "Ir" (0x40)
	  : "cc");

	isb();
	dsb();
}
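
/*
 * Undo the low-power sequence after a (possibly spurious) wakeup:
 * re-enable the data cache (SCTLR.C) and rejoin SMP coherency
 * (Auxiliary Control Register bit 6) so the core can run cached,
 * coherent code again.
 */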
static inline void cpu_leave_lowpower(void)
{
	unsigned int v;

	asm volatile(
	"	mrc	p15, 0, %0, c1, c0, 0\n"
	"	orr	%0, %0, %1\n"
	"	mcr	p15, 0, %0, c1, c0, 0\n"
	"	mrc	p15, 0, %0, c1, c0, 1\n"
	"	orr	%0, %0, %2\n"
	"	mcr	p15, 0, %0, c1, c0, 1\n"
	  : "=&r" (v)
	  : "Ir" (CR_C), "Ir" (0x40)
	  : "cc");
}
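
/*
 * Park the dying CPU: for CPU1 the PMU core-configuration register is
 * cleared first so the power domain is switched off on the next WFI.
 * The CPU then sits in WFI (0xe320f003 is the ARM encoding of WFI)
 * until the boot CPU writes its hardware id into pen_release; any
 * other wakeup is counted as spurious.
 */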
static inline void platform_do_lowpower(unsigned int cpu, int *spurious)
{
	for (;;) {

		/* arrange for cpu1 to be turned off at the next WFI */
		if (cpu == 1)
			__raw_writel(0, S5P_ARM_CORE1_CONFIGURATION);

		/*
		 * here's the WFI
		 */
		asm(".word	0xe320f003\n"
		    :
		    :
		    : "memory", "cc");

		if (pen_release == cpu_logical_map(cpu)) {
			/*
			 * OK, proper wakeup, we're done
			 */
			break;
		}

		/*
		 * Getting here means that we have come out of WFI without
		 * having been woken up - this shouldn't happen.
		 *
		 * Just note it happening - when we're woken, we can report
		 * its occurrence.
		 */
		(*spurious)++;
	}
}

/*
 * Platform-specific code to shut down a CPU.
 *
 * Called with IRQs disabled.
 */
void __ref exynos_cpu_die(unsigned int cpu)
{
	int spurious = 0;
	int primary_part = 0;

	/*
	 * We're ready for shutdown now, so do it.
	 * Exynos4 is A9 based while Exynos5 is A15; check the CPU part
	 * number by reading the Main ID register and then perform the
	 * appropriate sequence for entering low power.
	 */
	asm("mrc p15, 0, %0, c0, c0, 0" : "=r"(primary_part) : : "cc");

	/* MIDR part number 0xC0F identifies a Cortex-A15 */
	if ((primary_part & 0xfff0) == 0xc0f0)
		cpu_enter_lowpower_a15();
	else
		cpu_enter_lowpower_a9();

	platform_do_lowpower(cpu, &spurious);

	/*
	 * Bring this CPU back into the world of cache coherency,
	 * and then restore interrupts.
	 */
	cpu_leave_lowpower();

	if (spurious)
		pr_warn("CPU%u: %u spurious wakeup calls\n", cpu, spurious);
}