/* linux arch/arm/mach-exynos4/hotplug.c
 *
 * Cloned from linux/arch/arm/mach-realview/hotplug.c
 *
 * Copyright (C) 2002 ARM Ltd.
 * All Rights Reserved
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/smp.h>
#include <linux/io.h>

#include <asm/cacheflush.h>
#include <asm/cp15.h>
#include <asm/smp_plat.h>

#include <mach/regs-pmu.h>
#include <plat/cpu.h>

#include "common.h"
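
/*
 * Power-down entry for Cortex-A9 (Exynos4): flush the whole cache
 * hierarchy while the caches are still enabled, then drop the core out
 * of SMP coherency and disable its D-cache so it can be powered off
 * safely.
 */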
static inline void cpu_enter_lowpower_a9(void)
{
	unsigned int v;

	flush_cache_all();
	asm volatile(
	/* invalidate the I-cache (ICIALLU), then a CP15 DSB */
	"	mcr	p15, 0, %1, c7, c5, 0\n"
	"	mcr	p15, 0, %1, c7, c10, 4\n"
	/*
	 * Turn off coherency: clear the ACTLR SMP bit (bit 6)
	 */
	"	mrc	p15, 0, %0, c1, c0, 1\n"
	"	bic	%0, %0, %3\n"
	"	mcr	p15, 0, %0, c1, c0, 1\n"
	/* turn off caching: clear the SCTLR C bit */
	"	mrc	p15, 0, %0, c1, c0, 0\n"
	"	bic	%0, %0, %2\n"
	"	mcr	p15, 0, %0, c1, c0, 0\n"
	  : "=&r" (v)
	  : "r" (0), "Ir" (CR_C), "Ir" (0x40)
	  : "cc");
}
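
/*
 * Power-down entry for Cortex-A15 (Exynos5): here the D-cache must be
 * turned off *before* the flush so no new lines are allocated, and only
 * the cache levels up to the Level of Unification Inner Shareable need
 * cleaning (flush_cache_louis()).
 */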
static inline void cpu_enter_lowpower_a15(void)
{
	unsigned int v;

	asm volatile(
	/* turn off caching: clear the SCTLR C bit */
	"	mrc	p15, 0, %0, c1, c0, 0\n"
	"	bic	%0, %0, %1\n"
	"	mcr	p15, 0, %0, c1, c0, 0\n"
	  : "=&r" (v)
	  : "Ir" (CR_C)
	  : "cc");

	flush_cache_louis();

	asm volatile(
	/*
	 * Turn off coherency: clear the ACTLR SMP bit (bit 6)
	 */
	"	mrc	p15, 0, %0, c1, c0, 1\n"
	"	bic	%0, %0, %1\n"
	"	mcr	p15, 0, %0, c1, c0, 1\n"
	  : "=&r" (v)
	  : "Ir" (0x40)
	  : "cc");

	isb();
	dsb();
}
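
/*
 * Reverse of the power-down sequence, used after a spurious wakeup:
 * re-enable the D-cache and rejoin SMP coherency so the core can run
 * normally again.
 */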
static inline void cpu_leave_lowpower(void)
{
	unsigned int v;

	asm volatile(
	"	mrc	p15, 0, %0, c1, c0, 0\n"
	"	orr	%0, %0, %1\n"
	"	mcr	p15, 0, %0, c1, c0, 0\n"
	"	mrc	p15, 0, %0, c1, c0, 1\n"
	"	orr	%0, %0, %2\n"
	"	mcr	p15, 0, %0, c1, c0, 1\n"
	  : "=&r" (v)
	  : "Ir" (CR_C), "Ir" (0x40)
	  : "cc");
}
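
/*
 * Park the core in WFI until the boot CPU writes this core's hardware
 * CPU ID into pen_release; any other exit from WFI is counted as a
 * spurious wakeup and the core goes straight back to sleep.
 */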
static inline void platform_do_lowpower(unsigned int cpu, int *spurious)
{
	for (;;) {
		/* make cpu1 turn off at the next WFI instruction */
		if (cpu == 1)
			__raw_writel(0, S5P_ARM_CORE1_CONFIGURATION);

		/*
		 * here's the WFI (hand-encoded so it assembles even on
		 * toolchains that lack the mnemonic)
		 */
		asm(".word	0xe320f003\n"
		    :
		    :
		    : "memory", "cc");

		if (pen_release == cpu_logical_map(cpu)) {
			/*
			 * OK, proper wakeup, we're done
			 */
			break;
		}

		/*
		 * Getting here means that we have come out of WFI without
		 * having been woken up - this shouldn't happen.
		 *
		 * Just note it happening - when we're woken, we can report
		 * its occurrence.
		 */
		(*spurious)++;
	}
}
/*
 * platform-specific code to shut down a CPU
 *
 * Called with IRQs disabled
 */
void __ref exynos_cpu_die(unsigned int cpu)
{
	int spurious = 0;
	int primary_part = 0;

	/*
	 * we're ready for shutdown now, so do it.
	 * Exynos4 is A9 based while Exynos5 is A15; check the CPU part
	 * number by reading the Main ID Register and then perform the
	 * appropriate sequence for entering low power.
	 */
	asm("mrc	p15, 0, %0, c0, c0, 0" : "=r"(primary_part) : : "cc");

	/* bits [15:4] of MIDR hold the primary part number; 0xC0F is Cortex-A15 */
	if ((primary_part & 0xfff0) == 0xc0f0)
		cpu_enter_lowpower_a15();
	else
		cpu_enter_lowpower_a9();

	platform_do_lowpower(cpu, &spurious);

	/*
	 * bring this CPU back into the world of cache
	 * coherency, and then restore interrupts
	 */
	cpu_leave_lowpower();

	if (spurious)
		pr_warn("CPU%u: %u spurious wakeup calls\n", cpu, spurious);
}
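
/*
 * For context (not part of this file): on kernels of this era the
 * exynos_cpu_die() callback is registered through the machine's SMP
 * operations, typically in platsmp.c. A minimal sketch, assuming the
 * generic ARM struct smp_operations of the time; the two secondary-boot
 * field values are illustrative, only .cpu_die comes from this file:
 *
 *	struct smp_operations exynos_smp_ops __initdata = {
 *		.smp_secondary_init	= exynos_secondary_init,
 *		.smp_boot_secondary	= exynos_boot_secondary,
 *	#ifdef CONFIG_HOTPLUG_CPU
 *		.cpu_die		= exynos_cpu_die,
 *	#endif
 *	};
 */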
  137. }