/* linux arch/arm/mach-s5pv310/hotplug.c
 *
 *  Cloned from linux/arch/arm/mach-realview/hotplug.c
 *
 *  Copyright (C) 2002 ARM Ltd.
 *  All Rights Reserved
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/smp.h>
#include <linux/completion.h>

#include <asm/cacheflush.h>
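
/*
 * pen_release is the "holding pen" flag shared with the platform SMP
 * boot code: a parked core spins until the booting CPU writes that
 * core's number here.  cpu_killed lets platform_cpu_kill() wait until
 * the dying CPU has actually reached platform_cpu_die().
 */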
extern volatile int pen_release;

static DECLARE_COMPLETION(cpu_killed);

static inline void cpu_enter_lowpower(void)
{
        unsigned int v;

        flush_cache_all();
        asm volatile(
        /* invalidate the I-cache, then data synchronization barrier */
        " mcr p15, 0, %1, c7, c5, 0\n"
        " mcr p15, 0, %1, c7, c10, 4\n"
        /*
         * Turn off coherency: clear the coherency control in the
         * Auxiliary Control Register, then disable the D-cache in the
         * System Control Register
         */
        " mrc p15, 0, %0, c1, c0, 1\n"
        " bic %0, %0, #0x20\n"
        " mcr p15, 0, %0, c1, c0, 1\n"
        " mrc p15, 0, %0, c1, c0, 0\n"
        " bic %0, %0, #0x04\n"
        " mcr p15, 0, %0, c1, c0, 0\n"
          : "=&r" (v)
          : "r" (0)
          : "cc");
}

static inline void cpu_leave_lowpower(void)
{
        unsigned int v;

        asm volatile(
        /* re-enable the D-cache, then turn coherency back on */
        " mrc p15, 0, %0, c1, c0, 0\n"
        " orr %0, %0, #0x04\n"
        " mcr p15, 0, %0, c1, c0, 0\n"
        " mrc p15, 0, %0, c1, c0, 1\n"
        " orr %0, %0, #0x20\n"
        " mcr p15, 0, %0, c1, c0, 1\n"
          : "=&r" (v)
          :
          : "cc");
}

static inline void platform_do_lowpower(unsigned int cpu)
{
        /*
         * there is no power-control hardware on this platform, so all
         * we can do is put the core into WFI; this is safe as the calling
         * code will have already disabled interrupts
         */
        for (;;) {
                /*
                 * here's the WFI
                 */
                asm(".word 0xe320f003\n"
                    :
                    :
                    : "memory", "cc");

                if (pen_release == cpu) {
                        /*
                         * OK, proper wakeup, we're done
                         */
                        break;
                }

                /*
                 * getting here means that we have come out of WFI without
                 * having been woken up - this shouldn't happen
                 *
                 * The trouble is, letting people know about this is not really
                 * possible, since we are currently running incoherently, and
                 * therefore cannot safely call printk() or anything else
                 */
#ifdef DEBUG
                printk(KERN_WARNING "CPU%u: spurious wakeup call\n", cpu);
#endif
        }
}
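
/*
 * Called on another CPU to confirm that the dying CPU has reached
 * platform_cpu_die(); the 5000 timeout passed below is in jiffies.
 */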
int platform_cpu_kill(unsigned int cpu)
{
        return wait_for_completion_timeout(&cpu_killed, 5000);
}

/*
 * platform-specific code to shutdown a CPU
 *
 * Called with IRQs disabled
 */
void platform_cpu_die(unsigned int cpu)
{
#ifdef DEBUG
        unsigned int this_cpu = hard_smp_processor_id();

        if (cpu != this_cpu) {
                printk(KERN_CRIT "Eek! platform_cpu_die running on %u, should be %u\n",
                       this_cpu, cpu);
                BUG();
        }
#endif

        printk(KERN_NOTICE "CPU%u: shutdown\n", cpu);
        complete(&cpu_killed);

        /*
         * we're ready for shutdown now, so do it
         */
        cpu_enter_lowpower();
        platform_do_lowpower(cpu);

        /*
         * bring this CPU back into the world of cache
         * coherency, and then restore interrupts
         */
        cpu_leave_lowpower();
}

int platform_cpu_disable(unsigned int cpu)
{
        /*
         * we don't allow CPU 0 to be shutdown (it is still too special
         * e.g. clock tick interrupts)
         */
        return cpu == 0 ? -EPERM : 0;
}