/*
 *  linux/arch/arm/mach-realview/hotplug.c
 *
 *  Copyright (C) 2002 ARM Ltd.
 *  All Rights Reserved
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/smp.h>

#include <asm/cacheflush.h>
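
/*
 * Take the dying CPU out of coherency: flush its caches, drop it out of
 * the SMP coherency domain and disable its D-cache, so it can sit in WFI
 * without holding any cache lines.
 */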
static inline void cpu_enter_lowpower(void)
{
	unsigned int v;

	flush_cache_all();
	asm volatile(
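	/*
	 * Invalidate the I-cache and drain the write buffer
	 * (data synchronization barrier)
	 */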
  20. " mcr p15, 0, %1, c7, c5, 0\n"
  21. " mcr p15, 0, %1, c7, c10, 4\n"
  22. /*
  23. * Turn off coherency
  24. */
  25. " mrc p15, 0, %0, c1, c0, 1\n"
  26. " bic %0, %0, #0x20\n"
  27. " mcr p15, 0, %0, c1, c0, 1\n"
  28. " mrc p15, 0, %0, c1, c0, 0\n"
  29. " bic %0, %0, #0x04\n"
  30. " mcr p15, 0, %0, c1, c0, 0\n"
  31. : "=&r" (v)
  32. : "r" (0)
  33. : "cc");
  34. }
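
/*
 * Undo cpu_enter_lowpower(): re-enable the D-cache and bring the core
 * back into the SMP coherency domain.
 */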
static inline void cpu_leave_lowpower(void)
{
	unsigned int v;

	asm volatile(
	"	mrc	p15, 0, %0, c1, c0, 0\n"
	"	orr	%0, %0, #0x04\n"
	"	mcr	p15, 0, %0, c1, c0, 0\n"
	"	mrc	p15, 0, %0, c1, c0, 1\n"
	"	orr	%0, %0, #0x20\n"
	"	mcr	p15, 0, %0, c1, c0, 1\n"
	  : "=&r" (v)
	  :
	  : "cc");
}

static inline void platform_do_lowpower(unsigned int cpu)
{
	/*
	 * there is no power-control hardware on this platform, so all
	 * we can do is put the core into WFI; this is safe as the calling
	 * code will have already disabled interrupts
	 */
	for (;;) {
		/*
		 * here's the WFI (emitted as a raw opcode word rather
		 * than the wfi mnemonic)
		 */
		asm(".word	0xe320f003\n"
		    :
		    :
		    : "memory", "cc");
		/* if (pen_release == cpu) { */
		/*
		 * OK, proper wakeup, we're done
		 */
		break;
		/* } */
		/*
		 * getting here means that we have come out of WFI without
		 * having been woken up - this shouldn't happen
		 *
		 * The trouble is, letting people know about this is not really
		 * possible, since we are currently running incoherently, and
		 * therefore cannot safely call printk() or anything else
		 */
#ifdef DEBUG
		printk(KERN_WARNING "CPU%u: spurious wakeup call\n", cpu);
#endif
	}
}
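
/*
 * There is no power-control hardware on this platform (see
 * platform_do_lowpower() above), so there is nothing more to do here;
 * just report that the CPU has been taken down.
 */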
int platform_cpu_kill(unsigned int cpu)
{
	return 1;
}

/*
 * platform-specific code to shutdown a CPU
 *
 * Called with IRQs disabled
 */
void platform_cpu_die(unsigned int cpu)
{
#ifdef DEBUG
	unsigned int this_cpu = hard_smp_processor_id();

	if (cpu != this_cpu) {
		printk(KERN_CRIT "Eek! platform_cpu_die running on %u, should be %u\n",
		       this_cpu, cpu);
		BUG();
	}
#endif

	/*
	 * we're ready for shutdown now, so do it
	 */
	cpu_enter_lowpower();
	platform_do_lowpower(cpu);

	/*
	 * bring this CPU back into the world of cache
	 * coherency, and then restore interrupts
	 */
	cpu_leave_lowpower();
}

int platform_cpu_disable(unsigned int cpu)
{
	/*
	 * we don't allow CPU 0 to be shutdown (it is still too special
	 * e.g. clock tick interrupts)
	 */
	return cpu == 0 ? -EPERM : 0;
}