cpu.c

/* CPU control.
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licensed under the GPL.
 */
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <asm/semaphore.h>

/* This protects CPUs going up and down... */
DECLARE_MUTEX(cpucontrol);
EXPORT_SYMBOL_GPL(cpucontrol);

/* Chain of hotplug callbacks; additions and removals are serialized
 * by cpucontrol. */
static struct notifier_block *cpu_chain;

/* Need to know about CPUs going up/down? */
int register_cpu_notifier(struct notifier_block *nb)
{
        int ret;

        if ((ret = down_interruptible(&cpucontrol)) != 0)
                return ret;
        ret = notifier_chain_register(&cpu_chain, nb);
        up(&cpucontrol);
        return ret;
}
EXPORT_SYMBOL(register_cpu_notifier);

void unregister_cpu_notifier(struct notifier_block *nb)
{
        down(&cpucontrol);
        notifier_chain_unregister(&cpu_chain, nb);
        up(&cpucontrol);
}
EXPORT_SYMBOL(unregister_cpu_notifier);
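
/*
 * A minimal usage sketch for the two calls above (hypothetical: the
 * callback and notifier_block names are illustrations, not part of
 * this file):
 *
 *	static int my_cpu_callback(struct notifier_block *nb,
 *				   unsigned long action, void *hcpu)
 *	{
 *		unsigned int cpu = (unsigned long)hcpu;
 *
 *		switch (action) {
 *		case CPU_UP_PREPARE:
 *			... allocate per-cpu state for cpu ...
 *			break;
 *		case CPU_DEAD:
 *			... and free it again ...
 *			break;
 *		}
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_cpu_nb = {
 *		.notifier_call = my_cpu_callback,
 *	};
 *
 *	register_cpu_notifier(&my_cpu_nb);
 */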

#ifdef CONFIG_HOTPLUG_CPU

/* Warn about any task that still has cpu time accounted on the dead cpu. */
static inline void check_for_tasks(int cpu)
{
        struct task_struct *p;

        write_lock_irq(&tasklist_lock);
        for_each_process(p) {
                if (task_cpu(p) == cpu &&
                    (!cputime_eq(p->utime, cputime_zero) ||
                     !cputime_eq(p->stime, cputime_zero)))
                        printk(KERN_WARNING "Task %s (pid = %d) is on cpu %d "
                                "(state = %ld, flags = %lx)\n",
                                p->comm, p->pid, cpu, p->state, p->flags);
        }
        write_unlock_irq(&tasklist_lock);
}

/* Take this CPU down (runs on the dying cpu via __stop_machine_run()). */
static int take_cpu_down(void *unused)
{
        int err;

        /* Ensure this CPU doesn't handle any more interrupts. */
        err = __cpu_disable();
        if (err < 0)
                return err;

        /* Force idle task to run as soon as we yield: it should
           immediately notice cpu is offline and die quickly. */
        sched_idle_next();
        return 0;
}
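
/*
 * Take a CPU offline.  The sequence, as implemented below: send
 * CPU_DOWN_PREPARE, migrate ourselves off the dying cpu, run
 * take_cpu_down() on it via __stop_machine_run(), wait until only its
 * idle task is left runnable, __cpu_die() it, and finally send
 * CPU_DEAD.  Returns 0 on success or a negative errno.
 */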
int cpu_down(unsigned int cpu)
{
        int err;
        struct task_struct *p;
        cpumask_t old_allowed, tmp;

        if ((err = lock_cpu_hotplug_interruptible()) != 0)
                return err;

        if (num_online_cpus() == 1) {
                err = -EBUSY;
                goto out;
        }

        if (!cpu_online(cpu)) {
                err = -EINVAL;
                goto out;
        }

        err = notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE,
                                  (void *)(long)cpu);
        if (err == NOTIFY_BAD) {
                printk("%s: attempt to take down CPU %u failed\n",
                       __FUNCTION__, cpu);
                err = -EINVAL;
                goto out;
        }

        /* Ensure that we are not runnable on dying cpu */
        old_allowed = current->cpus_allowed;
        tmp = CPU_MASK_ALL;
        cpu_clear(cpu, tmp);
        set_cpus_allowed(current, tmp);

        p = __stop_machine_run(take_cpu_down, NULL, cpu);
        if (IS_ERR(p)) {
                /* CPU didn't die: tell everyone.  Can't complain. */
                if (notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED,
                                        (void *)(long)cpu) == NOTIFY_BAD)
                        BUG();
                err = PTR_ERR(p);
                goto out_allowed;
        }

        if (cpu_online(cpu))
                goto out_thread;

        /* Wait for it to sleep (leaving idle task). */
        while (!idle_cpu(cpu))
                yield();

        /* This actually kills the CPU. */
        __cpu_die(cpu);

        /* Move it here so it can run. */
        kthread_bind(p, get_cpu());
        put_cpu();

        /* CPU is completely dead: tell everyone.  Too late to complain. */
        if (notifier_call_chain(&cpu_chain, CPU_DEAD, (void *)(long)cpu)
            == NOTIFY_BAD)
                BUG();

        check_for_tasks(cpu);

out_thread:
        err = kthread_stop(p);
out_allowed:
        set_cpus_allowed(current, old_allowed);
out:
        unlock_cpu_hotplug();
        return err;
}

#endif /* CONFIG_HOTPLUG_CPU */
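
/*
 * Bring a CPU online.  As implemented below: send CPU_UP_PREPARE, run
 * the arch-specific __cpu_up(), then send CPU_ONLINE; if anything fails
 * along the way, CPU_UP_CANCELED is sent to the chain instead.
 */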
int __devinit cpu_up(unsigned int cpu)
{
        int ret;
        void *hcpu = (void *)(long)cpu;

        if ((ret = down_interruptible(&cpucontrol)) != 0)
                return ret;

        if (cpu_online(cpu) || !cpu_present(cpu)) {
                ret = -EINVAL;
                goto out;
        }

        ret = notifier_call_chain(&cpu_chain, CPU_UP_PREPARE, hcpu);
        if (ret == NOTIFY_BAD) {
                printk("%s: attempt to bring up CPU %u failed\n",
                       __FUNCTION__, cpu);
                ret = -EINVAL;
                goto out_notify;
        }

        /* Arch-specific enabling code. */
        ret = __cpu_up(cpu);
        if (ret != 0)
                goto out_notify;
        if (!cpu_online(cpu))
                BUG();

        /* Now tell everyone the cpu is up. */
        notifier_call_chain(&cpu_chain, CPU_ONLINE, hcpu);

out_notify:
        if (ret != 0)
                notifier_call_chain(&cpu_chain, CPU_UP_CANCELED, hcpu);
out:
        up(&cpucontrol);
        return ret;
}
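
/*
 * Summary of the events this file delivers to the cpu_chain notifiers:
 *
 *   cpu_up():   CPU_UP_PREPARE, then CPU_ONLINE on success or
 *               CPU_UP_CANCELED on failure.
 *   cpu_down(): CPU_DOWN_PREPARE, then CPU_DEAD on success or
 *               CPU_DOWN_FAILED on failure.
 */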