hotplug-cpu.c

/*
 * pseries CPU Hotplug infrastructure.
 *
 * Split out from arch/powerpc/platforms/pseries/setup.c
 *  arch/powerpc/kernel/rtas.c, and arch/powerpc/platforms/pseries/smp.c
 *
 * Peter Bergner, IBM	March 2001.
 * Copyright (C) 2001 IBM.
 * Dave Engebretsen, Peter Bergner, and
 * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
 * Plus various changes from other IBM teams...
 *
 * Copyright (C) 2006 Michael Ellerman, IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/cpu.h>
#include <asm/system.h>
#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/firmware.h>
#include <asm/machdep.h>
#include <asm/vdso_datapage.h>
#include <asm/pSeries_reconfig.h>
#include "xics.h"
#include "plpar_wrappers.h"

/* This version can't take the spinlock, because it never returns */
static struct rtas_args rtas_stop_self_args = {
	.token = RTAS_UNKNOWN_SERVICE,
	.nargs = 0,
	.nret = 1,
	.rets = &rtas_stop_self_args.args[0],
};
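
/*
 * Stop the calling CPU via the RTAS stop-self call.  On success this
 * never returns, so the panic() at the end should be unreachable.
 */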
static void rtas_stop_self(void)
{
	struct rtas_args *args = &rtas_stop_self_args;

	local_irq_disable();

	BUG_ON(args->token == RTAS_UNKNOWN_SERVICE);

	printk("cpu %u (hwid %u) Ready to die...\n",
	       smp_processor_id(), hard_smp_processor_id());
	enter_rtas(__pa(args));

	panic("Alas, I survived.\n");
}
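
/*
 * Offline path run on the CPU that is going away: disable interrupts,
 * tear down its XICS interrupt state, unregister its SLB shadow buffer
 * with the hypervisor and finally call stop-self, which does not return.
 */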
static void pseries_mach_cpu_die(void)
{
	local_irq_disable();
	idle_task_exit();
	xics_teardown_cpu();
	unregister_slb_shadow(hard_smp_processor_id(), __pa(get_slb_shadow()));
	rtas_stop_self();

	/* Should never get here... */
	BUG();
	for(;;);
}

static int qcss_tok;	/* query-cpu-stopped-state token */

/* Get state of physical CPU.
 * Return codes:
 *	0	- The processor is in the RTAS stopped state
 *	1	- stop-self is in progress
 *	2	- The processor is not in the RTAS stopped state
 *	-1	- Hardware Error
 *	-2	- Hardware Busy, Try again later.
 */
static int query_cpu_stopped(unsigned int pcpu)
{
	int cpu_status, status;

	status = rtas_call(qcss_tok, 1, 2, &cpu_status, pcpu);
	if (status != 0) {
		printk(KERN_ERR
		       "RTAS query-cpu-stopped-state failed: %i\n", status);
		return status;
	}

	return cpu_status;
}
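
/*
 * Runs on the CPU being offlined: take it out of the online map, drop
 * the VDSO processor count, hand boot_cpuid to another online CPU if
 * necessary and migrate its interrupts away.
 */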
static int pseries_cpu_disable(void)
{
	int cpu = smp_processor_id();

	cpu_clear(cpu, cpu_online_map);
	vdso_data->processorCount--;

	/* fix boot_cpuid here */
	if (cpu == boot_cpuid)
		boot_cpuid = any_online_cpu(cpu_online_map);

	/* FIXME: abstract this to not be platform specific later on */
	xics_migrate_irqs_away();
	return 0;
}
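
/*
 * Called on a surviving CPU to wait for the dying CPU to reach the RTAS
 * stopped state, polling query-cpu-stopped-state a bounded number of
 * times before clearing its cpu_start flag.
 */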
static void pseries_cpu_die(unsigned int cpu)
{
	int tries;
	int cpu_status;
	unsigned int pcpu = get_hard_smp_processor_id(cpu);

	for (tries = 0; tries < 25; tries++) {
		cpu_status = query_cpu_stopped(pcpu);
		if (cpu_status == 0 || cpu_status == -1)
			break;
		cpu_relax();
	}
	if (cpu_status != 0) {
		printk("Querying DEAD? cpu %i (%i) shows %i\n",
		       cpu, pcpu, cpu_status);
	}

	/* Isolation and deallocation are definitely done by
	 * drslot_chrp_cpu.  If they were not they would be
	 * done here.  Change isolate state to Isolate and
	 * change allocation-state to Unusable.
	 */
	paca[cpu].cpu_start = 0;
}

/*
 * Update cpu_present_map and paca(s) for a new cpu node.  The wrinkle
 * here is that a cpu device node may represent up to two logical cpus
 * in the SMT case.  We must honor the assumption in other code that
 * the logical ids for sibling SMT threads x and y are adjacent, such
 * that x^1 == y and y^1 == x.
 */
static int pseries_add_processor(struct device_node *np)
{
	unsigned int cpu;
	cpumask_t candidate_map, tmp = CPU_MASK_NONE;
	int err = -ENOSPC, len, nthreads, i;
	const u32 *intserv;

	intserv = of_get_property(np, "ibm,ppc-interrupt-server#s", &len);
	if (!intserv)
		return 0;

	nthreads = len / sizeof(u32);
	for (i = 0; i < nthreads; i++)
		cpu_set(i, tmp);

	cpu_maps_update_begin();

	BUG_ON(!cpus_subset(cpu_present_map, cpu_possible_map));

	/* Get a bitmap of unoccupied slots. */
	cpus_xor(candidate_map, cpu_possible_map, cpu_present_map);
	if (cpus_empty(candidate_map)) {
		/* If we get here, it most likely means that NR_CPUS is
		 * less than the partition's max processors setting.
		 */
		printk(KERN_ERR "Cannot add cpu %s; this system configuration"
		       " supports %d logical cpus.\n", np->full_name,
		       cpus_weight(cpu_possible_map));
		goto out_unlock;
	}

	while (!cpus_empty(tmp))
		if (cpus_subset(tmp, candidate_map))
			/* Found a range where we can insert the new cpu(s) */
			break;
		else
			cpus_shift_left(tmp, tmp, nthreads);

	if (cpus_empty(tmp)) {
		printk(KERN_ERR "Unable to find space in cpu_present_map for"
		       " processor %s with %d thread(s)\n", np->name,
		       nthreads);
		goto out_unlock;
	}

	for_each_cpu_mask(cpu, tmp) {
		BUG_ON(cpu_isset(cpu, cpu_present_map));
		cpu_set(cpu, cpu_present_map);
		set_hard_smp_processor_id(cpu, *intserv++);
	}
	err = 0;
out_unlock:
	cpu_maps_update_done();
	return err;
}

/*
 * Update the present map for a cpu node which is going away, and set
 * the hard id in the paca(s) to -1 to be consistent with boot time
 * convention for non-present cpus.
 */
static void pseries_remove_processor(struct device_node *np)
{
	unsigned int cpu;
	int len, nthreads, i;
	const u32 *intserv;

	intserv = of_get_property(np, "ibm,ppc-interrupt-server#s", &len);
	if (!intserv)
		return;

	nthreads = len / sizeof(u32);

	cpu_maps_update_begin();
	for (i = 0; i < nthreads; i++) {
		for_each_present_cpu(cpu) {
			if (get_hard_smp_processor_id(cpu) != intserv[i])
				continue;
			BUG_ON(cpu_online(cpu));
			cpu_clear(cpu, cpu_present_map);
			set_hard_smp_processor_id(cpu, -1);
			break;
		}
		if (cpu == NR_CPUS)
			printk(KERN_WARNING "Could not find cpu to remove "
			       "with physical id 0x%x\n", intserv[i]);
	}
	cpu_maps_update_done();
}
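
/*
 * pSeries reconfiguration notifier: add or remove the logical CPUs for a
 * cpu device node as it appears in or disappears from the device tree.
 */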
static int pseries_smp_notifier(struct notifier_block *nb,
				unsigned long action, void *node)
{
	int err = NOTIFY_OK;

	switch (action) {
	case PSERIES_RECONFIG_ADD:
		if (pseries_add_processor(node))
			err = NOTIFY_BAD;
		break;
	case PSERIES_RECONFIG_REMOVE:
		pseries_remove_processor(node);
		break;
	default:
		err = NOTIFY_DONE;
		break;
	}
	return err;
}

static struct notifier_block pseries_smp_nb = {
	.notifier_call = pseries_smp_notifier,
};
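
/*
 * Set up CPU hotplug: bail out on MPIC systems, look up the RTAS tokens
 * we need, hook the pseries cpu_disable/cpu_die callbacks into ppc_md and
 * smp_ops and, on LPAR, register for device tree reconfig notifications.
 */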
static int __init pseries_cpu_hotplug_init(void)
{
	struct device_node *np;
	const char *typep;

	for_each_node_by_name(np, "interrupt-controller") {
		typep = of_get_property(np, "compatible", NULL);
		if (strstr(typep, "open-pic")) {
			of_node_put(np);
			printk(KERN_INFO "CPU Hotplug not supported on "
				"systems using MPIC\n");
			return 0;
		}
	}

	rtas_stop_self_args.token = rtas_token("stop-self");
	qcss_tok = rtas_token("query-cpu-stopped-state");

	if (rtas_stop_self_args.token == RTAS_UNKNOWN_SERVICE ||
			qcss_tok == RTAS_UNKNOWN_SERVICE) {
		printk(KERN_INFO "CPU Hotplug not supported by firmware "
				"- disabling.\n");
		return 0;
	}

	ppc_md.cpu_die = pseries_mach_cpu_die;
	smp_ops->cpu_disable = pseries_cpu_disable;
	smp_ops->cpu_die = pseries_cpu_die;

	/* Processors can be added/removed only on LPAR */
	if (firmware_has_feature(FW_FEATURE_LPAR))
		pSeries_reconfig_notifier_register(&pseries_smp_nb);
	return 0;
}
arch_initcall(pseries_cpu_hotplug_init);