/*
 *  arch/s390/kernel/smp.c
 *
 *    Copyright IBM Corp. 1999,2007
 *    Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
 *		 Martin Schwidefsky (schwidefsky@de.ibm.com)
 *		 Heiko Carstens (heiko.carstens@de.ibm.com)
 *
 *  based on other smp stuff by
 *    (c) 1995 Alan Cox, CymruNET Ltd  <alan@cymru.net>
 *    (c) 1998 Ingo Molnar
 *
 * We work with logical cpu numbering everywhere we can. The only
 * functions using the real cpu address (got from STAP) are the sigp
 * functions. For all other functions we use the identity mapping.
 * That means that cpu_number_map[i] == i for every cpu. cpu_number_map is
 * used e.g. to find the idle task belonging to a logical cpu. Every array
 * in the kernel is sorted by the logical cpu number and not by the physical
 * one which is causing all the confusion with __cpu_logical_map and
 * cpu_number_map in other architectures.
 */

#define KMSG_COMPONENT "cpu"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
#include <linux/cpu.h>
#include <linux/timex.h>
#include <linux/bootmem.h>
#include <asm/ipl.h>
#include <asm/setup.h>
#include <asm/sigp.h>
#include <asm/pgalloc.h>
#include <asm/irq.h>
#include <asm/s390_ext.h>
#include <asm/cpcmd.h>
#include <asm/tlbflush.h>
#include <asm/timer.h>
#include <asm/lowcore.h>
#include <asm/sclp.h>
#include <asm/cpu.h>
#include "entry.h"

/*
 * An array with a pointer to the lowcore of every CPU.
 */
struct _lowcore *lowcore_ptr[NR_CPUS];
EXPORT_SYMBOL(lowcore_ptr);

cpumask_t cpu_online_map = CPU_MASK_NONE;
EXPORT_SYMBOL(cpu_online_map);

cpumask_t cpu_possible_map = CPU_MASK_ALL;
EXPORT_SYMBOL(cpu_possible_map);

static struct task_struct *current_set[NR_CPUS];

static u8 smp_cpu_type;
static int smp_use_sigp_detection;

enum s390_cpu_state {
	CPU_STATE_STANDBY,
	CPU_STATE_CONFIGURED,
};

DEFINE_MUTEX(smp_cpu_state_mutex);
int smp_cpu_polarization[NR_CPUS];
static int smp_cpu_state[NR_CPUS];
static int cpu_management;

static DEFINE_PER_CPU(struct cpu, cpu_devices);

static void smp_ext_bitcall(int, ec_bit_sig);

void smp_send_stop(void)
{
	int cpu, rc;

	/* Disable all interrupts/machine checks */
	__load_psw_mask(psw_kernel_bits & ~PSW_MASK_MCHECK);

	/* write magic number to zero page (absolute 0) */
	lowcore_ptr[smp_processor_id()]->panic_magic = __PANIC_MAGIC;

	/* stop all processors */
	for_each_online_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;
		do {
			rc = signal_processor(cpu, sigp_stop);
		} while (rc == sigp_busy);

		while (!smp_cpu_not_running(cpu))
			cpu_relax();
	}
}

/*
 * This is the main routine where commands issued by other
 * cpus are handled.
 */
static void do_ext_call_interrupt(__u16 code)
{
	unsigned long bits;

	/*
	 * handle bit signal external calls
	 *
	 * For the ec_schedule signal we have to do nothing. All the work
	 * is done automatically when we return from the interrupt.
	 */
	bits = xchg(&S390_lowcore.ext_call_fast, 0);

	if (test_bit(ec_call_function, &bits))
		generic_smp_call_function_interrupt();

	if (test_bit(ec_call_function_single, &bits))
		generic_smp_call_function_single_interrupt();
}

/*
 * Send an external call sigp to another cpu and return without waiting
 * for its completion.
 */
static void smp_ext_bitcall(int cpu, ec_bit_sig sig)
{
	/*
	 * Set signaling bit in lowcore of target cpu and kick it
	 */
	set_bit(sig, (unsigned long *) &lowcore_ptr[cpu]->ext_call_fast);
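	/*
	 * A sigp order is rejected with "busy" while the target cpu is
	 * still processing a previously issued order, so retry until
	 * the emergency signal is accepted.
	 */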
	while (signal_processor(cpu, sigp_emergency_signal) == sigp_busy)
		udelay(10);
}

void arch_send_call_function_ipi(cpumask_t mask)
{
	int cpu;

	for_each_cpu_mask(cpu, mask)
		smp_ext_bitcall(cpu, ec_call_function);
}

void arch_send_call_function_single_ipi(int cpu)
{
	smp_ext_bitcall(cpu, ec_call_function_single);
}

#ifndef CONFIG_64BIT
/*
 * this function sends a 'purge tlb' signal to another CPU.
 */
static void smp_ptlb_callback(void *info)
{
	__tlb_flush_local();
}

void smp_ptlb_all(void)
{
	on_each_cpu(smp_ptlb_callback, NULL, 1);
}
EXPORT_SYMBOL(smp_ptlb_all);
#endif /* ! CONFIG_64BIT */

/*
 * this function sends a 'reschedule' IPI to another CPU.
 * it goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */
void smp_send_reschedule(int cpu)
{
	smp_ext_bitcall(cpu, ec_schedule);
}

/*
 * parameter area for the set/clear control bit callbacks
 */
struct ec_creg_mask_parms {
	unsigned long orvals[16];
	unsigned long andvals[16];
};

/*
 * callback for setting/clearing control bits
 */
static void smp_ctl_bit_callback(void *info)
{
	struct ec_creg_mask_parms *pp = info;
	unsigned long cregs[16];
	int i;
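
	/* Rewrite each control register as new = (old & andval) | orval. */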
	__ctl_store(cregs, 0, 15);
	for (i = 0; i <= 15; i++)
		cregs[i] = (cregs[i] & pp->andvals[i]) | pp->orvals[i];
	__ctl_load(cregs, 0, 15);
}

/*
 * Set a bit in a control register of all cpus
 */
void smp_ctl_set_bit(int cr, int bit)
{
	struct ec_creg_mask_parms parms;

	memset(&parms.orvals, 0, sizeof(parms.orvals));
	memset(&parms.andvals, 0xff, sizeof(parms.andvals));
	parms.orvals[cr] = 1 << bit;
	on_each_cpu(smp_ctl_bit_callback, &parms, 1);
}
EXPORT_SYMBOL(smp_ctl_set_bit);

/*
 * Clear a bit in a control register of all cpus
 */
void smp_ctl_clear_bit(int cr, int bit)
{
	struct ec_creg_mask_parms parms;

	memset(&parms.orvals, 0, sizeof(parms.orvals));
	memset(&parms.andvals, 0xff, sizeof(parms.andvals));
	parms.andvals[cr] = ~(1L << bit);
	on_each_cpu(smp_ctl_bit_callback, &parms, 1);
}
EXPORT_SYMBOL(smp_ctl_clear_bit);

/*
 * In early ipl state a temporary logical cpu number is needed, so the
 * sigp functions can be used to sense other cpus. Since NR_CPUS is >= 2
 * on CONFIG_SMP and the ipl cpu is logical cpu 0, it must be 1.
 */
#define CPU_INIT_NO	1

#if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_ZFCPDUMP_MODULE)

/*
 * zfcpdump_prefix_array holds prefix registers for the following scenario:
 * 64 bit zfcpdump kernel and 31 bit kernel which is to be dumped. We have to
 * save its prefix registers, since they get lost, when switching from 31 bit
 * to 64 bit.
 */
unsigned int zfcpdump_prefix_array[NR_CPUS + 1] \
	__attribute__((__section__(".data")));

static void __init smp_get_save_area(unsigned int cpu, unsigned int phy_cpu)
{
	if (ipl_info.type != IPL_TYPE_FCP_DUMP)
		return;
	if (cpu >= NR_CPUS) {
		pr_warning("CPU %i exceeds the maximum %i and is excluded from "
			   "the dump\n", cpu, NR_CPUS - 1);
		return;
	}
	zfcpdump_save_areas[cpu] = kmalloc(sizeof(union save_area), GFP_KERNEL);
	if (!zfcpdump_save_areas[cpu])
		return;
	__cpu_logical_map[CPU_INIT_NO] = (__u16) phy_cpu;
	while (signal_processor(CPU_INIT_NO, sigp_stop_and_store_status) ==
	       sigp_busy)
		cpu_relax();
	memcpy(zfcpdump_save_areas[cpu],
	       (void *)(unsigned long) store_prefix() + SAVE_AREA_BASE,
	       SAVE_AREA_SIZE);
#ifdef CONFIG_64BIT
	/* copy original prefix register */
	zfcpdump_save_areas[cpu]->s390x.pref_reg = zfcpdump_prefix_array[cpu];
#endif
}

union save_area *zfcpdump_save_areas[NR_CPUS + 1];
EXPORT_SYMBOL_GPL(zfcpdump_save_areas);

#else

static inline void smp_get_save_area(unsigned int cpu, unsigned int phy_cpu) { }

#endif /* CONFIG_ZFCPDUMP || CONFIG_ZFCPDUMP_MODULE */

static int cpu_stopped(int cpu)
{
	__u32 status;

	/* Check for stopped state */
	if (signal_processor_ps(&status, 0, cpu, sigp_sense) ==
	    sigp_status_stored) {
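		/* 0x40 is the "stopped" bit of the stored status word. */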
		if (status & 0x40)
			return 1;
	}
	return 0;
}

static int cpu_known(int cpu_id)
{
	int cpu;

	for_each_present_cpu(cpu) {
		if (__cpu_logical_map[cpu] == cpu_id)
			return 1;
	}
	return 0;
}

static int smp_rescan_cpus_sigp(cpumask_t avail)
{
	int cpu_id, logical_cpu;

	logical_cpu = first_cpu(avail);
	if (logical_cpu == NR_CPUS)
		return 0;
	for (cpu_id = 0; cpu_id <= 65535; cpu_id++) {
		if (cpu_known(cpu_id))
			continue;
		__cpu_logical_map[logical_cpu] = cpu_id;
		smp_cpu_polarization[logical_cpu] = POLARIZATION_UNKNWN;
		if (!cpu_stopped(logical_cpu))
			continue;
		cpu_set(logical_cpu, cpu_present_map);
		smp_cpu_state[logical_cpu] = CPU_STATE_CONFIGURED;
		logical_cpu = next_cpu(logical_cpu, avail);
		if (logical_cpu == NR_CPUS)
			break;
	}
	return 0;
}

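/*
 * Like smp_rescan_cpus_sigp(), but obtains the cpu list from the
 * service element via sclp, which also reports standby cpus that
 * sigp sensing cannot see.
 */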
static int smp_rescan_cpus_sclp(cpumask_t avail)
{
	struct sclp_cpu_info *info;
	int cpu_id, logical_cpu, cpu;
	int rc;

	logical_cpu = first_cpu(avail);
	if (logical_cpu == NR_CPUS)
		return 0;
	info = kmalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;
	rc = sclp_get_cpu_info(info);
	if (rc)
		goto out;
	for (cpu = 0; cpu < info->combined; cpu++) {
		if (info->has_cpu_type && info->cpu[cpu].type != smp_cpu_type)
			continue;
		cpu_id = info->cpu[cpu].address;
		if (cpu_known(cpu_id))
			continue;
		__cpu_logical_map[logical_cpu] = cpu_id;
		smp_cpu_polarization[logical_cpu] = POLARIZATION_UNKNWN;
		cpu_set(logical_cpu, cpu_present_map);
		if (cpu >= info->configured)
			smp_cpu_state[logical_cpu] = CPU_STATE_STANDBY;
		else
			smp_cpu_state[logical_cpu] = CPU_STATE_CONFIGURED;
		logical_cpu = next_cpu(logical_cpu, avail);
		if (logical_cpu == NR_CPUS)
			break;
	}
out:
	kfree(info);
	return rc;
}

static int __smp_rescan_cpus(void)
{
	cpumask_t avail;
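
	/*
	 * Logical cpu numbers that are possible but not yet present are
	 * free for newly detected cpus; since the present mask is a
	 * subset of the possible mask, the xor yields exactly those.
	 */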
	cpus_xor(avail, cpu_possible_map, cpu_present_map);
	if (smp_use_sigp_detection)
		return smp_rescan_cpus_sigp(avail);
	else
		return smp_rescan_cpus_sclp(avail);
}

static void __init smp_detect_cpus(void)
{
	unsigned int cpu, c_cpus, s_cpus;
	struct sclp_cpu_info *info;
	u16 boot_cpu_addr, cpu_addr;

	c_cpus = 1;
	s_cpus = 0;
	boot_cpu_addr = S390_lowcore.cpu_data.cpu_addr;
	info = kmalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		panic("smp_detect_cpus failed to allocate memory\n");
	/* Use sigp detection algorithm if sclp doesn't work. */
	if (sclp_get_cpu_info(info)) {
		smp_use_sigp_detection = 1;
		for (cpu = 0; cpu <= 65535; cpu++) {
			if (cpu == boot_cpu_addr)
				continue;
			__cpu_logical_map[CPU_INIT_NO] = cpu;
			if (!cpu_stopped(CPU_INIT_NO))
				continue;
			smp_get_save_area(c_cpus, cpu);
			c_cpus++;
		}
		goto out;
	}

	if (info->has_cpu_type) {
		for (cpu = 0; cpu < info->combined; cpu++) {
			if (info->cpu[cpu].address == boot_cpu_addr) {
				smp_cpu_type = info->cpu[cpu].type;
				break;
			}
		}
	}

	for (cpu = 0; cpu < info->combined; cpu++) {
		if (info->has_cpu_type && info->cpu[cpu].type != smp_cpu_type)
			continue;
		cpu_addr = info->cpu[cpu].address;
		if (cpu_addr == boot_cpu_addr)
			continue;
		__cpu_logical_map[CPU_INIT_NO] = cpu_addr;
		if (!cpu_stopped(CPU_INIT_NO)) {
			s_cpus++;
			continue;
		}
		smp_get_save_area(c_cpus, cpu_addr);
		c_cpus++;
	}
out:
	kfree(info);
	pr_info("%d configured CPUs, %d standby CPUs\n", c_cpus, s_cpus);
	get_online_cpus();
	__smp_rescan_cpus();
	put_online_cpus();
}

/*
 * Activate a secondary processor.
 */
int __cpuinit start_secondary(void *cpuvoid)
{
	/* Setup the cpu */
	cpu_init();
	preempt_disable();
	/* Enable TOD clock interrupts on the secondary cpu. */
	init_cpu_timer();
	/* Enable cpu timer interrupts on the secondary cpu. */
	init_cpu_vtimer();
	/* Enable pfault pseudo page faults on this cpu. */
	pfault_init();

	/* call cpu notifiers */
	notify_cpu_starting(smp_processor_id());
	/* Mark this cpu as online */
	ipi_call_lock();
	cpu_set(smp_processor_id(), cpu_online_map);
	ipi_call_unlock();
	/* Switch on interrupts */
	local_irq_enable();
	/* Print info about this processor */
	print_cpu_info(&S390_lowcore.cpu_data);
	/* cpu_idle will call schedule for us */
	cpu_idle();
	return 0;
}

static void __init smp_create_idle(unsigned int cpu)
{
	struct task_struct *p;

	/*
	 * don't care about the psw and regs settings since we'll never
	 * reschedule the forked task.
	 */
	p = fork_idle(cpu);
	if (IS_ERR(p))
		panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p));
	current_set[cpu] = p;
}

static int __cpuinit smp_alloc_lowcore(int cpu)
{
	unsigned long async_stack, panic_stack;
	struct _lowcore *lowcore;
	int lc_order;
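
	/*
	 * The prefix (lowcore) area covers 8KB in 64 bit mode but only
	 * 4KB in 31 bit mode, hence allocation order 1 vs. order 0.
	 */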
	lc_order = sizeof(long) == 8 ? 1 : 0;
	lowcore = (void *) __get_free_pages(GFP_KERNEL | GFP_DMA, lc_order);
	if (!lowcore)
		return -ENOMEM;
	async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
	panic_stack = __get_free_page(GFP_KERNEL);
	if (!panic_stack || !async_stack)
		goto out;
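
	/*
	 * Inherit the first 512 bytes (interrupt psws and other
	 * architecturally defined fields) from the boot cpu's lowcore;
	 * the rest is per-cpu state and starts out zeroed.
	 */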
	memcpy(lowcore, &S390_lowcore, 512);
	memset((char *)lowcore + 512, 0, sizeof(*lowcore) - 512);
	lowcore->async_stack = async_stack + ASYNC_SIZE;
	lowcore->panic_stack = panic_stack + PAGE_SIZE;

#ifndef CONFIG_64BIT
	if (MACHINE_HAS_IEEE) {
		unsigned long save_area;

		save_area = get_zeroed_page(GFP_KERNEL);
		if (!save_area)
			goto out;
		lowcore->extended_save_area_addr = (u32) save_area;
	}
#endif
	lowcore_ptr[cpu] = lowcore;
	return 0;

out:
	free_page(panic_stack);
	free_pages(async_stack, ASYNC_ORDER);
	free_pages((unsigned long) lowcore, lc_order);
	return -ENOMEM;
}

#ifdef CONFIG_HOTPLUG_CPU
static void smp_free_lowcore(int cpu)
{
	struct _lowcore *lowcore;
	int lc_order;

	lc_order = sizeof(long) == 8 ? 1 : 0;
	lowcore = lowcore_ptr[cpu];
#ifndef CONFIG_64BIT
	if (MACHINE_HAS_IEEE)
		free_page((unsigned long) lowcore->extended_save_area_addr);
#endif
	free_page(lowcore->panic_stack - PAGE_SIZE);
	free_pages(lowcore->async_stack - ASYNC_SIZE, ASYNC_ORDER);
	free_pages((unsigned long) lowcore, lc_order);
	lowcore_ptr[cpu] = NULL;
}
#endif /* CONFIG_HOTPLUG_CPU */

/* Upping and downing of CPUs */
int __cpuinit __cpu_up(unsigned int cpu)
{
	struct task_struct *idle;
	struct _lowcore *cpu_lowcore;
	struct stack_frame *sf;
	sigp_ccode ccode;

	if (smp_cpu_state[cpu] != CPU_STATE_CONFIGURED)
		return -EIO;
	if (smp_alloc_lowcore(cpu))
		return -ENOMEM;

	ccode = signal_processor_p((__u32)(unsigned long)(lowcore_ptr[cpu]),
				   cpu, sigp_set_prefix);
	if (ccode)
		return -EIO;

	idle = current_set[cpu];
	cpu_lowcore = lowcore_ptr[cpu];
	cpu_lowcore->kernel_stack = (unsigned long)
		task_stack_page(idle) + THREAD_SIZE;
	cpu_lowcore->thread_info = (unsigned long) task_thread_info(idle);
	sf = (struct stack_frame *) (cpu_lowcore->kernel_stack
				     - sizeof(struct pt_regs)
				     - sizeof(struct stack_frame));
	memset(sf, 0, sizeof(struct stack_frame));
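	/*
	 * struct stack_frame keeps registers 6-15 in gprs[], so
	 * gprs[9] is the save slot of register 15, the stack pointer.
	 */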
	sf->gprs[9] = (unsigned long) sf;
	cpu_lowcore->save_area[15] = (unsigned long) sf;
	__ctl_store(cpu_lowcore->cregs_save_area, 0, 15);
	asm volatile(
		"	stam	0,15,0(%0)"
		: : "a" (&cpu_lowcore->access_regs_save_area) : "memory");
	cpu_lowcore->percpu_offset = __per_cpu_offset[cpu];
	cpu_lowcore->current_task = (unsigned long) idle;
	cpu_lowcore->cpu_data.cpu_nr = cpu;
	cpu_lowcore->kernel_asce = S390_lowcore.kernel_asce;
	cpu_lowcore->ipl_device = S390_lowcore.ipl_device;
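	/* Make sure the initialized lowcore is visible before restart. */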
	eieio();
	while (signal_processor(cpu, sigp_restart) == sigp_busy)
		udelay(10);

	while (!cpu_online(cpu))
		cpu_relax();
	return 0;
}

static int __init setup_possible_cpus(char *s)
{
	int pcpus, cpu;

	pcpus = simple_strtoul(s, NULL, 0);
	cpu_possible_map = cpumask_of_cpu(0);
	for (cpu = 1; cpu < pcpus && cpu < NR_CPUS; cpu++)
		cpu_set(cpu, cpu_possible_map);
	return 0;
}
early_param("possible_cpus", setup_possible_cpus);

#ifdef CONFIG_HOTPLUG_CPU

int __cpu_disable(void)
{
	struct ec_creg_mask_parms cr_parms;
	int cpu = smp_processor_id();

	cpu_clear(cpu, cpu_online_map);

	/* Disable pfault pseudo page faults on this cpu. */
	pfault_fini();

	memset(&cr_parms.orvals, 0, sizeof(cr_parms.orvals));
	memset(&cr_parms.andvals, 0xff, sizeof(cr_parms.andvals));

	/* disable all external interrupts */
	cr_parms.orvals[0] = 0;
	cr_parms.andvals[0] = ~(1 << 15 | 1 << 14 | 1 << 13 | 1 << 12 |
				1 << 11 | 1 << 10 | 1 << 6 | 1 << 4);
	/* disable all I/O interrupts */
	cr_parms.orvals[6] = 0;
	cr_parms.andvals[6] = ~(1 << 31 | 1 << 30 | 1 << 29 | 1 << 28 |
				1 << 27 | 1 << 26 | 1 << 25 | 1 << 24);
	/* disable most machine checks */
	cr_parms.orvals[14] = 0;
	cr_parms.andvals[14] = ~(1 << 28 | 1 << 27 | 1 << 26 |
				 1 << 25 | 1 << 24);

	smp_ctl_bit_callback(&cr_parms);

	return 0;
}

void __cpu_die(unsigned int cpu)
{
	/* Wait until target cpu is down */
	while (!smp_cpu_not_running(cpu))
		cpu_relax();
	smp_free_lowcore(cpu);
	pr_info("Processor %d stopped\n", cpu);
}

void cpu_die(void)
{
	idle_task_exit();
	signal_processor(smp_processor_id(), sigp_stop);
	BUG();
	for (;;);
}

#endif /* CONFIG_HOTPLUG_CPU */

void __init smp_prepare_cpus(unsigned int max_cpus)
{
#ifndef CONFIG_64BIT
	unsigned long save_area = 0;
#endif
	unsigned long async_stack, panic_stack;
	struct _lowcore *lowcore;
	unsigned int cpu;
	int lc_order;

	smp_detect_cpus();

	/* request the 0x1201 emergency signal external interrupt */
	if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0)
		panic("Couldn't request external interrupt 0x1201");
	print_cpu_info(&S390_lowcore.cpu_data);

	/* Reallocate current lowcore, but keep its contents. */
	lc_order = sizeof(long) == 8 ? 1 : 0;
	lowcore = (void *) __get_free_pages(GFP_KERNEL | GFP_DMA, lc_order);
	panic_stack = __get_free_page(GFP_KERNEL);
	async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
#ifndef CONFIG_64BIT
	if (MACHINE_HAS_IEEE)
		save_area = get_zeroed_page(GFP_KERNEL);
#endif
	local_irq_disable();
	local_mcck_disable();
	lowcore_ptr[smp_processor_id()] = lowcore;
	*lowcore = S390_lowcore;
	lowcore->panic_stack = panic_stack + PAGE_SIZE;
	lowcore->async_stack = async_stack + ASYNC_SIZE;
#ifndef CONFIG_64BIT
	if (MACHINE_HAS_IEEE)
		lowcore->extended_save_area_addr = (u32) save_area;
#endif
	set_prefix((u32)(unsigned long) lowcore);
	local_mcck_enable();
	local_irq_enable();
	for_each_possible_cpu(cpu)
		if (cpu != smp_processor_id())
			smp_create_idle(cpu);
}

void __init smp_prepare_boot_cpu(void)
{
	BUG_ON(smp_processor_id() != 0);

	current_thread_info()->cpu = 0;
	cpu_set(0, cpu_present_map);
	cpu_set(0, cpu_online_map);
	S390_lowcore.percpu_offset = __per_cpu_offset[0];
	current_set[0] = current;
	smp_cpu_state[0] = CPU_STATE_CONFIGURED;
	smp_cpu_polarization[0] = POLARIZATION_UNKNWN;
}

void __init smp_cpus_done(unsigned int max_cpus)
{
}

/*
 * the frequency of the profiling timer can be changed
 * by writing a multiplier value into /proc/profile.
 *
 * usually you want to run this on all CPUs ;)
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
static ssize_t cpu_configure_show(struct sys_device *dev,
				  struct sysdev_attribute *attr, char *buf)
{
	ssize_t count;

	mutex_lock(&smp_cpu_state_mutex);
	count = sprintf(buf, "%d\n", smp_cpu_state[dev->id]);
	mutex_unlock(&smp_cpu_state_mutex);
	return count;
}

static ssize_t cpu_configure_store(struct sys_device *dev,
				   struct sysdev_attribute *attr,
				   const char *buf, size_t count)
{
	int cpu = dev->id;
	int val, rc;
	char delim;

	if (sscanf(buf, "%d %c", &val, &delim) != 1)
		return -EINVAL;
	if (val != 0 && val != 1)
		return -EINVAL;

	get_online_cpus();
	mutex_lock(&smp_cpu_state_mutex);
	rc = -EBUSY;
	if (cpu_online(cpu))
		goto out;
	rc = 0;
	switch (val) {
	case 0:
		if (smp_cpu_state[cpu] == CPU_STATE_CONFIGURED) {
			rc = sclp_cpu_deconfigure(__cpu_logical_map[cpu]);
			if (!rc) {
				smp_cpu_state[cpu] = CPU_STATE_STANDBY;
				smp_cpu_polarization[cpu] = POLARIZATION_UNKNWN;
			}
		}
		break;
	case 1:
		if (smp_cpu_state[cpu] == CPU_STATE_STANDBY) {
			rc = sclp_cpu_configure(__cpu_logical_map[cpu]);
			if (!rc) {
				smp_cpu_state[cpu] = CPU_STATE_CONFIGURED;
				smp_cpu_polarization[cpu] = POLARIZATION_UNKNWN;
			}
		}
		break;
	default:
		break;
	}
out:
	mutex_unlock(&smp_cpu_state_mutex);
	put_online_cpus();
	return rc ? rc : count;
}
static SYSDEV_ATTR(configure, 0644, cpu_configure_show, cpu_configure_store);
#endif /* CONFIG_HOTPLUG_CPU */

static ssize_t cpu_polarization_show(struct sys_device *dev,
				     struct sysdev_attribute *attr, char *buf)
{
	int cpu = dev->id;
	ssize_t count;

	mutex_lock(&smp_cpu_state_mutex);
	switch (smp_cpu_polarization[cpu]) {
	case POLARIZATION_HRZ:
		count = sprintf(buf, "horizontal\n");
		break;
	case POLARIZATION_VL:
		count = sprintf(buf, "vertical:low\n");
		break;
	case POLARIZATION_VM:
		count = sprintf(buf, "vertical:medium\n");
		break;
	case POLARIZATION_VH:
		count = sprintf(buf, "vertical:high\n");
		break;
	default:
		count = sprintf(buf, "unknown\n");
		break;
	}
	mutex_unlock(&smp_cpu_state_mutex);
	return count;
}
static SYSDEV_ATTR(polarization, 0444, cpu_polarization_show, NULL);

static ssize_t show_cpu_address(struct sys_device *dev,
				struct sysdev_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", __cpu_logical_map[dev->id]);
}
static SYSDEV_ATTR(address, 0444, show_cpu_address, NULL);

static struct attribute *cpu_common_attrs[] = {
#ifdef CONFIG_HOTPLUG_CPU
	&attr_configure.attr,
#endif
	&attr_address.attr,
	&attr_polarization.attr,
	NULL,
};

static struct attribute_group cpu_common_attr_group = {
	.attrs = cpu_common_attrs,
};

static ssize_t show_capability(struct sys_device *dev,
			       struct sysdev_attribute *attr, char *buf)
{
	unsigned int capability;
	int rc;

	rc = get_cpu_capability(&capability);
	if (rc)
		return rc;
	return sprintf(buf, "%u\n", capability);
}
static SYSDEV_ATTR(capability, 0444, show_capability, NULL);

static ssize_t show_idle_count(struct sys_device *dev,
			       struct sysdev_attribute *attr, char *buf)
{
	struct s390_idle_data *idle;
	unsigned long long idle_count;

	idle = &per_cpu(s390_idle, dev->id);
	spin_lock_irq(&idle->lock);
	idle_count = idle->idle_count;
	spin_unlock_irq(&idle->lock);
	return sprintf(buf, "%llu\n", idle_count);
}
static SYSDEV_ATTR(idle_count, 0444, show_idle_count, NULL);

static ssize_t show_idle_time(struct sys_device *dev,
			      struct sysdev_attribute *attr, char *buf)
{
	struct s390_idle_data *idle;
	unsigned long long new_time;

	idle = &per_cpu(s390_idle, dev->id);
	spin_lock_irq(&idle->lock);
	if (idle->in_idle) {
		new_time = get_clock();
		idle->idle_time += new_time - idle->idle_enter;
		idle->idle_enter = new_time;
	}
	new_time = idle->idle_time;
	spin_unlock_irq(&idle->lock);
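	/* TOD clock: bit 51 ticks every microsecond, so shifting the
	 * accumulated value right by 12 converts it to microseconds. */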
	return sprintf(buf, "%llu\n", new_time >> 12);
}
static SYSDEV_ATTR(idle_time_us, 0444, show_idle_time, NULL);

static struct attribute *cpu_online_attrs[] = {
	&attr_capability.attr,
	&attr_idle_count.attr,
	&attr_idle_time_us.attr,
	NULL,
};

static struct attribute_group cpu_online_attr_group = {
	.attrs = cpu_online_attrs,
};

static int __cpuinit smp_cpu_notify(struct notifier_block *self,
				    unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned int)(long)hcpu;
	struct cpu *c = &per_cpu(cpu_devices, cpu);
	struct sys_device *s = &c->sysdev;
	struct s390_idle_data *idle;

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		idle = &per_cpu(s390_idle, cpu);
		spin_lock_irq(&idle->lock);
		idle->idle_enter = 0;
		idle->idle_time = 0;
		idle->idle_count = 0;
		spin_unlock_irq(&idle->lock);
		if (sysfs_create_group(&s->kobj, &cpu_online_attr_group))
			return NOTIFY_BAD;
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		sysfs_remove_group(&s->kobj, &cpu_online_attr_group);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata smp_cpu_nb = {
	.notifier_call = smp_cpu_notify,
};

static int __devinit smp_add_present_cpu(int cpu)
{
	struct cpu *c = &per_cpu(cpu_devices, cpu);
	struct sys_device *s = &c->sysdev;
	int rc;

	c->hotpluggable = 1;
	rc = register_cpu(c, cpu);
	if (rc)
		goto out;
	rc = sysfs_create_group(&s->kobj, &cpu_common_attr_group);
	if (rc)
		goto out_cpu;
	if (!cpu_online(cpu))
		goto out;
	rc = sysfs_create_group(&s->kobj, &cpu_online_attr_group);
	if (!rc)
		return 0;
	sysfs_remove_group(&s->kobj, &cpu_common_attr_group);
out_cpu:
#ifdef CONFIG_HOTPLUG_CPU
	unregister_cpu(c);
#endif
out:
	return rc;
}

#ifdef CONFIG_HOTPLUG_CPU

int __ref smp_rescan_cpus(void)
{
	cpumask_t newcpus;
	int cpu;
	int rc;

	get_online_cpus();
	mutex_lock(&smp_cpu_state_mutex);
	newcpus = cpu_present_map;
	rc = __smp_rescan_cpus();
	if (rc)
		goto out;
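	/* Reduce newcpus to the cpus that became present in the rescan. */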
	cpus_andnot(newcpus, cpu_present_map, newcpus);
	for_each_cpu_mask(cpu, newcpus) {
		rc = smp_add_present_cpu(cpu);
		if (rc)
			cpu_clear(cpu, cpu_present_map);
	}
	rc = 0;
out:
	mutex_unlock(&smp_cpu_state_mutex);
	put_online_cpus();
	if (!cpus_empty(newcpus))
		topology_schedule_update();
	return rc;
}

static ssize_t __ref rescan_store(struct sysdev_class *class, const char *buf,
				  size_t count)
{
	int rc;

	rc = smp_rescan_cpus();
	return rc ? rc : count;
}
static SYSDEV_CLASS_ATTR(rescan, 0200, NULL, rescan_store);
#endif /* CONFIG_HOTPLUG_CPU */

static ssize_t dispatching_show(struct sysdev_class *class, char *buf)
{
	ssize_t count;

	mutex_lock(&smp_cpu_state_mutex);
	count = sprintf(buf, "%d\n", cpu_management);
	mutex_unlock(&smp_cpu_state_mutex);
	return count;
}

static ssize_t dispatching_store(struct sysdev_class *dev, const char *buf,
				 size_t count)
{
	int val, rc;
	char delim;

	if (sscanf(buf, "%d %c", &val, &delim) != 1)
		return -EINVAL;
	if (val != 0 && val != 1)
		return -EINVAL;
	rc = 0;
	get_online_cpus();
	mutex_lock(&smp_cpu_state_mutex);
	if (cpu_management == val)
		goto out;
	rc = topology_set_cpu_management(val);
	if (!rc)
		cpu_management = val;
out:
	mutex_unlock(&smp_cpu_state_mutex);
	put_online_cpus();
	return rc ? rc : count;
}
static SYSDEV_CLASS_ATTR(dispatching, 0644, dispatching_show,
			 dispatching_store);

static int __init topology_init(void)
{
	int cpu;
	int rc;

	register_cpu_notifier(&smp_cpu_nb);

#ifdef CONFIG_HOTPLUG_CPU
	rc = sysdev_class_create_file(&cpu_sysdev_class, &attr_rescan);
	if (rc)
		return rc;
#endif
	rc = sysdev_class_create_file(&cpu_sysdev_class, &attr_dispatching);
	if (rc)
		return rc;
	for_each_present_cpu(cpu) {
		rc = smp_add_present_cpu(cpu);
		if (rc)
			return rc;
	}
	return 0;
}
subsys_initcall(topology_init);