
/*
 *  arch/s390/kernel/smp.c
 *
 *    Copyright IBM Corp. 1999,2007
 *    Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
 *		 Martin Schwidefsky (schwidefsky@de.ibm.com)
 *		 Heiko Carstens (heiko.carstens@de.ibm.com)
 *
 *  based on other smp stuff by
 *    (c) 1995 Alan Cox, CymruNET Ltd  <alan@cymru.net>
 *    (c) 1998 Ingo Molnar
 *
 * We work with logical cpu numbering everywhere we can. The only
 * functions using the real cpu address (obtained from STAP) are the sigp
 * functions. For all other functions we use the identity mapping.
 * That means that cpu_number_map[i] == i for every cpu. cpu_number_map is
 * used e.g. to find the idle task belonging to a logical cpu. Every array
 * in the kernel is sorted by the logical cpu number and not by the physical
 * one, which causes all the confusion with __cpu_logical_map and
 * cpu_number_map in other architectures.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
#include <linux/cpu.h>
#include <linux/timex.h>
#include <linux/bootmem.h>
#include <asm/ipl.h>
#include <asm/setup.h>
#include <asm/sigp.h>
#include <asm/pgalloc.h>
#include <asm/irq.h>
#include <asm/s390_ext.h>
#include <asm/cpcmd.h>
#include <asm/tlbflush.h>
#include <asm/timer.h>
#include <asm/lowcore.h>
#include <asm/sclp.h>
#include <asm/cpu.h>
#include "entry.h"
/*
 * An array with a pointer to the lowcore of every CPU.
 */
struct _lowcore *lowcore_ptr[NR_CPUS];
EXPORT_SYMBOL(lowcore_ptr);

cpumask_t cpu_online_map = CPU_MASK_NONE;
EXPORT_SYMBOL(cpu_online_map);

cpumask_t cpu_possible_map = CPU_MASK_ALL;
EXPORT_SYMBOL(cpu_possible_map);

static struct task_struct *current_set[NR_CPUS];

static u8 smp_cpu_type;
static int smp_use_sigp_detection;

enum s390_cpu_state {
	CPU_STATE_STANDBY,
	CPU_STATE_CONFIGURED,
};

DEFINE_MUTEX(smp_cpu_state_mutex);
int smp_cpu_polarization[NR_CPUS];
static int smp_cpu_state[NR_CPUS];
static int cpu_management;

static DEFINE_PER_CPU(struct cpu, cpu_devices);

static void smp_ext_bitcall(int, ec_bit_sig);

/*
 * Structure and data for __smp_call_function_map(). This is designed to
 * minimise static memory requirements. It also looks cleaner.
 */
static DEFINE_SPINLOCK(call_lock);

struct call_data_struct {
	void (*func) (void *info);
	void *info;
	cpumask_t started;
	cpumask_t finished;
	int wait;
};

static struct call_data_struct *call_data;
/*
 * 'Call function' interrupt callback
 */
static void do_call_function(void)
{
	void (*func) (void *info) = call_data->func;
	void *info = call_data->info;
	int wait = call_data->wait;

	cpu_set(smp_processor_id(), call_data->started);
	(*func)(info);
	if (wait)
		cpu_set(smp_processor_id(), call_data->finished);
}
static void __smp_call_function_map(void (*func) (void *info), void *info,
				    int wait, cpumask_t map)
{
	struct call_data_struct data;
	int cpu, local = 0;

	/*
	 * Can deadlock when interrupts are disabled or if in wrong context.
	 */
	WARN_ON(irqs_disabled() || in_irq());

	/*
	 * Check for local function call. We have to have the same call order
	 * as in on_each_cpu() because of machine_restart_smp().
	 */
	if (cpu_isset(smp_processor_id(), map)) {
		local = 1;
		cpu_clear(smp_processor_id(), map);
	}

	cpus_and(map, map, cpu_online_map);
	if (cpus_empty(map))
		goto out;

	data.func = func;
	data.info = info;
	data.started = CPU_MASK_NONE;
	data.wait = wait;
	if (wait)
		data.finished = CPU_MASK_NONE;

	call_data = &data;

	for_each_cpu_mask(cpu, map)
		smp_ext_bitcall(cpu, ec_call_function);

	/* Wait for response */
	while (!cpus_equal(map, data.started))
		cpu_relax();
	if (wait)
		while (!cpus_equal(map, data.finished))
			cpu_relax();
out:
	if (local) {
		local_irq_disable();
		func(info);
		local_irq_enable();
	}
}
/*
 * smp_call_function:
 * @func: the function to run; this must be fast and non-blocking
 * @info: an arbitrary pointer to pass to the function
 * @wait: if true, wait (atomically) until function has completed on other CPUs
 *
 * Run a function on all other CPUs.
 *
 * You must not call this function with disabled interrupts, from a
 * hardware interrupt handler or from a bottom half.
 */
int smp_call_function(void (*func) (void *info), void *info, int wait)
{
	cpumask_t map;

	spin_lock(&call_lock);
	map = cpu_online_map;
	cpu_clear(smp_processor_id(), map);
	__smp_call_function_map(func, info, wait, map);
	spin_unlock(&call_lock);
	return 0;
}
EXPORT_SYMBOL(smp_call_function);
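
/*
 * Usage sketch (hypothetical callback and variable names): broadcast a
 * fast, non-blocking function to all other online cpus and spin until
 * each of them has run it:
 *
 *	static atomic_t hits = ATOMIC_INIT(0);
 *
 *	static void bump(void *info)
 *	{
 *		atomic_inc(&hits);
 *	}
 *
 *	smp_call_function(bump, NULL, 1);
 *
 * After the call returns, hits equals the number of other online cpus,
 * since wait != 0 only returns once every target cpu has set its bit
 * in the "finished" mask.
 */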
/*
 * smp_call_function_single:
 * @cpu: the CPU where func should run
 * @func: the function to run; this must be fast and non-blocking
 * @info: an arbitrary pointer to pass to the function
 * @wait: if true, wait (atomically) until function has completed on that CPU
 *
 * Run a function on one processor.
 *
 * You must not call this function with disabled interrupts, from a
 * hardware interrupt handler or from a bottom half.
 */
int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
			     int wait)
{
	spin_lock(&call_lock);
	__smp_call_function_map(func, info, wait, cpumask_of_cpu(cpu));
	spin_unlock(&call_lock);
	return 0;
}
EXPORT_SYMBOL(smp_call_function_single);
/**
 * smp_call_function_mask(): Run a function on a set of other CPUs.
 * @mask: The set of cpus to run on.  Must not include the current cpu.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed on other CPUs.
 *
 * Returns 0 on success, else a negative status code.
 *
 * If @wait is true, then returns once @func has returned; otherwise
 * it returns just before the target cpu calls @func.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
int smp_call_function_mask(cpumask_t mask, void (*func)(void *), void *info,
			   int wait)
{
	spin_lock(&call_lock);
	cpu_clear(smp_processor_id(), mask);
	__smp_call_function_map(func, info, wait, mask);
	spin_unlock(&call_lock);
	return 0;
}
EXPORT_SYMBOL(smp_call_function_mask);
void smp_send_stop(void)
{
	int cpu, rc;

	/* Disable all interrupts/machine checks */
	__load_psw_mask(psw_kernel_bits & ~PSW_MASK_MCHECK);

	/* write magic number to zero page (absolute 0) */
	lowcore_ptr[smp_processor_id()]->panic_magic = __PANIC_MAGIC;

	/* stop all processors */
	for_each_online_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;
		do {
			rc = signal_processor(cpu, sigp_stop);
		} while (rc == sigp_busy);

		while (!smp_cpu_not_running(cpu))
			cpu_relax();
	}
}
/*
 * This is the main routine where commands issued by other
 * cpus are handled.
 */
static void do_ext_call_interrupt(__u16 code)
{
	unsigned long bits;

	/*
	 * handle bit signal external calls
	 *
	 * For the ec_schedule signal we have to do nothing. All the work
	 * is done automatically when we return from the interrupt.
	 */
	bits = xchg(&S390_lowcore.ext_call_fast, 0);

	if (test_bit(ec_call_function, &bits))
		do_call_function();
}

/*
 * Send an external call sigp to another cpu and return without waiting
 * for its completion.
 */
static void smp_ext_bitcall(int cpu, ec_bit_sig sig)
{
	/*
	 * Set signaling bit in lowcore of target cpu and kick it
	 */
	set_bit(sig, (unsigned long *) &lowcore_ptr[cpu]->ext_call_fast);
	while (signal_processor(cpu, sigp_emergency_signal) == sigp_busy)
		udelay(10);
}
#ifndef CONFIG_64BIT
/*
 * this function sends a 'purge tlb' signal to another CPU.
 */
static void smp_ptlb_callback(void *info)
{
	__tlb_flush_local();
}

void smp_ptlb_all(void)
{
	on_each_cpu(smp_ptlb_callback, NULL, 1);
}
EXPORT_SYMBOL(smp_ptlb_all);
#endif /* ! CONFIG_64BIT */

/*
 * this function sends a 'reschedule' IPI to another CPU.
 * it goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */
void smp_send_reschedule(int cpu)
{
	smp_ext_bitcall(cpu, ec_schedule);
}
/*
 * parameter area for the set/clear control bit callbacks
 */
struct ec_creg_mask_parms {
	unsigned long orvals[16];
	unsigned long andvals[16];
};

/*
 * callback for setting/clearing control bits
 */
static void smp_ctl_bit_callback(void *info)
{
	struct ec_creg_mask_parms *pp = info;
	unsigned long cregs[16];
	int i;

	__ctl_store(cregs, 0, 15);
	for (i = 0; i <= 15; i++)
		cregs[i] = (cregs[i] & pp->andvals[i]) | pp->orvals[i];
	__ctl_load(cregs, 0, 15);
}

/*
 * Set a bit in a control register of all cpus
 */
void smp_ctl_set_bit(int cr, int bit)
{
	struct ec_creg_mask_parms parms;

	memset(&parms.orvals, 0, sizeof(parms.orvals));
	memset(&parms.andvals, 0xff, sizeof(parms.andvals));
	parms.orvals[cr] = 1 << bit;
	on_each_cpu(smp_ctl_bit_callback, &parms, 1);
}
EXPORT_SYMBOL(smp_ctl_set_bit);

/*
 * Clear a bit in a control register of all cpus
 */
void smp_ctl_clear_bit(int cr, int bit)
{
	struct ec_creg_mask_parms parms;

	memset(&parms.orvals, 0, sizeof(parms.orvals));
	memset(&parms.andvals, 0xff, sizeof(parms.andvals));
	parms.andvals[cr] = ~(1L << bit);
	on_each_cpu(smp_ctl_bit_callback, &parms, 1);
}
EXPORT_SYMBOL(smp_ctl_clear_bit);
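
/*
 * Worked example: smp_ctl_set_bit(0, 13) builds a parameter block with
 * orvals[0] = 1 << 13 and every andvals entry set to all ones, so
 * smp_ctl_bit_callback() leaves all control registers unchanged except
 * for OR-ing bit 13 into control register 0 on every cpu.
 * smp_ctl_clear_bit() works the other way round: all orvals stay zero
 * and andvals[cr] masks the requested bit off.
 */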
/*
 * In early ipl state a temporary logical cpu number is needed, so the sigp
 * functions can be used to sense other cpus. Since NR_CPUS is >= 2 on
 * CONFIG_SMP and the ipl cpu is logical cpu 0, it must be 1.
 */
#define CPU_INIT_NO	1
#if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_ZFCPDUMP_MODULE)

/*
 * zfcpdump_prefix_array holds prefix registers for the following scenario:
 * 64 bit zfcpdump kernel and 31 bit kernel which is to be dumped. We have to
 * save its prefix registers, since they get lost, when switching from 31 bit
 * to 64 bit.
 */
unsigned int zfcpdump_prefix_array[NR_CPUS + 1] \
	__attribute__((__section__(".data")));

static void __init smp_get_save_area(unsigned int cpu, unsigned int phy_cpu)
{
	if (ipl_info.type != IPL_TYPE_FCP_DUMP)
		return;
	if (cpu >= NR_CPUS) {
		printk(KERN_WARNING "Registers for cpu %i not saved since dump "
		       "kernel was compiled with NR_CPUS=%i\n", cpu, NR_CPUS);
		return;
	}
	zfcpdump_save_areas[cpu] = kmalloc(sizeof(union save_area), GFP_KERNEL);
	__cpu_logical_map[CPU_INIT_NO] = (__u16) phy_cpu;
	while (signal_processor(CPU_INIT_NO, sigp_stop_and_store_status) ==
	       sigp_busy)
		cpu_relax();
	memcpy(zfcpdump_save_areas[cpu],
	       (void *)(unsigned long) store_prefix() + SAVE_AREA_BASE,
	       SAVE_AREA_SIZE);
#ifdef CONFIG_64BIT
	/* copy original prefix register */
	zfcpdump_save_areas[cpu]->s390x.pref_reg = zfcpdump_prefix_array[cpu];
#endif
}

union save_area *zfcpdump_save_areas[NR_CPUS + 1];
EXPORT_SYMBOL_GPL(zfcpdump_save_areas);

#else

static inline void smp_get_save_area(unsigned int cpu, unsigned int phy_cpu) { }

#endif /* CONFIG_ZFCPDUMP || CONFIG_ZFCPDUMP_MODULE */
static int cpu_stopped(int cpu)
{
	__u32 status;

	/* Check for stopped state */
	if (signal_processor_ps(&status, 0, cpu, sigp_sense) ==
	    sigp_status_stored) {
		if (status & 0x40)
			return 1;
	}
	return 0;
}

static int cpu_known(int cpu_id)
{
	int cpu;

	for_each_present_cpu(cpu) {
		if (__cpu_logical_map[cpu] == cpu_id)
			return 1;
	}
	return 0;
}

static int smp_rescan_cpus_sigp(cpumask_t avail)
{
	int cpu_id, logical_cpu;

	logical_cpu = first_cpu(avail);
	if (logical_cpu == NR_CPUS)
		return 0;
	for (cpu_id = 0; cpu_id <= 65535; cpu_id++) {
		if (cpu_known(cpu_id))
			continue;
		__cpu_logical_map[logical_cpu] = cpu_id;
		smp_cpu_polarization[logical_cpu] = POLARIZATION_UNKNWN;
		if (!cpu_stopped(logical_cpu))
			continue;
		cpu_set(logical_cpu, cpu_present_map);
		smp_cpu_state[logical_cpu] = CPU_STATE_CONFIGURED;
		logical_cpu = next_cpu(logical_cpu, avail);
		if (logical_cpu == NR_CPUS)
			break;
	}
	return 0;
}
static int smp_rescan_cpus_sclp(cpumask_t avail)
{
	struct sclp_cpu_info *info;
	int cpu_id, logical_cpu, cpu;
	int rc;

	logical_cpu = first_cpu(avail);
	if (logical_cpu == NR_CPUS)
		return 0;
	info = kmalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;
	rc = sclp_get_cpu_info(info);
	if (rc)
		goto out;
	for (cpu = 0; cpu < info->combined; cpu++) {
		if (info->has_cpu_type && info->cpu[cpu].type != smp_cpu_type)
			continue;
		cpu_id = info->cpu[cpu].address;
		if (cpu_known(cpu_id))
			continue;
		__cpu_logical_map[logical_cpu] = cpu_id;
		smp_cpu_polarization[logical_cpu] = POLARIZATION_UNKNWN;
		cpu_set(logical_cpu, cpu_present_map);
		if (cpu >= info->configured)
			smp_cpu_state[logical_cpu] = CPU_STATE_STANDBY;
		else
			smp_cpu_state[logical_cpu] = CPU_STATE_CONFIGURED;
		logical_cpu = next_cpu(logical_cpu, avail);
		if (logical_cpu == NR_CPUS)
			break;
	}
out:
	kfree(info);
	return rc;
}

static int __smp_rescan_cpus(void)
{
	cpumask_t avail;

	cpus_xor(avail, cpu_possible_map, cpu_present_map);
	if (smp_use_sigp_detection)
		return smp_rescan_cpus_sigp(avail);
	else
		return smp_rescan_cpus_sclp(avail);
}
static void __init smp_detect_cpus(void)
{
	unsigned int cpu, c_cpus, s_cpus;
	struct sclp_cpu_info *info;
	u16 boot_cpu_addr, cpu_addr;

	c_cpus = 1;
	s_cpus = 0;
	boot_cpu_addr = S390_lowcore.cpu_data.cpu_addr;
	info = kmalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		panic("smp_detect_cpus failed to allocate memory\n");
	/* Use sigp detection algorithm if sclp doesn't work. */
	if (sclp_get_cpu_info(info)) {
		smp_use_sigp_detection = 1;
		for (cpu = 0; cpu <= 65535; cpu++) {
			if (cpu == boot_cpu_addr)
				continue;
			__cpu_logical_map[CPU_INIT_NO] = cpu;
			if (!cpu_stopped(CPU_INIT_NO))
				continue;
			smp_get_save_area(c_cpus, cpu);
			c_cpus++;
		}
		goto out;
	}
	if (info->has_cpu_type) {
		for (cpu = 0; cpu < info->combined; cpu++) {
			if (info->cpu[cpu].address == boot_cpu_addr) {
				smp_cpu_type = info->cpu[cpu].type;
				break;
			}
		}
	}
	for (cpu = 0; cpu < info->combined; cpu++) {
		if (info->has_cpu_type && info->cpu[cpu].type != smp_cpu_type)
			continue;
		cpu_addr = info->cpu[cpu].address;
		if (cpu_addr == boot_cpu_addr)
			continue;
		__cpu_logical_map[CPU_INIT_NO] = cpu_addr;
		if (!cpu_stopped(CPU_INIT_NO)) {
			s_cpus++;
			continue;
		}
		smp_get_save_area(c_cpus, cpu_addr);
		c_cpus++;
	}
out:
	kfree(info);
	printk(KERN_INFO "CPUs: %d configured, %d standby\n", c_cpus, s_cpus);
	get_online_cpus();
	__smp_rescan_cpus();
	put_online_cpus();
}
/*
 * Activate a secondary processor.
 */
int __cpuinit start_secondary(void *cpuvoid)
{
	/* Setup the cpu */
	cpu_init();
	preempt_disable();
	/* Enable TOD clock interrupts on the secondary cpu. */
	init_cpu_timer();
#ifdef CONFIG_VIRT_TIMER
	/* Enable cpu timer interrupts on the secondary cpu. */
	init_cpu_vtimer();
#endif
	/* Enable pfault pseudo page faults on this cpu. */
	pfault_init();

	/* Mark this cpu as online */
	spin_lock(&call_lock);
	cpu_set(smp_processor_id(), cpu_online_map);
	spin_unlock(&call_lock);
	/* Switch on interrupts */
	local_irq_enable();
	/* Print info about this processor */
	print_cpu_info(&S390_lowcore.cpu_data);
	/* cpu_idle will call schedule for us */
	cpu_idle();
	return 0;
}

static void __init smp_create_idle(unsigned int cpu)
{
	struct task_struct *p;

	/*
	 * don't care about the psw and regs settings since we'll never
	 * reschedule the forked task.
	 */
	p = fork_idle(cpu);
	if (IS_ERR(p))
		panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p));
	current_set[cpu] = p;
}
static int __cpuinit smp_alloc_lowcore(int cpu)
{
	unsigned long async_stack, panic_stack;
	struct _lowcore *lowcore;
	int lc_order;

	lc_order = sizeof(long) == 8 ? 1 : 0;
	lowcore = (void *) __get_free_pages(GFP_KERNEL | GFP_DMA, lc_order);
	if (!lowcore)
		return -ENOMEM;
	async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
	panic_stack = __get_free_page(GFP_KERNEL);
	if (!panic_stack || !async_stack)
		goto out;
	memcpy(lowcore, &S390_lowcore, 512);
	memset((char *)lowcore + 512, 0, sizeof(*lowcore) - 512);
	lowcore->async_stack = async_stack + ASYNC_SIZE;
	lowcore->panic_stack = panic_stack + PAGE_SIZE;

#ifndef CONFIG_64BIT
	if (MACHINE_HAS_IEEE) {
		unsigned long save_area;

		save_area = get_zeroed_page(GFP_KERNEL);
		if (!save_area)
			goto out_save_area;
		lowcore->extended_save_area_addr = (u32) save_area;
	}
#endif
	lowcore_ptr[cpu] = lowcore;
	return 0;

#ifndef CONFIG_64BIT
out_save_area:
	free_page(panic_stack);
#endif
out:
	free_pages(async_stack, ASYNC_ORDER);
	free_pages((unsigned long) lowcore, lc_order);
	return -ENOMEM;
}
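
/*
 * The prefix (lowcore) area is one 4 KB page on 31 bit but 8 KB on
 * 64 bit, hence the allocation order of 1 (two pages) when
 * sizeof(long) == 8 and 0 (one page) otherwise in smp_alloc_lowcore()
 * and smp_free_lowcore().
 */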
#ifdef CONFIG_HOTPLUG_CPU
static void smp_free_lowcore(int cpu)
{
	struct _lowcore *lowcore;
	int lc_order;

	lc_order = sizeof(long) == 8 ? 1 : 0;
	lowcore = lowcore_ptr[cpu];
#ifndef CONFIG_64BIT
	if (MACHINE_HAS_IEEE)
		free_page((unsigned long) lowcore->extended_save_area_addr);
#endif
	free_page(lowcore->panic_stack - PAGE_SIZE);
	free_pages(lowcore->async_stack - ASYNC_SIZE, ASYNC_ORDER);
	free_pages((unsigned long) lowcore, lc_order);
	lowcore_ptr[cpu] = NULL;
}
#endif /* CONFIG_HOTPLUG_CPU */
/* Upping and downing of CPUs */
int __cpuinit __cpu_up(unsigned int cpu)
{
	struct task_struct *idle;
	struct _lowcore *cpu_lowcore;
	struct stack_frame *sf;
	sigp_ccode ccode;

	if (smp_cpu_state[cpu] != CPU_STATE_CONFIGURED)
		return -EIO;
	if (smp_alloc_lowcore(cpu))
		return -ENOMEM;

	ccode = signal_processor_p((__u32)(unsigned long)(lowcore_ptr[cpu]),
				   cpu, sigp_set_prefix);
	if (ccode) {
		printk("sigp_set_prefix failed for cpu %d "
		       "with condition code %d\n",
		       (int) cpu, (int) ccode);
		return -EIO;
	}

	idle = current_set[cpu];
	cpu_lowcore = lowcore_ptr[cpu];
	cpu_lowcore->kernel_stack = (unsigned long)
		task_stack_page(idle) + THREAD_SIZE;
	cpu_lowcore->thread_info = (unsigned long) task_thread_info(idle);
	sf = (struct stack_frame *) (cpu_lowcore->kernel_stack
				     - sizeof(struct pt_regs)
				     - sizeof(struct stack_frame));
	memset(sf, 0, sizeof(struct stack_frame));
	sf->gprs[9] = (unsigned long) sf;
	cpu_lowcore->save_area[15] = (unsigned long) sf;
	__ctl_store(cpu_lowcore->cregs_save_area, 0, 15);
	asm volatile(
		"	stam	0,15,0(%0)"
		: : "a" (&cpu_lowcore->access_regs_save_area) : "memory");
	cpu_lowcore->percpu_offset = __per_cpu_offset[cpu];
	cpu_lowcore->current_task = (unsigned long) idle;
	cpu_lowcore->cpu_data.cpu_nr = cpu;
	cpu_lowcore->kernel_asce = S390_lowcore.kernel_asce;
	cpu_lowcore->ipl_device = S390_lowcore.ipl_device;
	eieio();

	while (signal_processor(cpu, sigp_restart) == sigp_busy)
		udelay(10);

	while (!cpu_online(cpu))
		cpu_relax();
	return 0;
}
static int __init setup_possible_cpus(char *s)
{
	int pcpus, cpu;

	pcpus = simple_strtoul(s, NULL, 0);
	cpu_possible_map = cpumask_of_cpu(0);
	for (cpu = 1; cpu < pcpus && cpu < NR_CPUS; cpu++)
		cpu_set(cpu, cpu_possible_map);
	return 0;
}
early_param("possible_cpus", setup_possible_cpus);
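
/*
 * Example: booting with "possible_cpus=2" on the kernel command line
 * limits cpu_possible_map to cpus 0 and 1 (the count is always capped
 * at NR_CPUS), no matter how many cpus the machine could otherwise
 * bring up.
 */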
#ifdef CONFIG_HOTPLUG_CPU

int __cpu_disable(void)
{
	struct ec_creg_mask_parms cr_parms;
	int cpu = smp_processor_id();

	cpu_clear(cpu, cpu_online_map);

	/* Disable pfault pseudo page faults on this cpu. */
	pfault_fini();

	memset(&cr_parms.orvals, 0, sizeof(cr_parms.orvals));
	memset(&cr_parms.andvals, 0xff, sizeof(cr_parms.andvals));

	/* disable all external interrupts */
	cr_parms.orvals[0] = 0;
	cr_parms.andvals[0] = ~(1 << 15 | 1 << 14 | 1 << 13 | 1 << 12 |
				1 << 11 | 1 << 10 | 1 << 6 | 1 << 4);
	/* disable all I/O interrupts */
	cr_parms.orvals[6] = 0;
	cr_parms.andvals[6] = ~(1 << 31 | 1 << 30 | 1 << 29 | 1 << 28 |
				1 << 27 | 1 << 26 | 1 << 25 | 1 << 24);
	/* disable most machine checks */
	cr_parms.orvals[14] = 0;
	cr_parms.andvals[14] = ~(1 << 28 | 1 << 27 | 1 << 26 |
				 1 << 25 | 1 << 24);

	smp_ctl_bit_callback(&cr_parms);

	return 0;
}

void __cpu_die(unsigned int cpu)
{
	/* Wait until target cpu is down */
	while (!smp_cpu_not_running(cpu))
		cpu_relax();
	smp_free_lowcore(cpu);
	printk(KERN_INFO "Processor %d spun down\n", cpu);
}

void cpu_die(void)
{
	idle_task_exit();
	signal_processor(smp_processor_id(), sigp_stop);
	BUG();
	for (;;);
}

#endif /* CONFIG_HOTPLUG_CPU */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
#ifndef CONFIG_64BIT
	unsigned long save_area = 0;
#endif
	unsigned long async_stack, panic_stack;
	struct _lowcore *lowcore;
	unsigned int cpu;
	int lc_order;

	smp_detect_cpus();

	/* request the 0x1201 emergency signal external interrupt */
	if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0)
		panic("Couldn't request external interrupt 0x1201");
	print_cpu_info(&S390_lowcore.cpu_data);

	/* Reallocate current lowcore, but keep its contents. */
	lc_order = sizeof(long) == 8 ? 1 : 0;
	lowcore = (void *) __get_free_pages(GFP_KERNEL | GFP_DMA, lc_order);
	panic_stack = __get_free_page(GFP_KERNEL);
	async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
#ifndef CONFIG_64BIT
	if (MACHINE_HAS_IEEE)
		save_area = get_zeroed_page(GFP_KERNEL);
#endif
	local_irq_disable();
	local_mcck_disable();
	lowcore_ptr[smp_processor_id()] = lowcore;
	*lowcore = S390_lowcore;
	lowcore->panic_stack = panic_stack + PAGE_SIZE;
	lowcore->async_stack = async_stack + ASYNC_SIZE;
#ifndef CONFIG_64BIT
	if (MACHINE_HAS_IEEE)
		lowcore->extended_save_area_addr = (u32) save_area;
#endif
	set_prefix((u32)(unsigned long) lowcore);
	local_mcck_enable();
	local_irq_enable();
	for_each_possible_cpu(cpu)
		if (cpu != smp_processor_id())
			smp_create_idle(cpu);
}
void __init smp_prepare_boot_cpu(void)
{
	BUG_ON(smp_processor_id() != 0);

	current_thread_info()->cpu = 0;
	cpu_set(0, cpu_present_map);
	cpu_set(0, cpu_online_map);
	S390_lowcore.percpu_offset = __per_cpu_offset[0];
	current_set[0] = current;
	smp_cpu_state[0] = CPU_STATE_CONFIGURED;
	smp_cpu_polarization[0] = POLARIZATION_UNKNWN;
}

void __init smp_cpus_done(unsigned int max_cpus)
{
}

/*
 * the frequency of the profiling timer can be changed
 * by writing a multiplier value into /proc/profile.
 *
 * usually you want to run this on all CPUs ;)
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}
#ifdef CONFIG_HOTPLUG_CPU
static ssize_t cpu_configure_show(struct sys_device *dev,
				  struct sysdev_attribute *attr, char *buf)
{
	ssize_t count;

	mutex_lock(&smp_cpu_state_mutex);
	count = sprintf(buf, "%d\n", smp_cpu_state[dev->id]);
	mutex_unlock(&smp_cpu_state_mutex);
	return count;
}

static ssize_t cpu_configure_store(struct sys_device *dev,
				   struct sysdev_attribute *attr,
				   const char *buf, size_t count)
{
	int cpu = dev->id;
	int val, rc;
	char delim;

	if (sscanf(buf, "%d %c", &val, &delim) != 1)
		return -EINVAL;
	if (val != 0 && val != 1)
		return -EINVAL;

	get_online_cpus();
	mutex_lock(&smp_cpu_state_mutex);
	rc = -EBUSY;
	if (cpu_online(cpu))
		goto out;
	rc = 0;
	switch (val) {
	case 0:
		if (smp_cpu_state[cpu] == CPU_STATE_CONFIGURED) {
			rc = sclp_cpu_deconfigure(__cpu_logical_map[cpu]);
			if (!rc) {
				smp_cpu_state[cpu] = CPU_STATE_STANDBY;
				smp_cpu_polarization[cpu] = POLARIZATION_UNKNWN;
			}
		}
		break;
	case 1:
		if (smp_cpu_state[cpu] == CPU_STATE_STANDBY) {
			rc = sclp_cpu_configure(__cpu_logical_map[cpu]);
			if (!rc) {
				smp_cpu_state[cpu] = CPU_STATE_CONFIGURED;
				smp_cpu_polarization[cpu] = POLARIZATION_UNKNWN;
			}
		}
		break;
	default:
		break;
	}
out:
	mutex_unlock(&smp_cpu_state_mutex);
	put_online_cpus();
	return rc ? rc : count;
}
static SYSDEV_ATTR(configure, 0644, cpu_configure_show, cpu_configure_store);
#endif /* CONFIG_HOTPLUG_CPU */
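
/*
 * Example (assuming the usual sysfs mount point): a standby cpu is
 * taken into the configuration from user space with
 *	echo 1 > /sys/devices/system/cpu/cpuN/configure
 * and given back with "echo 0". Both only succeed while the cpu is
 * offline; otherwise the store returns -EBUSY.
 */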
static ssize_t cpu_polarization_show(struct sys_device *dev,
				     struct sysdev_attribute *attr, char *buf)
{
	int cpu = dev->id;
	ssize_t count;

	mutex_lock(&smp_cpu_state_mutex);
	switch (smp_cpu_polarization[cpu]) {
	case POLARIZATION_HRZ:
		count = sprintf(buf, "horizontal\n");
		break;
	case POLARIZATION_VL:
		count = sprintf(buf, "vertical:low\n");
		break;
	case POLARIZATION_VM:
		count = sprintf(buf, "vertical:medium\n");
		break;
	case POLARIZATION_VH:
		count = sprintf(buf, "vertical:high\n");
		break;
	default:
		count = sprintf(buf, "unknown\n");
		break;
	}
	mutex_unlock(&smp_cpu_state_mutex);
	return count;
}
static SYSDEV_ATTR(polarization, 0444, cpu_polarization_show, NULL);

static ssize_t show_cpu_address(struct sys_device *dev,
				struct sysdev_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", __cpu_logical_map[dev->id]);
}
static SYSDEV_ATTR(address, 0444, show_cpu_address, NULL);

static struct attribute *cpu_common_attrs[] = {
#ifdef CONFIG_HOTPLUG_CPU
	&attr_configure.attr,
#endif
	&attr_address.attr,
	&attr_polarization.attr,
	NULL,
};

static struct attribute_group cpu_common_attr_group = {
	.attrs = cpu_common_attrs,
};
static ssize_t show_capability(struct sys_device *dev,
			       struct sysdev_attribute *attr, char *buf)
{
	unsigned int capability;
	int rc;

	rc = get_cpu_capability(&capability);
	if (rc)
		return rc;
	return sprintf(buf, "%u\n", capability);
}
static SYSDEV_ATTR(capability, 0444, show_capability, NULL);

static ssize_t show_idle_count(struct sys_device *dev,
			       struct sysdev_attribute *attr, char *buf)
{
	struct s390_idle_data *idle;
	unsigned long long idle_count;

	idle = &per_cpu(s390_idle, dev->id);
	spin_lock_irq(&idle->lock);
	idle_count = idle->idle_count;
	spin_unlock_irq(&idle->lock);
	return sprintf(buf, "%llu\n", idle_count);
}
static SYSDEV_ATTR(idle_count, 0444, show_idle_count, NULL);

static ssize_t show_idle_time(struct sys_device *dev,
			      struct sysdev_attribute *attr, char *buf)
{
	struct s390_idle_data *idle;
	unsigned long long new_time;

	idle = &per_cpu(s390_idle, dev->id);
	spin_lock_irq(&idle->lock);
	if (idle->in_idle) {
		new_time = get_clock();
		idle->idle_time += new_time - idle->idle_enter;
		idle->idle_enter = new_time;
	}
	new_time = idle->idle_time;
	spin_unlock_irq(&idle->lock);
	return sprintf(buf, "%llu\n", new_time >> 12);
}
static SYSDEV_ATTR(idle_time_us, 0444, show_idle_time, NULL);
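
/*
 * The idle time above is accounted in TOD clock units. Bit 51 of the
 * TOD clock increments once per microsecond, so shifting the value
 * right by 12 bits converts it to microseconds, which is what the
 * "idle_time_us" attribute name promises.
 */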
static struct attribute *cpu_online_attrs[] = {
	&attr_capability.attr,
	&attr_idle_count.attr,
	&attr_idle_time_us.attr,
	NULL,
};

static struct attribute_group cpu_online_attr_group = {
	.attrs = cpu_online_attrs,
};
static int __cpuinit smp_cpu_notify(struct notifier_block *self,
				    unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned int)(long)hcpu;
	struct cpu *c = &per_cpu(cpu_devices, cpu);
	struct sys_device *s = &c->sysdev;
	struct s390_idle_data *idle;

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		idle = &per_cpu(s390_idle, cpu);
		spin_lock_irq(&idle->lock);
		idle->idle_enter = 0;
		idle->idle_time = 0;
		idle->idle_count = 0;
		spin_unlock_irq(&idle->lock);
		if (sysfs_create_group(&s->kobj, &cpu_online_attr_group))
			return NOTIFY_BAD;
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		sysfs_remove_group(&s->kobj, &cpu_online_attr_group);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata smp_cpu_nb = {
	.notifier_call = smp_cpu_notify,
};
static int __devinit smp_add_present_cpu(int cpu)
{
	struct cpu *c = &per_cpu(cpu_devices, cpu);
	struct sys_device *s = &c->sysdev;
	int rc;

	c->hotpluggable = 1;
	rc = register_cpu(c, cpu);
	if (rc)
		goto out;
	rc = sysfs_create_group(&s->kobj, &cpu_common_attr_group);
	if (rc)
		goto out_cpu;
	if (!cpu_online(cpu))
		goto out;
	rc = sysfs_create_group(&s->kobj, &cpu_online_attr_group);
	if (!rc)
		return 0;
	sysfs_remove_group(&s->kobj, &cpu_common_attr_group);
out_cpu:
#ifdef CONFIG_HOTPLUG_CPU
	unregister_cpu(c);
#endif
out:
	return rc;
}
#ifdef CONFIG_HOTPLUG_CPU
int __ref smp_rescan_cpus(void)
{
	cpumask_t newcpus;
	int cpu;
	int rc;

	get_online_cpus();
	mutex_lock(&smp_cpu_state_mutex);
	newcpus = cpu_present_map;
	rc = __smp_rescan_cpus();
	if (rc)
		goto out;
	cpus_andnot(newcpus, cpu_present_map, newcpus);
	for_each_cpu_mask(cpu, newcpus) {
		rc = smp_add_present_cpu(cpu);
		if (rc)
			cpu_clear(cpu, cpu_present_map);
	}
	rc = 0;
out:
	mutex_unlock(&smp_cpu_state_mutex);
	put_online_cpus();
	if (!cpus_empty(newcpus))
		topology_schedule_update();
	return rc;
}

static ssize_t __ref rescan_store(struct sys_device *dev,
				  struct sysdev_attribute *attr,
				  const char *buf,
				  size_t count)
{
	int rc;

	rc = smp_rescan_cpus();
	return rc ? rc : count;
}
static SYSDEV_ATTR(rescan, 0200, NULL, rescan_store);
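
/*
 * Example: a rescan for cpus attached after boot can be triggered from
 * user space with
 *	echo 1 > /sys/devices/system/cpu/rescan
 * (the written value is ignored; the attribute is write-only).
 */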
#endif /* CONFIG_HOTPLUG_CPU */
static ssize_t dispatching_show(struct sys_device *dev,
				struct sysdev_attribute *attr,
				char *buf)
{
	ssize_t count;

	mutex_lock(&smp_cpu_state_mutex);
	count = sprintf(buf, "%d\n", cpu_management);
	mutex_unlock(&smp_cpu_state_mutex);
	return count;
}

static ssize_t dispatching_store(struct sys_device *dev,
				 struct sysdev_attribute *attr,
				 const char *buf, size_t count)
{
	int val, rc;
	char delim;

	if (sscanf(buf, "%d %c", &val, &delim) != 1)
		return -EINVAL;
	if (val != 0 && val != 1)
		return -EINVAL;
	rc = 0;
	get_online_cpus();
	mutex_lock(&smp_cpu_state_mutex);
	if (cpu_management == val)
		goto out;
	rc = topology_set_cpu_management(val);
	if (!rc)
		cpu_management = val;
out:
	mutex_unlock(&smp_cpu_state_mutex);
	put_online_cpus();
	return rc ? rc : count;
}
static SYSDEV_ATTR(dispatching, 0644, dispatching_show, dispatching_store);
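
/*
 * Example: horizontal cpu polarization (0) is the default; writing
 *	echo 1 > /sys/devices/system/cpu/dispatching
 * requests vertical polarization via topology_set_cpu_management(),
 * and "echo 0" switches back.
 */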
static int __init topology_init(void)
{
	int cpu;
	int rc;

	register_cpu_notifier(&smp_cpu_nb);

#ifdef CONFIG_HOTPLUG_CPU
	rc = sysfs_create_file(&cpu_sysdev_class.kset.kobj,
			       &attr_rescan.attr);
	if (rc)
		return rc;
#endif
	rc = sysfs_create_file(&cpu_sysdev_class.kset.kobj,
			       &attr_dispatching.attr);
	if (rc)
		return rc;
	for_each_present_cpu(cpu) {
		rc = smp_add_present_cpu(cpu);
		if (rc)
			return rc;
	}
	return 0;
}
subsys_initcall(topology_init);