/*
 * SMP related functions
 *
 * Copyright IBM Corp. 1999,2012
 * Author(s): Denis Joseph Barrow,
 *	      Martin Schwidefsky <schwidefsky@de.ibm.com>,
 *	      Heiko Carstens <heiko.carstens@de.ibm.com>,
 *
 * based on other smp stuff by
 * (c) 1995 Alan Cox, CymruNET Ltd <alan@cymru.net>
 * (c) 1998 Ingo Molnar
 *
 * The code outside of smp.c uses logical cpu numbers, only smp.c does
 * the translation of logical to physical cpu ids. All new code that
 * operates on physical cpu numbers needs to go into smp.c.
 */

#define KMSG_COMPONENT "cpu"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/workqueue.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irqflags.h>
#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/crash_dump.h>
#include <asm/asm-offsets.h>
#include <asm/switch_to.h>
#include <asm/facility.h>
#include <asm/ipl.h>
#include <asm/setup.h>
#include <asm/irq.h>
#include <asm/tlbflush.h>
#include <asm/timer.h>
#include <asm/lowcore.h>
#include <asm/sclp.h>
#include <asm/vdso.h>
#include <asm/debug.h>
#include <asm/os_info.h>
#include "entry.h"

enum {
	sigp_sense = 1,
	sigp_external_call = 2,
	sigp_emergency_signal = 3,
	sigp_start = 4,
	sigp_stop = 5,
	sigp_restart = 6,
	sigp_stop_and_store_status = 9,
	sigp_initial_cpu_reset = 11,
	sigp_cpu_reset = 12,
	sigp_set_prefix = 13,
	sigp_store_status_at_address = 14,
	sigp_store_extended_status_at_address = 15,
	sigp_set_architecture = 18,
	sigp_conditional_emergency_signal = 19,
	sigp_sense_running = 21,
};

enum {
	sigp_order_code_accepted = 0,
	sigp_status_stored = 1,
	sigp_busy = 2,
	sigp_not_operational = 3,
};

enum {
	ec_schedule = 0,
	ec_call_function,
	ec_call_function_single,
	ec_stop_cpu,
};

enum {
	CPU_STATE_STANDBY,
	CPU_STATE_CONFIGURED,
};

struct pcpu {
	struct cpu cpu;
	struct _lowcore *lowcore;	/* lowcore page(s) for the cpu */
	unsigned long async_stack;	/* async stack for the cpu */
	unsigned long panic_stack;	/* panic stack for the cpu */
	unsigned long ec_mask;		/* bit mask for ec_xxx functions */
	int state;			/* physical cpu state */
	u32 status;			/* last status received via sigp */
	u16 address;			/* physical cpu address */
};

static u8 boot_cpu_type;
static u16 boot_cpu_address;
static struct pcpu pcpu_devices[NR_CPUS];

DEFINE_MUTEX(smp_cpu_state_mutex);

/*
 * Signal processor helper functions.
 */
static inline int __pcpu_sigp(u16 addr, u8 order, u32 parm, u32 *status)
{
	register unsigned int reg1 asm ("1") = parm;
	int cc;

	asm volatile(
		" sigp %1,%2,0(%3)\n"
		" ipm %0\n"
		" srl %0,28\n"
		: "=d" (cc), "+d" (reg1) : "d" (addr), "a" (order) : "cc");
	if (status && cc == 1)
		*status = reg1;
	return cc;
}

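/*
 * Like __pcpu_sigp(), but keep retrying (with cpu_relax) as long as the
 * addressed CPU reports a busy condition.
 */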
static inline int __pcpu_sigp_relax(u16 addr, u8 order, u32 parm, u32 *status)
{
	int cc;

	while (1) {
		cc = __pcpu_sigp(addr, order, parm, status);
		if (cc != sigp_busy)
			return cc;
		cpu_relax();
	}
}

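/*
 * Retry a sigp order against a pcpu while it reports busy; after the first
 * few attempts back off with a short udelay between retries. The condition
 * code of the final attempt is returned.
 */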
static int pcpu_sigp_retry(struct pcpu *pcpu, u8 order, u32 parm)
{
	int cc, retry;

	for (retry = 0; ; retry++) {
		cc = __pcpu_sigp(pcpu->address, order, parm, &pcpu->status);
		if (cc != sigp_busy)
			break;
		if (retry >= 3)
			udelay(10);
	}
	return cc;
}

static inline int pcpu_stopped(struct pcpu *pcpu)
{
	if (__pcpu_sigp(pcpu->address, sigp_sense,
			0, &pcpu->status) != sigp_status_stored)
		return 0;
	/* Check for stopped and check stop state */
	return !!(pcpu->status & 0x50);
}

static inline int pcpu_running(struct pcpu *pcpu)
{
	if (__pcpu_sigp(pcpu->address, sigp_sense_running,
			0, &pcpu->status) != sigp_status_stored)
		return 1;
	/* Check for running status */
	return !(pcpu->status & 0x400);
}

/*
 * Find struct pcpu by cpu address.
 */
static struct pcpu *pcpu_find_address(const struct cpumask *mask, int address)
{
	int cpu;

	for_each_cpu(cpu, mask)
		if (pcpu_devices[cpu].address == address)
			return pcpu_devices + cpu;
	return NULL;
}

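/*
 * Request an ec_xxx function on another CPU: set the corresponding bit in
 * the pcpu's ec_mask and signal the CPU, using an external call if it is
 * running and an emergency signal otherwise.
 */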
static void pcpu_ec_call(struct pcpu *pcpu, int ec_bit)
{
	int order;

	set_bit(ec_bit, &pcpu->ec_mask);
	order = pcpu_running(pcpu) ?
		sigp_external_call : sigp_emergency_signal;
	pcpu_sigp_retry(pcpu, order, 0);
}

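/*
 * Allocate lowcore, async stack and panic stack for a CPU (the boot CPU in
 * pcpu_devices[0] keeps the areas set up during early boot), initialize the
 * lowcore from the running system and make it the CPU's prefix area.
 */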
static int __cpuinit pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
{
	struct _lowcore *lc;

	if (pcpu != &pcpu_devices[0]) {
		pcpu->lowcore = (struct _lowcore *)
			__get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
		pcpu->async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
		pcpu->panic_stack = __get_free_page(GFP_KERNEL);
		if (!pcpu->lowcore || !pcpu->panic_stack || !pcpu->async_stack)
			goto out;
	}
	lc = pcpu->lowcore;
	memcpy(lc, &S390_lowcore, 512);
	memset((char *) lc + 512, 0, sizeof(*lc) - 512);
	lc->async_stack = pcpu->async_stack + ASYNC_SIZE;
	lc->panic_stack = pcpu->panic_stack + PAGE_SIZE;
	lc->cpu_nr = cpu;
#ifndef CONFIG_64BIT
	if (MACHINE_HAS_IEEE) {
		lc->extended_save_area_addr = get_zeroed_page(GFP_KERNEL);
		if (!lc->extended_save_area_addr)
			goto out;
	}
#else
	if (vdso_alloc_per_cpu(lc))
		goto out;
#endif
	lowcore_ptr[cpu] = lc;
	pcpu_sigp_retry(pcpu, sigp_set_prefix, (u32)(unsigned long) lc);
	return 0;
out:
	if (pcpu != &pcpu_devices[0]) {
		free_page(pcpu->panic_stack);
		free_pages(pcpu->async_stack, ASYNC_ORDER);
		free_pages((unsigned long) pcpu->lowcore, LC_ORDER);
	}
	return -ENOMEM;
}

#ifdef CONFIG_HOTPLUG_CPU

static void pcpu_free_lowcore(struct pcpu *pcpu)
{
	pcpu_sigp_retry(pcpu, sigp_set_prefix, 0);
	lowcore_ptr[pcpu - pcpu_devices] = NULL;
#ifndef CONFIG_64BIT
	if (MACHINE_HAS_IEEE) {
		struct _lowcore *lc = pcpu->lowcore;

		free_page((unsigned long) lc->extended_save_area_addr);
		lc->extended_save_area_addr = 0;
	}
#else
	vdso_free_per_cpu(pcpu->lowcore);
#endif
	if (pcpu != &pcpu_devices[0]) {
		free_page(pcpu->panic_stack);
		free_pages(pcpu->async_stack, ASYNC_ORDER);
		free_pages((unsigned long) pcpu->lowcore, LC_ORDER);
	}
}

#endif /* CONFIG_HOTPLUG_CPU */

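/*
 * Finish the lowcore setup of a secondary CPU just before it is started:
 * per-cpu offset, kernel ASCE, machine flags, control and access registers
 * and the facility list are taken from the running system.
 */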
static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
{
	struct _lowcore *lc = pcpu->lowcore;

	atomic_inc(&init_mm.context.attach_count);
	lc->cpu_nr = cpu;
	lc->percpu_offset = __per_cpu_offset[cpu];
	lc->kernel_asce = S390_lowcore.kernel_asce;
	lc->machine_flags = S390_lowcore.machine_flags;
	lc->ftrace_func = S390_lowcore.ftrace_func;
	lc->user_timer = lc->system_timer = lc->steal_timer = 0;
	__ctl_store(lc->cregs_save_area, 0, 15);
	save_access_regs((unsigned int *) lc->access_regs_save_area);
	memcpy(lc->stfle_fac_list, S390_lowcore.stfle_fac_list,
	       MAX_FACILITY_BIT/8);
}

static void pcpu_attach_task(struct pcpu *pcpu, struct task_struct *tsk)
{
	struct _lowcore *lc = pcpu->lowcore;
	struct thread_info *ti = task_thread_info(tsk);

	lc->kernel_stack = (unsigned long) task_stack_page(tsk) + THREAD_SIZE;
	lc->thread_info = (unsigned long) task_thread_info(tsk);
	lc->current_task = (unsigned long) tsk;
	lc->user_timer = ti->user_timer;
	lc->system_timer = ti->system_timer;
	lc->steal_timer = 0;
}

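/*
 * Start func(data) on the target CPU by means of a PSW restart; the CPU
 * picks up restart_fn / restart_data from its lowcore.
 */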
static void pcpu_start_fn(struct pcpu *pcpu, void (*func)(void *), void *data)
{
	struct _lowcore *lc = pcpu->lowcore;

	lc->restart_stack = lc->kernel_stack;
	lc->restart_fn = (unsigned long) func;
	lc->restart_data = (unsigned long) data;
	lc->restart_source = -1UL;
	pcpu_sigp_retry(pcpu, sigp_restart, 0);
}

/*
 * Call function via PSW restart on pcpu and stop the current cpu.
 */
static void pcpu_delegate(struct pcpu *pcpu, void (*func)(void *),
			  void *data, unsigned long stack)
{
	struct _lowcore *lc = pcpu->lowcore;
	unsigned short this_cpu;

	__load_psw_mask(psw_kernel_bits);
	this_cpu = stap();
	if (pcpu->address == this_cpu)
		func(data);	/* should not return */
	/* Stop target cpu (if func returns this stops the current cpu). */
	pcpu_sigp_retry(pcpu, sigp_stop, 0);
	/* Restart func on the target cpu and stop the current cpu. */
	lc->restart_stack = stack;
	lc->restart_fn = (unsigned long) func;
	lc->restart_data = (unsigned long) data;
	lc->restart_source = (unsigned long) this_cpu;
	asm volatile(
		"0: sigp 0,%0,6 # sigp restart to target cpu\n"
		" brc 2,0b # busy, try again\n"
		"1: sigp 0,%1,5 # sigp stop to current cpu\n"
		" brc 2,1b # busy, try again\n"
		: : "d" (pcpu->address), "d" (this_cpu) : "0", "1", "cc");
	for (;;) ;
}

/*
 * Call function on an online CPU.
 */
void smp_call_online_cpu(void (*func)(void *), void *data)
{
	struct pcpu *pcpu;

	/* Use the current cpu if it is online. */
	pcpu = pcpu_find_address(cpu_online_mask, stap());
	if (!pcpu)
		/* Use the first online cpu. */
		pcpu = pcpu_devices + cpumask_first(cpu_online_mask);
	pcpu_delegate(pcpu, func, data, (unsigned long) restart_stack);
}

/*
 * Call function on the ipl CPU.
 */
void smp_call_ipl_cpu(void (*func)(void *), void *data)
{
	pcpu_delegate(&pcpu_devices[0], func, data,
		      pcpu_devices->panic_stack + PAGE_SIZE);
}

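/*
 * Translate a physical cpu address into the logical cpu number, or return
 * -1 if no present CPU matches.
 */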
int smp_find_processor_id(u16 address)
{
	int cpu;

	for_each_present_cpu(cpu)
		if (pcpu_devices[cpu].address == address)
			return cpu;
	return -1;
}

int smp_vcpu_scheduled(int cpu)
{
	return pcpu_running(pcpu_devices + cpu);
}

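/*
 * Yield the current virtual CPU to the hypervisor (diag 0x44). The variant
 * below, smp_yield_cpu(), directs the time slice to a specific CPU with
 * diag 0x9c where available.
 */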
void smp_yield(void)
{
	if (MACHINE_HAS_DIAG44)
		asm volatile("diag 0,0,0x44");
}

void smp_yield_cpu(int cpu)
{
	if (MACHINE_HAS_DIAG9C)
		asm volatile("diag %0,0,0x9c"
			     : : "d" (pcpu_devices[cpu].address));
	else if (MACHINE_HAS_DIAG44)
		asm volatile("diag 0,0,0x44");
}

/*
 * Send cpus emergency shutdown signal. This gives the cpus the
 * opportunity to complete outstanding interrupts.
 */
void smp_emergency_stop(cpumask_t *cpumask)
{
	u64 end;
	int cpu;

	end = get_clock() + (1000000UL << 12);
	for_each_cpu(cpu, cpumask) {
		struct pcpu *pcpu = pcpu_devices + cpu;

		set_bit(ec_stop_cpu, &pcpu->ec_mask);
		while (__pcpu_sigp(pcpu->address, sigp_emergency_signal,
				   0, NULL) == sigp_busy &&
		       get_clock() < end)
			cpu_relax();
	}
	while (get_clock() < end) {
		for_each_cpu(cpu, cpumask)
			if (pcpu_stopped(pcpu_devices + cpu))
				cpumask_clear_cpu(cpu, cpumask);
		if (cpumask_empty(cpumask))
			break;
		cpu_relax();
	}
}

/*
 * Stop all cpus but the current one.
 */
void smp_send_stop(void)
{
	cpumask_t cpumask;
	int cpu;

	/* Disable all interrupts/machine checks */
	__load_psw_mask(psw_kernel_bits | PSW_MASK_DAT);
	trace_hardirqs_off();

	debug_set_critical();
	cpumask_copy(&cpumask, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), &cpumask);

	if (oops_in_progress)
		smp_emergency_stop(&cpumask);

	/* stop all processors */
	for_each_cpu(cpu, &cpumask) {
		struct pcpu *pcpu = pcpu_devices + cpu;

		pcpu_sigp_retry(pcpu, sigp_stop, 0);
		while (!pcpu_stopped(pcpu))
			cpu_relax();
	}
}

/*
 * Stop the current cpu.
 */
void smp_stop_cpu(void)
{
	pcpu_sigp_retry(pcpu_devices + smp_processor_id(), sigp_stop, 0);
	for (;;) ;
}

/*
 * This is the main routine where commands issued by other
 * cpus are handled.
 */
static void do_ext_call_interrupt(struct ext_code ext_code,
				  unsigned int param32, unsigned long param64)
{
	unsigned long bits;
	int cpu;

	cpu = smp_processor_id();
	if (ext_code.code == 0x1202)
		kstat_cpu(cpu).irqs[EXTINT_EXC]++;
	else
		kstat_cpu(cpu).irqs[EXTINT_EMS]++;
	/*
	 * handle bit signal external calls
	 */
	bits = xchg(&pcpu_devices[cpu].ec_mask, 0);

	if (test_bit(ec_stop_cpu, &bits))
		smp_stop_cpu();

	if (test_bit(ec_schedule, &bits))
		scheduler_ipi();

	if (test_bit(ec_call_function, &bits))
		generic_smp_call_function_interrupt();

	if (test_bit(ec_call_function_single, &bits))
		generic_smp_call_function_single_interrupt();
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	int cpu;

	for_each_cpu(cpu, mask)
		pcpu_ec_call(pcpu_devices + cpu, ec_call_function);
}

void arch_send_call_function_single_ipi(int cpu)
{
	pcpu_ec_call(pcpu_devices + cpu, ec_call_function_single);
}

#ifndef CONFIG_64BIT
/*
 * this function sends a 'purge tlb' signal to another CPU.
 */
static void smp_ptlb_callback(void *info)
{
	__tlb_flush_local();
}

void smp_ptlb_all(void)
{
	on_each_cpu(smp_ptlb_callback, NULL, 1);
}
EXPORT_SYMBOL(smp_ptlb_all);
#endif /* ! CONFIG_64BIT */

/*
 * this function sends a 'reschedule' IPI to another CPU.
 * it goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */
void smp_send_reschedule(int cpu)
{
	pcpu_ec_call(pcpu_devices + cpu, ec_schedule);
}

/*
 * parameter area for the set/clear control bit callbacks
 */
struct ec_creg_mask_parms {
	unsigned long orval;
	unsigned long andval;
	int cr;
};

/*
 * callback for setting/clearing control bits
 */
static void smp_ctl_bit_callback(void *info)
{
	struct ec_creg_mask_parms *pp = info;
	unsigned long cregs[16];

	__ctl_store(cregs, 0, 15);
	cregs[pp->cr] = (cregs[pp->cr] & pp->andval) | pp->orval;
	__ctl_load(cregs, 0, 15);
}

/*
 * Set a bit in a control register of all cpus
 */
void smp_ctl_set_bit(int cr, int bit)
{
	struct ec_creg_mask_parms parms = { 1UL << bit, -1UL, cr };

	on_each_cpu(smp_ctl_bit_callback, &parms, 1);
}
EXPORT_SYMBOL(smp_ctl_set_bit);

/*
 * Clear a bit in a control register of all cpus
 */
void smp_ctl_clear_bit(int cr, int bit)
{
	struct ec_creg_mask_parms parms = { 0, ~(1UL << bit), cr };

	on_each_cpu(smp_ctl_bit_callback, &parms, 1);
}
EXPORT_SYMBOL(smp_ctl_clear_bit);

#if defined(CONFIG_ZFCPDUMP) || defined(CONFIG_CRASH_DUMP)

struct save_area *zfcpdump_save_areas[NR_CPUS + 1];
EXPORT_SYMBOL_GPL(zfcpdump_save_areas);

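/*
 * Collect the register save area of a CPU for a later system dump
 * (zfcpdump/kdump): the boot CPU's registers are copied from the pre-dump
 * memory, other CPUs are stopped with "stop and store status" first.
 */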
static void __init smp_get_save_area(int cpu, u16 address)
{
	void *lc = pcpu_devices[0].lowcore;
	struct save_area *save_area;

	if (is_kdump_kernel())
		return;
	if (!OLDMEM_BASE && (address == boot_cpu_address ||
			     ipl_info.type != IPL_TYPE_FCP_DUMP))
		return;
	if (cpu >= NR_CPUS) {
		pr_warning("CPU %i exceeds the maximum %i and is excluded "
			   "from the dump\n", cpu, NR_CPUS - 1);
		return;
	}
	save_area = kmalloc(sizeof(struct save_area), GFP_KERNEL);
	if (!save_area)
		panic("could not allocate memory for save area\n");
	zfcpdump_save_areas[cpu] = save_area;
#ifdef CONFIG_CRASH_DUMP
	if (address == boot_cpu_address) {
		/* Copy the registers of the boot cpu. */
		copy_oldmem_page(1, (void *) save_area, sizeof(*save_area),
				 SAVE_AREA_BASE - PAGE_SIZE, 0);
		return;
	}
#endif
	/* Get the registers of a non-boot cpu. */
	__pcpu_sigp_relax(address, sigp_stop_and_store_status, 0, NULL);
	memcpy_real(save_area, lc + SAVE_AREA_BASE, sizeof(*save_area));
}

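/*
 * Stop a CPU and make it store its status into its save area; returns
 * -EIO if the sigp order is not accepted.
 */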
int smp_store_status(int cpu)
{
	struct pcpu *pcpu;

	pcpu = pcpu_devices + cpu;
	if (__pcpu_sigp_relax(pcpu->address, sigp_stop_and_store_status,
			      0, NULL) != sigp_order_code_accepted)
		return -EIO;
	return 0;
}

#else /* CONFIG_ZFCPDUMP || CONFIG_CRASH_DUMP */

static inline void smp_get_save_area(int cpu, u16 address) { }

#endif /* CONFIG_ZFCPDUMP || CONFIG_CRASH_DUMP */

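/*
 * Read the CPU configuration from the SCLP. If that fails once, fall back
 * to (and keep using) sigp sense detection of all possible CPU addresses.
 */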
static struct sclp_cpu_info *smp_get_cpu_info(void)
{
	static int use_sigp_detection;
	struct sclp_cpu_info *info;
	int address;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (info && (use_sigp_detection || sclp_get_cpu_info(info))) {
		use_sigp_detection = 1;
		for (address = 0; address <= MAX_CPU_ADDRESS; address++) {
			if (__pcpu_sigp_relax(address, sigp_sense, 0, NULL) ==
			    sigp_not_operational)
				continue;
			info->cpu[info->configured].address = address;
			info->configured++;
		}
		info->combined = info->configured;
	}
	return info;
}

static int __devinit smp_add_present_cpu(int cpu);

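/*
 * Assign free logical cpu numbers to the newly detected physical CPUs,
 * mark them present and, if requested, register their sysfs devices.
 * Returns the number of CPUs that were added.
 */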
static int __devinit __smp_rescan_cpus(struct sclp_cpu_info *info,
				       int sysfs_add)
{
	struct pcpu *pcpu;
	cpumask_t avail;
	int cpu, nr, i;

	nr = 0;
	cpumask_xor(&avail, cpu_possible_mask, cpu_present_mask);
	cpu = cpumask_first(&avail);
	for (i = 0; (i < info->combined) && (cpu < nr_cpu_ids); i++) {
		if (info->has_cpu_type && info->cpu[i].type != boot_cpu_type)
			continue;
		if (pcpu_find_address(cpu_present_mask, info->cpu[i].address))
			continue;
		pcpu = pcpu_devices + cpu;
		pcpu->address = info->cpu[i].address;
		pcpu->state = (cpu >= info->configured) ?
			CPU_STATE_STANDBY : CPU_STATE_CONFIGURED;
		cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
		set_cpu_present(cpu, true);
		if (sysfs_add && smp_add_present_cpu(cpu) != 0)
			set_cpu_present(cpu, false);
		else
			nr++;
		cpu = cpumask_next(cpu, &avail);
	}
	return nr;
}

static void __init smp_detect_cpus(void)
{
	unsigned int cpu, c_cpus, s_cpus;
	struct sclp_cpu_info *info;

	info = smp_get_cpu_info();
	if (!info)
		panic("smp_detect_cpus failed to allocate memory\n");
	if (info->has_cpu_type) {
		for (cpu = 0; cpu < info->combined; cpu++) {
			if (info->cpu[cpu].address != boot_cpu_address)
				continue;
			/* The boot cpu dictates the cpu type. */
			boot_cpu_type = info->cpu[cpu].type;
			break;
		}
	}
	c_cpus = s_cpus = 0;
	for (cpu = 0; cpu < info->combined; cpu++) {
		if (info->has_cpu_type && info->cpu[cpu].type != boot_cpu_type)
			continue;
		if (cpu < info->configured) {
			smp_get_save_area(c_cpus, info->cpu[cpu].address);
			c_cpus++;
		} else
			s_cpus++;
	}
	pr_info("%d configured CPUs, %d standby CPUs\n", c_cpus, s_cpus);
	get_online_cpus();
	__smp_rescan_cpus(info, 0);
	put_online_cpus();
	kfree(info);
}

/*
 * Activate a secondary processor.
 */
static void __cpuinit smp_start_secondary(void *cpuvoid)
{
	S390_lowcore.last_update_clock = get_clock();
	S390_lowcore.restart_stack = (unsigned long) restart_stack;
	S390_lowcore.restart_fn = (unsigned long) do_restart;
	S390_lowcore.restart_data = 0;
	S390_lowcore.restart_source = -1UL;
	restore_access_regs(S390_lowcore.access_regs_save_area);
	__ctl_load(S390_lowcore.cregs_save_area, 0, 15);
	__load_psw_mask(psw_kernel_bits | PSW_MASK_DAT);
	cpu_init();
	preempt_disable();
	init_cpu_timer();
	init_cpu_vtimer();
	pfault_init();
	notify_cpu_starting(smp_processor_id());
	ipi_call_lock();
	set_cpu_online(smp_processor_id(), true);
	ipi_call_unlock();
	local_irq_enable();
	/* cpu_idle will call schedule for us */
	cpu_idle();
}

/* Upping and downing of CPUs */
int __cpuinit __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	struct pcpu *pcpu;
	int rc;

	pcpu = pcpu_devices + cpu;
	if (pcpu->state != CPU_STATE_CONFIGURED)
		return -EIO;
	if (pcpu_sigp_retry(pcpu, sigp_initial_cpu_reset, 0) !=
	    sigp_order_code_accepted)
		return -EIO;
	rc = pcpu_alloc_lowcore(pcpu, cpu);
	if (rc)
		return rc;
	pcpu_prepare_secondary(pcpu, cpu);
	pcpu_attach_task(pcpu, tidle);
	pcpu_start_fn(pcpu, smp_start_secondary, NULL);
	while (!cpu_online(cpu))
		cpu_relax();
	return 0;
}

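/*
 * Early parameter: limit the number of possible CPUs to the value given
 * with "possible_cpus=".
 */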
static int __init setup_possible_cpus(char *s)
{
	int max, cpu;

	if (kstrtoint(s, 0, &max) < 0)
		return 0;
	init_cpu_possible(cpumask_of(0));
	for (cpu = 1; cpu < max && cpu < nr_cpu_ids; cpu++)
		set_cpu_possible(cpu, true);
	return 0;
}
early_param("possible_cpus", setup_possible_cpus);

#ifdef CONFIG_HOTPLUG_CPU

int __cpu_disable(void)
{
	unsigned long cregs[16];

	set_cpu_online(smp_processor_id(), false);
	/* Disable pseudo page faults on this cpu. */
	pfault_fini();
	/* Disable interrupt sources via control register. */
	__ctl_store(cregs, 0, 15);
	cregs[0] &= ~0x0000ee70UL;	/* disable all external interrupts */
	cregs[6] &= ~0xff000000UL;	/* disable all I/O interrupts */
	cregs[14] &= ~0x1f000000UL;	/* disable most machine checks */
	__ctl_load(cregs, 0, 15);
	return 0;
}

void __cpu_die(unsigned int cpu)
{
	struct pcpu *pcpu;

	/* Wait until target cpu is down */
	pcpu = pcpu_devices + cpu;
	while (!pcpu_stopped(pcpu))
		cpu_relax();
	pcpu_free_lowcore(pcpu);
	atomic_dec(&init_mm.context.attach_count);
}

void __noreturn cpu_die(void)
{
	idle_task_exit();
	pcpu_sigp_retry(pcpu_devices + smp_processor_id(), sigp_stop, 0);
	for (;;) ;
}

#endif /* CONFIG_HOTPLUG_CPU */

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	/* request the 0x1201 emergency signal external interrupt */
	if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0)
		panic("Couldn't request external interrupt 0x1201");
	/* request the 0x1202 external call external interrupt */
	if (register_external_interrupt(0x1202, do_ext_call_interrupt) != 0)
		panic("Couldn't request external interrupt 0x1202");
	smp_detect_cpus();
}

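/*
 * Set up pcpu_devices[0] for the already running boot CPU, reusing the
 * prefix area and the stacks installed during early startup.
 */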
void __init smp_prepare_boot_cpu(void)
{
	struct pcpu *pcpu = pcpu_devices;

	boot_cpu_address = stap();
	pcpu->state = CPU_STATE_CONFIGURED;
	pcpu->address = boot_cpu_address;
	pcpu->lowcore = (struct _lowcore *)(unsigned long) store_prefix();
	pcpu->async_stack = S390_lowcore.async_stack - ASYNC_SIZE;
	pcpu->panic_stack = S390_lowcore.panic_stack - PAGE_SIZE;
	S390_lowcore.percpu_offset = __per_cpu_offset[0];
	cpu_set_polarization(0, POLARIZATION_UNKNOWN);
	set_cpu_present(0, true);
	set_cpu_online(0, true);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
}

void __init smp_setup_processor_id(void)
{
	S390_lowcore.cpu_nr = 0;
}

/*
 * the frequency of the profiling timer can be changed
 * by writing a multiplier value into /proc/profile.
 *
 * usually you want to run this on all CPUs ;)
 */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
static ssize_t cpu_configure_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	ssize_t count;

	mutex_lock(&smp_cpu_state_mutex);
	count = sprintf(buf, "%d\n", pcpu_devices[dev->id].state);
	mutex_unlock(&smp_cpu_state_mutex);
	return count;
}

static ssize_t cpu_configure_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct pcpu *pcpu;
	int cpu, val, rc;
	char delim;

	if (sscanf(buf, "%d %c", &val, &delim) != 1)
		return -EINVAL;
	if (val != 0 && val != 1)
		return -EINVAL;
	get_online_cpus();
	mutex_lock(&smp_cpu_state_mutex);
	rc = -EBUSY;
	/* disallow configuration changes of online cpus and cpu 0 */
	cpu = dev->id;
	if (cpu_online(cpu) || cpu == 0)
		goto out;
	pcpu = pcpu_devices + cpu;
	rc = 0;
	switch (val) {
	case 0:
		if (pcpu->state != CPU_STATE_CONFIGURED)
			break;
		rc = sclp_cpu_deconfigure(pcpu->address);
		if (rc)
			break;
		pcpu->state = CPU_STATE_STANDBY;
		cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
		topology_expect_change();
		break;
	case 1:
		if (pcpu->state != CPU_STATE_STANDBY)
			break;
		rc = sclp_cpu_configure(pcpu->address);
		if (rc)
			break;
		pcpu->state = CPU_STATE_CONFIGURED;
		cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
		topology_expect_change();
		break;
	default:
		break;
	}
out:
	mutex_unlock(&smp_cpu_state_mutex);
	put_online_cpus();
	return rc ? rc : count;
}
static DEVICE_ATTR(configure, 0644, cpu_configure_show, cpu_configure_store);
#endif /* CONFIG_HOTPLUG_CPU */

static ssize_t show_cpu_address(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", pcpu_devices[dev->id].address);
}
static DEVICE_ATTR(address, 0444, show_cpu_address, NULL);

static struct attribute *cpu_common_attrs[] = {
#ifdef CONFIG_HOTPLUG_CPU
	&dev_attr_configure.attr,
#endif
	&dev_attr_address.attr,
	NULL,
};

static struct attribute_group cpu_common_attr_group = {
	.attrs = cpu_common_attrs,
};

static ssize_t show_capability(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	unsigned int capability;
	int rc;

	rc = get_cpu_capability(&capability);
	if (rc)
		return rc;
	return sprintf(buf, "%u\n", capability);
}
static DEVICE_ATTR(capability, 0444, show_capability, NULL);

static ssize_t show_idle_count(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct s390_idle_data *idle = &per_cpu(s390_idle, dev->id);
	unsigned long long idle_count;
	unsigned int sequence;

	do {
		sequence = ACCESS_ONCE(idle->sequence);
		idle_count = ACCESS_ONCE(idle->idle_count);
		if (ACCESS_ONCE(idle->idle_enter))
			idle_count++;
	} while ((sequence & 1) || (idle->sequence != sequence));
	return sprintf(buf, "%llu\n", idle_count);
}
static DEVICE_ATTR(idle_count, 0444, show_idle_count, NULL);

static ssize_t show_idle_time(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct s390_idle_data *idle = &per_cpu(s390_idle, dev->id);
	unsigned long long now, idle_time, idle_enter, idle_exit;
	unsigned int sequence;

	do {
		now = get_clock();
		sequence = ACCESS_ONCE(idle->sequence);
		idle_time = ACCESS_ONCE(idle->idle_time);
		idle_enter = ACCESS_ONCE(idle->idle_enter);
		idle_exit = ACCESS_ONCE(idle->idle_exit);
	} while ((sequence & 1) || (idle->sequence != sequence));
	idle_time += idle_enter ? ((idle_exit ? : now) - idle_enter) : 0;
	return sprintf(buf, "%llu\n", idle_time >> 12);
}
static DEVICE_ATTR(idle_time_us, 0444, show_idle_time, NULL);

static struct attribute *cpu_online_attrs[] = {
	&dev_attr_capability.attr,
	&dev_attr_idle_count.attr,
	&dev_attr_idle_time_us.attr,
	NULL,
};

static struct attribute_group cpu_online_attr_group = {
	.attrs = cpu_online_attrs,
};

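/*
 * CPU hotplug notifier: create the online-only sysfs attributes (and reset
 * the idle statistics) when a CPU comes online, remove them again when the
 * CPU dies.
 */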
static int __cpuinit smp_cpu_notify(struct notifier_block *self,
				    unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned int)(long)hcpu;
	struct cpu *c = &pcpu_devices[cpu].cpu;
	struct device *s = &c->dev;
	struct s390_idle_data *idle;
	int err = 0;

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		idle = &per_cpu(s390_idle, cpu);
		memset(idle, 0, sizeof(struct s390_idle_data));
		err = sysfs_create_group(&s->kobj, &cpu_online_attr_group);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		sysfs_remove_group(&s->kobj, &cpu_online_attr_group);
		break;
	}
	return notifier_from_errno(err);
}

static struct notifier_block __cpuinitdata smp_cpu_nb = {
	.notifier_call = smp_cpu_notify,
};

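/*
 * Register a present CPU with the driver core and create its sysfs
 * attribute groups; on failure everything is unwound again.
 */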
static int __devinit smp_add_present_cpu(int cpu)
{
	struct cpu *c = &pcpu_devices[cpu].cpu;
	struct device *s = &c->dev;
	int rc;

	c->hotpluggable = 1;
	rc = register_cpu(c, cpu);
	if (rc)
		goto out;
	rc = sysfs_create_group(&s->kobj, &cpu_common_attr_group);
	if (rc)
		goto out_cpu;
	if (cpu_online(cpu)) {
		rc = sysfs_create_group(&s->kobj, &cpu_online_attr_group);
		if (rc)
			goto out_online;
	}
	rc = topology_cpu_init(c);
	if (rc)
		goto out_topology;
	return 0;

out_topology:
	if (cpu_online(cpu))
		sysfs_remove_group(&s->kobj, &cpu_online_attr_group);
out_online:
	sysfs_remove_group(&s->kobj, &cpu_common_attr_group);
out_cpu:
#ifdef CONFIG_HOTPLUG_CPU
	unregister_cpu(c);
#endif
out:
	return rc;
}

#ifdef CONFIG_HOTPLUG_CPU

int __ref smp_rescan_cpus(void)
{
	struct sclp_cpu_info *info;
	int nr;

	info = smp_get_cpu_info();
	if (!info)
		return -ENOMEM;
	get_online_cpus();
	mutex_lock(&smp_cpu_state_mutex);
	nr = __smp_rescan_cpus(info, 1);
	mutex_unlock(&smp_cpu_state_mutex);
	put_online_cpus();
	kfree(info);
	if (nr)
		topology_schedule_update();
	return 0;
}

static ssize_t __ref rescan_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf,
				  size_t count)
{
	int rc;

	rc = smp_rescan_cpus();
	return rc ? rc : count;
}
static DEVICE_ATTR(rescan, 0200, NULL, rescan_store);
#endif /* CONFIG_HOTPLUG_CPU */

static int __init s390_smp_init(void)
{
	int cpu, rc;

	register_cpu_notifier(&smp_cpu_nb);
#ifdef CONFIG_HOTPLUG_CPU
	rc = device_create_file(cpu_subsys.dev_root, &dev_attr_rescan);
	if (rc)
		return rc;
#endif
	for_each_present_cpu(cpu) {
		rc = smp_add_present_cpu(cpu);
		if (rc)
			return rc;
	}
	return 0;
}
subsys_initcall(s390_smp_init);