/* smp.c: Sparc64 SMP support.
 *
 * Copyright (C) 1997, 2007, 2008 David S. Miller (davem@davemloft.net)
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/threads.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/cache.h>
#include <linux/jiffies.h>
#include <linux/profile.h>
#include <linux/lmb.h>

#include <asm/head.h>
#include <asm/ptrace.h>
#include <asm/atomic.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/cpudata.h>
#include <asm/hvtramp.h>
#include <asm/io.h>
#include <asm/timer.h>
#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/uaccess.h>
#include <asm/starfire.h>
#include <asm/tlb.h>
#include <asm/sections.h>
#include <asm/prom.h>
#include <asm/mdesc.h>
#include <asm/ldc.h>
#include <asm/hypervisor.h>

int sparc64_multi_core __read_mostly;

cpumask_t cpu_possible_map __read_mostly = CPU_MASK_NONE;
cpumask_t cpu_online_map __read_mostly = CPU_MASK_NONE;
DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE;
cpumask_t cpu_core_map[NR_CPUS] __read_mostly =
	{ [0 ... NR_CPUS-1] = CPU_MASK_NONE };

EXPORT_SYMBOL(cpu_possible_map);
EXPORT_SYMBOL(cpu_online_map);
EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
EXPORT_SYMBOL(cpu_core_map);

static cpumask_t smp_commenced_mask;

void smp_info(struct seq_file *m)
{
	int i;

	seq_printf(m, "State:\n");
	for_each_online_cpu(i)
		seq_printf(m, "CPU%d:\t\tonline\n", i);
}

void smp_bogo(struct seq_file *m)
{
	int i;

	for_each_online_cpu(i)
		seq_printf(m,
			   "Cpu%dClkTck\t: %016lx\n",
			   i, cpu_data(i).clock_tick);
}

static __cacheline_aligned_in_smp DEFINE_SPINLOCK(call_lock);

extern void setup_sparc64_timer(void);

static volatile unsigned long callin_flag = 0;

void __cpuinit smp_callin(void)
{
	int cpuid = hard_smp_processor_id();

	__local_per_cpu_offset = __per_cpu_offset(cpuid);

	if (tlb_type == hypervisor)
		sun4v_ktsb_register();

	__flush_tlb_all();

	setup_sparc64_timer();

	if (cheetah_pcache_forced_on)
		cheetah_enable_pcache();

	local_irq_enable();

	callin_flag = 1;
	__asm__ __volatile__("membar #Sync\n\t"
			     "flush %%g6" : : : "memory");

	/* Clear this or we will die instantly when we
	 * schedule back to this idler...
	 */
	current_thread_info()->new_child = 0;

	/* Attach to the address space of init_task. */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

	while (!cpu_isset(cpuid, smp_commenced_mask))
		rmb();

	spin_lock(&call_lock);
	cpu_set(cpuid, cpu_online_map);
	spin_unlock(&call_lock);

	/* idle thread is expected to have preempt disabled */
	preempt_disable();
}

void cpu_panic(void)
{
	printk("CPU[%d]: Returns from cpu_idle!\n", smp_processor_id());
	panic("SMP bolixed\n");
}
/* This tick register synchronization scheme is taken entirely from
 * the ia64 port, see arch/ia64/kernel/smpboot.c for details and credit.
 *
 * The only change I've made is to rework it so that the master
 * initiates the synchronization instead of the slave. -DaveM
 */

#define MASTER	0
#define SLAVE	(SMP_CACHE_BYTES/sizeof(unsigned long))

#define NUM_ROUNDS	64	/* magic value */
#define NUM_ITERS	5	/* likewise */

static DEFINE_SPINLOCK(itc_sync_lock);
static unsigned long go[SLAVE + 1];

#define DEBUG_TICK_SYNC	0
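/* Estimate the offset between the local %tick and the master's, using
 * NUM_ITERS handshakes over the go[] array; the sample with the shortest
 * round trip wins.  Returns the local midpoint minus the master's
 * timestamp; the round-trip time and the master's timestamp are passed
 * back through *rt and *master.
 */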
static inline long get_delta (long *rt, long *master)
{
	unsigned long best_t0 = 0, best_t1 = ~0UL, best_tm = 0;
	unsigned long tcenter, t0, t1, tm;
	unsigned long i;

	for (i = 0; i < NUM_ITERS; i++) {
		t0 = tick_ops->get_tick();
		go[MASTER] = 1;
		membar_storeload();
		while (!(tm = go[SLAVE]))
			rmb();
		go[SLAVE] = 0;
		wmb();
		t1 = tick_ops->get_tick();

		if (t1 - t0 < best_t1 - best_t0)
			best_t0 = t0, best_t1 = t1, best_tm = tm;
	}

	*rt = best_t1 - best_t0;
	*master = best_tm - best_t0;

	/* average best_t0 and best_t1 without overflow: */
	tcenter = (best_t0/2 + best_t1/2);
	if (best_t0 % 2 + best_t1 % 2 == 2)
		tcenter++;
	return tcenter - best_tm;
}

void smp_synchronize_tick_client(void)
{
	long i, delta, adj, adjust_latency = 0, done = 0;
	unsigned long flags, rt, master_time_stamp, bound;
#if DEBUG_TICK_SYNC
	struct {
		long rt;	/* roundtrip time */
		long master;	/* master's timestamp */
		long diff;	/* difference between midpoint and master's timestamp */
		long lat;	/* estimate of itc adjustment latency */
	} t[NUM_ROUNDS];
#endif

	go[MASTER] = 1;

	while (go[MASTER])
		rmb();

	local_irq_save(flags);
	{
		for (i = 0; i < NUM_ROUNDS; i++) {
			delta = get_delta(&rt, &master_time_stamp);
			if (delta == 0) {
				done = 1;	/* let's lock on to this... */
				bound = rt;
			}

			if (!done) {
				if (i > 0) {
					adjust_latency += -delta;
					adj = -delta + adjust_latency/4;
				} else
					adj = -delta;

				tick_ops->add_tick(adj);
			}
#if DEBUG_TICK_SYNC
			t[i].rt = rt;
			t[i].master = master_time_stamp;
			t[i].diff = delta;
			t[i].lat = adjust_latency/4;
#endif
		}
	}
	local_irq_restore(flags);

#if DEBUG_TICK_SYNC
	for (i = 0; i < NUM_ROUNDS; i++)
		printk("rt=%5ld master=%5ld diff=%5ld adjlat=%5ld\n",
		       t[i].rt, t[i].master, t[i].diff, t[i].lat);
#endif

	printk(KERN_INFO "CPU %d: synchronized TICK with master CPU "
	       "(last diff %ld cycles, maxerr %lu cycles)\n",
	       smp_processor_id(), delta, rt);
}

static void smp_start_sync_tick_client(int cpu);

static void smp_synchronize_one_tick(int cpu)
{
	unsigned long flags, i;

	go[MASTER] = 0;

	smp_start_sync_tick_client(cpu);

	/* wait for client to be ready */
	while (!go[MASTER])
		rmb();

	/* now let the client proceed into his loop */
	go[MASTER] = 0;
	membar_storeload();

	spin_lock_irqsave(&itc_sync_lock, flags);
	{
		for (i = 0; i < NUM_ROUNDS*NUM_ITERS; i++) {
			while (!go[MASTER])
				rmb();
			go[MASTER] = 0;
			wmb();
			go[SLAVE] = tick_ops->get_tick();
			membar_storeload();
		}
	}
	spin_unlock_irqrestore(&itc_sync_lock, flags);
}
#if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU)
/* XXX Put this in some common place. XXX */
static unsigned long kimage_addr_to_ra(void *p)
{
	unsigned long val = (unsigned long) p;

	return kern_base + (val - KERNBASE);
}

static void ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread_reg)
{
	extern unsigned long sparc64_ttable_tl0;
	extern unsigned long kern_locked_tte_data;
	struct hvtramp_descr *hdesc;
	unsigned long trampoline_ra;
	struct trap_per_cpu *tb;
	u64 tte_vaddr, tte_data;
	unsigned long hv_err;
	int i;

	hdesc = kzalloc(sizeof(*hdesc) +
			(sizeof(struct hvtramp_mapping) *
			 num_kernel_image_mappings - 1),
			GFP_KERNEL);
	if (!hdesc) {
		printk(KERN_ERR "ldom_startcpu_cpuid: Cannot allocate "
		       "hvtramp_descr.\n");
		return;
	}

	hdesc->cpu = cpu;
	hdesc->num_mappings = num_kernel_image_mappings;

	tb = &trap_block[cpu];
	tb->hdesc = hdesc;

	hdesc->fault_info_va = (unsigned long) &tb->fault_info;
	hdesc->fault_info_pa = kimage_addr_to_ra(&tb->fault_info);

	hdesc->thread_reg = thread_reg;

	tte_vaddr = (unsigned long) KERNBASE;
	tte_data = kern_locked_tte_data;

	for (i = 0; i < hdesc->num_mappings; i++) {
		hdesc->maps[i].vaddr = tte_vaddr;
		hdesc->maps[i].tte = tte_data;
		tte_vaddr += 0x400000;
		tte_data += 0x400000;
	}

	trampoline_ra = kimage_addr_to_ra(hv_cpu_startup);

	hv_err = sun4v_cpu_start(cpu, trampoline_ra,
				 kimage_addr_to_ra(&sparc64_ttable_tl0),
				 __pa(hdesc));
	if (hv_err)
		printk(KERN_ERR "ldom_startcpu_cpuid: sun4v_cpu_start() "
		       "gives error %lu\n", hv_err);
}
#endif

extern unsigned long sparc64_cpu_startup;

/* The OBP cpu startup callback truncates the 3rd arg cookie to
 * 32-bits (I think) so to be safe we have it read the pointer
 * contained here so we work on >4GB machines. -DaveM
 */
static struct thread_info *cpu_new_thread = NULL;

static int __devinit smp_boot_one_cpu(unsigned int cpu)
{
	struct trap_per_cpu *tb = &trap_block[cpu];
	unsigned long entry =
		(unsigned long)(&sparc64_cpu_startup);
	unsigned long cookie =
		(unsigned long)(&cpu_new_thread);
	struct task_struct *p;
	int timeout, ret;

	p = fork_idle(cpu);
	if (IS_ERR(p))
		return PTR_ERR(p);
	callin_flag = 0;
	cpu_new_thread = task_thread_info(p);

	if (tlb_type == hypervisor) {
#if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU)
		if (ldom_domaining_enabled)
			ldom_startcpu_cpuid(cpu,
					    (unsigned long) cpu_new_thread);
		else
#endif
			prom_startcpu_cpuid(cpu, entry, cookie);
	} else {
		struct device_node *dp = of_find_node_by_cpuid(cpu);

		prom_startcpu(dp->node, entry, cookie);
	}

	for (timeout = 0; timeout < 50000; timeout++) {
		if (callin_flag)
			break;
		udelay(100);
	}

	if (callin_flag) {
		ret = 0;
	} else {
		printk("Processor %d is stuck.\n", cpu);
		ret = -ENODEV;
	}
	cpu_new_thread = NULL;

	if (tb->hdesc) {
		kfree(tb->hdesc);
		tb->hdesc = NULL;
	}

	return ret;
}
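/* Deliver one cross-call interrupt to a single cpu through the Spitfire
 * UDB interrupt dispatch registers, polling the dispatch status register
 * and retrying when the target NACKs the delivery.
 */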
static void spitfire_xcall_helper(u64 data0, u64 data1, u64 data2, u64 pstate, unsigned long cpu)
{
	u64 result, target;
	int stuck, tmp;

	if (this_is_starfire) {
		/* map to real upaid */
		cpu = (((cpu & 0x3c) << 1) |
		       ((cpu & 0x40) >> 4) |
		       (cpu & 0x3));
	}

	target = (cpu << 14) | 0x70;
again:
	/* Ok, this is the real Spitfire Errata #54.
	 * One must read back from a UDB internal register
	 * after writes to the UDB interrupt dispatch, but
	 * before the membar Sync for that write.
	 * So we use the high UDB control register (ASI 0x7f,
	 * ADDR 0x20) for the dummy read. -DaveM
	 */
	tmp = 0x40;
	__asm__ __volatile__(
		"wrpr %1, %2, %%pstate\n\t"
		"stxa %4, [%0] %3\n\t"
		"stxa %5, [%0+%8] %3\n\t"
		"add %0, %8, %0\n\t"
		"stxa %6, [%0+%8] %3\n\t"
		"membar #Sync\n\t"
		"stxa %%g0, [%7] %3\n\t"
		"membar #Sync\n\t"
		"mov 0x20, %%g1\n\t"
		"ldxa [%%g1] 0x7f, %%g0\n\t"
		"membar #Sync"
		: "=r" (tmp)
		: "r" (pstate), "i" (PSTATE_IE), "i" (ASI_INTR_W),
		  "r" (data0), "r" (data1), "r" (data2), "r" (target),
		  "r" (0x10), "0" (tmp)
		: "g1");

	/* NOTE: PSTATE_IE is still clear. */
	stuck = 100000;
	do {
		__asm__ __volatile__("ldxa [%%g0] %1, %0"
				     : "=r" (result)
				     : "i" (ASI_INTR_DISPATCH_STAT));
		if (result == 0) {
			__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
					     : : "r" (pstate));
			return;
		}
		stuck -= 1;
		if (stuck == 0)
			break;
	} while (result & 0x1);

	__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
			     : : "r" (pstate));
	if (stuck == 0) {
		printk("CPU[%d]: mondo stuckage result[%016lx]\n",
		       smp_processor_id(), result);
	} else {
		udelay(2);
		goto again;
	}
}

static inline void spitfire_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
{
	u64 pstate;
	int i;

	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
	for_each_cpu_mask(i, mask)
		spitfire_xcall_helper(data0, data1, data2, pstate, i);
}

/* Cheetah now allows to send the whole 64-bytes of data in the interrupt
 * packet, but we have no use for that. However we do take advantage of
 * the new pipelining feature (ie. dispatch to multiple cpus simultaneously).
 */
static void cheetah_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
{
	u64 pstate, ver, busy_mask;
	int nack_busy_id, is_jbus, need_more;

	if (cpus_empty(mask))
		return;

	/* Unfortunately, someone at Sun had the brilliant idea to make the
	 * busy/nack fields hard-coded by ITID number for this Ultra-III
	 * derivative processor.
	 */
	__asm__ ("rdpr %%ver, %0" : "=r" (ver));
	is_jbus = ((ver >> 32) == __JALAPENO_ID ||
		   (ver >> 32) == __SERRANO_ID);

	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));

retry:
	need_more = 0;
	__asm__ __volatile__("wrpr %0, %1, %%pstate\n\t"
			     : : "r" (pstate), "i" (PSTATE_IE));

	/* Setup the dispatch data registers. */
	__asm__ __volatile__("stxa %0, [%3] %6\n\t"
			     "stxa %1, [%4] %6\n\t"
			     "stxa %2, [%5] %6\n\t"
			     "membar #Sync\n\t"
			     : /* no outputs */
			     : "r" (data0), "r" (data1), "r" (data2),
			       "r" (0x40), "r" (0x50), "r" (0x60),
			       "i" (ASI_INTR_W));

	nack_busy_id = 0;
	busy_mask = 0;
	{
		int i;

		for_each_cpu_mask(i, mask) {
			u64 target = (i << 14) | 0x70;

			if (is_jbus) {
				busy_mask |= (0x1UL << (i * 2));
			} else {
				target |= (nack_busy_id << 24);
				busy_mask |= (0x1UL <<
					      (nack_busy_id * 2));
			}
			__asm__ __volatile__(
				"stxa %%g0, [%0] %1\n\t"
				"membar #Sync\n\t"
				: /* no outputs */
				: "r" (target), "i" (ASI_INTR_W));
			nack_busy_id++;
			if (nack_busy_id == 32) {
				need_more = 1;
				break;
			}
		}
	}

	/* Now, poll for completion. */
	{
		u64 dispatch_stat, nack_mask;
		long stuck;

		stuck = 100000 * nack_busy_id;
		nack_mask = busy_mask << 1;
		do {
			__asm__ __volatile__("ldxa [%%g0] %1, %0"
					     : "=r" (dispatch_stat)
					     : "i" (ASI_INTR_DISPATCH_STAT));
			if (!(dispatch_stat & (busy_mask | nack_mask))) {
				__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
						     : : "r" (pstate));
				if (unlikely(need_more)) {
					int i, cnt = 0;
					for_each_cpu_mask(i, mask) {
						cpu_clear(i, mask);
						cnt++;
						if (cnt == 32)
							break;
					}
					goto retry;
				}
				return;
			}
			if (!--stuck)
				break;
		} while (dispatch_stat & busy_mask);

		__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
				     : : "r" (pstate));

		if (dispatch_stat & busy_mask) {
			/* Busy bits will not clear, continue instead
			 * of freezing up on this cpu.
			 */
			printk("CPU[%d]: mondo stuckage result[%016lx]\n",
			       smp_processor_id(), dispatch_stat);
		} else {
			int i, this_busy_nack = 0;

			/* Delay some random time with interrupts enabled
			 * to prevent deadlock.
			 */
			udelay(2 * nack_busy_id);

			/* Clear out the mask bits for cpus which did not
			 * NACK us.
			 */
			for_each_cpu_mask(i, mask) {
				u64 check_mask;

				if (is_jbus)
					check_mask = (0x2UL << (2*i));
				else
					check_mask = (0x2UL <<
						      this_busy_nack);
				if ((dispatch_stat & check_mask) == 0)
					cpu_clear(i, mask);
				this_busy_nack += 2;
				if (this_busy_nack == 64)
					break;
			}

			goto retry;
		}
	}
}
/* Multi-cpu list version. */
static void hypervisor_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
{
	struct trap_per_cpu *tb;
	u16 *cpu_list;
	u64 *mondo;
	cpumask_t error_mask;
	unsigned long flags, status;
	int cnt, retries, this_cpu, prev_sent, i;

	if (cpus_empty(mask))
		return;

	/* We have to do this whole thing with interrupts fully disabled.
	 * Otherwise if we send an xcall from interrupt context it will
	 * corrupt both our mondo block and cpu list state.
	 *
	 * One consequence of this is that we cannot use timeout mechanisms
	 * that depend upon interrupts being delivered locally. So, for
	 * example, we cannot sample jiffies and expect it to advance.
	 *
	 * Fortunately, udelay() uses %stick/%tick so we can use that.
	 */
	local_irq_save(flags);

	this_cpu = smp_processor_id();
	tb = &trap_block[this_cpu];

	mondo = __va(tb->cpu_mondo_block_pa);
	mondo[0] = data0;
	mondo[1] = data1;
	mondo[2] = data2;
	wmb();

	cpu_list = __va(tb->cpu_list_pa);

	/* Setup the initial cpu list. */
	cnt = 0;
	for_each_cpu_mask(i, mask)
		cpu_list[cnt++] = i;

	cpus_clear(error_mask);
	retries = 0;
	prev_sent = 0;
	do {
		int forward_progress, n_sent;

		status = sun4v_cpu_mondo_send(cnt,
					      tb->cpu_list_pa,
					      tb->cpu_mondo_block_pa);

		/* HV_EOK means all cpus received the xcall, we're done. */
		if (likely(status == HV_EOK))
			break;

		/* First, see if we made any forward progress.
		 *
		 * The hypervisor indicates successful sends by setting
		 * cpu list entries to the value 0xffff.
		 */
		n_sent = 0;
		for (i = 0; i < cnt; i++) {
			if (likely(cpu_list[i] == 0xffff))
				n_sent++;
		}

		forward_progress = 0;
		if (n_sent > prev_sent)
			forward_progress = 1;

		prev_sent = n_sent;

		/* If we get a HV_ECPUERROR, then one or more of the cpus
		 * in the list are in error state. Use the cpu_state()
		 * hypervisor call to find out which cpus are in error state.
		 */
		if (unlikely(status == HV_ECPUERROR)) {
			for (i = 0; i < cnt; i++) {
				long err;
				u16 cpu;

				cpu = cpu_list[i];
				if (cpu == 0xffff)
					continue;

				err = sun4v_cpu_state(cpu);
				if (err >= 0 &&
				    err == HV_CPU_STATE_ERROR) {
					cpu_list[i] = 0xffff;
					cpu_set(cpu, error_mask);
				}
			}
		} else if (unlikely(status != HV_EWOULDBLOCK))
			goto fatal_mondo_error;

		/* Don't bother rewriting the CPU list, just leave the
		 * 0xffff and non-0xffff entries in there and the
		 * hypervisor will do the right thing.
		 *
		 * Only advance timeout state if we didn't make any
		 * forward progress.
		 */
		if (unlikely(!forward_progress)) {
			if (unlikely(++retries > 10000))
				goto fatal_mondo_timeout;

			/* Delay a little bit to let other cpus catch up
			 * on their cpu mondo queue work.
			 */
			udelay(2 * cnt);
		}
	} while (1);

	local_irq_restore(flags);

	if (unlikely(!cpus_empty(error_mask)))
		goto fatal_mondo_cpu_error;

	return;

fatal_mondo_cpu_error:
	printk(KERN_CRIT "CPU[%d]: SUN4V mondo cpu error, some target cpus "
	       "were in error state\n",
	       this_cpu);
	printk(KERN_CRIT "CPU[%d]: Error mask [ ", this_cpu);
	for_each_cpu_mask(i, error_mask)
		printk("%d ", i);
	printk("]\n");
	return;

fatal_mondo_timeout:
	local_irq_restore(flags);
	printk(KERN_CRIT "CPU[%d]: SUN4V mondo timeout, no forward "
	       " progress after %d retries.\n",
	       this_cpu, retries);
	goto dump_cpu_list_and_out;

fatal_mondo_error:
	local_irq_restore(flags);
	printk(KERN_CRIT "CPU[%d]: Unexpected SUN4V mondo error %lu\n",
	       this_cpu, status);
	printk(KERN_CRIT "CPU[%d]: Args were cnt(%d) cpulist_pa(%lx) "
	       "mondo_block_pa(%lx)\n",
	       this_cpu, cnt, tb->cpu_list_pa, tb->cpu_mondo_block_pa);

dump_cpu_list_and_out:
	printk(KERN_CRIT "CPU[%d]: CPU list [ ", this_cpu);
	for (i = 0; i < cnt; i++)
		printk("%u ", cpu_list[i]);
	printk("]\n");
}
/* Send cross call to all processors mentioned in MASK
 * except self.
 */
static void smp_cross_call_masked(unsigned long *func, u32 ctx, u64 data1, u64 data2, cpumask_t mask)
{
	u64 data0 = (((u64)ctx)<<32 | (((u64)func) & 0xffffffff));
	int this_cpu = get_cpu();

	cpus_and(mask, mask, cpu_online_map);
	cpu_clear(this_cpu, mask);

	if (tlb_type == spitfire)
		spitfire_xcall_deliver(data0, data1, data2, mask);
	else if (tlb_type == cheetah || tlb_type == cheetah_plus)
		cheetah_xcall_deliver(data0, data1, data2, mask);
	else
		hypervisor_xcall_deliver(data0, data1, data2, mask);
	/* NOTE: Caller runs local copy on master. */

	put_cpu();
}

extern unsigned long xcall_sync_tick;

static void smp_start_sync_tick_client(int cpu)
{
	cpumask_t mask = cpumask_of_cpu(cpu);

	smp_cross_call_masked(&xcall_sync_tick,
			      0, 0, 0, mask);
}

/* Send cross call to all processors except self. */
#define smp_cross_call(func, ctx, data1, data2) \
	smp_cross_call_masked(func, ctx, data1, data2, cpu_online_map)

struct call_data_struct {
	void (*func) (void *info);
	void *info;
	atomic_t finished;
	int wait;
};

static struct call_data_struct *call_data;

extern unsigned long xcall_call_function;

/**
 * smp_call_function(): Run a function on all other CPUs.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @nonatomic: currently unused.
 * @wait: If true, wait (atomically) until function has completed on other CPUs.
 *
 * Returns 0 on success, else a negative status code. Does not return until
 * remote CPUs are nearly ready to execute <<func>>, are executing it, or
 * have already executed it.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
static int smp_call_function_mask(void (*func)(void *info), void *info,
				  int nonatomic, int wait, cpumask_t mask)
{
	struct call_data_struct data;
	int cpus;

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	data.func = func;
	data.info = info;
	atomic_set(&data.finished, 0);
	data.wait = wait;

	spin_lock(&call_lock);

	cpu_clear(smp_processor_id(), mask);
	cpus = cpus_weight(mask);

	if (!cpus)
		goto out_unlock;

	call_data = &data;
	mb();

	smp_cross_call_masked(&xcall_call_function, 0, 0, 0, mask);

	/* Wait for response */
	while (atomic_read(&data.finished) != cpus)
		cpu_relax();

out_unlock:
	spin_unlock(&call_lock);

	return 0;
}

int smp_call_function(void (*func)(void *info), void *info,
		      int nonatomic, int wait)
{
	return smp_call_function_mask(func, info, nonatomic, wait,
				      cpu_online_map);
}
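/* Target-cpu handler for the function-call cross call: run the requested
 * function and bump the completion counter, before or after the call
 * depending on whether the initiator asked to wait for completion.
 */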
void smp_call_function_client(int irq, struct pt_regs *regs)
{
	void (*func) (void *info) = call_data->func;
	void *info = call_data->info;

	clear_softint(1 << irq);
	if (call_data->wait) {
		/* let initiator proceed only after completion */
		func(info);
		atomic_inc(&call_data->finished);
	} else {
		/* let initiator proceed after getting data */
		atomic_inc(&call_data->finished);
		func(info);
	}
}

static void tsb_sync(void *info)
{
	struct trap_per_cpu *tp = &trap_block[raw_smp_processor_id()];
	struct mm_struct *mm = info;

	/* It is not valid to test "current->active_mm == mm" here.
	 *
	 * The value of "current" is not changed atomically with
	 * switch_mm(). But that's OK, we just need to check the
	 * current cpu's trap block PGD physical address.
	 */
	if (tp->pgd_paddr == __pa(mm->pgd))
		tsb_context_switch(mm);
}

void smp_tsb_sync(struct mm_struct *mm)
{
	smp_call_function_mask(tsb_sync, mm, 0, 1, mm->cpu_vm_mask);
}

extern unsigned long xcall_flush_tlb_mm;
extern unsigned long xcall_flush_tlb_pending;
extern unsigned long xcall_flush_tlb_kernel_range;
extern unsigned long xcall_report_regs;
#ifdef CONFIG_MAGIC_SYSRQ
extern unsigned long xcall_fetch_glob_regs;
#endif
extern unsigned long xcall_receive_signal;
extern unsigned long xcall_new_mmu_context_version;
#ifdef CONFIG_KGDB
extern unsigned long xcall_kgdb_capture;
#endif

#ifdef DCACHE_ALIASING_POSSIBLE
extern unsigned long xcall_flush_dcache_page_cheetah;
#endif
extern unsigned long xcall_flush_dcache_page_spitfire;

#ifdef CONFIG_DEBUG_DCFLUSH
extern atomic_t dcpage_flushes;
extern atomic_t dcpage_flushes_xcall;
#endif

static inline void __local_flush_dcache_page(struct page *page)
{
#ifdef DCACHE_ALIASING_POSSIBLE
	__flush_dcache_page(page_address(page),
			    ((tlb_type == spitfire) &&
			     page_mapping(page) != NULL));
#else
	if (page_mapping(page) != NULL &&
	    tlb_type == spitfire)
		__flush_icache_page(__pa(page_address(page)));
#endif
}
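/* Flush the D-cache for @page on the cpu named by @cpu: do it locally
 * when @cpu is the calling cpu, otherwise cross-call the target.  Nothing
 * to do when tlb_type == hypervisor.
 */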
void smp_flush_dcache_page_impl(struct page *page, int cpu)
{
	cpumask_t mask = cpumask_of_cpu(cpu);
	int this_cpu;

	if (tlb_type == hypervisor)
		return;

#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes);
#endif

	this_cpu = get_cpu();

	if (cpu == this_cpu) {
		__local_flush_dcache_page(page);
	} else if (cpu_online(cpu)) {
		void *pg_addr = page_address(page);
		u64 data0;

		if (tlb_type == spitfire) {
			data0 =
				((u64)&xcall_flush_dcache_page_spitfire);
			if (page_mapping(page) != NULL)
				data0 |= ((u64)1 << 32);
			spitfire_xcall_deliver(data0,
					       __pa(pg_addr),
					       (u64) pg_addr,
					       mask);
		} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
#ifdef DCACHE_ALIASING_POSSIBLE
			data0 =
				((u64)&xcall_flush_dcache_page_cheetah);
			cheetah_xcall_deliver(data0,
					      __pa(pg_addr),
					      0, mask);
#endif
		}
#ifdef CONFIG_DEBUG_DCFLUSH
		atomic_inc(&dcpage_flushes_xcall);
#endif
	}

	put_cpu();
}

void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
{
	void *pg_addr = page_address(page);
	cpumask_t mask = cpu_online_map;
	u64 data0;
	int this_cpu;

	if (tlb_type == hypervisor)
		return;

	this_cpu = get_cpu();

	cpu_clear(this_cpu, mask);

#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes);
#endif
	if (cpus_empty(mask))
		goto flush_self;
	if (tlb_type == spitfire) {
		data0 = ((u64)&xcall_flush_dcache_page_spitfire);
		if (page_mapping(page) != NULL)
			data0 |= ((u64)1 << 32);
		spitfire_xcall_deliver(data0,
				       __pa(pg_addr),
				       (u64) pg_addr,
				       mask);
	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
#ifdef DCACHE_ALIASING_POSSIBLE
		data0 = ((u64)&xcall_flush_dcache_page_cheetah);
		cheetah_xcall_deliver(data0,
				      __pa(pg_addr),
				      0, mask);
#endif
	}
#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes_xcall);
#endif
flush_self:
	__local_flush_dcache_page(page);

	put_cpu();
}

static void __smp_receive_signal_mask(cpumask_t mask)
{
	smp_cross_call_masked(&xcall_receive_signal, 0, 0, 0, mask);
}

void smp_receive_signal(int cpu)
{
	cpumask_t mask = cpumask_of_cpu(cpu);

	if (cpu_online(cpu))
		__smp_receive_signal_mask(mask);
}

void smp_receive_signal_client(int irq, struct pt_regs *regs)
{
	clear_softint(1 << irq);
}

void smp_new_mmu_context_version_client(int irq, struct pt_regs *regs)
{
	struct mm_struct *mm;
	unsigned long flags;

	clear_softint(1 << irq);

	/* See if we need to allocate a new TLB context because
	 * the version of the one we are using is now out of date.
	 */
	mm = current->active_mm;
	if (unlikely(!mm || (mm == &init_mm)))
		return;

	spin_lock_irqsave(&mm->context.lock, flags);

	if (unlikely(!CTX_VALID(mm->context)))
		get_new_mmu_context(mm);

	spin_unlock_irqrestore(&mm->context.lock, flags);

	load_secondary_context(mm);
	__flush_tlb_mm(CTX_HWBITS(mm->context),
		       SECONDARY_CONTEXT);
}

void smp_new_mmu_context_version(void)
{
	smp_cross_call(&xcall_new_mmu_context_version, 0, 0, 0);
}

#ifdef CONFIG_KGDB
void kgdb_roundup_cpus(unsigned long flags)
{
	smp_cross_call(&xcall_kgdb_capture, 0, 0, 0);
}
#endif

void smp_report_regs(void)
{
	smp_cross_call(&xcall_report_regs, 0, 0, 0);
}

#ifdef CONFIG_MAGIC_SYSRQ
void smp_fetch_global_regs(void)
{
	smp_cross_call(&xcall_fetch_glob_regs, 0, 0, 0);
}
#endif

/* We know that the window frames of the user have been flushed
 * to the stack before we get here because all callers of us
 * are flush_tlb_*() routines, and these run after flush_cache_*()
 * which performs the flushw.
 *
 * The SMP TLB coherency scheme we use works as follows:
 *
 * 1) mm->cpu_vm_mask is a bit mask of which cpus an address
 *    space has (potentially) executed on, this is the heuristic
 *    we use to avoid doing cross calls.
 *
 *    Also, for flushing from kswapd and also for clones, we
 *    use cpu_vm_mask as the list of cpus to make run the TLB.
 *
 * 2) TLB context numbers are shared globally across all processors
 *    in the system, this allows us to play several games to avoid
 *    cross calls.
 *
 *    One invariant is that when a cpu switches to a process, and
 *    that process's tsk->active_mm->cpu_vm_mask does not have the
 *    current cpu's bit set, that tlb context is flushed locally.
 *
 *    If the address space is non-shared (ie. mm->count == 1) we avoid
 *    cross calls when we want to flush the currently running process's
 *    tlb state. This is done by clearing all cpu bits except the current
 *    processor's in current->active_mm->cpu_vm_mask and performing the
 *    flush locally only. This will force any subsequent cpus which run
 *    this task to flush the context from the local tlb if the process
 *    migrates to another cpu (again).
 *
 * 3) For shared address spaces (threads) and swapping we bite the
 *    bullet for most cases and perform the cross call (but only to
 *    the cpus listed in cpu_vm_mask).
 *
 *    The performance gain from "optimizing" away the cross call for threads is
 *    questionable (in theory the big win for threads is the massive sharing of
 *    address space state across processors).
 */
/* This currently is only used by the hugetlb arch pre-fault
 * hook on UltraSPARC-III+ and later when changing the pagesize
 * bits of the context register for an address space.
 */
void smp_flush_tlb_mm(struct mm_struct *mm)
{
	u32 ctx = CTX_HWBITS(mm->context);
	int cpu = get_cpu();

	if (atomic_read(&mm->mm_users) == 1) {
		mm->cpu_vm_mask = cpumask_of_cpu(cpu);
		goto local_flush_and_out;
	}

	smp_cross_call_masked(&xcall_flush_tlb_mm,
			      ctx, 0, 0,
			      mm->cpu_vm_mask);

local_flush_and_out:
	__flush_tlb_mm(ctx, SECONDARY_CONTEXT);

	put_cpu();
}

void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long *vaddrs)
{
	u32 ctx = CTX_HWBITS(mm->context);
	int cpu = get_cpu();

	if (mm == current->active_mm && atomic_read(&mm->mm_users) == 1)
		mm->cpu_vm_mask = cpumask_of_cpu(cpu);
	else
		smp_cross_call_masked(&xcall_flush_tlb_pending,
				      ctx, nr, (unsigned long) vaddrs,
				      mm->cpu_vm_mask);

	__flush_tlb_pending(ctx, nr, vaddrs);

	put_cpu();
}

void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	start &= PAGE_MASK;
	end = PAGE_ALIGN(end);
	if (start != end) {
		smp_cross_call(&xcall_flush_tlb_kernel_range,
			       0, start, end);

		__flush_tlb_kernel_range(start, end);
	}
}

/* CPU capture. */
/* #define CAPTURE_DEBUG */
extern unsigned long xcall_capture;

static atomic_t smp_capture_depth = ATOMIC_INIT(0);
static atomic_t smp_capture_registry = ATOMIC_INIT(0);
static unsigned long penguins_are_doing_time;

void smp_capture(void)
{
	int result = atomic_add_ret(1, &smp_capture_depth);

	if (result == 1) {
		int ncpus = num_online_cpus();

#ifdef CAPTURE_DEBUG
		printk("CPU[%d]: Sending penguins to jail...",
		       smp_processor_id());
#endif
		penguins_are_doing_time = 1;
		membar_storestore_loadstore();
		atomic_inc(&smp_capture_registry);
		smp_cross_call(&xcall_capture, 0, 0, 0);
		while (atomic_read(&smp_capture_registry) != ncpus)
			rmb();
#ifdef CAPTURE_DEBUG
		printk("done\n");
#endif
	}
}

void smp_release(void)
{
	if (atomic_dec_and_test(&smp_capture_depth)) {
#ifdef CAPTURE_DEBUG
		printk("CPU[%d]: Giving pardon to "
		       "imprisoned penguins\n",
		       smp_processor_id());
#endif
		penguins_are_doing_time = 0;
		membar_storeload_storestore();
		atomic_dec(&smp_capture_registry);
	}
}

/* Imprisoned penguins run with %pil == 15, but PSTATE_IE set, so they
 * can service tlb flush xcalls...
 */
extern void prom_world(int);

void smp_penguin_jailcell(int irq, struct pt_regs *regs)
{
	clear_softint(1 << irq);

	preempt_disable();

	__asm__ __volatile__("flushw");
	prom_world(1);
	atomic_inc(&smp_capture_registry);
	membar_storeload_storestore();
	while (penguins_are_doing_time)
		rmb();
	atomic_dec(&smp_capture_registry);
	prom_world(0);

	preempt_enable();
}

/* /proc/profile writes can call this, don't __init it please. */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
}

void __devinit smp_prepare_boot_cpu(void)
{
}
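/* Populate cpu_core_map[] and the per-cpu cpu_sibling_map from the
 * core_id and proc_id values recorded for each present cpu; cpus with
 * an unknown core_id (0) or proc_id (-1) are grouped only with themselves.
 */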
void __devinit smp_fill_in_sib_core_maps(void)
{
	unsigned int i;

	for_each_present_cpu(i) {
		unsigned int j;

		cpus_clear(cpu_core_map[i]);
		if (cpu_data(i).core_id == 0) {
			cpu_set(i, cpu_core_map[i]);
			continue;
		}

		for_each_present_cpu(j) {
			if (cpu_data(i).core_id ==
			    cpu_data(j).core_id)
				cpu_set(j, cpu_core_map[i]);
		}
	}

	for_each_present_cpu(i) {
		unsigned int j;

		cpus_clear(per_cpu(cpu_sibling_map, i));
		if (cpu_data(i).proc_id == -1) {
			cpu_set(i, per_cpu(cpu_sibling_map, i));
			continue;
		}

		for_each_present_cpu(j) {
			if (cpu_data(i).proc_id ==
			    cpu_data(j).proc_id)
				cpu_set(j, per_cpu(cpu_sibling_map, i));
		}
	}
}

int __cpuinit __cpu_up(unsigned int cpu)
{
	int ret = smp_boot_one_cpu(cpu);

	if (!ret) {
		cpu_set(cpu, smp_commenced_mask);
		while (!cpu_isset(cpu, cpu_online_map))
			mb();
		if (!cpu_isset(cpu, cpu_online_map)) {
			ret = -ENODEV;
		} else {
			/* On SUN4V, writes to %tick and %stick are
			 * not allowed.
			 */
			if (tlb_type != hypervisor)
				smp_synchronize_one_tick(cpu);
		}
	}
	return ret;
}

#ifdef CONFIG_HOTPLUG_CPU
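/* Park an offlined cpu: on sun4v first unconfigure its mondo and error
 * queues, then drop out of the commenced mask and spin forever with
 * interrupts disabled.
 */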
void cpu_play_dead(void)
{
	int cpu = smp_processor_id();
	unsigned long pstate;

	idle_task_exit();

	if (tlb_type == hypervisor) {
		struct trap_per_cpu *tb = &trap_block[cpu];

		sun4v_cpu_qconf(HV_CPU_QUEUE_CPU_MONDO,
				tb->cpu_mondo_pa, 0);
		sun4v_cpu_qconf(HV_CPU_QUEUE_DEVICE_MONDO,
				tb->dev_mondo_pa, 0);
		sun4v_cpu_qconf(HV_CPU_QUEUE_RES_ERROR,
				tb->resum_mondo_pa, 0);
		sun4v_cpu_qconf(HV_CPU_QUEUE_NONRES_ERROR,
				tb->nonresum_mondo_pa, 0);
	}

	cpu_clear(cpu, smp_commenced_mask);
	membar_safe("#Sync");

	local_irq_disable();

	__asm__ __volatile__(
		"rdpr %%pstate, %0\n\t"
		"wrpr %0, %1, %%pstate"
		: "=r" (pstate)
		: "i" (PSTATE_IE));

	while (1)
		barrier();
}

int __cpu_disable(void)
{
	int cpu = smp_processor_id();
	cpuinfo_sparc *c;
	int i;

	for_each_cpu_mask(i, cpu_core_map[cpu])
		cpu_clear(cpu, cpu_core_map[i]);
	cpus_clear(cpu_core_map[cpu]);

	for_each_cpu_mask(i, per_cpu(cpu_sibling_map, cpu))
		cpu_clear(cpu, per_cpu(cpu_sibling_map, i));
	cpus_clear(per_cpu(cpu_sibling_map, cpu));

	c = &cpu_data(cpu);

	c->core_id = 0;
	c->proc_id = -1;

	spin_lock(&call_lock);
	cpu_clear(cpu, cpu_online_map);
	spin_unlock(&call_lock);

	smp_wmb();

	/* Make sure no interrupts point to this cpu. */
	fixup_irqs();

	local_irq_enable();
	mdelay(1);
	local_irq_disable();

	return 0;
}

void __cpu_die(unsigned int cpu)
{
	int i;

	for (i = 0; i < 100; i++) {
		smp_rmb();
		if (!cpu_isset(cpu, smp_commenced_mask))
			break;
		msleep(100);
	}
	if (cpu_isset(cpu, smp_commenced_mask)) {
		printk(KERN_ERR "CPU %u didn't die...\n", cpu);
	} else {
#if defined(CONFIG_SUN_LDOMS)
		unsigned long hv_err;
		int limit = 100;

		do {
			hv_err = sun4v_cpu_stop(cpu);
			if (hv_err == HV_EOK) {
				cpu_clear(cpu, cpu_present_map);
				break;
			}
		} while (--limit > 0);
		if (limit <= 0) {
			printk(KERN_ERR "sun4v_cpu_stop() fails err=%lu\n",
			       hv_err);
		}
#endif
	}
}
#endif

void __init smp_cpus_done(unsigned int max_cpus)
{
}

void smp_send_reschedule(int cpu)
{
	smp_receive_signal(cpu);
}

/* This is a nop because we capture all other cpus
 * anyways when making the PROM active.
 */
void smp_send_stop(void)
{
}

unsigned long __per_cpu_base __read_mostly;
unsigned long __per_cpu_shift __read_mostly;

EXPORT_SYMBOL(__per_cpu_base);
EXPORT_SYMBOL(__per_cpu_shift);
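/* Allocate the per-cpu areas: size each cpu's copy as a power of two at
 * least PERCPU_ENOUGH_ROOM (tracked via __per_cpu_shift), carve NR_CPUS
 * copies out of the lmb allocator, copy the initial per-cpu section into
 * each, and set the boot cpu's per-cpu offset.
 */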
void __init real_setup_per_cpu_areas(void)
{
	unsigned long paddr, goal, size, i;
	char *ptr;

	/* Copy section for each CPU (we discard the original) */
	goal = PERCPU_ENOUGH_ROOM;

	__per_cpu_shift = PAGE_SHIFT;
	for (size = PAGE_SIZE; size < goal; size <<= 1UL)
		__per_cpu_shift++;

	paddr = lmb_alloc(size * NR_CPUS, PAGE_SIZE);
	if (!paddr) {
		prom_printf("Cannot allocate per-cpu memory.\n");
		prom_halt();
	}

	ptr = __va(paddr);
	__per_cpu_base = ptr - __per_cpu_start;

	for (i = 0; i < NR_CPUS; i++, ptr += size)
		memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);

	/* Setup %g5 for the boot cpu. */
	__local_per_cpu_offset = __per_cpu_offset(smp_processor_id());
}