smp_64.c

  1. /* smp.c: Sparc64 SMP support.
  2. *
  3. * Copyright (C) 1997, 2007, 2008 David S. Miller (davem@davemloft.net)
  4. */
  5. #include <linux/module.h>
  6. #include <linux/kernel.h>
  7. #include <linux/sched.h>
  8. #include <linux/mm.h>
  9. #include <linux/pagemap.h>
  10. #include <linux/threads.h>
  11. #include <linux/smp.h>
  12. #include <linux/interrupt.h>
  13. #include <linux/kernel_stat.h>
  14. #include <linux/delay.h>
  15. #include <linux/init.h>
  16. #include <linux/spinlock.h>
  17. #include <linux/fs.h>
  18. #include <linux/seq_file.h>
  19. #include <linux/cache.h>
  20. #include <linux/jiffies.h>
  21. #include <linux/profile.h>
  22. #include <linux/lmb.h>
  23. #include <linux/cpu.h>
  24. #include <asm/head.h>
  25. #include <asm/ptrace.h>
  26. #include <asm/atomic.h>
  27. #include <asm/tlbflush.h>
  28. #include <asm/mmu_context.h>
  29. #include <asm/cpudata.h>
  30. #include <asm/hvtramp.h>
  31. #include <asm/io.h>
  32. #include <asm/timer.h>
  33. #include <asm/irq.h>
  34. #include <asm/irq_regs.h>
  35. #include <asm/page.h>
  36. #include <asm/pgtable.h>
  37. #include <asm/oplib.h>
  38. #include <asm/uaccess.h>
  39. #include <asm/starfire.h>
  40. #include <asm/tlb.h>
  41. #include <asm/sections.h>
  42. #include <asm/prom.h>
  43. #include <asm/mdesc.h>
  44. #include <asm/ldc.h>
  45. #include <asm/hypervisor.h>
  46. int sparc64_multi_core __read_mostly;
  47. DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE;
  48. cpumask_t cpu_core_map[NR_CPUS] __read_mostly =
  49. { [0 ... NR_CPUS-1] = CPU_MASK_NONE };
  50. EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
  51. EXPORT_SYMBOL(cpu_core_map);
  52. static cpumask_t smp_commenced_mask;
  53. void smp_info(struct seq_file *m)
  54. {
  55. int i;
  56. seq_printf(m, "State:\n");
  57. for_each_online_cpu(i)
  58. seq_printf(m, "CPU%d:\t\tonline\n", i);
  59. }
  60. void smp_bogo(struct seq_file *m)
  61. {
  62. int i;
  63. for_each_online_cpu(i)
  64. seq_printf(m,
  65. "Cpu%dClkTck\t: %016lx\n",
  66. i, cpu_data(i).clock_tick);
  67. }
  68. extern void setup_sparc64_timer(void);
  69. static volatile unsigned long callin_flag = 0;
  70. void __cpuinit smp_callin(void)
  71. {
  72. int cpuid = hard_smp_processor_id();
  73. __local_per_cpu_offset = __per_cpu_offset(cpuid);
  74. if (tlb_type == hypervisor)
  75. sun4v_ktsb_register();
  76. __flush_tlb_all();
  77. setup_sparc64_timer();
  78. if (cheetah_pcache_forced_on)
  79. cheetah_enable_pcache();
  80. local_irq_enable();
  81. callin_flag = 1;
  82. __asm__ __volatile__("membar #Sync\n\t"
  83. "flush %%g6" : : : "memory");
  84. /* Clear this or we will die instantly when we
  85. * schedule back to this idler...
  86. */
  87. current_thread_info()->new_child = 0;
  88. /* Attach to the address space of init_task. */
  89. atomic_inc(&init_mm.mm_count);
  90. current->active_mm = &init_mm;
  91. /* inform the notifiers about the new cpu */
  92. notify_cpu_starting(cpuid);
  93. while (!cpu_isset(cpuid, smp_commenced_mask))
  94. rmb();
  95. ipi_call_lock();
  96. cpu_set(cpuid, cpu_online_map);
  97. ipi_call_unlock();
  98. /* idle thread is expected to have preempt disabled */
  99. preempt_disable();
  100. }
  101. void cpu_panic(void)
  102. {
  103. printk("CPU[%d]: Returns from cpu_idle!\n", smp_processor_id());
  104. panic("SMP bolixed\n");
  105. }
  106. /* This tick register synchronization scheme is taken entirely from
  107. * the ia64 port, see arch/ia64/kernel/smpboot.c for details and credit.
  108. *
  109. * The only change I've made is to rework it so that the master
  110. * initiates the synchronization instead of the slave. -DaveM
  111. */
  112. #define MASTER 0
  113. #define SLAVE (SMP_CACHE_BYTES/sizeof(unsigned long))
  114. #define NUM_ROUNDS 64 /* magic value */
  115. #define NUM_ITERS 5 /* likewise */
  116. static DEFINE_SPINLOCK(itc_sync_lock);
  117. static unsigned long go[SLAVE + 1];
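/* Note that SLAVE sits a full SMP_CACHE_BYTES past MASTER in go[], so
 * the master's handshake word and the slave's handshake word land in
 * separate cache lines and the two busy-waiting cpus do not bounce a
 * single line between them.
 */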
  118. #define DEBUG_TICK_SYNC 0
  119. static inline long get_delta (long *rt, long *master)
  120. {
  121. unsigned long best_t0 = 0, best_t1 = ~0UL, best_tm = 0;
  122. unsigned long tcenter, t0, t1, tm;
  123. unsigned long i;
  124. for (i = 0; i < NUM_ITERS; i++) {
  125. t0 = tick_ops->get_tick();
  126. go[MASTER] = 1;
  127. membar_safe("#StoreLoad");
  128. while (!(tm = go[SLAVE]))
  129. rmb();
  130. go[SLAVE] = 0;
  131. wmb();
  132. t1 = tick_ops->get_tick();
  133. if (t1 - t0 < best_t1 - best_t0)
  134. best_t0 = t0, best_t1 = t1, best_tm = tm;
  135. }
  136. *rt = best_t1 - best_t0;
  137. *master = best_tm - best_t0;
  138. /* average best_t0 and best_t1 without overflow: */
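/* For example, best_t0 = 7 and best_t1 = 9 give 7/2 + 9/2 = 3 + 4 = 7;
 * both halves dropped a remainder, so the check below adds 1 and yields
 * the true midpoint 8, without ever forming the possibly-overflowing
 * sum best_t0 + best_t1.
 */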
  139. tcenter = (best_t0/2 + best_t1/2);
  140. if (best_t0 % 2 + best_t1 % 2 == 2)
  141. tcenter++;
  142. return tcenter - best_tm;
  143. }
  144. void smp_synchronize_tick_client(void)
  145. {
  146. long i, delta, adj, adjust_latency = 0, done = 0;
  147. unsigned long flags, rt, master_time_stamp, bound;
  148. #if DEBUG_TICK_SYNC
  149. struct {
  150. long rt; /* roundtrip time */
  151. long master; /* master's timestamp */
  152. long diff; /* difference between midpoint and master's timestamp */
  153. long lat; /* estimate of itc adjustment latency */
  154. } t[NUM_ROUNDS];
  155. #endif
  156. go[MASTER] = 1;
  157. while (go[MASTER])
  158. rmb();
  159. local_irq_save(flags);
  160. {
  161. for (i = 0; i < NUM_ROUNDS; i++) {
  162. delta = get_delta(&rt, &master_time_stamp);
  163. if (delta == 0) {
  164. done = 1; /* let's lock on to this... */
  165. bound = rt;
  166. }
  167. if (!done) {
  168. if (i > 0) {
  169. adjust_latency += -delta;
  170. adj = -delta + adjust_latency/4;
  171. } else
  172. adj = -delta;
  173. tick_ops->add_tick(adj);
  174. }
  175. #if DEBUG_TICK_SYNC
  176. t[i].rt = rt;
  177. t[i].master = master_time_stamp;
  178. t[i].diff = delta;
  179. t[i].lat = adjust_latency/4;
  180. #endif
  181. }
  182. }
  183. local_irq_restore(flags);
  184. #if DEBUG_TICK_SYNC
  185. for (i = 0; i < NUM_ROUNDS; i++)
  186. printk("rt=%5ld master=%5ld diff=%5ld adjlat=%5ld\n",
  187. t[i].rt, t[i].master, t[i].diff, t[i].lat);
  188. #endif
  189. printk(KERN_INFO "CPU %d: synchronized TICK with master CPU "
  190. "(last diff %ld cycles, maxerr %lu cycles)\n",
  191. smp_processor_id(), delta, rt);
  192. }
  193. static void smp_start_sync_tick_client(int cpu);
  194. static void smp_synchronize_one_tick(int cpu)
  195. {
  196. unsigned long flags, i;
  197. go[MASTER] = 0;
  198. smp_start_sync_tick_client(cpu);
  199. /* wait for client to be ready */
  200. while (!go[MASTER])
  201. rmb();
  202. /* now let the client proceed into his loop */
  203. go[MASTER] = 0;
  204. membar_safe("#StoreLoad");
  205. spin_lock_irqsave(&itc_sync_lock, flags);
  206. {
  207. for (i = 0; i < NUM_ROUNDS*NUM_ITERS; i++) {
  208. while (!go[MASTER])
  209. rmb();
  210. go[MASTER] = 0;
  211. wmb();
  212. go[SLAVE] = tick_ops->get_tick();
  213. membar_safe("#StoreLoad");
  214. }
  215. }
  216. spin_unlock_irqrestore(&itc_sync_lock, flags);
  217. }
  218. #if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU)
  219. /* XXX Put this in some common place. XXX */
  220. static unsigned long kimage_addr_to_ra(void *p)
  221. {
  222. unsigned long val = (unsigned long) p;
  223. return kern_base + (val - KERNBASE);
  224. }
  225. static void __cpuinit ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread_reg)
  226. {
  227. extern unsigned long sparc64_ttable_tl0;
  228. extern unsigned long kern_locked_tte_data;
  229. struct hvtramp_descr *hdesc;
  230. unsigned long trampoline_ra;
  231. struct trap_per_cpu *tb;
  232. u64 tte_vaddr, tte_data;
  233. unsigned long hv_err;
  234. int i;
  235. hdesc = kzalloc(sizeof(*hdesc) +
  236. (sizeof(struct hvtramp_mapping) *
  237. num_kernel_image_mappings - 1),
  238. GFP_KERNEL);
  239. if (!hdesc) {
  240. printk(KERN_ERR "ldom_startcpu_cpuid: Cannot allocate "
  241. "hvtramp_descr.\n");
  242. return;
  243. }
  244. hdesc->cpu = cpu;
  245. hdesc->num_mappings = num_kernel_image_mappings;
  246. tb = &trap_block[cpu];
  247. tb->hdesc = hdesc;
  248. hdesc->fault_info_va = (unsigned long) &tb->fault_info;
  249. hdesc->fault_info_pa = kimage_addr_to_ra(&tb->fault_info);
  250. hdesc->thread_reg = thread_reg;
  251. tte_vaddr = (unsigned long) KERNBASE;
  252. tte_data = kern_locked_tte_data;
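/* Each mapping below covers one 4MB (0x400000 byte) chunk of the kernel
 * image; the virtual address and the TTE data (whose low bits hold the
 * physical address) are both advanced by that amount, matching the
 * locked 4MB TTEs the kernel image is normally mapped with.
 */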
  253. for (i = 0; i < hdesc->num_mappings; i++) {
  254. hdesc->maps[i].vaddr = tte_vaddr;
  255. hdesc->maps[i].tte = tte_data;
  256. tte_vaddr += 0x400000;
  257. tte_data += 0x400000;
  258. }
  259. trampoline_ra = kimage_addr_to_ra(hv_cpu_startup);
  260. hv_err = sun4v_cpu_start(cpu, trampoline_ra,
  261. kimage_addr_to_ra(&sparc64_ttable_tl0),
  262. __pa(hdesc));
  263. if (hv_err)
  264. printk(KERN_ERR "ldom_startcpu_cpuid: sun4v_cpu_start() "
  265. "gives error %lu\n", hv_err);
  266. }
  267. #endif
  268. extern unsigned long sparc64_cpu_startup;
  269. /* The OBP cpu startup callback truncates the 3rd arg cookie to
  270. * 32-bits (I think) so to be safe we have it read the pointer
  271. * contained here so we work on >4GB machines. -DaveM
  272. */
  273. static struct thread_info *cpu_new_thread = NULL;
  274. static int __cpuinit smp_boot_one_cpu(unsigned int cpu)
  275. {
  276. struct trap_per_cpu *tb = &trap_block[cpu];
  277. unsigned long entry =
  278. (unsigned long)(&sparc64_cpu_startup);
  279. unsigned long cookie =
  280. (unsigned long)(&cpu_new_thread);
  281. struct task_struct *p;
  282. int timeout, ret;
  283. p = fork_idle(cpu);
  284. if (IS_ERR(p))
  285. return PTR_ERR(p);
  286. callin_flag = 0;
  287. cpu_new_thread = task_thread_info(p);
  288. if (tlb_type == hypervisor) {
  289. #if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU)
  290. if (ldom_domaining_enabled)
  291. ldom_startcpu_cpuid(cpu,
  292. (unsigned long) cpu_new_thread);
  293. else
  294. #endif
  295. prom_startcpu_cpuid(cpu, entry, cookie);
  296. } else {
  297. struct device_node *dp = of_find_node_by_cpuid(cpu);
  298. prom_startcpu(dp->node, entry, cookie);
  299. }
  300. for (timeout = 0; timeout < 50000; timeout++) {
  301. if (callin_flag)
  302. break;
  303. udelay(100);
  304. }
  305. if (callin_flag) {
  306. ret = 0;
  307. } else {
  308. printk("Processor %d is stuck.\n", cpu);
  309. ret = -ENODEV;
  310. }
  311. cpu_new_thread = NULL;
  312. if (tb->hdesc) {
  313. kfree(tb->hdesc);
  314. tb->hdesc = NULL;
  315. }
  316. return ret;
  317. }
  318. static void spitfire_xcall_helper(u64 data0, u64 data1, u64 data2, u64 pstate, unsigned long cpu)
  319. {
  320. u64 result, target;
  321. int stuck, tmp;
  322. if (this_is_starfire) {
  323. /* map to real upaid */
  324. cpu = (((cpu & 0x3c) << 1) |
  325. ((cpu & 0x40) >> 4) |
  326. (cpu & 0x3));
  327. }
  328. target = (cpu << 14) | 0x70;
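/* The three interrupt vector data words are written to ASI_INTR_W
 * offsets 0x40/0x50/0x60 in the asm below; the store to "target"
 * ((cpu << 14) | 0x70, with the destination cpu encoded in the upper
 * address bits) is what actually fires the dispatch.
 */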
  329. again:
  330. /* Ok, this is the real Spitfire Errata #54.
  331. * One must read back from a UDB internal register
  332. * after writes to the UDB interrupt dispatch, but
  333. * before the membar Sync for that write.
  334. * So we use the high UDB control register (ASI 0x7f,
  335. * ADDR 0x20) for the dummy read. -DaveM
  336. */
  337. tmp = 0x40;
  338. __asm__ __volatile__(
  339. "wrpr %1, %2, %%pstate\n\t"
  340. "stxa %4, [%0] %3\n\t"
  341. "stxa %5, [%0+%8] %3\n\t"
  342. "add %0, %8, %0\n\t"
  343. "stxa %6, [%0+%8] %3\n\t"
  344. "membar #Sync\n\t"
  345. "stxa %%g0, [%7] %3\n\t"
  346. "membar #Sync\n\t"
  347. "mov 0x20, %%g1\n\t"
  348. "ldxa [%%g1] 0x7f, %%g0\n\t"
  349. "membar #Sync"
  350. : "=r" (tmp)
  351. : "r" (pstate), "i" (PSTATE_IE), "i" (ASI_INTR_W),
  352. "r" (data0), "r" (data1), "r" (data2), "r" (target),
  353. "r" (0x10), "0" (tmp)
  354. : "g1");
  355. /* NOTE: PSTATE_IE is still clear. */
  356. stuck = 100000;
  357. do {
  358. __asm__ __volatile__("ldxa [%%g0] %1, %0"
  359. : "=r" (result)
  360. : "i" (ASI_INTR_DISPATCH_STAT));
  361. if (result == 0) {
  362. __asm__ __volatile__("wrpr %0, 0x0, %%pstate"
  363. : : "r" (pstate));
  364. return;
  365. }
  366. stuck -= 1;
  367. if (stuck == 0)
  368. break;
  369. } while (result & 0x1);
  370. __asm__ __volatile__("wrpr %0, 0x0, %%pstate"
  371. : : "r" (pstate));
  372. if (stuck == 0) {
  373. printk("CPU[%d]: mondo stuckage result[%016llx]\n",
  374. smp_processor_id(), result);
  375. } else {
  376. udelay(2);
  377. goto again;
  378. }
  379. }
  380. static void spitfire_xcall_deliver(struct trap_per_cpu *tb, int cnt)
  381. {
  382. u64 *mondo, data0, data1, data2;
  383. u16 *cpu_list;
  384. u64 pstate;
  385. int i;
  386. __asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
  387. cpu_list = __va(tb->cpu_list_pa);
  388. mondo = __va(tb->cpu_mondo_block_pa);
  389. data0 = mondo[0];
  390. data1 = mondo[1];
  391. data2 = mondo[2];
  392. for (i = 0; i < cnt; i++)
  393. spitfire_xcall_helper(data0, data1, data2, pstate, cpu_list[i]);
  394. }
  395. /* Cheetah now allows us to send the whole 64 bytes of data in the interrupt
  396. * packet, but we have no use for that. However we do take advantage of
  397. * the new pipelining feature (ie. dispatch to multiple cpus simultaneously).
  398. */
  399. static void cheetah_xcall_deliver(struct trap_per_cpu *tb, int cnt)
  400. {
  401. int nack_busy_id, is_jbus, need_more;
  402. u64 *mondo, pstate, ver, busy_mask;
  403. u16 *cpu_list;
  404. cpu_list = __va(tb->cpu_list_pa);
  405. mondo = __va(tb->cpu_mondo_block_pa);
  406. /* Unfortunately, someone at Sun had the brilliant idea to make the
  407. * busy/nack fields hard-coded by ITID number for this Ultra-III
  408. * derivative processor.
  409. */
  410. __asm__ ("rdpr %%ver, %0" : "=r" (ver));
  411. is_jbus = ((ver >> 32) == __JALAPENO_ID ||
  412. (ver >> 32) == __SERRANO_ID);
  413. __asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
  414. retry:
  415. need_more = 0;
  416. __asm__ __volatile__("wrpr %0, %1, %%pstate\n\t"
  417. : : "r" (pstate), "i" (PSTATE_IE));
  418. /* Setup the dispatch data registers. */
  419. __asm__ __volatile__("stxa %0, [%3] %6\n\t"
  420. "stxa %1, [%4] %6\n\t"
  421. "stxa %2, [%5] %6\n\t"
  422. "membar #Sync\n\t"
  423. : /* no outputs */
  424. : "r" (mondo[0]), "r" (mondo[1]), "r" (mondo[2]),
  425. "r" (0x40), "r" (0x50), "r" (0x60),
  426. "i" (ASI_INTR_W));
  427. nack_busy_id = 0;
  428. busy_mask = 0;
  429. {
  430. int i;
  431. for (i = 0; i < cnt; i++) {
  432. u64 target, nr;
  433. nr = cpu_list[i];
  434. if (nr == 0xffff)
  435. continue;
  436. target = (nr << 14) | 0x70;
  437. if (is_jbus) {
  438. busy_mask |= (0x1UL << (nr * 2));
  439. } else {
  440. target |= (nack_busy_id << 24);
  441. busy_mask |= (0x1UL <<
  442. (nack_busy_id * 2));
  443. }
  444. __asm__ __volatile__(
  445. "stxa %%g0, [%0] %1\n\t"
  446. "membar #Sync\n\t"
  447. : /* no outputs */
  448. : "r" (target), "i" (ASI_INTR_W));
  449. nack_busy_id++;
  450. if (nack_busy_id == 32) {
  451. need_more = 1;
  452. break;
  453. }
  454. }
  455. }
  456. /* Now, poll for completion. */
  457. {
  458. u64 dispatch_stat, nack_mask;
  459. long stuck;
  460. stuck = 100000 * nack_busy_id;
  461. nack_mask = busy_mask << 1;
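/* The dispatch status register keeps a pair of bits per dispatched
 * target: the busy bit at 2*id and the nack bit just above it, so
 * nack_mask is simply busy_mask shifted left by one.
 */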
  462. do {
  463. __asm__ __volatile__("ldxa [%%g0] %1, %0"
  464. : "=r" (dispatch_stat)
  465. : "i" (ASI_INTR_DISPATCH_STAT));
  466. if (!(dispatch_stat & (busy_mask | nack_mask))) {
  467. __asm__ __volatile__("wrpr %0, 0x0, %%pstate"
  468. : : "r" (pstate));
  469. if (unlikely(need_more)) {
  470. int i, this_cnt = 0;
  471. for (i = 0; i < cnt; i++) {
  472. if (cpu_list[i] == 0xffff)
  473. continue;
  474. cpu_list[i] = 0xffff;
  475. this_cnt++;
  476. if (this_cnt == 32)
  477. break;
  478. }
  479. goto retry;
  480. }
  481. return;
  482. }
  483. if (!--stuck)
  484. break;
  485. } while (dispatch_stat & busy_mask);
  486. __asm__ __volatile__("wrpr %0, 0x0, %%pstate"
  487. : : "r" (pstate));
  488. if (dispatch_stat & busy_mask) {
  489. /* Busy bits will not clear, continue instead
  490. * of freezing up on this cpu.
  491. */
  492. printk("CPU[%d]: mondo stuckage result[%016llx]\n",
  493. smp_processor_id(), dispatch_stat);
  494. } else {
  495. int i, this_busy_nack = 0;
  496. /* Delay some random time with interrupts enabled
  497. * to prevent deadlock.
  498. */
  499. udelay(2 * nack_busy_id);
  500. /* Clear out the mask bits for cpus which did not
  501. * NACK us.
  502. */
  503. for (i = 0; i < cnt; i++) {
  504. u64 check_mask, nr;
  505. nr = cpu_list[i];
  506. if (nr == 0xffff)
  507. continue;
  508. if (is_jbus)
  509. check_mask = (0x2UL << (2*nr));
  510. else
  511. check_mask = (0x2UL <<
  512. this_busy_nack);
  513. if ((dispatch_stat & check_mask) == 0)
  514. cpu_list[i] = 0xffff;
  515. this_busy_nack += 2;
  516. if (this_busy_nack == 64)
  517. break;
  518. }
  519. goto retry;
  520. }
  521. }
  522. }
  523. /* Multi-cpu list version. */
  524. static void hypervisor_xcall_deliver(struct trap_per_cpu *tb, int cnt)
  525. {
  526. int retries, this_cpu, prev_sent, i, saw_cpu_error;
  527. unsigned long status;
  528. u16 *cpu_list;
  529. this_cpu = smp_processor_id();
  530. cpu_list = __va(tb->cpu_list_pa);
  531. saw_cpu_error = 0;
  532. retries = 0;
  533. prev_sent = 0;
  534. do {
  535. int forward_progress, n_sent;
  536. status = sun4v_cpu_mondo_send(cnt,
  537. tb->cpu_list_pa,
  538. tb->cpu_mondo_block_pa);
  539. /* HV_EOK means all cpus received the xcall, we're done. */
  540. if (likely(status == HV_EOK))
  541. break;
  542. /* First, see if we made any forward progress.
  543. *
  544. * The hypervisor indicates successful sends by setting
  545. * cpu list entries to the value 0xffff.
  546. */
  547. n_sent = 0;
  548. for (i = 0; i < cnt; i++) {
  549. if (likely(cpu_list[i] == 0xffff))
  550. n_sent++;
  551. }
  552. forward_progress = 0;
  553. if (n_sent > prev_sent)
  554. forward_progress = 1;
  555. prev_sent = n_sent;
  556. /* If we get a HV_ECPUERROR, then one or more of the cpus
  557. * in the list are in error state. Use the cpu_state()
  558. * hypervisor call to find out which cpus are in error state.
  559. */
  560. if (unlikely(status == HV_ECPUERROR)) {
  561. for (i = 0; i < cnt; i++) {
  562. long err;
  563. u16 cpu;
  564. cpu = cpu_list[i];
  565. if (cpu == 0xffff)
  566. continue;
  567. err = sun4v_cpu_state(cpu);
  568. if (err == HV_CPU_STATE_ERROR) {
  569. saw_cpu_error = (cpu + 1);
  570. cpu_list[i] = 0xffff;
  571. }
  572. }
  573. } else if (unlikely(status != HV_EWOULDBLOCK))
  574. goto fatal_mondo_error;
  575. /* Don't bother rewriting the CPU list, just leave the
  576. * 0xffff and non-0xffff entries in there and the
  577. * hypervisor will do the right thing.
  578. *
  579. * Only advance timeout state if we didn't make any
  580. * forward progress.
  581. */
  582. if (unlikely(!forward_progress)) {
  583. if (unlikely(++retries > 10000))
  584. goto fatal_mondo_timeout;
  585. /* Delay a little bit to let other cpus catch up
  586. * on their cpu mondo queue work.
  587. */
  588. udelay(2 * cnt);
  589. }
  590. } while (1);
  591. if (unlikely(saw_cpu_error))
  592. goto fatal_mondo_cpu_error;
  593. return;
  594. fatal_mondo_cpu_error:
  595. printk(KERN_CRIT "CPU[%d]: SUN4V mondo cpu error, some target cpus "
  596. "(including %d) were in error state\n",
  597. this_cpu, saw_cpu_error - 1);
  598. return;
  599. fatal_mondo_timeout:
  600. printk(KERN_CRIT "CPU[%d]: SUN4V mondo timeout, no forward "
  601. " progress after %d retries.\n",
  602. this_cpu, retries);
  603. goto dump_cpu_list_and_out;
  604. fatal_mondo_error:
  605. printk(KERN_CRIT "CPU[%d]: Unexpected SUN4V mondo error %lu\n",
  606. this_cpu, status);
  607. printk(KERN_CRIT "CPU[%d]: Args were cnt(%d) cpulist_pa(%lx) "
  608. "mondo_block_pa(%lx)\n",
  609. this_cpu, cnt, tb->cpu_list_pa, tb->cpu_mondo_block_pa);
  610. dump_cpu_list_and_out:
  611. printk(KERN_CRIT "CPU[%d]: CPU list [ ", this_cpu);
  612. for (i = 0; i < cnt; i++)
  613. printk("%u ", cpu_list[i]);
  614. printk("]\n");
  615. }
  616. static void (*xcall_deliver_impl)(struct trap_per_cpu *, int);
  617. static void xcall_deliver(u64 data0, u64 data1, u64 data2, const cpumask_t *mask)
  618. {
  619. struct trap_per_cpu *tb;
  620. int this_cpu, i, cnt;
  621. unsigned long flags;
  622. u16 *cpu_list;
  623. u64 *mondo;
  624. /* We have to do this whole thing with interrupts fully disabled.
  625. * Otherwise if we send an xcall from interrupt context it will
  626. * corrupt both our mondo block and cpu list state.
  627. *
  628. * One consequence of this is that we cannot use timeout mechanisms
  629. * that depend upon interrupts being delivered locally. So, for
  630. * example, we cannot sample jiffies and expect it to advance.
  631. *
  632. * Fortunately, udelay() uses %stick/%tick so we can use that.
  633. */
  634. local_irq_save(flags);
  635. this_cpu = smp_processor_id();
  636. tb = &trap_block[this_cpu];
  637. mondo = __va(tb->cpu_mondo_block_pa);
  638. mondo[0] = data0;
  639. mondo[1] = data1;
  640. mondo[2] = data2;
  641. wmb();
  642. cpu_list = __va(tb->cpu_list_pa);
  643. /* Setup the initial cpu list. */
  644. cnt = 0;
  645. for_each_cpu(i, mask) {
  646. if (i == this_cpu || !cpu_online(i))
  647. continue;
  648. cpu_list[cnt++] = i;
  649. }
  650. if (cnt)
  651. xcall_deliver_impl(tb, cnt);
  652. local_irq_restore(flags);
  653. }
  654. /* Send cross call to all processors mentioned in MASK_P
  655. * except self. Really, there are only two cases currently,
  656. * "&cpu_online_map" and "&mm->cpu_vm_mask".
  657. */
  658. static void smp_cross_call_masked(unsigned long *func, u32 ctx, u64 data1, u64 data2, const cpumask_t *mask)
  659. {
  660. u64 data0 = (((u64)ctx)<<32 | (((u64)func) & 0xffffffff));
  661. xcall_deliver(data0, data1, data2, mask);
  662. }
  663. /* Send cross call to all processors except self. */
  664. static void smp_cross_call(unsigned long *func, u32 ctx, u64 data1, u64 data2)
  665. {
  666. smp_cross_call_masked(func, ctx, data1, data2, &cpu_online_map);
  667. }
  668. extern unsigned long xcall_sync_tick;
  669. static void smp_start_sync_tick_client(int cpu)
  670. {
  671. xcall_deliver((u64) &xcall_sync_tick, 0, 0,
  672. &cpumask_of_cpu(cpu));
  673. }
  674. extern unsigned long xcall_call_function;
  675. void arch_send_call_function_ipi_mask(const struct cpumask *mask)
  676. {
  677. xcall_deliver((u64) &xcall_call_function, 0, 0, mask);
  678. }
  679. extern unsigned long xcall_call_function_single;
  680. void arch_send_call_function_single_ipi(int cpu)
  681. {
  682. xcall_deliver((u64) &xcall_call_function_single, 0, 0,
  683. &cpumask_of_cpu(cpu));
  684. }
  685. void smp_call_function_client(int irq, struct pt_regs *regs)
  686. {
  687. clear_softint(1 << irq);
  688. generic_smp_call_function_interrupt();
  689. }
  690. void smp_call_function_single_client(int irq, struct pt_regs *regs)
  691. {
  692. clear_softint(1 << irq);
  693. generic_smp_call_function_single_interrupt();
  694. }
  695. static void tsb_sync(void *info)
  696. {
  697. struct trap_per_cpu *tp = &trap_block[raw_smp_processor_id()];
  698. struct mm_struct *mm = info;
  699. /* It is not valid to test "current->active_mm == mm" here.
  700. *
  701. * The value of "current" is not changed atomically with
  702. * switch_mm(). But that's OK, we just need to check the
  703. * current cpu's trap block PGD physical address.
  704. */
  705. if (tp->pgd_paddr == __pa(mm->pgd))
  706. tsb_context_switch(mm);
  707. }
  708. void smp_tsb_sync(struct mm_struct *mm)
  709. {
  710. smp_call_function_many(mm_cpumask(mm), tsb_sync, mm, 1);
  711. }
  712. extern unsigned long xcall_flush_tlb_mm;
  713. extern unsigned long xcall_flush_tlb_pending;
  714. extern unsigned long xcall_flush_tlb_kernel_range;
  715. extern unsigned long xcall_fetch_glob_regs;
  716. extern unsigned long xcall_receive_signal;
  717. extern unsigned long xcall_new_mmu_context_version;
  718. #ifdef CONFIG_KGDB
  719. extern unsigned long xcall_kgdb_capture;
  720. #endif
  721. #ifdef DCACHE_ALIASING_POSSIBLE
  722. extern unsigned long xcall_flush_dcache_page_cheetah;
  723. #endif
  724. extern unsigned long xcall_flush_dcache_page_spitfire;
  725. #ifdef CONFIG_DEBUG_DCFLUSH
  726. extern atomic_t dcpage_flushes;
  727. extern atomic_t dcpage_flushes_xcall;
  728. #endif
  729. static inline void __local_flush_dcache_page(struct page *page)
  730. {
  731. #ifdef DCACHE_ALIASING_POSSIBLE
  732. __flush_dcache_page(page_address(page),
  733. ((tlb_type == spitfire) &&
  734. page_mapping(page) != NULL));
  735. #else
  736. if (page_mapping(page) != NULL &&
  737. tlb_type == spitfire)
  738. __flush_icache_page(__pa(page_address(page)));
  739. #endif
  740. }
  741. void smp_flush_dcache_page_impl(struct page *page, int cpu)
  742. {
  743. int this_cpu;
  744. if (tlb_type == hypervisor)
  745. return;
  746. #ifdef CONFIG_DEBUG_DCFLUSH
  747. atomic_inc(&dcpage_flushes);
  748. #endif
  749. this_cpu = get_cpu();
  750. if (cpu == this_cpu) {
  751. __local_flush_dcache_page(page);
  752. } else if (cpu_online(cpu)) {
  753. void *pg_addr = page_address(page);
  754. u64 data0 = 0;
  755. if (tlb_type == spitfire) {
  756. data0 = ((u64)&xcall_flush_dcache_page_spitfire);
  757. if (page_mapping(page) != NULL)
  758. data0 |= ((u64)1 << 32);
  759. } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
  760. #ifdef DCACHE_ALIASING_POSSIBLE
  761. data0 = ((u64)&xcall_flush_dcache_page_cheetah);
  762. #endif
  763. }
  764. if (data0) {
  765. xcall_deliver(data0, __pa(pg_addr),
  766. (u64) pg_addr, &cpumask_of_cpu(cpu));
  767. #ifdef CONFIG_DEBUG_DCFLUSH
  768. atomic_inc(&dcpage_flushes_xcall);
  769. #endif
  770. }
  771. }
  772. put_cpu();
  773. }
  774. void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
  775. {
  776. void *pg_addr;
  777. int this_cpu;
  778. u64 data0;
  779. if (tlb_type == hypervisor)
  780. return;
  781. this_cpu = get_cpu();
  782. #ifdef CONFIG_DEBUG_DCFLUSH
  783. atomic_inc(&dcpage_flushes);
  784. #endif
  785. data0 = 0;
  786. pg_addr = page_address(page);
  787. if (tlb_type == spitfire) {
  788. data0 = ((u64)&xcall_flush_dcache_page_spitfire);
  789. if (page_mapping(page) != NULL)
  790. data0 |= ((u64)1 << 32);
  791. } else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
  792. #ifdef DCACHE_ALIASING_POSSIBLE
  793. data0 = ((u64)&xcall_flush_dcache_page_cheetah);
  794. #endif
  795. }
  796. if (data0) {
  797. xcall_deliver(data0, __pa(pg_addr),
  798. (u64) pg_addr, &cpu_online_map);
  799. #ifdef CONFIG_DEBUG_DCFLUSH
  800. atomic_inc(&dcpage_flushes_xcall);
  801. #endif
  802. }
  803. __local_flush_dcache_page(page);
  804. put_cpu();
  805. }
  806. void smp_new_mmu_context_version_client(int irq, struct pt_regs *regs)
  807. {
  808. struct mm_struct *mm;
  809. unsigned long flags;
  810. clear_softint(1 << irq);
  811. /* See if we need to allocate a new TLB context because
  812. * the version of the one we are using is now out of date.
  813. */
  814. mm = current->active_mm;
  815. if (unlikely(!mm || (mm == &init_mm)))
  816. return;
  817. spin_lock_irqsave(&mm->context.lock, flags);
  818. if (unlikely(!CTX_VALID(mm->context)))
  819. get_new_mmu_context(mm);
  820. spin_unlock_irqrestore(&mm->context.lock, flags);
  821. load_secondary_context(mm);
  822. __flush_tlb_mm(CTX_HWBITS(mm->context),
  823. SECONDARY_CONTEXT);
  824. }
  825. void smp_new_mmu_context_version(void)
  826. {
  827. smp_cross_call(&xcall_new_mmu_context_version, 0, 0, 0);
  828. }
  829. #ifdef CONFIG_KGDB
  830. void kgdb_roundup_cpus(unsigned long flags)
  831. {
  832. smp_cross_call(&xcall_kgdb_capture, 0, 0, 0);
  833. }
  834. #endif
  835. void smp_fetch_global_regs(void)
  836. {
  837. smp_cross_call(&xcall_fetch_glob_regs, 0, 0, 0);
  838. }
  839. /* We know that the window frames of the user have been flushed
  840. * to the stack before we get here because all callers of us
  841. * are flush_tlb_*() routines, and these run after flush_cache_*()
  842. * which performs the flushw.
  843. *
  844. * The SMP TLB coherency scheme we use works as follows:
  845. *
  846. * 1) mm->cpu_vm_mask is a bit mask of which cpus an address
  847. * space has (potentially) executed on, this is the heuristic
  848. * we use to avoid doing cross calls.
  849. *
  850. * Also, for flushing from kswapd and also for clones, we
  851. * use cpu_vm_mask as the list of cpus to make run the TLB.
  852. *
  853. * 2) TLB context numbers are shared globally across all processors
  854. * in the system, this allows us to play several games to avoid
  855. * cross calls.
  856. *
  857. * One invariant is that when a cpu switches to a process, and
  858. * that process's tsk->active_mm->cpu_vm_mask does not have the
  859. * current cpu's bit set, that tlb context is flushed locally.
  860. *
  861. * If the address space is non-shared (ie. mm->count == 1) we avoid
  862. * cross calls when we want to flush the currently running process's
  863. * tlb state. This is done by clearing all cpu bits except the current
  864. * processor's in current->mm->cpu_vm_mask and performing the
  865. * flush locally only. This will force any subsequent cpus which run
  866. * this task to flush the context from the local tlb if the process
  867. * migrates to another cpu (again).
  868. *
  869. * 3) For shared address spaces (threads) and swapping we bite the
  870. * bullet for most cases and perform the cross call (but only to
  871. * the cpus listed in cpu_vm_mask).
  872. *
  873. * The performance gain from "optimizing" away the cross call for threads is
  874. * questionable (in theory the big win for threads is the massive sharing of
  875. * address space state across processors).
  876. */
  877. /* This currently is only used by the hugetlb arch pre-fault
  878. * hook on UltraSPARC-III+ and later when changing the pagesize
  879. * bits of the context register for an address space.
  880. */
  881. void smp_flush_tlb_mm(struct mm_struct *mm)
  882. {
  883. u32 ctx = CTX_HWBITS(mm->context);
  884. int cpu = get_cpu();
  885. if (atomic_read(&mm->mm_users) == 1) {
  886. cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
  887. goto local_flush_and_out;
  888. }
  889. smp_cross_call_masked(&xcall_flush_tlb_mm,
  890. ctx, 0, 0,
  891. mm_cpumask(mm));
  892. local_flush_and_out:
  893. __flush_tlb_mm(ctx, SECONDARY_CONTEXT);
  894. put_cpu();
  895. }
  896. void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long *vaddrs)
  897. {
  898. u32 ctx = CTX_HWBITS(mm->context);
  899. int cpu = get_cpu();
  900. if (mm == current->mm && atomic_read(&mm->mm_users) == 1)
  901. cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
  902. else
  903. smp_cross_call_masked(&xcall_flush_tlb_pending,
  904. ctx, nr, (unsigned long) vaddrs,
  905. mm_cpumask(mm));
  906. __flush_tlb_pending(ctx, nr, vaddrs);
  907. put_cpu();
  908. }
  909. void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end)
  910. {
  911. start &= PAGE_MASK;
  912. end = PAGE_ALIGN(end);
  913. if (start != end) {
  914. smp_cross_call(&xcall_flush_tlb_kernel_range,
  915. 0, start, end);
  916. __flush_tlb_kernel_range(start, end);
  917. }
  918. }
  919. /* CPU capture. */
  920. /* #define CAPTURE_DEBUG */
  921. extern unsigned long xcall_capture;
  922. static atomic_t smp_capture_depth = ATOMIC_INIT(0);
  923. static atomic_t smp_capture_registry = ATOMIC_INIT(0);
  924. static unsigned long penguins_are_doing_time;
  925. void smp_capture(void)
  926. {
  927. int result = atomic_add_ret(1, &smp_capture_depth);
  928. if (result == 1) {
  929. int ncpus = num_online_cpus();
  930. #ifdef CAPTURE_DEBUG
  931. printk("CPU[%d]: Sending penguins to jail...",
  932. smp_processor_id());
  933. #endif
  934. penguins_are_doing_time = 1;
  935. atomic_inc(&smp_capture_registry);
  936. smp_cross_call(&xcall_capture, 0, 0, 0);
  937. while (atomic_read(&smp_capture_registry) != ncpus)
  938. rmb();
  939. #ifdef CAPTURE_DEBUG
  940. printk("done\n");
  941. #endif
  942. }
  943. }
  944. void smp_release(void)
  945. {
  946. if (atomic_dec_and_test(&smp_capture_depth)) {
  947. #ifdef CAPTURE_DEBUG
  948. printk("CPU[%d]: Giving pardon to "
  949. "imprisoned penguins\n",
  950. smp_processor_id());
  951. #endif
  952. penguins_are_doing_time = 0;
  953. membar_safe("#StoreLoad");
  954. atomic_dec(&smp_capture_registry);
  955. }
  956. }
  957. /* Imprisoned penguins run with %pil == PIL_NORMAL_MAX, but PSTATE_IE
  958. * set, so they can service tlb flush xcalls...
  959. */
  960. extern void prom_world(int);
  961. void smp_penguin_jailcell(int irq, struct pt_regs *regs)
  962. {
  963. clear_softint(1 << irq);
  964. preempt_disable();
  965. __asm__ __volatile__("flushw");
  966. prom_world(1);
  967. atomic_inc(&smp_capture_registry);
  968. membar_safe("#StoreLoad");
  969. while (penguins_are_doing_time)
  970. rmb();
  971. atomic_dec(&smp_capture_registry);
  972. prom_world(0);
  973. preempt_enable();
  974. }
  975. /* /proc/profile writes can call this, don't __init it please. */
  976. int setup_profiling_timer(unsigned int multiplier)
  977. {
  978. return -EINVAL;
  979. }
  980. void __init smp_prepare_cpus(unsigned int max_cpus)
  981. {
  982. }
  983. void __devinit smp_prepare_boot_cpu(void)
  984. {
  985. }
  986. void __init smp_setup_processor_id(void)
  987. {
  988. if (tlb_type == spitfire)
  989. xcall_deliver_impl = spitfire_xcall_deliver;
  990. else if (tlb_type == cheetah || tlb_type == cheetah_plus)
  991. xcall_deliver_impl = cheetah_xcall_deliver;
  992. else
  993. xcall_deliver_impl = hypervisor_xcall_deliver;
  994. }
  995. void __devinit smp_fill_in_sib_core_maps(void)
  996. {
  997. unsigned int i;
  998. for_each_present_cpu(i) {
  999. unsigned int j;
  1000. cpus_clear(cpu_core_map[i]);
  1001. if (cpu_data(i).core_id == 0) {
  1002. cpu_set(i, cpu_core_map[i]);
  1003. continue;
  1004. }
  1005. for_each_present_cpu(j) {
  1006. if (cpu_data(i).core_id ==
  1007. cpu_data(j).core_id)
  1008. cpu_set(j, cpu_core_map[i]);
  1009. }
  1010. }
  1011. for_each_present_cpu(i) {
  1012. unsigned int j;
  1013. cpus_clear(per_cpu(cpu_sibling_map, i));
  1014. if (cpu_data(i).proc_id == -1) {
  1015. cpu_set(i, per_cpu(cpu_sibling_map, i));
  1016. continue;
  1017. }
  1018. for_each_present_cpu(j) {
  1019. if (cpu_data(i).proc_id ==
  1020. cpu_data(j).proc_id)
  1021. cpu_set(j, per_cpu(cpu_sibling_map, i));
  1022. }
  1023. }
  1024. }
  1025. int __cpuinit __cpu_up(unsigned int cpu)
  1026. {
  1027. int ret = smp_boot_one_cpu(cpu);
  1028. if (!ret) {
  1029. cpu_set(cpu, smp_commenced_mask);
  1030. while (!cpu_isset(cpu, cpu_online_map))
  1031. mb();
  1032. if (!cpu_isset(cpu, cpu_online_map)) {
  1033. ret = -ENODEV;
  1034. } else {
  1035. /* On SUN4V, writes to %tick and %stick are
  1036. * not allowed.
  1037. */
  1038. if (tlb_type != hypervisor)
  1039. smp_synchronize_one_tick(cpu);
  1040. }
  1041. }
  1042. return ret;
  1043. }
  1044. #ifdef CONFIG_HOTPLUG_CPU
  1045. void cpu_play_dead(void)
  1046. {
  1047. int cpu = smp_processor_id();
  1048. unsigned long pstate;
  1049. idle_task_exit();
  1050. if (tlb_type == hypervisor) {
  1051. struct trap_per_cpu *tb = &trap_block[cpu];
  1052. sun4v_cpu_qconf(HV_CPU_QUEUE_CPU_MONDO,
  1053. tb->cpu_mondo_pa, 0);
  1054. sun4v_cpu_qconf(HV_CPU_QUEUE_DEVICE_MONDO,
  1055. tb->dev_mondo_pa, 0);
  1056. sun4v_cpu_qconf(HV_CPU_QUEUE_RES_ERROR,
  1057. tb->resum_mondo_pa, 0);
  1058. sun4v_cpu_qconf(HV_CPU_QUEUE_NONRES_ERROR,
  1059. tb->nonresum_mondo_pa, 0);
  1060. }
  1061. cpu_clear(cpu, smp_commenced_mask);
  1062. membar_safe("#Sync");
  1063. local_irq_disable();
  1064. __asm__ __volatile__(
  1065. "rdpr %%pstate, %0\n\t"
  1066. "wrpr %0, %1, %%pstate"
  1067. : "=r" (pstate)
  1068. : "i" (PSTATE_IE));
  1069. while (1)
  1070. barrier();
  1071. }
  1072. int __cpu_disable(void)
  1073. {
  1074. int cpu = smp_processor_id();
  1075. cpuinfo_sparc *c;
  1076. int i;
  1077. for_each_cpu_mask(i, cpu_core_map[cpu])
  1078. cpu_clear(cpu, cpu_core_map[i]);
  1079. cpus_clear(cpu_core_map[cpu]);
  1080. for_each_cpu_mask(i, per_cpu(cpu_sibling_map, cpu))
  1081. cpu_clear(cpu, per_cpu(cpu_sibling_map, i));
  1082. cpus_clear(per_cpu(cpu_sibling_map, cpu));
  1083. c = &cpu_data(cpu);
  1084. c->core_id = 0;
  1085. c->proc_id = -1;
  1086. smp_wmb();
  1087. /* Make sure no interrupts point to this cpu. */
  1088. fixup_irqs();
  1089. local_irq_enable();
  1090. mdelay(1);
  1091. local_irq_disable();
  1092. ipi_call_lock();
  1093. cpu_clear(cpu, cpu_online_map);
  1094. ipi_call_unlock();
  1095. return 0;
  1096. }
  1097. void __cpu_die(unsigned int cpu)
  1098. {
  1099. int i;
  1100. for (i = 0; i < 100; i++) {
  1101. smp_rmb();
  1102. if (!cpu_isset(cpu, smp_commenced_mask))
  1103. break;
  1104. msleep(100);
  1105. }
  1106. if (cpu_isset(cpu, smp_commenced_mask)) {
  1107. printk(KERN_ERR "CPU %u didn't die...\n", cpu);
  1108. } else {
  1109. #if defined(CONFIG_SUN_LDOMS)
  1110. unsigned long hv_err;
  1111. int limit = 100;
  1112. do {
  1113. hv_err = sun4v_cpu_stop(cpu);
  1114. if (hv_err == HV_EOK) {
  1115. cpu_clear(cpu, cpu_present_map);
  1116. break;
  1117. }
  1118. } while (--limit > 0);
  1119. if (limit <= 0) {
  1120. printk(KERN_ERR "sun4v_cpu_stop() fails err=%lu\n",
  1121. hv_err);
  1122. }
  1123. #endif
  1124. }
  1125. }
  1126. #endif
  1127. void __init smp_cpus_done(unsigned int max_cpus)
  1128. {
  1129. }
  1130. void smp_send_reschedule(int cpu)
  1131. {
  1132. xcall_deliver((u64) &xcall_receive_signal, 0, 0,
  1133. &cpumask_of_cpu(cpu));
  1134. }
  1135. void smp_receive_signal_client(int irq, struct pt_regs *regs)
  1136. {
  1137. clear_softint(1 << irq);
  1138. }
  1139. /* This is a nop because we capture all other cpus
  1140. * anyway when making the PROM active.
  1141. */
  1142. void smp_send_stop(void)
  1143. {
  1144. }
  1145. unsigned long __per_cpu_base __read_mostly;
  1146. unsigned long __per_cpu_shift __read_mostly;
  1147. EXPORT_SYMBOL(__per_cpu_base);
  1148. EXPORT_SYMBOL(__per_cpu_shift);
  1149. void __init real_setup_per_cpu_areas(void)
  1150. {
  1151. unsigned long paddr, goal, size, i;
  1152. char *ptr;
  1153. /* Copy section for each CPU (we discard the original) */
  1154. goal = PERCPU_ENOUGH_ROOM;
  1155. __per_cpu_shift = PAGE_SHIFT;
  1156. for (size = PAGE_SIZE; size < goal; size <<= 1UL)
  1157. __per_cpu_shift++;
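/* size is now the smallest power of two covering PERCPU_ENOUGH_ROOM and
 * __per_cpu_shift is its log2, so cpu N's copy ends up at
 * __per_cpu_base + (N << __per_cpu_shift) after the copy loop below,
 * letting per-cpu offsets be computed with a shift instead of a multiply.
 */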
  1158. paddr = lmb_alloc(size * NR_CPUS, PAGE_SIZE);
  1159. if (!paddr) {
  1160. prom_printf("Cannot allocate per-cpu memory.\n");
  1161. prom_halt();
  1162. }
  1163. ptr = __va(paddr);
  1164. __per_cpu_base = ptr - __per_cpu_start;
  1165. for (i = 0; i < NR_CPUS; i++, ptr += size)
  1166. memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
  1167. /* Setup %g5 for the boot cpu. */
  1168. __local_per_cpu_offset = __per_cpu_offset(smp_processor_id());
  1169. }