/* smp.c: Sparc64 SMP support.
 *
 * Copyright (C) 1997, 2007 David S. Miller (davem@davemloft.net)
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/threads.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/cache.h>
#include <linux/jiffies.h>
#include <linux/profile.h>
#include <linux/bootmem.h>

#include <asm/head.h>
#include <asm/ptrace.h>
#include <asm/atomic.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/cpudata.h>
#include <asm/hvtramp.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/uaccess.h>
#include <asm/timer.h>
#include <asm/starfire.h>
#include <asm/tlb.h>
#include <asm/sections.h>
#include <asm/prom.h>
#include <asm/mdesc.h>
#include <asm/ldc.h>
#include <asm/hypervisor.h>

extern void calibrate_delay(void);

int sparc64_multi_core __read_mostly;

cpumask_t cpu_possible_map __read_mostly = CPU_MASK_NONE;
cpumask_t cpu_online_map __read_mostly = CPU_MASK_NONE;
cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly =
	{ [0 ... NR_CPUS-1] = CPU_MASK_NONE };
cpumask_t cpu_core_map[NR_CPUS] __read_mostly =
	{ [0 ... NR_CPUS-1] = CPU_MASK_NONE };

EXPORT_SYMBOL(cpu_possible_map);
EXPORT_SYMBOL(cpu_online_map);
EXPORT_SYMBOL(cpu_sibling_map);
EXPORT_SYMBOL(cpu_core_map);

static cpumask_t smp_commenced_mask;

void smp_info(struct seq_file *m)
{
	int i;

	seq_printf(m, "State:\n");
	for_each_online_cpu(i)
		seq_printf(m, "CPU%d:\t\tonline\n", i);
}

void smp_bogo(struct seq_file *m)
{
	int i;

	for_each_online_cpu(i)
		seq_printf(m,
			   "Cpu%dClkTck\t: %016lx\n",
			   i, cpu_data(i).clock_tick);
}

static __cacheline_aligned_in_smp DEFINE_SPINLOCK(call_lock);

extern void setup_sparc64_timer(void);

static volatile unsigned long callin_flag = 0;

void __devinit smp_callin(void)
{
	int cpuid = hard_smp_processor_id();

	__local_per_cpu_offset = __per_cpu_offset(cpuid);

	if (tlb_type == hypervisor)
		sun4v_ktsb_register();

	__flush_tlb_all();

	setup_sparc64_timer();

	if (cheetah_pcache_forced_on)
		cheetah_enable_pcache();

	local_irq_enable();

	callin_flag = 1;
	__asm__ __volatile__("membar #Sync\n\t"
			     "flush %%g6" : : : "memory");

	/* Clear this or we will die instantly when we
	 * schedule back to this idler...
	 */
	current_thread_info()->new_child = 0;

	/* Attach to the address space of init_task. */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

	while (!cpu_isset(cpuid, smp_commenced_mask))
		rmb();

	spin_lock(&call_lock);
	cpu_set(cpuid, cpu_online_map);
	spin_unlock(&call_lock);

	/* idle thread is expected to have preempt disabled */
	preempt_disable();
}

void cpu_panic(void)
{
	printk("CPU[%d]: Returns from cpu_idle!\n", smp_processor_id());
	panic("SMP bolixed\n");
}

/* This tick register synchronization scheme is taken entirely from
 * the ia64 port, see arch/ia64/kernel/smpboot.c for details and credit.
 *
 * The only change I've made is to rework it so that the master
 * initiates the synchronization instead of the slave.  -DaveM
 */
#define MASTER	0
#define SLAVE	(SMP_CACHE_BYTES/sizeof(unsigned long))

#define NUM_ROUNDS	64	/* magic value */
#define NUM_ITERS	5	/* likewise */

static DEFINE_SPINLOCK(itc_sync_lock);
static unsigned long go[SLAVE + 1];

#define DEBUG_TICK_SYNC	0
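
/* One round trip of the tick handshake used below: the slave samples its
 * %tick, raises go[MASTER], waits for the master to publish its own %tick
 * in go[SLAVE], then samples %tick again, keeping the iteration with the
 * smallest round trip.  The return value is the offset of the midpoint of
 * the slave's two samples from the master's timestamp.
 */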
static inline long get_delta (long *rt, long *master)
{
	unsigned long best_t0 = 0, best_t1 = ~0UL, best_tm = 0;
	unsigned long tcenter, t0, t1, tm;
	unsigned long i;

	for (i = 0; i < NUM_ITERS; i++) {
		t0 = tick_ops->get_tick();
		go[MASTER] = 1;
		membar_storeload();
		while (!(tm = go[SLAVE]))
			rmb();
		go[SLAVE] = 0;
		wmb();
		t1 = tick_ops->get_tick();

		if (t1 - t0 < best_t1 - best_t0)
			best_t0 = t0, best_t1 = t1, best_tm = tm;
	}

	*rt = best_t1 - best_t0;
	*master = best_tm - best_t0;

	/* average best_t0 and best_t1 without overflow: */
	tcenter = (best_t0/2 + best_t1/2);
	if (best_t0 % 2 + best_t1 % 2 == 2)
		tcenter++;
	return tcenter - best_tm;
}

void smp_synchronize_tick_client(void)
{
	long i, delta, adj, adjust_latency = 0, done = 0;
	unsigned long flags, rt, master_time_stamp, bound;
#if DEBUG_TICK_SYNC
	struct {
		long rt;	/* roundtrip time */
		long master;	/* master's timestamp */
		long diff;	/* difference between midpoint and master's timestamp */
		long lat;	/* estimate of itc adjustment latency */
	} t[NUM_ROUNDS];
#endif

	go[MASTER] = 1;

	while (go[MASTER])
		rmb();

	local_irq_save(flags);
	{
		for (i = 0; i < NUM_ROUNDS; i++) {
			delta = get_delta(&rt, &master_time_stamp);
			if (delta == 0) {
				done = 1;	/* let's lock on to this... */
				bound = rt;
			}

			if (!done) {
				if (i > 0) {
					adjust_latency += -delta;
					adj = -delta + adjust_latency/4;
				} else
					adj = -delta;

				tick_ops->add_tick(adj);
			}
#if DEBUG_TICK_SYNC
			t[i].rt = rt;
			t[i].master = master_time_stamp;
			t[i].diff = delta;
			t[i].lat = adjust_latency/4;
#endif
		}
	}
	local_irq_restore(flags);

#if DEBUG_TICK_SYNC
	for (i = 0; i < NUM_ROUNDS; i++)
		printk("rt=%5ld master=%5ld diff=%5ld adjlat=%5ld\n",
		       t[i].rt, t[i].master, t[i].diff, t[i].lat);
#endif

	printk(KERN_INFO "CPU %d: synchronized TICK with master CPU "
	       "(last diff %ld cycles, maxerr %lu cycles)\n",
	       smp_processor_id(), delta, rt);
}

static void smp_start_sync_tick_client(int cpu);

static void smp_synchronize_one_tick(int cpu)
{
	unsigned long flags, i;

	go[MASTER] = 0;

	smp_start_sync_tick_client(cpu);

	/* wait for client to be ready */
	while (!go[MASTER])
		rmb();

	/* now let the client proceed into its loop */
	go[MASTER] = 0;
	membar_storeload();

	spin_lock_irqsave(&itc_sync_lock, flags);
	{
		for (i = 0; i < NUM_ROUNDS*NUM_ITERS; i++) {
			while (!go[MASTER])
				rmb();
			go[MASTER] = 0;
			wmb();
			go[SLAVE] = tick_ops->get_tick();
			membar_storeload();
		}
	}
	spin_unlock_irqrestore(&itc_sync_lock, flags);
}

#if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU)
/* XXX Put this in some common place. XXX */
static unsigned long kimage_addr_to_ra(void *p)
{
	unsigned long val = (unsigned long) p;

	return kern_base + (val - KERNBASE);
}
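
/* Start a cpu inside a sun4v logical domain: fill in an hvtramp_descr
 * with the locked kernel TTE mapping(s), the fault status area and the
 * new cpu's thread register, then ask the hypervisor to start the cpu
 * at the hv_cpu_startup trampoline via sun4v_cpu_start().
 */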
static void ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread_reg)
{
	extern unsigned long sparc64_ttable_tl0;
	extern unsigned long kern_locked_tte_data;
	extern int bigkernel;
	struct hvtramp_descr *hdesc;
	unsigned long trampoline_ra;
	struct trap_per_cpu *tb;
	u64 tte_vaddr, tte_data;
	unsigned long hv_err;

	hdesc = kzalloc(sizeof(*hdesc), GFP_KERNEL);
	if (!hdesc) {
		printk(KERN_ERR "ldom_startcpu_cpuid: Cannot allocate "
		       "hvtramp_descr.\n");
		return;
	}

	hdesc->cpu = cpu;
	hdesc->num_mappings = (bigkernel ? 2 : 1);

	tb = &trap_block[cpu];
	tb->hdesc = hdesc;

	hdesc->fault_info_va = (unsigned long) &tb->fault_info;
	hdesc->fault_info_pa = kimage_addr_to_ra(&tb->fault_info);

	hdesc->thread_reg = thread_reg;

	tte_vaddr = (unsigned long) KERNBASE;
	tte_data = kern_locked_tte_data;

	hdesc->maps[0].vaddr = tte_vaddr;
	hdesc->maps[0].tte = tte_data;
	if (bigkernel) {
		tte_vaddr += 0x400000;
		tte_data += 0x400000;
		hdesc->maps[1].vaddr = tte_vaddr;
		hdesc->maps[1].tte = tte_data;
	}

	trampoline_ra = kimage_addr_to_ra(hv_cpu_startup);

	hv_err = sun4v_cpu_start(cpu, trampoline_ra,
				 kimage_addr_to_ra(&sparc64_ttable_tl0),
				 __pa(hdesc));
	if (hv_err)
		printk(KERN_ERR "ldom_startcpu_cpuid: sun4v_cpu_start() "
		       "gives error %lu\n", hv_err);
}
#endif

extern unsigned long sparc64_cpu_startup;

/* The OBP cpu startup callback truncates the 3rd arg cookie to
 * 32-bits (I think) so to be safe we have it read the pointer
 * contained here so we work on >4GB machines. -DaveM
 */
static struct thread_info *cpu_new_thread = NULL;

static int __devinit smp_boot_one_cpu(unsigned int cpu)
{
	struct trap_per_cpu *tb = &trap_block[cpu];
	unsigned long entry =
		(unsigned long)(&sparc64_cpu_startup);
	unsigned long cookie =
		(unsigned long)(&cpu_new_thread);
	struct task_struct *p;
	int timeout, ret;

	p = fork_idle(cpu);
	if (IS_ERR(p))
		return PTR_ERR(p);
	callin_flag = 0;
	cpu_new_thread = task_thread_info(p);

	if (tlb_type == hypervisor) {
#if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU)
		if (ldom_domaining_enabled)
			ldom_startcpu_cpuid(cpu,
					    (unsigned long) cpu_new_thread);
		else
#endif
			prom_startcpu_cpuid(cpu, entry, cookie);
	} else {
		struct device_node *dp = of_find_node_by_cpuid(cpu);

		prom_startcpu(dp->node, entry, cookie);
	}

	for (timeout = 0; timeout < 50000; timeout++) {
		if (callin_flag)
			break;
		udelay(100);
	}

	if (callin_flag) {
		ret = 0;
	} else {
		printk("Processor %d is stuck.\n", cpu);
		ret = -ENODEV;
	}
	cpu_new_thread = NULL;

	if (tb->hdesc) {
		kfree(tb->hdesc);
		tb->hdesc = NULL;
	}

	return ret;
}
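
/* Deliver one cross call on Spitfire: write the three mondo data words
 * and the dispatch trigger into the UDB interrupt dispatch registers
 * with interrupts disabled, then poll the dispatch status register,
 * retrying after a short delay if the target NACKs us.
 */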
static void spitfire_xcall_helper(u64 data0, u64 data1, u64 data2, u64 pstate, unsigned long cpu)
{
	u64 result, target;
	int stuck, tmp;

	if (this_is_starfire) {
		/* map to real upaid */
		cpu = (((cpu & 0x3c) << 1) |
		       ((cpu & 0x40) >> 4) |
		       (cpu & 0x3));
	}

	target = (cpu << 14) | 0x70;
again:
	/* Ok, this is the real Spitfire Errata #54.
	 * One must read back from a UDB internal register
	 * after writes to the UDB interrupt dispatch, but
	 * before the membar Sync for that write.
	 * So we use the high UDB control register (ASI 0x7f,
	 * ADDR 0x20) for the dummy read. -DaveM
	 */
	tmp = 0x40;
	__asm__ __volatile__(
	"wrpr %1, %2, %%pstate\n\t"
	"stxa %4, [%0] %3\n\t"
	"stxa %5, [%0+%8] %3\n\t"
	"add %0, %8, %0\n\t"
	"stxa %6, [%0+%8] %3\n\t"
	"membar #Sync\n\t"
	"stxa %%g0, [%7] %3\n\t"
	"membar #Sync\n\t"
	"mov 0x20, %%g1\n\t"
	"ldxa [%%g1] 0x7f, %%g0\n\t"
	"membar #Sync"
	: "=r" (tmp)
	: "r" (pstate), "i" (PSTATE_IE), "i" (ASI_INTR_W),
	  "r" (data0), "r" (data1), "r" (data2), "r" (target),
	  "r" (0x10), "0" (tmp)
	: "g1");

	/* NOTE: PSTATE_IE is still clear. */
	stuck = 100000;
	do {
		__asm__ __volatile__("ldxa [%%g0] %1, %0"
			: "=r" (result)
			: "i" (ASI_INTR_DISPATCH_STAT));
		if (result == 0) {
			__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
					     : : "r" (pstate));
			return;
		}
		stuck -= 1;
		if (stuck == 0)
			break;
	} while (result & 0x1);

	__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
			     : : "r" (pstate));
	if (stuck == 0) {
		printk("CPU[%d]: mondo stuckage result[%016lx]\n",
		       smp_processor_id(), result);
	} else {
		udelay(2);
		goto again;
	}
}

static __inline__ void spitfire_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
{
	u64 pstate;
	int i;

	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
	for_each_cpu_mask(i, mask)
		spitfire_xcall_helper(data0, data1, data2, pstate, i);
}

/* Cheetah now allows to send the whole 64-bytes of data in the interrupt
 * packet, but we have no use for that.  However we do take advantage of
 * the new pipelining feature (ie. dispatch to multiple cpus simultaneously).
 */
static void cheetah_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
{
	u64 pstate, ver;
	int nack_busy_id, is_jbus, need_more;

	if (cpus_empty(mask))
		return;

	/* Unfortunately, someone at Sun had the brilliant idea to make the
	 * busy/nack fields hard-coded by ITID number for this Ultra-III
	 * derivative processor.
	 */
	__asm__ ("rdpr %%ver, %0" : "=r" (ver));
	is_jbus = ((ver >> 32) == __JALAPENO_ID ||
		   (ver >> 32) == __SERRANO_ID);

	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));

retry:
	need_more = 0;
	__asm__ __volatile__("wrpr %0, %1, %%pstate\n\t"
			     : : "r" (pstate), "i" (PSTATE_IE));

	/* Setup the dispatch data registers. */
	__asm__ __volatile__("stxa %0, [%3] %6\n\t"
			     "stxa %1, [%4] %6\n\t"
			     "stxa %2, [%5] %6\n\t"
			     "membar #Sync\n\t"
			     : /* no outputs */
			     : "r" (data0), "r" (data1), "r" (data2),
			       "r" (0x40), "r" (0x50), "r" (0x60),
			       "i" (ASI_INTR_W));

	nack_busy_id = 0;
	{
		int i;

		for_each_cpu_mask(i, mask) {
			u64 target = (i << 14) | 0x70;

			if (!is_jbus)
				target |= (nack_busy_id << 24);
			__asm__ __volatile__(
				"stxa %%g0, [%0] %1\n\t"
				"membar #Sync\n\t"
				: /* no outputs */
				: "r" (target), "i" (ASI_INTR_W));
			nack_busy_id++;
			if (nack_busy_id == 32) {
				need_more = 1;
				break;
			}
		}
	}

	/* Now, poll for completion. */
	{
		u64 dispatch_stat;
		long stuck;

		stuck = 100000 * nack_busy_id;
		do {
			__asm__ __volatile__("ldxa [%%g0] %1, %0"
					     : "=r" (dispatch_stat)
					     : "i" (ASI_INTR_DISPATCH_STAT));
			if (dispatch_stat == 0UL) {
				__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
						     : : "r" (pstate));
				if (unlikely(need_more)) {
					int i, cnt = 0;
					for_each_cpu_mask(i, mask) {
						cpu_clear(i, mask);
						cnt++;
						if (cnt == 32)
							break;
					}
					goto retry;
				}
				return;
			}
			if (!--stuck)
				break;
		} while (dispatch_stat & 0x5555555555555555UL);

		__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
				     : : "r" (pstate));

		if ((dispatch_stat & ~(0x5555555555555555UL)) == 0) {
			/* Busy bits will not clear, continue instead
			 * of freezing up on this cpu.
			 */
			printk("CPU[%d]: mondo stuckage result[%016lx]\n",
			       smp_processor_id(), dispatch_stat);
		} else {
			int i, this_busy_nack = 0;

			/* Delay some random time with interrupts enabled
			 * to prevent deadlock.
			 */
			udelay(2 * nack_busy_id);

			/* Clear out the mask bits for cpus which did not
			 * NACK us.
			 */
			for_each_cpu_mask(i, mask) {
				u64 check_mask;

				if (is_jbus)
					check_mask = (0x2UL << (2*i));
				else
					check_mask = (0x2UL <<
						      this_busy_nack);
				if ((dispatch_stat & check_mask) == 0)
					cpu_clear(i, mask);
				this_busy_nack += 2;
				if (this_busy_nack == 64)
					break;
			}

			goto retry;
		}
	}
}

/* Multi-cpu list version. */
static void hypervisor_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
{
	struct trap_per_cpu *tb;
	u16 *cpu_list;
	u64 *mondo;
	cpumask_t error_mask;
	unsigned long flags, status;
	int cnt, retries, this_cpu, prev_sent, i;

	if (cpus_empty(mask))
		return;

	/* We have to do this whole thing with interrupts fully disabled.
	 * Otherwise if we send an xcall from interrupt context it will
	 * corrupt both our mondo block and cpu list state.
	 *
	 * One consequence of this is that we cannot use timeout mechanisms
	 * that depend upon interrupts being delivered locally.  So, for
	 * example, we cannot sample jiffies and expect it to advance.
	 *
	 * Fortunately, udelay() uses %stick/%tick so we can use that.
	 */
	local_irq_save(flags);

	this_cpu = smp_processor_id();
	tb = &trap_block[this_cpu];

	mondo = __va(tb->cpu_mondo_block_pa);
	mondo[0] = data0;
	mondo[1] = data1;
	mondo[2] = data2;
	wmb();

	cpu_list = __va(tb->cpu_list_pa);

	/* Setup the initial cpu list. */
	cnt = 0;
	for_each_cpu_mask(i, mask)
		cpu_list[cnt++] = i;

	cpus_clear(error_mask);
	retries = 0;
	prev_sent = 0;
	do {
		int forward_progress, n_sent;

		status = sun4v_cpu_mondo_send(cnt,
					      tb->cpu_list_pa,
					      tb->cpu_mondo_block_pa);

		/* HV_EOK means all cpus received the xcall, we're done. */
		if (likely(status == HV_EOK))
			break;

		/* First, see if we made any forward progress.
		 *
		 * The hypervisor indicates successful sends by setting
		 * cpu list entries to the value 0xffff.
		 */
		n_sent = 0;
		for (i = 0; i < cnt; i++) {
			if (likely(cpu_list[i] == 0xffff))
				n_sent++;
		}

		forward_progress = 0;
		if (n_sent > prev_sent)
			forward_progress = 1;

		prev_sent = n_sent;

		/* If we get a HV_ECPUERROR, then one or more of the cpus
		 * in the list are in error state.  Use the cpu_state()
		 * hypervisor call to find out which cpus are in error state.
		 */
		if (unlikely(status == HV_ECPUERROR)) {
			for (i = 0; i < cnt; i++) {
				long err;
				u16 cpu;

				cpu = cpu_list[i];
				if (cpu == 0xffff)
					continue;

				err = sun4v_cpu_state(cpu);
				if (err >= 0 &&
				    err == HV_CPU_STATE_ERROR) {
					cpu_list[i] = 0xffff;
					cpu_set(cpu, error_mask);
				}
			}
		} else if (unlikely(status != HV_EWOULDBLOCK))
			goto fatal_mondo_error;

		/* Don't bother rewriting the CPU list, just leave the
		 * 0xffff and non-0xffff entries in there and the
		 * hypervisor will do the right thing.
		 *
		 * Only advance timeout state if we didn't make any
		 * forward progress.
		 */
		if (unlikely(!forward_progress)) {
			if (unlikely(++retries > 10000))
				goto fatal_mondo_timeout;

			/* Delay a little bit to let other cpus catch up
			 * on their cpu mondo queue work.
			 */
			udelay(2 * cnt);
		}
	} while (1);

	local_irq_restore(flags);

	if (unlikely(!cpus_empty(error_mask)))
		goto fatal_mondo_cpu_error;

	return;

fatal_mondo_cpu_error:
	printk(KERN_CRIT "CPU[%d]: SUN4V mondo cpu error, some target cpus "
	       "were in error state\n",
	       this_cpu);
	printk(KERN_CRIT "CPU[%d]: Error mask [ ", this_cpu);
	for_each_cpu_mask(i, error_mask)
		printk("%d ", i);
	printk("]\n");
	return;

fatal_mondo_timeout:
	local_irq_restore(flags);
	printk(KERN_CRIT "CPU[%d]: SUN4V mondo timeout, no forward "
	       " progress after %d retries.\n",
	       this_cpu, retries);
	goto dump_cpu_list_and_out;

fatal_mondo_error:
	local_irq_restore(flags);
	printk(KERN_CRIT "CPU[%d]: Unexpected SUN4V mondo error %lu\n",
	       this_cpu, status);
	printk(KERN_CRIT "CPU[%d]: Args were cnt(%d) cpulist_pa(%lx) "
	       "mondo_block_pa(%lx)\n",
	       this_cpu, cnt, tb->cpu_list_pa, tb->cpu_mondo_block_pa);

dump_cpu_list_and_out:
	printk(KERN_CRIT "CPU[%d]: CPU list [ ", this_cpu);
	for (i = 0; i < cnt; i++)
		printk("%u ", cpu_list[i]);
	printk("]\n");
}

/* Send cross call to all processors mentioned in MASK
 * except self.
 */
static void smp_cross_call_masked(unsigned long *func, u32 ctx, u64 data1, u64 data2, cpumask_t mask)
{
	u64 data0 = (((u64)ctx)<<32 | (((u64)func) & 0xffffffff));
	int this_cpu = get_cpu();

	cpus_and(mask, mask, cpu_online_map);
	cpu_clear(this_cpu, mask);

	if (tlb_type == spitfire)
		spitfire_xcall_deliver(data0, data1, data2, mask);
	else if (tlb_type == cheetah || tlb_type == cheetah_plus)
		cheetah_xcall_deliver(data0, data1, data2, mask);
	else
		hypervisor_xcall_deliver(data0, data1, data2, mask);

	/* NOTE: Caller runs local copy on master. */

	put_cpu();
}

extern unsigned long xcall_sync_tick;

static void smp_start_sync_tick_client(int cpu)
{
	cpumask_t mask = cpumask_of_cpu(cpu);

	smp_cross_call_masked(&xcall_sync_tick,
			      0, 0, 0, mask);
}

/* Send cross call to all processors except self. */
#define smp_cross_call(func, ctx, data1, data2) \
	smp_cross_call_masked(func, ctx, data1, data2, cpu_online_map)

struct call_data_struct {
	void (*func) (void *info);
	void *info;
	atomic_t finished;
	int wait;
};

static struct call_data_struct *call_data;

extern unsigned long xcall_call_function;

/**
 * smp_call_function(): Run a function on all other CPUs.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @nonatomic: currently unused.
 * @wait: If true, wait (atomically) until function has completed on other CPUs.
 *
 * Returns 0 on success, else a negative status code.  Does not return until
 * remote CPUs are nearly ready to execute <<func>> or have already executed it.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
static int smp_call_function_mask(void (*func)(void *info), void *info,
				  int nonatomic, int wait, cpumask_t mask)
{
	struct call_data_struct data;
	int cpus;

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	data.func = func;
	data.info = info;
	atomic_set(&data.finished, 0);
	data.wait = wait;

	spin_lock(&call_lock);

	cpu_clear(smp_processor_id(), mask);
	cpus = cpus_weight(mask);

	if (!cpus)
		goto out_unlock;

	call_data = &data;
	mb();

	smp_cross_call_masked(&xcall_call_function, 0, 0, 0, mask);

	/* Wait for response */
	while (atomic_read(&data.finished) != cpus)
		cpu_relax();

out_unlock:
	spin_unlock(&call_lock);

	return 0;
}

int smp_call_function(void (*func)(void *info), void *info,
		      int nonatomic, int wait)
{
	return smp_call_function_mask(func, info, nonatomic, wait,
				      cpu_online_map);
}
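
/* Usage sketch (not part of this file; "bump" and "my_counter" are
 * hypothetical names used only for illustration): run a handler on all
 * other online cpus and wait for every one of them to finish.
 *
 *	static atomic_t my_counter = ATOMIC_INIT(0);
 *
 *	static void bump(void *info)
 *	{
 *		atomic_inc((atomic_t *) info);
 *	}
 *
 *	smp_call_function(bump, &my_counter, 0, 1);
 */
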
void smp_call_function_client(int irq, struct pt_regs *regs)
{
	void (*func) (void *info) = call_data->func;
	void *info = call_data->info;

	clear_softint(1 << irq);
	if (call_data->wait) {
		/* let initiator proceed only after completion */
		func(info);
		atomic_inc(&call_data->finished);
	} else {
		/* let initiator proceed after getting data */
		atomic_inc(&call_data->finished);
		func(info);
	}
}

static void tsb_sync(void *info)
{
	struct trap_per_cpu *tp = &trap_block[raw_smp_processor_id()];
	struct mm_struct *mm = info;

	/* It is not valid to test "current->active_mm == mm" here.
	 *
	 * The value of "current" is not changed atomically with
	 * switch_mm().  But that's OK, we just need to check the
	 * current cpu's trap block PGD physical address.
	 */
	if (tp->pgd_paddr == __pa(mm->pgd))
		tsb_context_switch(mm);
}

void smp_tsb_sync(struct mm_struct *mm)
{
	smp_call_function_mask(tsb_sync, mm, 0, 1, mm->cpu_vm_mask);
}

extern unsigned long xcall_flush_tlb_mm;
extern unsigned long xcall_flush_tlb_pending;
extern unsigned long xcall_flush_tlb_kernel_range;
extern unsigned long xcall_report_regs;
extern unsigned long xcall_receive_signal;
extern unsigned long xcall_new_mmu_context_version;
#ifdef DCACHE_ALIASING_POSSIBLE
extern unsigned long xcall_flush_dcache_page_cheetah;
#endif
extern unsigned long xcall_flush_dcache_page_spitfire;

#ifdef CONFIG_DEBUG_DCFLUSH
extern atomic_t dcpage_flushes;
extern atomic_t dcpage_flushes_xcall;
#endif
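
/* Flush one page out of the local cpu's D-cache (when D-cache aliasing
 * is possible); on Spitfire, pages with a mapping also get their I-cache
 * lines flushed.  The smp_flush_dcache_page_impl() and
 * flush_dcache_page_all() wrappers below use this for the local cpu and
 * a cross call for remote cpus.
 */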
static __inline__ void __local_flush_dcache_page(struct page *page)
{
#ifdef DCACHE_ALIASING_POSSIBLE
	__flush_dcache_page(page_address(page),
			    ((tlb_type == spitfire) &&
			     page_mapping(page) != NULL));
#else
	if (page_mapping(page) != NULL &&
	    tlb_type == spitfire)
		__flush_icache_page(__pa(page_address(page)));
#endif
}

void smp_flush_dcache_page_impl(struct page *page, int cpu)
{
	cpumask_t mask = cpumask_of_cpu(cpu);
	int this_cpu;

	if (tlb_type == hypervisor)
		return;

#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes);
#endif

	this_cpu = get_cpu();

	if (cpu == this_cpu) {
		__local_flush_dcache_page(page);
	} else if (cpu_online(cpu)) {
		void *pg_addr = page_address(page);
		u64 data0;

		if (tlb_type == spitfire) {
			data0 =
				((u64)&xcall_flush_dcache_page_spitfire);
			if (page_mapping(page) != NULL)
				data0 |= ((u64)1 << 32);
			spitfire_xcall_deliver(data0,
					       __pa(pg_addr),
					       (u64) pg_addr,
					       mask);
		} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
#ifdef DCACHE_ALIASING_POSSIBLE
			data0 =
				((u64)&xcall_flush_dcache_page_cheetah);
			cheetah_xcall_deliver(data0,
					      __pa(pg_addr),
					      0, mask);
#endif
		}
#ifdef CONFIG_DEBUG_DCFLUSH
		atomic_inc(&dcpage_flushes_xcall);
#endif
	}

	put_cpu();
}

void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
{
	void *pg_addr = page_address(page);
	cpumask_t mask = cpu_online_map;
	u64 data0;
	int this_cpu;

	if (tlb_type == hypervisor)
		return;

	this_cpu = get_cpu();

	cpu_clear(this_cpu, mask);

#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes);
#endif
	if (cpus_empty(mask))
		goto flush_self;
	if (tlb_type == spitfire) {
		data0 = ((u64)&xcall_flush_dcache_page_spitfire);
		if (page_mapping(page) != NULL)
			data0 |= ((u64)1 << 32);
		spitfire_xcall_deliver(data0,
				       __pa(pg_addr),
				       (u64) pg_addr,
				       mask);
	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
#ifdef DCACHE_ALIASING_POSSIBLE
		data0 = ((u64)&xcall_flush_dcache_page_cheetah);
		cheetah_xcall_deliver(data0,
				      __pa(pg_addr),
				      0, mask);
#endif
	}
#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes_xcall);
#endif
flush_self:
	__local_flush_dcache_page(page);

	put_cpu();
}

static void __smp_receive_signal_mask(cpumask_t mask)
{
	smp_cross_call_masked(&xcall_receive_signal, 0, 0, 0, mask);
}

void smp_receive_signal(int cpu)
{
	cpumask_t mask = cpumask_of_cpu(cpu);

	if (cpu_online(cpu))
		__smp_receive_signal_mask(mask);
}

void smp_receive_signal_client(int irq, struct pt_regs *regs)
{
	clear_softint(1 << irq);
}

void smp_new_mmu_context_version_client(int irq, struct pt_regs *regs)
{
	struct mm_struct *mm;
	unsigned long flags;

	clear_softint(1 << irq);

	/* See if we need to allocate a new TLB context because
	 * the version of the one we are using is now out of date.
	 */
	mm = current->active_mm;
	if (unlikely(!mm || (mm == &init_mm)))
		return;

	spin_lock_irqsave(&mm->context.lock, flags);

	if (unlikely(!CTX_VALID(mm->context)))
		get_new_mmu_context(mm);

	spin_unlock_irqrestore(&mm->context.lock, flags);

	load_secondary_context(mm);
	__flush_tlb_mm(CTX_HWBITS(mm->context),
		       SECONDARY_CONTEXT);
}

void smp_new_mmu_context_version(void)
{
	smp_cross_call(&xcall_new_mmu_context_version, 0, 0, 0);
}

void smp_report_regs(void)
{
	smp_cross_call(&xcall_report_regs, 0, 0, 0);
}

/* We know that the window frames of the user have been flushed
 * to the stack before we get here because all callers of us
 * are flush_tlb_*() routines, and these run after flush_cache_*()
 * which performs the flushw.
 *
 * The SMP TLB coherency scheme we use works as follows:
 *
 * 1) mm->cpu_vm_mask is a bit mask of which cpus an address
 *    space has (potentially) executed on, this is the heuristic
 *    we use to avoid doing cross calls.
 *
 *    Also, for flushing from kswapd and also for clones, we
 *    use cpu_vm_mask as the list of cpus to make run the TLB.
 *
 * 2) TLB context numbers are shared globally across all processors
 *    in the system, this allows us to play several games to avoid
 *    cross calls.
 *
 *    One invariant is that when a cpu switches to a process, and
 *    that process's tsk->active_mm->cpu_vm_mask does not have the
 *    current cpu's bit set, that tlb context is flushed locally.
 *
 *    If the address space is non-shared (ie. mm->count == 1) we avoid
 *    cross calls when we want to flush the currently running process's
 *    tlb state.  This is done by clearing all cpu bits except the current
 *    processor's in current->active_mm->cpu_vm_mask and performing the
 *    flush locally only.  This will force any subsequent cpus which run
 *    this task to flush the context from the local tlb if the process
 *    migrates to another cpu (again).
 *
 * 3) For shared address spaces (threads) and swapping we bite the
 *    bullet for most cases and perform the cross call (but only to
 *    the cpus listed in cpu_vm_mask).
 *
 * The performance gain from "optimizing" away the cross call for threads is
 * questionable (in theory the big win for threads is the massive sharing of
 * address space state across processors).
 */

/* This currently is only used by the hugetlb arch pre-fault
 * hook on UltraSPARC-III+ and later when changing the pagesize
 * bits of the context register for an address space.
 */
void smp_flush_tlb_mm(struct mm_struct *mm)
{
	u32 ctx = CTX_HWBITS(mm->context);
	int cpu = get_cpu();

	if (atomic_read(&mm->mm_users) == 1) {
		mm->cpu_vm_mask = cpumask_of_cpu(cpu);
		goto local_flush_and_out;
	}

	smp_cross_call_masked(&xcall_flush_tlb_mm,
			      ctx, 0, 0,
			      mm->cpu_vm_mask);

local_flush_and_out:
	__flush_tlb_mm(ctx, SECONDARY_CONTEXT);

	put_cpu();
}

void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long *vaddrs)
{
	u32 ctx = CTX_HWBITS(mm->context);
	int cpu = get_cpu();

	if (mm == current->active_mm && atomic_read(&mm->mm_users) == 1)
		mm->cpu_vm_mask = cpumask_of_cpu(cpu);
	else
		smp_cross_call_masked(&xcall_flush_tlb_pending,
				      ctx, nr, (unsigned long) vaddrs,
				      mm->cpu_vm_mask);

	__flush_tlb_pending(ctx, nr, vaddrs);

	put_cpu();
}

void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	start &= PAGE_MASK;
	end = PAGE_ALIGN(end);
	if (start != end) {
		smp_cross_call(&xcall_flush_tlb_kernel_range,
			       0, start, end);

		__flush_tlb_kernel_range(start, end);
	}
}

/* CPU capture. */
/* #define CAPTURE_DEBUG */
extern unsigned long xcall_capture;

static atomic_t smp_capture_depth = ATOMIC_INIT(0);
static atomic_t smp_capture_registry = ATOMIC_INIT(0);
static unsigned long penguins_are_doing_time;
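
/* smp_capture()/smp_release() park all other online cpus in
 * smp_penguin_jailcell() via the xcall_capture cross call, e.g. while
 * the PROM is made active.  Captures nest via smp_capture_depth and
 * only the outermost capture/release pair does the real work.
 */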
void smp_capture(void)
{
	int result = atomic_add_ret(1, &smp_capture_depth);

	if (result == 1) {
		int ncpus = num_online_cpus();

#ifdef CAPTURE_DEBUG
		printk("CPU[%d]: Sending penguins to jail...",
		       smp_processor_id());
#endif
		penguins_are_doing_time = 1;
		membar_storestore_loadstore();
		atomic_inc(&smp_capture_registry);
		smp_cross_call(&xcall_capture, 0, 0, 0);
		while (atomic_read(&smp_capture_registry) != ncpus)
			rmb();
#ifdef CAPTURE_DEBUG
		printk("done\n");
#endif
	}
}

void smp_release(void)
{
	if (atomic_dec_and_test(&smp_capture_depth)) {
#ifdef CAPTURE_DEBUG
		printk("CPU[%d]: Giving pardon to "
		       "imprisoned penguins\n",
		       smp_processor_id());
#endif
		penguins_are_doing_time = 0;
		membar_storeload_storestore();
		atomic_dec(&smp_capture_registry);
	}
}

/* Imprisoned penguins run with %pil == 15, but PSTATE_IE set, so they
 * can service tlb flush xcalls...
 */
extern void prom_world(int);

void smp_penguin_jailcell(int irq, struct pt_regs *regs)
{
	clear_softint(1 << irq);

	preempt_disable();

	__asm__ __volatile__("flushw");
	prom_world(1);
	atomic_inc(&smp_capture_registry);
	membar_storeload_storestore();
	while (penguins_are_doing_time)
		rmb();
	atomic_dec(&smp_capture_registry);
	prom_world(0);

	preempt_enable();
}

/* /proc/profile writes can call this, don't __init it please. */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
}

void __devinit smp_prepare_boot_cpu(void)
{
}
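
/* Build cpu_core_map[] and cpu_sibling_map[] from the per-cpu core_id
 * and proc_id values: cpus sharing a core_id end up in each other's
 * core map, cpus sharing a proc_id in each other's sibling map.  A cpu
 * with core_id 0 or proc_id -1 gets only itself in the respective map.
 */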
void __devinit smp_fill_in_sib_core_maps(void)
{
	unsigned int i;

	for_each_present_cpu(i) {
		unsigned int j;

		cpus_clear(cpu_core_map[i]);
		if (cpu_data(i).core_id == 0) {
			cpu_set(i, cpu_core_map[i]);
			continue;
		}

		for_each_present_cpu(j) {
			if (cpu_data(i).core_id ==
			    cpu_data(j).core_id)
				cpu_set(j, cpu_core_map[i]);
		}
	}

	for_each_present_cpu(i) {
		unsigned int j;

		cpus_clear(cpu_sibling_map[i]);
		if (cpu_data(i).proc_id == -1) {
			cpu_set(i, cpu_sibling_map[i]);
			continue;
		}

		for_each_present_cpu(j) {
			if (cpu_data(i).proc_id ==
			    cpu_data(j).proc_id)
				cpu_set(j, cpu_sibling_map[i]);
		}
	}
}

int __cpuinit __cpu_up(unsigned int cpu)
{
	int ret = smp_boot_one_cpu(cpu);

	if (!ret) {
		cpu_set(cpu, smp_commenced_mask);
		while (!cpu_isset(cpu, cpu_online_map))
			mb();
		if (!cpu_isset(cpu, cpu_online_map)) {
			ret = -ENODEV;
		} else {
			/* On SUN4V, writes to %tick and %stick are
			 * not allowed.
			 */
			if (tlb_type != hypervisor)
				smp_synchronize_one_tick(cpu);
		}
	}
	return ret;
}

#ifdef CONFIG_HOTPLUG_CPU
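
/* Final resting place of an offlined cpu: on sun4v the per-cpu mondo
 * and error queues are torn down (queue length zero) first, then the
 * cpu drops out of smp_commenced_mask, masks interrupts and spins in a
 * barrier() loop forever.
 */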
void cpu_play_dead(void)
{
	int cpu = smp_processor_id();
	unsigned long pstate;

	idle_task_exit();

	if (tlb_type == hypervisor) {
		struct trap_per_cpu *tb = &trap_block[cpu];

		sun4v_cpu_qconf(HV_CPU_QUEUE_CPU_MONDO,
				tb->cpu_mondo_pa, 0);
		sun4v_cpu_qconf(HV_CPU_QUEUE_DEVICE_MONDO,
				tb->dev_mondo_pa, 0);
		sun4v_cpu_qconf(HV_CPU_QUEUE_RES_ERROR,
				tb->resum_mondo_pa, 0);
		sun4v_cpu_qconf(HV_CPU_QUEUE_NONRES_ERROR,
				tb->nonresum_mondo_pa, 0);
	}

	cpu_clear(cpu, smp_commenced_mask);
	membar_safe("#Sync");

	local_irq_disable();

	__asm__ __volatile__(
		"rdpr %%pstate, %0\n\t"
		"wrpr %0, %1, %%pstate"
		: "=r" (pstate)
		: "i" (PSTATE_IE));

	while (1)
		barrier();
}
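
/* Take a cpu out of service: remove it from the core and sibling maps,
 * reset its cached topology ids, mark it offline under call_lock, and
 * let fixup_irqs() migrate interrupts away before it goes dead.
 */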
int __cpu_disable(void)
{
	int cpu = smp_processor_id();
	cpuinfo_sparc *c;
	int i;

	for_each_cpu_mask(i, cpu_core_map[cpu])
		cpu_clear(cpu, cpu_core_map[i]);
	cpus_clear(cpu_core_map[cpu]);

	for_each_cpu_mask(i, cpu_sibling_map[cpu])
		cpu_clear(cpu, cpu_sibling_map[i]);
	cpus_clear(cpu_sibling_map[cpu]);

	c = &cpu_data(cpu);

	c->core_id = 0;
	c->proc_id = -1;

	spin_lock(&call_lock);
	cpu_clear(cpu, cpu_online_map);
	spin_unlock(&call_lock);

	smp_wmb();

	/* Make sure no interrupts point to this cpu. */
	fixup_irqs();

	local_irq_enable();
	mdelay(1);
	local_irq_disable();

	return 0;
}

void __cpu_die(unsigned int cpu)
{
	int i;

	for (i = 0; i < 100; i++) {
		smp_rmb();
		if (!cpu_isset(cpu, smp_commenced_mask))
			break;
		msleep(100);
	}
	if (cpu_isset(cpu, smp_commenced_mask)) {
		printk(KERN_ERR "CPU %u didn't die...\n", cpu);
	} else {
#if defined(CONFIG_SUN_LDOMS)
		unsigned long hv_err;
		int limit = 100;

		do {
			hv_err = sun4v_cpu_stop(cpu);
			if (hv_err == HV_EOK) {
				cpu_clear(cpu, cpu_present_map);
				break;
			}
		} while (--limit > 0);
		if (limit <= 0) {
			printk(KERN_ERR "sun4v_cpu_stop() fails err=%lu\n",
			       hv_err);
		}
#endif
	}
}
#endif

void __init smp_cpus_done(unsigned int max_cpus)
{
}

void smp_send_reschedule(int cpu)
{
	smp_receive_signal(cpu);
}

/* This is a nop because we capture all other cpus
 * anyways when making the PROM active.
 */
void smp_send_stop(void)
{
}

unsigned long __per_cpu_base __read_mostly;
unsigned long __per_cpu_shift __read_mostly;

EXPORT_SYMBOL(__per_cpu_base);
EXPORT_SYMBOL(__per_cpu_shift);
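
/* Allocate NR_CPUS power-of-two sized copies of the per-cpu data
 * section from bootmem, record the base and shift used by
 * __per_cpu_offset(), and point the boot cpu's local offset (%g5) at
 * its own copy.
 */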
void __init real_setup_per_cpu_areas(void)
{
	unsigned long goal, size, i;
	char *ptr;

	/* Copy section for each CPU (we discard the original) */
	goal = PERCPU_ENOUGH_ROOM;
	__per_cpu_shift = PAGE_SHIFT;
	for (size = PAGE_SIZE; size < goal; size <<= 1UL)
		__per_cpu_shift++;

	ptr = alloc_bootmem_pages(size * NR_CPUS);

	__per_cpu_base = ptr - __per_cpu_start;

	for (i = 0; i < NR_CPUS; i++, ptr += size)
		memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);

	/* Setup %g5 for the boot cpu. */
	__local_per_cpu_offset = __per_cpu_offset(smp_processor_id());
}