/* smp.c: Sparc64 SMP support.
 *
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/threads.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/cache.h>
#include <linux/jiffies.h>
#include <linux/profile.h>
#include <linux/bootmem.h>

#include <asm/head.h>
#include <asm/ptrace.h>
#include <asm/atomic.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/cpudata.h>
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/oplib.h>
#include <asm/uaccess.h>
#include <asm/timer.h>
#include <asm/starfire.h>
#include <asm/tlb.h>
#include <asm/sections.h>

extern void calibrate_delay(void);

/* Please don't make this stuff initdata!!!  --DaveM */
static unsigned char boot_cpu_id;

cpumask_t cpu_online_map __read_mostly = CPU_MASK_NONE;
cpumask_t phys_cpu_present_map __read_mostly = CPU_MASK_NONE;
static cpumask_t smp_commenced_mask;
static cpumask_t cpu_callout_map;

void smp_info(struct seq_file *m)
{
	int i;

	seq_printf(m, "State:\n");
	for (i = 0; i < NR_CPUS; i++) {
		if (cpu_online(i))
			seq_printf(m, "CPU%d:\t\tonline\n", i);
	}
}

void smp_bogo(struct seq_file *m)
{
	int i;

	for (i = 0; i < NR_CPUS; i++)
		if (cpu_online(i))
			seq_printf(m,
				   "Cpu%dBogo\t: %lu.%02lu\n"
				   "Cpu%dClkTck\t: %016lx\n",
				   i, cpu_data(i).udelay_val / (500000/HZ),
				   (cpu_data(i).udelay_val / (5000/HZ)) % 100,
				   i, cpu_data(i).clock_tick);
}

void __init smp_store_cpu_info(int id)
{
	int cpu_node, def;

	/* multiplier and counter set by
	 * smp_setup_percpu_timer()
	 */
	cpu_data(id).udelay_val = loops_per_jiffy;

	cpu_find_by_mid(id, &cpu_node);
	cpu_data(id).clock_tick = prom_getintdefault(cpu_node,
						     "clock-frequency", 0);

	def = ((tlb_type == hypervisor) ? (8 * 1024) : (16 * 1024));
	cpu_data(id).dcache_size = prom_getintdefault(cpu_node, "dcache-size",
						      def);

	def = 32;
	cpu_data(id).dcache_line_size =
		prom_getintdefault(cpu_node, "dcache-line-size", def);

	def = 16 * 1024;
	cpu_data(id).icache_size = prom_getintdefault(cpu_node, "icache-size",
						      def);

	def = 32;
	cpu_data(id).icache_line_size =
		prom_getintdefault(cpu_node, "icache-line-size", def);

	def = ((tlb_type == hypervisor) ?
	       (3 * 1024 * 1024) :
	       (4 * 1024 * 1024));
	cpu_data(id).ecache_size = prom_getintdefault(cpu_node, "ecache-size",
						      def);

	def = 64;
	cpu_data(id).ecache_line_size =
		prom_getintdefault(cpu_node, "ecache-line-size", def);

	printk("CPU[%d]: Caches "
	       "D[sz(%d):line_sz(%d)] "
	       "I[sz(%d):line_sz(%d)] "
	       "E[sz(%d):line_sz(%d)]\n",
	       id,
	       cpu_data(id).dcache_size, cpu_data(id).dcache_line_size,
	       cpu_data(id).icache_size, cpu_data(id).icache_line_size,
	       cpu_data(id).ecache_size, cpu_data(id).ecache_line_size);
}

static void smp_setup_percpu_timer(void);

static volatile unsigned long callin_flag = 0;
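
/* Entry point for a secondary cpu once the PROM has dropped it into
 * sparc64_cpu_startup: set up this cpu's per-cpu area and tick timer,
 * calibrate delays, then spin until the boot cpu marks us in
 * smp_commenced_mask and we can declare ourselves online.
 */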
void __init smp_callin(void)
{
	int cpuid = hard_smp_processor_id();

	__local_per_cpu_offset = __per_cpu_offset(cpuid);

	if (tlb_type == hypervisor)
		sun4v_ktsb_register();

	__flush_tlb_all();

	smp_setup_percpu_timer();

	if (cheetah_pcache_forced_on)
		cheetah_enable_pcache();

	local_irq_enable();

	calibrate_delay();
	smp_store_cpu_info(cpuid);
	callin_flag = 1;
	__asm__ __volatile__("membar #Sync\n\t"
			     "flush %%g6" : : : "memory");

	/* Clear this or we will die instantly when we
	 * schedule back to this idler...
	 */
	current_thread_info()->new_child = 0;

	/* Attach to the address space of init_task. */
	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

	while (!cpu_isset(cpuid, smp_commenced_mask))
		rmb();

	cpu_set(cpuid, cpu_online_map);

	/* idle thread is expected to have preempt disabled */
	preempt_disable();
}

void cpu_panic(void)
{
	printk("CPU[%d]: Returns from cpu_idle!\n", smp_processor_id());
	panic("SMP bolixed\n");
}

static unsigned long current_tick_offset __read_mostly;

/* This tick register synchronization scheme is taken entirely from
 * the ia64 port, see arch/ia64/kernel/smpboot.c for details and credit.
 *
 * The only change I've made is to rework it so that the master
 * initiates the synchronization instead of the slave.  -DaveM
 */
#define MASTER		0
#define SLAVE		(SMP_CACHE_BYTES/sizeof(unsigned long))

#define NUM_ROUNDS	64	/* magic value */
#define NUM_ITERS	5	/* likewise */

static DEFINE_SPINLOCK(itc_sync_lock);
static unsigned long go[SLAVE + 1];

#define DEBUG_TICK_SYNC	0
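
/* One measurement round run on the slave: for NUM_ITERS iterations we
 * ping the master through go[MASTER], wait for it to echo its %tick
 * value back in go[SLAVE], and keep the sample with the smallest
 * round-trip.  The return value is the slave's tick at the round-trip
 * midpoint minus the master's tick.
 */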
static inline long get_delta (long *rt, long *master)
{
	unsigned long best_t0 = 0, best_t1 = ~0UL, best_tm = 0;
	unsigned long tcenter, t0, t1, tm;
	unsigned long i;

	for (i = 0; i < NUM_ITERS; i++) {
		t0 = tick_ops->get_tick();
		go[MASTER] = 1;
		membar_storeload();
		while (!(tm = go[SLAVE]))
			rmb();
		go[SLAVE] = 0;
		wmb();
		t1 = tick_ops->get_tick();

		if (t1 - t0 < best_t1 - best_t0)
			best_t0 = t0, best_t1 = t1, best_tm = tm;
	}

	*rt = best_t1 - best_t0;
	*master = best_tm - best_t0;

	/* average best_t0 and best_t1 without overflow: */
	tcenter = (best_t0/2 + best_t1/2);
	if (best_t0 % 2 + best_t1 % 2 == 2)
		tcenter++;
	return tcenter - best_tm;
}

void smp_synchronize_tick_client(void)
{
	long i, delta, adj, adjust_latency = 0, done = 0;
	unsigned long flags, rt, master_time_stamp, bound;
#if DEBUG_TICK_SYNC
	struct {
		long rt;	/* roundtrip time */
		long master;	/* master's timestamp */
		long diff;	/* difference between midpoint and master's timestamp */
		long lat;	/* estimate of itc adjustment latency */
	} t[NUM_ROUNDS];
#endif

	go[MASTER] = 1;

	while (go[MASTER])
		rmb();

	local_irq_save(flags);
	{
		for (i = 0; i < NUM_ROUNDS; i++) {
			delta = get_delta(&rt, &master_time_stamp);
			if (delta == 0) {
				done = 1;	/* let's lock on to this... */
				bound = rt;
			}

			if (!done) {
				if (i > 0) {
					adjust_latency += -delta;
					adj = -delta + adjust_latency/4;
				} else
					adj = -delta;

				tick_ops->add_tick(adj, current_tick_offset);
			}
#if DEBUG_TICK_SYNC
			t[i].rt = rt;
			t[i].master = master_time_stamp;
			t[i].diff = delta;
			t[i].lat = adjust_latency/4;
#endif
		}
	}
	local_irq_restore(flags);

#if DEBUG_TICK_SYNC
	for (i = 0; i < NUM_ROUNDS; i++)
		printk("rt=%5ld master=%5ld diff=%5ld adjlat=%5ld\n",
		       t[i].rt, t[i].master, t[i].diff, t[i].lat);
#endif

	printk(KERN_INFO "CPU %d: synchronized TICK with master CPU "
	       "(last diff %ld cycles, maxerr %lu cycles)\n",
	       smp_processor_id(), delta, rt);
}

static void smp_start_sync_tick_client(int cpu);

static void smp_synchronize_one_tick(int cpu)
{
	unsigned long flags, i;

	go[MASTER] = 0;

	smp_start_sync_tick_client(cpu);

	/* wait for client to be ready */
	while (!go[MASTER])
		rmb();

	/* now let the client proceed into his loop */
	go[MASTER] = 0;
	membar_storeload();

	spin_lock_irqsave(&itc_sync_lock, flags);
	{
		for (i = 0; i < NUM_ROUNDS*NUM_ITERS; i++) {
			while (!go[MASTER])
				rmb();
			go[MASTER] = 0;
			wmb();
			go[SLAVE] = tick_ops->get_tick();
			membar_storeload();
		}
	}
	spin_unlock_irqrestore(&itc_sync_lock, flags);
}

extern void sun4v_init_mondo_queues(int use_bootmem, int cpu, int alloc, int load);

extern unsigned long sparc64_cpu_startup;

/* The OBP cpu startup callback truncates the 3rd arg cookie to
 * 32-bits (I think) so to be safe we have it read the pointer
 * contained here so we work on >4GB machines. -DaveM
 */
static struct thread_info *cpu_new_thread = NULL;
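
/* Boot one secondary cpu: fork an idle task for it, hand its
 * thread_info to the new cpu via cpu_new_thread, start it through the
 * PROM (by cpuid on sun4v, by prom node otherwise), then poll
 * callin_flag until the cpu reports in from smp_callin().
 */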
static int __devinit smp_boot_one_cpu(unsigned int cpu)
{
	unsigned long entry =
		(unsigned long)(&sparc64_cpu_startup);
	unsigned long cookie =
		(unsigned long)(&cpu_new_thread);
	struct task_struct *p;
	int timeout, ret;

	p = fork_idle(cpu);
	callin_flag = 0;
	cpu_new_thread = task_thread_info(p);
	cpu_set(cpu, cpu_callout_map);

	if (tlb_type == hypervisor) {
		/* Alloc the mondo queues, cpu will load them. */
		sun4v_init_mondo_queues(0, cpu, 1, 0);

		prom_startcpu_cpuid(cpu, entry, cookie);
	} else {
		int cpu_node;

		cpu_find_by_mid(cpu, &cpu_node);
		prom_startcpu(cpu_node, entry, cookie);
	}

	for (timeout = 0; timeout < 5000000; timeout++) {
		if (callin_flag)
			break;
		udelay(100);
	}

	if (callin_flag) {
		ret = 0;
	} else {
		printk("Processor %d is stuck.\n", cpu);
		cpu_clear(cpu, cpu_callout_map);
		ret = -ENODEV;
	}
	cpu_new_thread = NULL;

	return ret;
}
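
/* Deliver one cross-call mondo on pre-Cheetah (Spitfire/Starfire)
 * cpus: with interrupts disabled, write the three data words plus the
 * dispatch trigger into the UDB interrupt dispatch ASI, then spin on
 * the dispatch status register and retry if the target NACKs us.
 */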
static void spitfire_xcall_helper(u64 data0, u64 data1, u64 data2, u64 pstate, unsigned long cpu)
{
	u64 result, target;
	int stuck, tmp;

	if (this_is_starfire) {
		/* map to real upaid */
		cpu = (((cpu & 0x3c) << 1) |
		       ((cpu & 0x40) >> 4) |
		       (cpu & 0x3));
	}

	target = (cpu << 14) | 0x70;
again:
	/* Ok, this is the real Spitfire Errata #54.
	 * One must read back from a UDB internal register
	 * after writes to the UDB interrupt dispatch, but
	 * before the membar Sync for that write.
	 * So we use the high UDB control register (ASI 0x7f,
	 * ADDR 0x20) for the dummy read. -DaveM
	 */
	tmp = 0x40;
	__asm__ __volatile__(
	"wrpr %1, %2, %%pstate\n\t"
	"stxa %4, [%0] %3\n\t"
	"stxa %5, [%0+%8] %3\n\t"
	"add %0, %8, %0\n\t"
	"stxa %6, [%0+%8] %3\n\t"
	"membar #Sync\n\t"
	"stxa %%g0, [%7] %3\n\t"
	"membar #Sync\n\t"
	"mov 0x20, %%g1\n\t"
	"ldxa [%%g1] 0x7f, %%g0\n\t"
	"membar #Sync"
	: "=r" (tmp)
	: "r" (pstate), "i" (PSTATE_IE), "i" (ASI_INTR_W),
	  "r" (data0), "r" (data1), "r" (data2), "r" (target),
	  "r" (0x10), "0" (tmp)
	: "g1");

	/* NOTE: PSTATE_IE is still clear. */
	stuck = 100000;
	do {
		__asm__ __volatile__("ldxa [%%g0] %1, %0"
				     : "=r" (result)
				     : "i" (ASI_INTR_DISPATCH_STAT));
		if (result == 0) {
			__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
					     : : "r" (pstate));
			return;
		}
		stuck -= 1;
		if (stuck == 0)
			break;
	} while (result & 0x1);
	__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
			     : : "r" (pstate));
	if (stuck == 0) {
		printk("CPU[%d]: mondo stuckage result[%016lx]\n",
		       smp_processor_id(), result);
	} else {
		udelay(2);
		goto again;
	}
}

static __inline__ void spitfire_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
{
	u64 pstate;
	int i;

	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
	for_each_cpu_mask(i, mask)
		spitfire_xcall_helper(data0, data1, data2, pstate, i);
}

/* Cheetah now allows to send the whole 64-bytes of data in the interrupt
 * packet, but we have no use for that.  However we do take advantage of
 * the new pipelining feature (ie. dispatch to multiple cpus simultaneously).
 */
static void cheetah_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
{
	u64 pstate, ver;
	int nack_busy_id, is_jbus;

	if (cpus_empty(mask))
		return;

	/* Unfortunately, someone at Sun had the brilliant idea to make the
	 * busy/nack fields hard-coded by ITID number for this Ultra-III
	 * derivative processor.
	 */
	__asm__ ("rdpr %%ver, %0" : "=r" (ver));
	is_jbus = ((ver >> 32) == __JALAPENO_ID ||
		   (ver >> 32) == __SERRANO_ID);

	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));

retry:
	__asm__ __volatile__("wrpr %0, %1, %%pstate\n\t"
			     : : "r" (pstate), "i" (PSTATE_IE));

	/* Setup the dispatch data registers. */
	__asm__ __volatile__("stxa %0, [%3] %6\n\t"
			     "stxa %1, [%4] %6\n\t"
			     "stxa %2, [%5] %6\n\t"
			     "membar #Sync\n\t"
			     : /* no outputs */
			     : "r" (data0), "r" (data1), "r" (data2),
			       "r" (0x40), "r" (0x50), "r" (0x60),
			       "i" (ASI_INTR_W));

	nack_busy_id = 0;
	{
		int i;

		for_each_cpu_mask(i, mask) {
			u64 target = (i << 14) | 0x70;

			if (!is_jbus)
				target |= (nack_busy_id << 24);
			__asm__ __volatile__(
				"stxa %%g0, [%0] %1\n\t"
				"membar #Sync\n\t"
				: /* no outputs */
				: "r" (target), "i" (ASI_INTR_W));
			nack_busy_id++;
		}
	}
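
	/* Each dispatch slot owns a bit pair in the dispatch status
	 * register: the even bit reports BUSY and the odd bit reports
	 * NAK, which is why the polling below masks with
	 * 0x5555555555555555 and checks (0x2 << slot) per target.
	 */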
	/* Now, poll for completion. */
	{
		u64 dispatch_stat;
		long stuck;

		stuck = 100000 * nack_busy_id;
		do {
			__asm__ __volatile__("ldxa [%%g0] %1, %0"
					     : "=r" (dispatch_stat)
					     : "i" (ASI_INTR_DISPATCH_STAT));
			if (dispatch_stat == 0UL) {
				__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
						     : : "r" (pstate));
				return;
			}
			if (!--stuck)
				break;
		} while (dispatch_stat & 0x5555555555555555UL);

		__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
				     : : "r" (pstate));

		if ((dispatch_stat & ~(0x5555555555555555UL)) == 0) {
			/* Busy bits will not clear, continue instead
			 * of freezing up on this cpu.
			 */
			printk("CPU[%d]: mondo stuckage result[%016lx]\n",
			       smp_processor_id(), dispatch_stat);
		} else {
			int i, this_busy_nack = 0;

			/* Delay some random time with interrupts enabled
			 * to prevent deadlock.
			 */
			udelay(2 * nack_busy_id);

			/* Clear out the mask bits for cpus which did not
			 * NACK us.
			 */
			for_each_cpu_mask(i, mask) {
				u64 check_mask;

				if (is_jbus)
					check_mask = (0x2UL << (2*i));
				else
					check_mask = (0x2UL <<
						      this_busy_nack);
				if ((dispatch_stat & check_mask) == 0)
					cpu_clear(i, mask);
				this_busy_nack += 2;
			}

			goto retry;
		}
	}
}

/* Multi-cpu list version. */
static void hypervisor_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
{
	struct trap_per_cpu *tb;
	u16 *cpu_list;
	u64 *mondo;
	cpumask_t error_mask;
	unsigned long flags, status;
	int cnt, retries, this_cpu, prev_sent, i;

	/* We have to do this whole thing with interrupts fully disabled.
	 * Otherwise if we send an xcall from interrupt context it will
	 * corrupt both our mondo block and cpu list state.
	 *
	 * One consequence of this is that we cannot use timeout mechanisms
	 * that depend upon interrupts being delivered locally.  So, for
	 * example, we cannot sample jiffies and expect it to advance.
	 *
	 * Fortunately, udelay() uses %stick/%tick so we can use that.
	 */
	local_irq_save(flags);

	this_cpu = smp_processor_id();
	tb = &trap_block[this_cpu];

	mondo = __va(tb->cpu_mondo_block_pa);
	mondo[0] = data0;
	mondo[1] = data1;
	mondo[2] = data2;
	wmb();

	cpu_list = __va(tb->cpu_list_pa);

	/* Setup the initial cpu list. */
	cnt = 0;
	for_each_cpu_mask(i, mask)
		cpu_list[cnt++] = i;

	cpus_clear(error_mask);
	retries = 0;
	prev_sent = 0;
	do {
		int forward_progress, n_sent;

		status = sun4v_cpu_mondo_send(cnt,
					      tb->cpu_list_pa,
					      tb->cpu_mondo_block_pa);

		/* HV_EOK means all cpus received the xcall, we're done. */
		if (likely(status == HV_EOK))
			break;

		/* First, see if we made any forward progress.
		 *
		 * The hypervisor indicates successful sends by setting
		 * cpu list entries to the value 0xffff.
		 */
		n_sent = 0;
		for (i = 0; i < cnt; i++) {
			if (likely(cpu_list[i] == 0xffff))
				n_sent++;
		}

		forward_progress = 0;
		if (n_sent > prev_sent)
			forward_progress = 1;

		prev_sent = n_sent;

		/* If we get a HV_ECPUERROR, then one or more of the cpus
		 * in the list are in error state.  Use the cpu_state()
		 * hypervisor call to find out which cpus are in error state.
		 */
		if (unlikely(status == HV_ECPUERROR)) {
			for (i = 0; i < cnt; i++) {
				long err;
				u16 cpu;

				cpu = cpu_list[i];
				if (cpu == 0xffff)
					continue;

				err = sun4v_cpu_state(cpu);
				if (err >= 0 &&
				    err == HV_CPU_STATE_ERROR) {
					cpu_list[i] = 0xffff;
					cpu_set(cpu, error_mask);
				}
			}
		} else if (unlikely(status != HV_EWOULDBLOCK))
			goto fatal_mondo_error;

		/* Don't bother rewriting the CPU list, just leave the
		 * 0xffff and non-0xffff entries in there and the
		 * hypervisor will do the right thing.
		 *
		 * Only advance timeout state if we didn't make any
		 * forward progress.
		 */
		if (unlikely(!forward_progress)) {
			if (unlikely(++retries > 10000))
				goto fatal_mondo_timeout;

			/* Delay a little bit to let other cpus catch up
			 * on their cpu mondo queue work.
			 */
			udelay(2 * cnt);
		}
	} while (1);

	local_irq_restore(flags);

	if (unlikely(!cpus_empty(error_mask)))
		goto fatal_mondo_cpu_error;

	return;

fatal_mondo_cpu_error:
	printk(KERN_CRIT "CPU[%d]: SUN4V mondo cpu error, some target cpus "
	       "were in error state\n",
	       this_cpu);
	printk(KERN_CRIT "CPU[%d]: Error mask [ ", this_cpu);
	for_each_cpu_mask(i, error_mask)
		printk("%d ", i);
	printk("]\n");
	return;

fatal_mondo_timeout:
	local_irq_restore(flags);
	printk(KERN_CRIT "CPU[%d]: SUN4V mondo timeout, no forward "
	       " progress after %d retries.\n",
	       this_cpu, retries);
	goto dump_cpu_list_and_out;

fatal_mondo_error:
	local_irq_restore(flags);
	printk(KERN_CRIT "CPU[%d]: Unexpected SUN4V mondo error %lu\n",
	       this_cpu, status);
	printk(KERN_CRIT "CPU[%d]: Args were cnt(%d) cpulist_pa(%lx) "
	       "mondo_block_pa(%lx)\n",
	       this_cpu, cnt, tb->cpu_list_pa, tb->cpu_mondo_block_pa);

dump_cpu_list_and_out:
	printk(KERN_CRIT "CPU[%d]: CPU list [ ", this_cpu);
	for (i = 0; i < cnt; i++)
		printk("%u ", cpu_list[i]);
	printk("]\n");
}

/* Send cross call to all processors mentioned in MASK
 * except self.
 */
static void smp_cross_call_masked(unsigned long *func, u32 ctx, u64 data1, u64 data2, cpumask_t mask)
{
	u64 data0 = (((u64)ctx)<<32 | (((u64)func) & 0xffffffff));
	int this_cpu = get_cpu();

	cpus_and(mask, mask, cpu_online_map);
	cpu_clear(this_cpu, mask);

	if (tlb_type == spitfire)
		spitfire_xcall_deliver(data0, data1, data2, mask);
	else if (tlb_type == cheetah || tlb_type == cheetah_plus)
		cheetah_xcall_deliver(data0, data1, data2, mask);
	else
		hypervisor_xcall_deliver(data0, data1, data2, mask);

	/* NOTE: Caller runs local copy on master. */

	put_cpu();
}

extern unsigned long xcall_sync_tick;

static void smp_start_sync_tick_client(int cpu)
{
	cpumask_t mask = cpumask_of_cpu(cpu);

	smp_cross_call_masked(&xcall_sync_tick,
			      0, 0, 0, mask);
}

/* Send cross call to all processors except self. */
#define smp_cross_call(func, ctx, data1, data2) \
	smp_cross_call_masked(func, ctx, data1, data2, cpu_online_map)

struct call_data_struct {
	void (*func) (void *info);
	void *info;
	atomic_t finished;
	int wait;
};

static DEFINE_SPINLOCK(call_lock);
static struct call_data_struct *call_data;

extern unsigned long xcall_call_function;

/*
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
static int smp_call_function_mask(void (*func)(void *info), void *info,
				  int nonatomic, int wait, cpumask_t mask)
{
	struct call_data_struct data;
	int cpus;
	long timeout;

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	data.func = func;
	data.info = info;
	atomic_set(&data.finished, 0);
	data.wait = wait;

	spin_lock(&call_lock);

	cpu_clear(smp_processor_id(), mask);
	cpus = cpus_weight(mask);
	if (!cpus)
		goto out_unlock;

	call_data = &data;

	smp_cross_call_masked(&xcall_call_function, 0, 0, 0, mask);

	/*
	 * Wait for other cpus to complete function or at
	 * least snap the call data.
	 */
	timeout = 1000000;
	while (atomic_read(&data.finished) != cpus) {
		if (--timeout <= 0)
			goto out_timeout;
		barrier();
		udelay(1);
	}

out_unlock:
	spin_unlock(&call_lock);

	return 0;

out_timeout:
	spin_unlock(&call_lock);
	printk("XCALL: Remote cpus not responding, ncpus=%d finished=%d\n",
	       cpus, atomic_read(&data.finished));
	return 0;
}

int smp_call_function(void (*func)(void *info), void *info,
		      int nonatomic, int wait)
{
	return smp_call_function_mask(func, info, nonatomic, wait,
				      cpu_online_map);
}

void smp_call_function_client(int irq, struct pt_regs *regs)
{
	void (*func) (void *info) = call_data->func;
	void *info = call_data->info;

	clear_softint(1 << irq);
	if (call_data->wait) {
		/* let initiator proceed only after completion */
		func(info);
		atomic_inc(&call_data->finished);
	} else {
		/* let initiator proceed after getting data */
		atomic_inc(&call_data->finished);
		func(info);
	}
}

static void tsb_sync(void *info)
{
	struct mm_struct *mm = info;

	if (current->active_mm == mm)
		tsb_context_switch(mm);
}

void smp_tsb_sync(struct mm_struct *mm)
{
	smp_call_function_mask(tsb_sync, mm, 0, 1, mm->cpu_vm_mask);
}

extern unsigned long xcall_flush_tlb_mm;
extern unsigned long xcall_flush_tlb_pending;
extern unsigned long xcall_flush_tlb_kernel_range;
extern unsigned long xcall_report_regs;
extern unsigned long xcall_receive_signal;
extern unsigned long xcall_new_mmu_context_version;
#ifdef DCACHE_ALIASING_POSSIBLE
extern unsigned long xcall_flush_dcache_page_cheetah;
#endif
extern unsigned long xcall_flush_dcache_page_spitfire;

#ifdef CONFIG_DEBUG_DCFLUSH
extern atomic_t dcpage_flushes;
extern atomic_t dcpage_flushes_xcall;
#endif

static __inline__ void __local_flush_dcache_page(struct page *page)
{
#ifdef DCACHE_ALIASING_POSSIBLE
	__flush_dcache_page(page_address(page),
			    ((tlb_type == spitfire) &&
			     page_mapping(page) != NULL));
#else
	if (page_mapping(page) != NULL &&
	    tlb_type == spitfire)
		__flush_icache_page(__pa(page_address(page)));
#endif
}
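
/* Flush the D-cache copy of PAGE on cpu CPU, either locally or by
 * cross-calling that cpu.  The sun4v case returns early since no
 * software flush is performed there; cheetah only participates when
 * D-cache aliasing is possible.
 */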
void smp_flush_dcache_page_impl(struct page *page, int cpu)
{
	cpumask_t mask = cpumask_of_cpu(cpu);
	int this_cpu;

	if (tlb_type == hypervisor)
		return;

#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes);
#endif

	this_cpu = get_cpu();

	if (cpu == this_cpu) {
		__local_flush_dcache_page(page);
	} else if (cpu_online(cpu)) {
		void *pg_addr = page_address(page);
		u64 data0;

		if (tlb_type == spitfire) {
			data0 =
				((u64)&xcall_flush_dcache_page_spitfire);
			if (page_mapping(page) != NULL)
				data0 |= ((u64)1 << 32);
			spitfire_xcall_deliver(data0,
					       __pa(pg_addr),
					       (u64) pg_addr,
					       mask);
		} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
#ifdef DCACHE_ALIASING_POSSIBLE
			data0 =
				((u64)&xcall_flush_dcache_page_cheetah);
			cheetah_xcall_deliver(data0,
					      __pa(pg_addr),
					      0, mask);
#endif
		}
#ifdef CONFIG_DEBUG_DCFLUSH
		atomic_inc(&dcpage_flushes_xcall);
#endif
	}

	put_cpu();
}

void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
{
	void *pg_addr = page_address(page);
	cpumask_t mask = cpu_online_map;
	u64 data0;
	int this_cpu;

	if (tlb_type == hypervisor)
		return;

	this_cpu = get_cpu();

	cpu_clear(this_cpu, mask);

#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes);
#endif
	if (cpus_empty(mask))
		goto flush_self;
	if (tlb_type == spitfire) {
		data0 = ((u64)&xcall_flush_dcache_page_spitfire);
		if (page_mapping(page) != NULL)
			data0 |= ((u64)1 << 32);
		spitfire_xcall_deliver(data0,
				       __pa(pg_addr),
				       (u64) pg_addr,
				       mask);
	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
#ifdef DCACHE_ALIASING_POSSIBLE
		data0 = ((u64)&xcall_flush_dcache_page_cheetah);
		cheetah_xcall_deliver(data0,
				      __pa(pg_addr),
				      0, mask);
#endif
	}
#ifdef CONFIG_DEBUG_DCFLUSH
	atomic_inc(&dcpage_flushes_xcall);
#endif
flush_self:
	__local_flush_dcache_page(page);

	put_cpu();
}

static void __smp_receive_signal_mask(cpumask_t mask)
{
	smp_cross_call_masked(&xcall_receive_signal, 0, 0, 0, mask);
}

void smp_receive_signal(int cpu)
{
	cpumask_t mask = cpumask_of_cpu(cpu);

	if (cpu_online(cpu))
		__smp_receive_signal_mask(mask);
}

void smp_receive_signal_client(int irq, struct pt_regs *regs)
{
	clear_softint(1 << irq);
}

void smp_new_mmu_context_version_client(int irq, struct pt_regs *regs)
{
	struct mm_struct *mm;
	unsigned long flags;

	clear_softint(1 << irq);

	/* See if we need to allocate a new TLB context because
	 * the version of the one we are using is now out of date.
	 */
	mm = current->active_mm;
	if (unlikely(!mm || (mm == &init_mm)))
		return;

	spin_lock_irqsave(&mm->context.lock, flags);

	if (unlikely(!CTX_VALID(mm->context)))
		get_new_mmu_context(mm);

	spin_unlock_irqrestore(&mm->context.lock, flags);

	load_secondary_context(mm);
	__flush_tlb_mm(CTX_HWBITS(mm->context),
		       SECONDARY_CONTEXT);
}

void smp_new_mmu_context_version(void)
{
	smp_cross_call(&xcall_new_mmu_context_version, 0, 0, 0);
}

void smp_report_regs(void)
{
	smp_cross_call(&xcall_report_regs, 0, 0, 0);
}

/* We know that the window frames of the user have been flushed
 * to the stack before we get here because all callers of us
 * are flush_tlb_*() routines, and these run after flush_cache_*()
 * which performs the flushw.
 *
 * The SMP TLB coherency scheme we use works as follows:
 *
 * 1) mm->cpu_vm_mask is a bit mask of which cpus an address
 *    space has (potentially) executed on, this is the heuristic
 *    we use to avoid doing cross calls.
 *
 *    Also, for flushing from kswapd and also for clones, we
 *    use cpu_vm_mask as the list of cpus to make run the TLB.
 *
 * 2) TLB context numbers are shared globally across all processors
 *    in the system, this allows us to play several games to avoid
 *    cross calls.
 *
 *    One invariant is that when a cpu switches to a process, and
 *    that process's tsk->active_mm->cpu_vm_mask does not have the
 *    current cpu's bit set, that tlb context is flushed locally.
 *
 *    If the address space is non-shared (ie. mm->count == 1) we avoid
 *    cross calls when we want to flush the currently running process's
 *    tlb state.  This is done by clearing all cpu bits except the current
 *    processor's in current->active_mm->cpu_vm_mask and performing the
 *    flush locally only.  This will force any subsequent cpus which run
 *    this task to flush the context from the local tlb if the process
 *    migrates to another cpu (again).
 *
 * 3) For shared address spaces (threads) and swapping we bite the
 *    bullet for most cases and perform the cross call (but only to
 *    the cpus listed in cpu_vm_mask).
 *
 * The performance gain from "optimizing" away the cross call for threads is
 * questionable (in theory the big win for threads is the massive sharing of
 * address space state across processors).
 */

/* This currently is only used by the hugetlb arch pre-fault
 * hook on UltraSPARC-III+ and later when changing the pagesize
 * bits of the context register for an address space.
 */
void smp_flush_tlb_mm(struct mm_struct *mm)
{
	u32 ctx = CTX_HWBITS(mm->context);
	int cpu = get_cpu();

	if (atomic_read(&mm->mm_users) == 1) {
		mm->cpu_vm_mask = cpumask_of_cpu(cpu);
		goto local_flush_and_out;
	}

	smp_cross_call_masked(&xcall_flush_tlb_mm,
			      ctx, 0, 0,
			      mm->cpu_vm_mask);

local_flush_and_out:
	__flush_tlb_mm(ctx, SECONDARY_CONTEXT);

	put_cpu();
}

void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long *vaddrs)
{
	u32 ctx = CTX_HWBITS(mm->context);
	int cpu = get_cpu();

	if (mm == current->active_mm && atomic_read(&mm->mm_users) == 1)
		mm->cpu_vm_mask = cpumask_of_cpu(cpu);
	else
		smp_cross_call_masked(&xcall_flush_tlb_pending,
				      ctx, nr, (unsigned long) vaddrs,
				      mm->cpu_vm_mask);

	__flush_tlb_pending(ctx, nr, vaddrs);

	put_cpu();
}

void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	start &= PAGE_MASK;
	end = PAGE_ALIGN(end);
	if (start != end) {
		smp_cross_call(&xcall_flush_tlb_kernel_range,
			       0, start, end);

		__flush_tlb_kernel_range(start, end);
	}
}

/* CPU capture. */
/* #define CAPTURE_DEBUG */
extern unsigned long xcall_capture;

static atomic_t smp_capture_depth = ATOMIC_INIT(0);
static atomic_t smp_capture_registry = ATOMIC_INIT(0);
static unsigned long penguins_are_doing_time;
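
/* smp_capture() herds every other online cpu into
 * smp_penguin_jailcell() via xcall_capture and waits until they have
 * all checked in; smp_release() then lets them resume.  This is used
 * when dropping into the PROM so that no other cpu touches the kernel
 * in the meantime.
 */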
void smp_capture(void)
{
	int result = atomic_add_ret(1, &smp_capture_depth);

	if (result == 1) {
		int ncpus = num_online_cpus();

#ifdef CAPTURE_DEBUG
		printk("CPU[%d]: Sending penguins to jail...",
		       smp_processor_id());
#endif
		penguins_are_doing_time = 1;
		membar_storestore_loadstore();
		atomic_inc(&smp_capture_registry);
		smp_cross_call(&xcall_capture, 0, 0, 0);
		while (atomic_read(&smp_capture_registry) != ncpus)
			rmb();
#ifdef CAPTURE_DEBUG
		printk("done\n");
#endif
	}
}

void smp_release(void)
{
	if (atomic_dec_and_test(&smp_capture_depth)) {
#ifdef CAPTURE_DEBUG
		printk("CPU[%d]: Giving pardon to "
		       "imprisoned penguins\n",
		       smp_processor_id());
#endif
		penguins_are_doing_time = 0;
		membar_storeload_storestore();
		atomic_dec(&smp_capture_registry);
	}
}

/* Imprisoned penguins run with %pil == 15, but PSTATE_IE set, so they
 * can service tlb flush xcalls...
 */
extern void prom_world(int);

void smp_penguin_jailcell(int irq, struct pt_regs *regs)
{
	clear_softint(1 << irq);

	preempt_disable();

	__asm__ __volatile__("flushw");
	prom_world(1);
	atomic_inc(&smp_capture_registry);
	membar_storeload_storestore();
	while (penguins_are_doing_time)
		rmb();
	atomic_dec(&smp_capture_registry);
	prom_world(0);

	preempt_enable();
}

#define prof_multiplier(__cpu)	cpu_data(__cpu).multiplier
#define prof_counter(__cpu)	cpu_data(__cpu).counter
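
/* Per-cpu level-14 tick handler: run the profiling hook, account a
 * timer tick every prof_multiplier() interrupts (the boot cpu also
 * drives the global timer_tick_interrupt()), then program the next
 * %tick compare value.  Anything that is not the tick softint gets
 * handed straight to handler_irq().
 */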
void smp_percpu_timer_interrupt(struct pt_regs *regs)
{
	unsigned long compare, tick, pstate;
	int cpu = smp_processor_id();
	int user = user_mode(regs);

	/*
	 * Check for level 14 softint.
	 */
	{
		unsigned long tick_mask = tick_ops->softint_mask;

		if (!(get_softint() & tick_mask)) {
			extern void handler_irq(int, struct pt_regs *);

			handler_irq(14, regs);
			return;
		}
		clear_softint(tick_mask);
	}

	do {
		profile_tick(CPU_PROFILING, regs);
		if (!--prof_counter(cpu)) {
			irq_enter();

			if (cpu == boot_cpu_id) {
				kstat_this_cpu.irqs[0]++;
				timer_tick_interrupt(regs);
			}

			update_process_times(user);

			irq_exit();

			prof_counter(cpu) = prof_multiplier(cpu);
		}

		/* Guarantee that the following sequences execute
		 * uninterrupted.
		 */
		__asm__ __volatile__("rdpr %%pstate, %0\n\t"
				     "wrpr %0, %1, %%pstate"
				     : "=r" (pstate)
				     : "i" (PSTATE_IE));

		compare = tick_ops->add_compare(current_tick_offset);
		tick = tick_ops->get_tick();

		/* Restore PSTATE_IE. */
		__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
				     : /* no outputs */
				     : "r" (pstate));
	} while (time_after_eq(tick, compare));
}

static void __init smp_setup_percpu_timer(void)
{
	int cpu = smp_processor_id();
	unsigned long pstate;

	prof_counter(cpu) = prof_multiplier(cpu) = 1;

	/* Guarantee that the following sequences execute
	 * uninterrupted.
	 */
	__asm__ __volatile__("rdpr %%pstate, %0\n\t"
			     "wrpr %0, %1, %%pstate"
			     : "=r" (pstate)
			     : "i" (PSTATE_IE));

	tick_ops->init_tick(current_tick_offset);

	/* Restore PSTATE_IE. */
	__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
			     : /* no outputs */
			     : "r" (pstate));
}

void __init smp_tick_init(void)
{
	boot_cpu_id = hard_smp_processor_id();
	current_tick_offset = timer_tick_offset;

	cpu_set(boot_cpu_id, cpu_online_map);
	prof_counter(boot_cpu_id) = prof_multiplier(boot_cpu_id) = 1;
}

/* /proc/profile writes can call this, don't __init it please. */
static DEFINE_SPINLOCK(prof_setup_lock);

int setup_profiling_timer(unsigned int multiplier)
{
	unsigned long flags;
	int i;

	if ((!multiplier) || (timer_tick_offset / multiplier) < 1000)
		return -EINVAL;

	spin_lock_irqsave(&prof_setup_lock, flags);
	for (i = 0; i < NR_CPUS; i++)
		prof_multiplier(i) = multiplier;
	current_tick_offset = (timer_tick_offset / multiplier);
	spin_unlock_irqrestore(&prof_setup_lock, flags);

	return 0;
}

/* Constrain the number of cpus to max_cpus. */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	if (num_possible_cpus() > max_cpus) {
		int instance, mid;

		instance = 0;
		while (!cpu_find_by_instance(instance, NULL, &mid)) {
			if (mid != boot_cpu_id) {
				cpu_clear(mid, phys_cpu_present_map);
				if (num_possible_cpus() <= max_cpus)
					break;
			}
			instance++;
		}
	}

	smp_store_cpu_info(boot_cpu_id);
}

/* Set this up early so that things like the scheduler can init
 * properly.  We use the same cpu mask for both the present and
 * possible cpu map.
 */
void __init smp_setup_cpu_possible_map(void)
{
	int instance, mid;

	instance = 0;
	while (!cpu_find_by_instance(instance, NULL, &mid)) {
		if (mid < NR_CPUS)
			cpu_set(mid, phys_cpu_present_map);
		instance++;
	}
}

void __devinit smp_prepare_boot_cpu(void)
{
	int cpu = hard_smp_processor_id();

	if (cpu >= NR_CPUS) {
		prom_printf("Serious problem, boot cpu id >= NR_CPUS\n");
		prom_halt();
	}

	current_thread_info()->cpu = cpu;
	__local_per_cpu_offset = __per_cpu_offset(cpu);

	cpu_set(smp_processor_id(), cpu_online_map);
	cpu_set(smp_processor_id(), phys_cpu_present_map);
}
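
/* Called by the generic cpu bringup code for each secondary cpu: boot
 * it, wait for it to show up in cpu_online_map, and then (except on
 * sun4v, where %tick/%stick cannot be written) synchronize its tick
 * register with the boot cpu's.
 */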
int __devinit __cpu_up(unsigned int cpu)
{
	int ret = smp_boot_one_cpu(cpu);

	if (!ret) {
		cpu_set(cpu, smp_commenced_mask);
		while (!cpu_isset(cpu, cpu_online_map))
			mb();
		if (!cpu_isset(cpu, cpu_online_map)) {
			ret = -ENODEV;
		} else {
			/* On SUN4V, writes to %tick and %stick are
			 * not allowed.
			 */
			if (tlb_type != hypervisor)
				smp_synchronize_one_tick(cpu);
		}
	}
	return ret;
}

void __init smp_cpus_done(unsigned int max_cpus)
{
	unsigned long bogosum = 0;
	int i;

	for (i = 0; i < NR_CPUS; i++) {
		if (cpu_online(i))
			bogosum += cpu_data(i).udelay_val;
	}
	printk("Total of %ld processors activated "
	       "(%lu.%02lu BogoMIPS).\n",
	       (long) num_online_cpus(),
	       bogosum/(500000/HZ),
	       (bogosum/(5000/HZ))%100);
}

void smp_send_reschedule(int cpu)
{
	smp_receive_signal(cpu);
}

/* This is a nop because we capture all other cpus
 * anyways when making the PROM active.
 */
void smp_send_stop(void)
{
}

unsigned long __per_cpu_base __read_mostly;
unsigned long __per_cpu_shift __read_mostly;

EXPORT_SYMBOL(__per_cpu_base);
EXPORT_SYMBOL(__per_cpu_shift);
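
/* Carve the per-cpu data areas out of bootmem.  The per-cpu size is
 * rounded up to a power of two so that a cpu's area can be located
 * with a simple shift (__per_cpu_shift) from __per_cpu_base.
 */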
void __init setup_per_cpu_areas(void)
{
	unsigned long goal, size, i;
	char *ptr;

	/* Copy section for each CPU (we discard the original) */
	goal = ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES);
#ifdef CONFIG_MODULES
	if (goal < PERCPU_ENOUGH_ROOM)
		goal = PERCPU_ENOUGH_ROOM;
#endif
	__per_cpu_shift = 0;
	for (size = 1UL; size < goal; size <<= 1UL)
		__per_cpu_shift++;

	ptr = alloc_bootmem(size * NR_CPUS);

	__per_cpu_base = ptr - __per_cpu_start;

	for (i = 0; i < NR_CPUS; i++, ptr += size)
		memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
}