smp_32.c

/*
 *	Intel SMP support routines.
 *
 *	(c) 1995 Alan Cox, Building #3 <alan@redhat.com>
 *	(c) 1998-99, 2000 Ingo Molnar <mingo@redhat.com>
 *
 *	This code is released under the GNU General Public License version 2 or
 *	later.
 */
#include <linux/init.h>

#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/kernel_stat.h>
#include <linux/mc146818rtc.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
#include <linux/cpu.h>
#include <linux/module.h>

#include <asm/mtrr.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <mach_apic.h>
/*
 *	Some notes on x86 processor bugs affecting SMP operation:
 *
 *	Pentium, Pentium Pro, II, III (and all CPUs) have bugs.
 *	The Linux implications for SMP are handled as follows:
 *
 *	Pentium III / [Xeon]
 *		None of the E1AP-E3AP errata are visible to the user.
 *
 *	E1AP.	see PII A1AP
 *	E2AP.	see PII A2AP
 *	E3AP.	see PII A3AP
 *
 *	Pentium II / [Xeon]
 *		None of the A1AP-A3AP errata are visible to the user.
 *
 *	A1AP.	see PPro 1AP
 *	A2AP.	see PPro 2AP
 *	A3AP.	see PPro 7AP
 *
 *	Pentium Pro
 *		None of the 1AP-9AP errata are visible to the normal user,
 *		except for the occasional delivery of a 'spurious interrupt'
 *		as trap #15. This is very rare and a non-problem.
 *
 *	1AP.	Linux maps the APIC as non-cacheable
 *	2AP.	worked around in hardware
 *	3AP.	fixed in C0 and above steppings microcode update.
 *		Linux does not use excessive STARTUP_IPIs.
 *	4AP.	worked around in hardware
 *	5AP.	symmetric IO mode (normal Linux operation) not affected.
 *		'noapic' mode has vector 0xf filled out properly.
 *	6AP.	'noapic' mode might be affected - fixed in later steppings
 *	7AP.	We do not assume writes to the LVT deasserting IRQs
 *	8AP.	We do not enable low power mode (deep sleep) during MP bootup
 *	9AP.	We do not use mixed mode
 *
 *	Pentium
 *		There is a marginal case where REP MOVS on 100MHz SMP
 *	machines with B stepping processors can fail. XXX should provide
 *	an L1cache=Writethrough or L1cache=off option.
 *
 *		B stepping CPUs may hang. There are hardware workarounds
 *	for this. We warn about it in case your board doesn't have the
 *	workarounds. Basically that's so I can tell anyone with a B stepping
 *	CPU and SMP problems "tough".
 *
 *	Specific items [From Pentium Processor Specification Update]
 *
 *	1AP.	Linux doesn't use remote read
 *	2AP.	Linux doesn't trust APIC errors
 *	3AP.	We work around this
 *	4AP.	Linux never generates 3 interrupts of the same priority
 *		to cause a lost local interrupt.
 *	5AP.	Remote read is never used
 *	6AP.	not affected - worked around in hardware
 *	7AP.	not affected - worked around in hardware
 *	8AP.	worked around in hardware - we get explicit CS errors if not
 *	9AP.	only 'noapic' mode affected. Might generate spurious
 *		interrupts; we log only the first one and count the
 *		rest silently.
 *	10AP.	not affected - worked around in hardware
 *	11AP.	Linux reads the APIC between writes to avoid this, as per
 *		the documentation. Make sure you preserve this as it affects
 *		the C stepping chips too.
 *	12AP.	not affected - worked around in hardware
 *	13AP.	not affected - worked around in hardware
 *	14AP.	we always deassert INIT during bootup
 *	15AP.	not affected - worked around in hardware
 *	16AP.	not affected - worked around in hardware
 *	17AP.	not affected - worked around in hardware
 *	18AP.	not affected - worked around in hardware
 *	19AP.	not affected - worked around in BIOS
 *
 *	If this sounds worrying, believe me these bugs are either ___RARE___
 *	or are signal timing bugs worked around in hardware, and there's
 *	essentially nothing of note from the C stepping onwards.
 */
DEFINE_PER_CPU(struct tlb_state, cpu_tlbstate) ____cacheline_aligned = { &init_mm, 0, };

/*
 * The following functions deal with sending IPIs between CPUs.
 *
 * We use 'broadcast', CPU->CPU IPIs and self-IPIs too.
 */
static inline int __prepare_ICR(unsigned int shortcut, int vector)
{
	unsigned int icr = shortcut | APIC_DEST_LOGICAL;

	switch (vector) {
	default:
		icr |= APIC_DM_FIXED | vector;
		break;
	case NMI_VECTOR:
		icr |= APIC_DM_NMI;
		break;
	}
	return icr;
}

static inline int __prepare_ICR2(unsigned int mask)
{
	return SET_APIC_DEST_FIELD(mask);
}
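
/*
 * Illustrative sketch (not part of the original logic): __prepare_ICR()
 * builds the low half of the Interrupt Command Register and
 * __prepare_ICR2() the destination (high) half. Sending a fixed-vector
 * IPI to the CPUs in logical mask 0x03 would, schematically, look like:
 *
 *	unsigned int high = __prepare_ICR2(0x03);
 *	unsigned int low  = __prepare_ICR(0, CALL_FUNCTION_VECTOR);
 *	apic_write_around(APIC_ICR2, high);	-- destination field first
 *	apic_write_around(APIC_ICR, low);	-- this write sends the IPI
 *
 * which is exactly the sequence __send_IPI_dest_field() below performs.
 */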
void __send_IPI_shortcut(unsigned int shortcut, int vector)
{
	/*
	 * Subtle. In the case of the 'never do double writes' workaround
	 * we have to lock out interrupts to be safe. As we don't care
	 * about the value read we use an atomic rmw access to avoid costly
	 * cli/sti. Otherwise we use an even cheaper single atomic write
	 * to the APIC.
	 */
	unsigned int cfg;

	/*
	 * Wait for idle.
	 */
	apic_wait_icr_idle();

	/*
	 * No need to touch the target chip field
	 */
	cfg = __prepare_ICR(shortcut, vector);

	/*
	 * Send the IPI. The write to APIC_ICR fires this off.
	 */
	apic_write_around(APIC_ICR, cfg);
}
void fastcall send_IPI_self(int vector)
{
	__send_IPI_shortcut(APIC_DEST_SELF, vector);
}
/*
 * This is used to send an IPI with no shorthand notation (the destination is
 * specified in bits 56 to 63 of the ICR).
 */
static inline void __send_IPI_dest_field(unsigned long mask, int vector)
{
	unsigned long cfg;

	/*
	 * Wait for idle.
	 */
	if (unlikely(vector == NMI_VECTOR))
		safe_apic_wait_icr_idle();
	else
		apic_wait_icr_idle();

	/*
	 * prepare target chip field
	 */
	cfg = __prepare_ICR2(mask);
	apic_write_around(APIC_ICR2, cfg);

	/*
	 * program the ICR
	 */
	cfg = __prepare_ICR(0, vector);

	/*
	 * Send the IPI. The write to APIC_ICR fires this off.
	 */
	apic_write_around(APIC_ICR, cfg);
}
/*
 * This is only used on smaller machines.
 */
void send_IPI_mask_bitmask(cpumask_t cpumask, int vector)
{
	unsigned long mask = cpus_addr(cpumask)[0];
	unsigned long flags;

	local_irq_save(flags);
	WARN_ON(mask & ~cpus_addr(cpu_online_map)[0]);
	__send_IPI_dest_field(mask, vector);
	local_irq_restore(flags);
}
void send_IPI_mask_sequence(cpumask_t mask, int vector)
{
	unsigned long flags;
	unsigned int query_cpu;

	/*
	 * Hack. The clustered APIC addressing mode doesn't allow us to send
	 * to an arbitrary mask, so we do a unicast to each CPU instead.
	 * This should be modified to do 1 message per cluster ID - mbligh
	 */
	local_irq_save(flags);
	for (query_cpu = 0; query_cpu < NR_CPUS; ++query_cpu) {
		if (cpu_isset(query_cpu, mask)) {
			__send_IPI_dest_field(cpu_to_logical_apicid(query_cpu),
					      vector);
		}
	}
	local_irq_restore(flags);
}
#include <mach_ipi.h> /* must come after the send_IPI functions above for inlining */
/*
 * Smarter SMP flushing macros.
 *	c/o Linus Torvalds.
 *
 * These mean you can really definitely utterly forget about
 * writing to user space from interrupts. (It's not allowed anyway.)
 *
 * Optimizations Manfred Spraul <manfred@colorfullife.com>
 */

static cpumask_t flush_cpumask;
static struct mm_struct *flush_mm;
static unsigned long flush_va;
static DEFINE_SPINLOCK(tlbstate_lock);
/*
 * We cannot call mmdrop() because we are in interrupt context,
 * instead update mm->cpu_vm_mask.
 *
 * We need to reload %cr3 since the page tables may be going
 * away from under us..
 */
void leave_mm(unsigned long cpu)
{
	if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK)
		BUG();
	cpu_clear(cpu, per_cpu(cpu_tlbstate, cpu).active_mm->cpu_vm_mask);
	load_cr3(swapper_pg_dir);
}
/*
 * The flush IPI assumes that a thread switch happens in this order:
 * [cpu0: the cpu that switches]
 * 1) switch_mm() either 1a) or 1b)
 * 1a) thread switch to a different mm
 * 1a1) cpu_clear(cpu, old_mm->cpu_vm_mask);
 *	Stop ipi delivery for the old mm. This is not synchronized with
 *	the other cpus, but smp_invalidate_interrupt ignores flush ipis
 *	for the wrong mm, and in the worst case we perform a superfluous
 *	tlb flush.
 * 1a2) set cpu_tlbstate to TLBSTATE_OK
 *	Now the smp_invalidate_interrupt won't call leave_mm if cpu0
 *	was in lazy tlb mode.
 * 1a3) update cpu_tlbstate[].active_mm
 *	Now cpu0 accepts tlb flushes for the new mm.
 * 1a4) cpu_set(cpu, new_mm->cpu_vm_mask);
 *	Now the other cpus will send tlb flush ipis.
 * 1a5) change cr3.
 * 1b) thread switch without mm change
 *	cpu_tlbstate[].active_mm is correct, cpu0 already handles
 *	flush ipis.
 * 1b1) set cpu_tlbstate to TLBSTATE_OK
 * 1b2) test_and_set the cpu bit in cpu_vm_mask.
 *	Atomically set the bit [other cpus will start sending flush ipis],
 *	and test the bit.
 * 1b3) if the bit was 0: leave_mm was called, flush the tlb.
 * 2) switch %esp, i.e. current
 *
 * The interrupt must handle 2 special cases:
 * - cr3 is changed before %esp, i.e. it cannot use current->{active_,}mm.
 * - the cpu performs speculative tlb reads, i.e. even if the cpu only
 *   runs in kernel space, the cpu could load tlb entries for user space
 *   pages.
 *
 * The good news is that cpu_tlbstate is local to each cpu, no
 * write/read ordering problems. A condensed sketch of the 1a) path
 * follows.
 */
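
/*
 * Illustrative sketch only: the 1a) path above, paraphrased as code. The
 * real implementation lives in switch_mm() in asm/mmu_context.h; the names
 * and ordering here are assumptions drawn purely from the comment above.
 *
 *	cpu_clear(cpu, old_mm->cpu_vm_mask);		-- 1a1: stop flush ipis
 *	per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_OK;	-- 1a2: leave lazy mode
 *	per_cpu(cpu_tlbstate, cpu).active_mm = new_mm;	-- 1a3: accept new flushes
 *	cpu_set(cpu, new_mm->cpu_vm_mask);		-- 1a4: start flush ipis
 *	load_cr3(new_mm->pgd);				-- 1a5: switch page tables
 */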
/*
 * TLB flush IPI:
 *
 * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
 * 2) Leave the mm if we are in the lazy tlb mode.
 */
fastcall void smp_invalidate_interrupt(struct pt_regs *regs)
{
	unsigned long cpu;

	cpu = get_cpu();

	if (!cpu_isset(cpu, flush_cpumask))
		goto out;
	/*
	 * This was a BUG() but until someone can quote me the
	 * line from the intel manual that guarantees an IPI to
	 * multiple CPUs is retried _only_ on the erroring CPUs
	 * it's staying as a return
	 *
	 * BUG();
	 */
	if (flush_mm == per_cpu(cpu_tlbstate, cpu).active_mm) {
		if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK) {
			if (flush_va == TLB_FLUSH_ALL)
				local_flush_tlb();
			else
				__flush_tlb_one(flush_va);
		} else
			leave_mm(cpu);
	}
	ack_APIC_irq();
	smp_mb__before_clear_bit();
	cpu_clear(cpu, flush_cpumask);
	smp_mb__after_clear_bit();
out:
	put_cpu_no_resched();
	__get_cpu_var(irq_stat).irq_tlb_count++;
}
void native_flush_tlb_others(const cpumask_t *cpumaskp, struct mm_struct *mm,
			     unsigned long va)
{
	cpumask_t cpumask = *cpumaskp;

	/*
	 * A couple of (to be removed) sanity checks:
	 *
	 * - current CPU must not be in mask
	 * - mask must exist :)
	 */
	BUG_ON(cpus_empty(cpumask));
	BUG_ON(cpu_isset(smp_processor_id(), cpumask));
	BUG_ON(!mm);

#ifdef CONFIG_HOTPLUG_CPU
	/* If a CPU which we ran on has gone down, OK. */
	cpus_and(cpumask, cpumask, cpu_online_map);
	if (unlikely(cpus_empty(cpumask)))
		return;
#endif

	/*
	 * I'm not happy about this global shared spinlock in the
	 * MM hot path, but we'll see how contended it is.
	 * AK: x86-64 has a faster method that could be ported.
	 */
	spin_lock(&tlbstate_lock);

	flush_mm = mm;
	flush_va = va;
	cpus_or(flush_cpumask, cpumask, flush_cpumask);
	/*
	 * We have to send the IPI only to
	 * CPUs affected.
	 */
	send_IPI_mask(cpumask, INVALIDATE_TLB_VECTOR);

	while (!cpus_empty(flush_cpumask))
		/* nothing. lockup detection does not belong here */
		cpu_relax();

	flush_mm = NULL;
	flush_va = 0;
	spin_unlock(&tlbstate_lock);
}
void flush_tlb_current_task(void)
{
	struct mm_struct *mm = current->mm;
	cpumask_t cpu_mask;

	preempt_disable();
	cpu_mask = mm->cpu_vm_mask;
	cpu_clear(smp_processor_id(), cpu_mask);

	local_flush_tlb();
	if (!cpus_empty(cpu_mask))
		flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);
	preempt_enable();
}
void flush_tlb_mm(struct mm_struct *mm)
{
	cpumask_t cpu_mask;

	preempt_disable();
	cpu_mask = mm->cpu_vm_mask;
	cpu_clear(smp_processor_id(), cpu_mask);

	if (current->active_mm == mm) {
		if (current->mm)
			local_flush_tlb();
		else
			leave_mm(smp_processor_id());
	}
	if (!cpus_empty(cpu_mask))
		flush_tlb_others(cpu_mask, mm, TLB_FLUSH_ALL);

	preempt_enable();
}
void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
{
	struct mm_struct *mm = vma->vm_mm;
	cpumask_t cpu_mask;

	preempt_disable();
	cpu_mask = mm->cpu_vm_mask;
	cpu_clear(smp_processor_id(), cpu_mask);

	if (current->active_mm == mm) {
		if (current->mm)
			__flush_tlb_one(va);
		else
			leave_mm(smp_processor_id());
	}

	if (!cpus_empty(cpu_mask))
		flush_tlb_others(cpu_mask, mm, va);

	preempt_enable();
}
EXPORT_SYMBOL(flush_tlb_page);
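
/*
 * Illustrative usage sketch (assumed, not part of the original file): after
 * modifying a PTE for a mapped page, a caller would shoot down the stale
 * translation on every CPU using the mm with something like:
 *
 *	set_pte(ptep, new_pte);		-- hypothetical PTE update
 *	flush_tlb_page(vma, address);	-- invalidate the old entry everywhere
 */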
static void do_flush_tlb_all(void *info)
{
	unsigned long cpu = smp_processor_id();

	__flush_tlb_all();
	if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_LAZY)
		leave_mm(cpu);
}

void flush_tlb_all(void)
{
	on_each_cpu(do_flush_tlb_all, NULL, 1, 1);
}
/*
 * This function sends a 'reschedule' IPI to another CPU.
 * It goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */
static void native_smp_send_reschedule(int cpu)
{
	WARN_ON(cpu_is_offline(cpu));
	send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR);
}
/*
 * Structure and data for smp_call_function(). This is designed to minimise
 * static memory requirements. It also looks cleaner.
 */
static DEFINE_SPINLOCK(call_lock);

struct call_data_struct {
	void (*func) (void *info);	/* function to run on each target CPU */
	void *info;			/* opaque argument passed to func */
	atomic_t started;		/* # of CPUs that have picked up the call */
	atomic_t finished;		/* # of CPUs that have completed func */
	int wait;			/* whether the caller waits on 'finished' */
};

void lock_ipi_call_lock(void)
{
	spin_lock_irq(&call_lock);
}

void unlock_ipi_call_lock(void)
{
	spin_unlock_irq(&call_lock);
}

static struct call_data_struct *call_data;
static void __smp_call_function(void (*func) (void *info), void *info,
				int nonatomic, int wait)
{
	struct call_data_struct data;
	int cpus = num_online_cpus() - 1;

	if (!cpus)
		return;

	data.func = func;
	data.info = info;
	atomic_set(&data.started, 0);
	data.wait = wait;
	if (wait)
		atomic_set(&data.finished, 0);

	call_data = &data;
	mb();

	/* Send a message to all other CPUs and wait for them to respond */
	send_IPI_allbutself(CALL_FUNCTION_VECTOR);

	/* Wait for response */
	while (atomic_read(&data.started) != cpus)
		cpu_relax();

	if (wait)
		while (atomic_read(&data.finished) != cpus)
			cpu_relax();
}
/**
 * smp_call_function_mask(): Run a function on a set of other CPUs.
 * @mask: The set of cpus to run on. Must not include the current cpu.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed on other CPUs.
 *
 * Returns 0 on success, else a negative status code.
 *
 * If @wait is true, then returns once @func has returned; otherwise
 * it returns just before the target cpu calls @func.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
static int
native_smp_call_function_mask(cpumask_t mask,
			      void (*func)(void *), void *info,
			      int wait)
{
	struct call_data_struct data;
	cpumask_t allbutself;
	int cpus;

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	/* Holding any lock stops cpus from going down. */
	spin_lock(&call_lock);

	allbutself = cpu_online_map;
	cpu_clear(smp_processor_id(), allbutself);

	cpus_and(mask, mask, allbutself);
	cpus = cpus_weight(mask);

	if (!cpus) {
		spin_unlock(&call_lock);
		return 0;
	}

	data.func = func;
	data.info = info;
	atomic_set(&data.started, 0);
	data.wait = wait;
	if (wait)
		atomic_set(&data.finished, 0);

	call_data = &data;
	mb();

	/* Send a message to other CPUs */
	if (cpus_equal(mask, allbutself))
		send_IPI_allbutself(CALL_FUNCTION_VECTOR);
	else
		send_IPI_mask(mask, CALL_FUNCTION_VECTOR);

	/* Wait for response */
	while (atomic_read(&data.started) != cpus)
		cpu_relax();

	if (wait)
		while (atomic_read(&data.finished) != cpus)
			cpu_relax();
	spin_unlock(&call_lock);

	return 0;
}
static void stop_this_cpu(void *dummy)
{
	local_irq_disable();
	/*
	 * Remove this CPU:
	 */
	cpu_clear(smp_processor_id(), cpu_online_map);
	disable_local_APIC();
	if (cpu_data(smp_processor_id()).hlt_works_ok)
		for (;;)
			halt();
	for (;;);
}
/*
 * This function calls the 'stop' function on all other CPUs in the system.
 */
static void native_smp_send_stop(void)
{
	/* Don't deadlock on the call lock in panic */
	int nolock = !spin_trylock(&call_lock);
	unsigned long flags;

	local_irq_save(flags);
	__smp_call_function(stop_this_cpu, NULL, 0, 0);
	if (!nolock)
		spin_unlock(&call_lock);
	disable_local_APIC();
	local_irq_restore(flags);
}
/*
 * Reschedule call back. Nothing to do,
 * all the work is done automatically when
 * we return from the interrupt.
 */
fastcall void smp_reschedule_interrupt(struct pt_regs *regs)
{
	ack_APIC_irq();
	__get_cpu_var(irq_stat).irq_resched_count++;
}
fastcall void smp_call_function_interrupt(struct pt_regs *regs)
{
	void (*func) (void *info) = call_data->func;
	void *info = call_data->info;
	int wait = call_data->wait;

	ack_APIC_irq();
	/*
	 * Notify initiating CPU that I've grabbed the data and am
	 * about to execute the function
	 */
	mb();
	atomic_inc(&call_data->started);
	/*
	 * At this point the info structure may be out of scope unless wait==1
	 */
	irq_enter();
	(*func)(info);
	__get_cpu_var(irq_stat).irq_call_count++;
	irq_exit();

	if (wait) {
		mb();
		atomic_inc(&call_data->finished);
	}
}
static int convert_apicid_to_cpu(int apic_id)
{
	int i;

	for (i = 0; i < NR_CPUS; i++) {
		if (per_cpu(x86_cpu_to_apicid, i) == apic_id)
			return i;
	}
	return -1;
}

int safe_smp_processor_id(void)
{
	int apicid, cpuid;

	if (!boot_cpu_has(X86_FEATURE_APIC))
		return 0;

	apicid = hard_smp_processor_id();
	if (apicid == BAD_APICID)
		return 0;

	cpuid = convert_apicid_to_cpu(apicid);

	return cpuid >= 0 ? cpuid : 0;
}
struct smp_ops smp_ops = {
	.smp_prepare_boot_cpu = native_smp_prepare_boot_cpu,
	.smp_prepare_cpus = native_smp_prepare_cpus,
	.cpu_up = native_cpu_up,
	.smp_cpus_done = native_smp_cpus_done,

	.smp_send_stop = native_smp_send_stop,
	.smp_send_reschedule = native_smp_send_reschedule,
	.smp_call_function_mask = native_smp_call_function_mask,
};

int smp_call_function_mask(cpumask_t mask, void (*func) (void *info),
			   void *info, int wait)
{
	return smp_ops.smp_call_function_mask(mask, func, info, wait);
}
EXPORT_SYMBOL(smp_call_function_mask);
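
/*
 * Illustrative usage sketch (assumed, not part of the original file):
 * running a fast, non-blocking function on a set of other CPUs and waiting
 * for completion. The callback and mask below are hypothetical.
 *
 *	static void drain_local_counters(void *info)	-- runs in IPI context,
 *	{						--  so it must not sleep
 *		...
 *	}
 *
 *	cpumask_t mask = cpu_online_map;
 *	cpu_clear(smp_processor_id(), mask);	-- never include ourselves
 *	smp_call_function_mask(mask, drain_local_counters, NULL, 1);
 *
 * With wait == 1 the call returns only after every targeted CPU has run the
 * callback (the 'finished' handshake above); with wait == 0 it returns as
 * soon as each target has grabbed the call_data ('started').
 */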