
/*
 *  linux/arch/m32r/kernel/smp.c
 *
 *  M32R SMP support routines.
 *
 *  Copyright (c) 2001, 2002  Hitoshi Yamamoto
 *
 *  Taken from i386 version.
 *    (c) 1995 Alan Cox, Building #3 <alan@redhat.com>
 *    (c) 1998-99, 2000 Ingo Molnar <mingo@redhat.com>
 *
 *  This code is released under the GNU General Public License version 2 or
 *  later.
 */

#undef DEBUG_SMP

#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/profile.h>
#include <linux/cpu.h>

#include <asm/cacheflush.h>
#include <asm/pgalloc.h>
#include <asm/atomic.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/m32r.h>
/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
/* Data structures and variables                                             */
/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/

/*
 * Structure and data for smp_call_function(). This is designed to minimise
 * static memory requirements. It also looks cleaner.
 */
static DEFINE_SPINLOCK(call_lock);

struct call_data_struct {
        void (*func) (void *info);
        void *info;
        atomic_t started;
        atomic_t finished;
        int wait;
} __attribute__ ((__aligned__(SMP_CACHE_BYTES)));

static struct call_data_struct *call_data;
/*
 * For flush_cache_all()
 */
static DEFINE_SPINLOCK(flushcache_lock);
static volatile unsigned long flushcache_cpumask = 0;

/*
 * For flush_tlb_others()
 */
static volatile cpumask_t flush_cpumask;
static struct mm_struct *flush_mm;
static struct vm_area_struct *flush_vma;
static volatile unsigned long flush_va;
static DEFINE_SPINLOCK(tlbstate_lock);
#define FLUSH_ALL 0xffffffff

DECLARE_PER_CPU(int, prof_multiplier);
DECLARE_PER_CPU(int, prof_old_multiplier);
DECLARE_PER_CPU(int, prof_counter);

extern spinlock_t ipi_lock[];
/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
/* Function Prototypes                                                        */
/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/

void smp_send_reschedule(int);
void smp_reschedule_interrupt(void);

void smp_flush_cache_all(void);
void smp_flush_cache_all_interrupt(void);

void smp_flush_tlb_all(void);
static void flush_tlb_all_ipi(void *);

void smp_flush_tlb_mm(struct mm_struct *);
void smp_flush_tlb_range(struct vm_area_struct *, unsigned long,
        unsigned long);
void smp_flush_tlb_page(struct vm_area_struct *, unsigned long);
static void flush_tlb_others(cpumask_t, struct mm_struct *,
        struct vm_area_struct *, unsigned long);

void smp_invalidate_interrupt(void);

void smp_send_stop(void);
static void stop_this_cpu(void *);

int smp_call_function(void (*) (void *), void *, int, int);
void smp_call_function_interrupt(void);

void smp_send_timer(void);
void smp_ipi_timer_interrupt(struct pt_regs *);
void smp_local_timer_interrupt(void);

void send_IPI_allbutself(int, int);
static void send_IPI_mask(cpumask_t, int, int);
unsigned long send_IPI_mask_phys(cpumask_t, int, int);
/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
/* Rescheduling request Routines                                              */
/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/

/*==========================================================================*
 * Name:         smp_send_reschedule
 *
 * Description:  This routine requests another CPU to execute rescheduling.
 *               1. Send 'RESCHEDULE_IPI' to the other CPU.
 *                  Request the other CPU to execute 'smp_reschedule_interrupt()'.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments:    cpu_id - Target CPU ID
 *
 * Returns:      void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
void smp_send_reschedule(int cpu_id)
{
        WARN_ON(cpu_is_offline(cpu_id));
        send_IPI_mask(cpumask_of_cpu(cpu_id), RESCHEDULE_IPI, 1);
}
/*==========================================================================*
 * Name:         smp_reschedule_interrupt
 *
 * Description:  This routine executes on the CPU which received
 *               'RESCHEDULE_IPI'.
 *               Rescheduling is processed at the exit of the interrupt
 *               operation.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments:    NONE
 *
 * Returns:      void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
void smp_reschedule_interrupt(void)
{
        /* nothing to do */
}
/*==========================================================================*
 * Name:         smp_flush_cache_all
 *
 * Description:  This routine sends an 'INVALIDATE_CACHE_IPI' to all other
 *               CPUs in the system.
 *
 * Born on Date: 2003-05-28
 *
 * Arguments:    NONE
 *
 * Returns:      void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
void smp_flush_cache_all(void)
{
        cpumask_t cpumask;
        unsigned long *mask;

        preempt_disable();
        cpumask = cpu_online_map;
        cpu_clear(smp_processor_id(), cpumask);
        spin_lock(&flushcache_lock);
        mask = cpus_addr(cpumask);
        atomic_set_mask(*mask, (atomic_t *)&flushcache_cpumask);
        send_IPI_mask(cpumask, INVALIDATE_CACHE_IPI, 0);
        _flush_cache_copyback_all();
        while (flushcache_cpumask)
                mb();
        spin_unlock(&flushcache_lock);
        preempt_enable();
}

void smp_flush_cache_all_interrupt(void)
{
        _flush_cache_copyback_all();
        clear_bit(smp_processor_id(), &flushcache_cpumask);
}
/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
/* TLB flush request Routines                                                 */
/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/

/*==========================================================================*
 * Name:         smp_flush_tlb_all
 *
 * Description:  This routine flushes all processes' TLBs.
 *               1. Request other CPUs to execute 'flush_tlb_all_ipi()'.
 *               2. Execute 'do_flush_tlb_all_local()'.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments:    NONE
 *
 * Returns:      void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
void smp_flush_tlb_all(void)
{
        unsigned long flags;

        preempt_disable();
        local_irq_save(flags);
        __flush_tlb_all();
        local_irq_restore(flags);
        smp_call_function(flush_tlb_all_ipi, NULL, 1, 1);
        preempt_enable();
}

/*==========================================================================*
 * Name:         flush_tlb_all_ipi
 *
 * Description:  This routine flushes all local TLBs.
 *               1. Execute 'do_flush_tlb_all_local()'.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments:    *info - not used
 *
 * Returns:      void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
static void flush_tlb_all_ipi(void *info)
{
        __flush_tlb_all();
}
/*==========================================================================*
 * Name:         smp_flush_tlb_mm
 *
 * Description:  This routine flushes the specified mm context's TLBs.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments:    *mm - a pointer to the mm struct whose TLB is to be flushed
 *
 * Returns:      void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
void smp_flush_tlb_mm(struct mm_struct *mm)
{
        int cpu_id;
        cpumask_t cpu_mask;
        unsigned long *mmc;
        unsigned long flags;

        preempt_disable();
        cpu_id = smp_processor_id();
        mmc = &mm->context[cpu_id];
        cpu_mask = mm->cpu_vm_mask;
        cpu_clear(cpu_id, cpu_mask);

        if (*mmc != NO_CONTEXT) {
                local_irq_save(flags);
                *mmc = NO_CONTEXT;
                if (mm == current->mm)
                        activate_context(mm);
                else
                        cpu_clear(cpu_id, mm->cpu_vm_mask);
                local_irq_restore(flags);
        }
        if (!cpus_empty(cpu_mask))
                flush_tlb_others(cpu_mask, mm, NULL, FLUSH_ALL);

        preempt_enable();
}
/*==========================================================================*
 * Name:         smp_flush_tlb_range
 *
 * Description:  This routine flushes a range of pages.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments:    *vma  - a pointer to the vma struct whose mm's TLB is flushed
 *               start - not used
 *               end   - not used
 *
 * Returns:      void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
void smp_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
        unsigned long end)
{
        smp_flush_tlb_mm(vma->vm_mm);
}
/*==========================================================================*
 * Name:         smp_flush_tlb_page
 *
 * Description:  This routine flushes one page.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments:    *vma - a pointer to the vma struct that includes va
 *               va   - virtual address of the page to flush from the TLB
 *
 * Returns:      void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
{
        struct mm_struct *mm = vma->vm_mm;
        int cpu_id;
        cpumask_t cpu_mask;
        unsigned long *mmc;
        unsigned long flags;

        preempt_disable();
        cpu_id = smp_processor_id();
        mmc = &mm->context[cpu_id];
        cpu_mask = mm->cpu_vm_mask;
        cpu_clear(cpu_id, cpu_mask);

#ifdef DEBUG_SMP
        if (!mm)
                BUG();
#endif

        if (*mmc != NO_CONTEXT) {
                local_irq_save(flags);
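                /*
                 * The flushed address is tagged with this mm's ASID on this
                 * CPU (taken from *mmc), so only the TLB entry that belongs
                 * to this address space is invalidated.
                 */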
                va &= PAGE_MASK;
                va |= (*mmc & MMU_CONTEXT_ASID_MASK);
                __flush_tlb_page(va);
                local_irq_restore(flags);
        }
        if (!cpus_empty(cpu_mask))
                flush_tlb_others(cpu_mask, mm, vma, va);

        preempt_enable();
}
/*==========================================================================*
 * Name:         flush_tlb_others
 *
 * Description:  This routine requests other CPUs to execute a TLB flush.
 *               1. Set up the parameters.
 *               2. Send 'INVALIDATE_TLB_IPI' to the other CPUs.
 *                  Request the other CPUs to execute 'smp_invalidate_interrupt()'.
 *               3. Wait until the other CPUs have finished.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments:    cpumask - bitmap of target CPUs
 *               *mm     - a pointer to the mm struct to flush the TLB for
 *               *vma    - a pointer to the vma struct that includes va
 *               va      - virtual address to flush from the TLB
 *
 * Returns:      void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
        struct vm_area_struct *vma, unsigned long va)
{
        unsigned long *mask;
#ifdef DEBUG_SMP
        unsigned long flags;
        __save_flags(flags);
        if (!(flags & 0x0040))  /* Interrupt Disable NONONO */
                BUG();
#endif /* DEBUG_SMP */

        /*
         * A couple of (to be removed) sanity checks:
         *
         * - we do not send IPIs to not-yet booted CPUs.
         * - current CPU must not be in mask
         * - mask must exist :)
         */
        BUG_ON(cpus_empty(cpumask));
        BUG_ON(cpu_isset(smp_processor_id(), cpumask));
        BUG_ON(!mm);

        /* If a CPU which we ran on has gone down, OK. */
        cpus_and(cpumask, cpumask, cpu_online_map);
        if (cpus_empty(cpumask))
                return;

        /*
         * i'm not happy about this global shared spinlock in the
         * MM hot path, but we'll see how contended it is.
         * Temporarily this turns IRQs off, so that lockups are
         * detected by the NMI watchdog.
         */
        spin_lock(&tlbstate_lock);

        flush_mm = mm;
        flush_vma = vma;
        flush_va = va;
        mask = cpus_addr(cpumask);
        atomic_set_mask(*mask, (atomic_t *)&flush_cpumask);

        /*
         * We have to send the IPI only to
         * CPUs affected.
         */
        send_IPI_mask(cpumask, INVALIDATE_TLB_IPI, 0);

        while (!cpus_empty(flush_cpumask)) {
                /* nothing. lockup detection does not belong here */
                mb();
        }

        flush_mm = NULL;
        flush_vma = NULL;
        flush_va = 0;
        spin_unlock(&tlbstate_lock);
}
/*==========================================================================*
 * Name:         smp_invalidate_interrupt
 *
 * Description:  This routine executes on the CPU which received
 *               'INVALIDATE_TLB_IPI'.
 *               1. Flush the local TLB.
 *               2. Report that the TLB flush has finished.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments:    NONE
 *
 * Returns:      void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
void smp_invalidate_interrupt(void)
{
        int cpu_id = smp_processor_id();
        unsigned long *mmc = &flush_mm->context[cpu_id];

        if (!cpu_isset(cpu_id, flush_cpumask))
                return;

        if (flush_va == FLUSH_ALL) {
                *mmc = NO_CONTEXT;
                if (flush_mm == current->active_mm)
                        activate_context(flush_mm);
                else
                        cpu_clear(cpu_id, flush_mm->cpu_vm_mask);
        } else {
                unsigned long va = flush_va;

                if (*mmc != NO_CONTEXT) {
                        va &= PAGE_MASK;
                        va |= (*mmc & MMU_CONTEXT_ASID_MASK);
                        __flush_tlb_page(va);
                }
        }
        cpu_clear(cpu_id, flush_cpumask);
}
/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
/* Stop CPU request Routines                                                  */
/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/

/*==========================================================================*
 * Name:         smp_send_stop
 *
 * Description:  This routine requests all CPUs to stop.
 *               1. Request the other CPUs to execute 'stop_this_cpu()'.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments:    NONE
 *
 * Returns:      void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
void smp_send_stop(void)
{
        smp_call_function(stop_this_cpu, NULL, 1, 0);
}
/*==========================================================================*
 * Name:         stop_this_cpu
 *
 * Description:  This routine halts the CPU.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments:    NONE
 *
 * Returns:      void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
static void stop_this_cpu(void *dummy)
{
        int cpu_id = smp_processor_id();

        /*
         * Remove this CPU:
         */
        cpu_clear(cpu_id, cpu_online_map);

        /*
         * PSW IE = 1;
         * IMASK = 0;
         * goto SLEEP
         */
        local_irq_disable();
        outl(0, M32R_ICU_IMASK_PORTL);
        inl(M32R_ICU_IMASK_PORTL);      /* dummy read */
        local_irq_enable();

        for ( ; ; );
}
/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
/* Call function Routines                                                     */
/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/

/*==========================================================================*
 * Name:         smp_call_function
 *
 * Description:  This routine sends a 'CALL_FUNCTION_IPI' to all other CPUs
 *               in the system.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments:    *func     - The function to run. This must be fast and
 *                           non-blocking.
 *               *info     - An arbitrary pointer to pass to the function.
 *               nonatomic - currently unused.
 *               wait      - If true, wait (atomically) until the function
 *                           has completed on the other CPUs.
 *
 * Returns:      0 on success, else a negative status code. Does not return
 *               until the remote CPUs are nearly ready to execute <<func>>,
 *               are executing it, or have already executed it.
 *
 * Cautions:     You must not call this function with interrupts disabled or
 *               from a hardware interrupt handler; you may call it from a
 *               bottom half handler.
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
        int wait)
{
        struct call_data_struct data;
        int cpus;

#ifdef DEBUG_SMP
        unsigned long flags;
        __save_flags(flags);
        if (!(flags & 0x0040))  /* Interrupt Disable NONONO */
                BUG();
#endif /* DEBUG_SMP */

        /* Holding any lock stops cpus from going down. */
        spin_lock(&call_lock);
        cpus = num_online_cpus() - 1;

        if (!cpus) {
                spin_unlock(&call_lock);
                return 0;
        }

        /* Can deadlock when called with interrupts disabled */
        WARN_ON(irqs_disabled());

        data.func = func;
        data.info = info;
        atomic_set(&data.started, 0);
        data.wait = wait;
        if (wait)
                atomic_set(&data.finished, 0);

        call_data = &data;
        mb();

        /* Send a message to all other CPUs and wait for them to respond */
        send_IPI_allbutself(CALL_FUNCTION_IPI, 0);

        /* Wait for response */
        while (atomic_read(&data.started) != cpus)
                barrier();

        if (wait)
                while (atomic_read(&data.finished) != cpus)
                        barrier();
        spin_unlock(&call_lock);

        return 0;
}
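
/*
 * Usage sketch: a caller in process context, with interrupts enabled, might
 * run a short helper on every other CPU and wait for it to complete.  The
 * helper below is hypothetical and only illustrates the calling convention
 * of smp_call_function() as defined in this file:
 *
 *      static void drain_remote_state(void *info)
 *      {
 *              // Runs on each remote CPU in interrupt context;
 *              // must be fast and must not sleep.
 *      }
 *
 *      // nonatomic is unused; wait=1 blocks until every other CPU has
 *      // finished running the helper.
 *      smp_call_function(drain_remote_state, NULL, 0, 1);
 */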
/*==========================================================================*
 * Name:         smp_call_function_interrupt
 *
 * Description:  This routine executes on the CPU which received
 *               'CALL_FUNCTION_IPI'.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments:    NONE
 *
 * Returns:      void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
void smp_call_function_interrupt(void)
{
        void (*func) (void *info) = call_data->func;
        void *info = call_data->info;
        int wait = call_data->wait;

        /*
         * Notify initiating CPU that I've grabbed the data and am
         * about to execute the function
         */
        mb();
        atomic_inc(&call_data->started);

        /*
         * At this point the info structure may be out of scope unless wait==1
         */
        irq_enter();
        (*func)(info);
        irq_exit();

        if (wait) {
                mb();
                atomic_inc(&call_data->finished);
        }
}
/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
/* Timer Routines                                                             */
/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/

/*==========================================================================*
 * Name:         smp_send_timer
 *
 * Description:  This routine sends a 'LOCAL_TIMER_IPI' to all other CPUs
 *               in the system.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments:    NONE
 *
 * Returns:      void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
void smp_send_timer(void)
{
        send_IPI_allbutself(LOCAL_TIMER_IPI, 1);
}
/*==========================================================================*
 * Name:         smp_ipi_timer_interrupt
 *
 * Description:  This routine executes on the CPU which received
 *               'LOCAL_TIMER_IPI'.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments:    *regs - a pointer to the saved register info
 *
 * Returns:      void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
void smp_ipi_timer_interrupt(struct pt_regs *regs)
{
        struct pt_regs *old_regs;
        old_regs = set_irq_regs(regs);
        irq_enter();
        smp_local_timer_interrupt();
        irq_exit();
        set_irq_regs(old_regs);
}
/*==========================================================================*
 * Name:         smp_local_timer_interrupt
 *
 * Description:  Local timer interrupt handler. It does both profiling and
 *               process statistics/rescheduling.
 *               We do profiling in every local tick, statistics/rescheduling
 *               happen only every 'profiling multiplier' ticks. The default
 *               multiplier is 1 and it can be changed by writing the new
 *               multiplier value into /proc/profile.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments:    *regs - a pointer to the saved register info
 *
 * Returns:      void (cannot fail)
 *
 * Original:     arch/i386/kernel/apic.c
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 * 2003-06-24 hy  use per_cpu structure.
 *==========================================================================*/
void smp_local_timer_interrupt(void)
{
        int user = user_mode(get_irq_regs());
        int cpu_id = smp_processor_id();

        /*
         * The profiling function is SMP safe. (nothing can mess
         * around with "current", and the profiling counters are
         * updated with atomic operations). This is especially
         * useful with a profiling multiplier != 1
         */
        profile_tick(CPU_PROFILING);

        if (--per_cpu(prof_counter, cpu_id) <= 0) {
                /*
                 * The multiplier may have changed since the last time we got
                 * to this point as a result of the user writing to
                 * /proc/profile. In this case we need to adjust the APIC
                 * timer accordingly.
                 *
                 * Interrupts are already masked off at this point.
                 */
                per_cpu(prof_counter, cpu_id)
                        = per_cpu(prof_multiplier, cpu_id);
                if (per_cpu(prof_counter, cpu_id)
                    != per_cpu(prof_old_multiplier, cpu_id)) {
                        per_cpu(prof_old_multiplier, cpu_id)
                                = per_cpu(prof_counter, cpu_id);
                }

                update_process_times(user);
        }
}
/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
/* Send IPI Routines                                                          */
/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/

/*==========================================================================*
 * Name:         send_IPI_allbutself
 *
 * Description:  This routine sends an IPI to all other CPUs in the system.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments:    ipi_num - Number of IPI
 *               try     -  0 : Always send the IPI.
 *                         !0 : Do not send the IPI if the target CPU has not
 *                              yet accepted the previous one.
 *
 * Returns:      void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
void send_IPI_allbutself(int ipi_num, int try)
{
        cpumask_t cpumask;

        cpumask = cpu_online_map;
        cpu_clear(smp_processor_id(), cpumask);

        send_IPI_mask(cpumask, ipi_num, try);
}
/*==========================================================================*
 * Name:         send_IPI_mask
 *
 * Description:  This routine sends an IPI to CPUs in the system.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments:    cpu_mask - Bitmap of target CPUs' logical IDs
 *               ipi_num  - Number of IPI
 *               try      -  0 : Always send the IPI.
 *                          !0 : Do not send the IPI if the target CPU has not
 *                               yet accepted the previous one.
 *
 * Returns:      void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
static void send_IPI_mask(cpumask_t cpumask, int ipi_num, int try)
{
        cpumask_t physid_mask, tmp;
        int cpu_id, phys_id;
        int num_cpus = num_online_cpus();

        if (num_cpus <= 1)      /* NO MP */
                return;

        cpus_and(tmp, cpumask, cpu_online_map);
        BUG_ON(!cpus_equal(cpumask, tmp));

        physid_mask = CPU_MASK_NONE;
        for_each_cpu_mask(cpu_id, cpumask) {
                if ((phys_id = cpu_to_physid(cpu_id)) != -1)
                        cpu_set(phys_id, physid_mask);
        }

        send_IPI_mask_phys(physid_mask, ipi_num, try);
}
/*==========================================================================*
 * Name:         send_IPI_mask_phys
 *
 * Description:  This routine sends an IPI to other CPUs in the system.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments:    cpu_mask - Bitmap of target CPUs' physical IDs
 *               ipi_num  - Number of IPI
 *               try      -  0 : Always send the IPI.
 *                          !0 : Do not send the IPI if the target CPU has not
 *                               yet accepted the previous one.
 *
 * Returns:      IPICRi register value.
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
unsigned long send_IPI_mask_phys(cpumask_t physid_mask, int ipi_num,
        int try)
{
        spinlock_t *ipilock;
        volatile unsigned long *ipicr_addr;
        unsigned long ipicr_val;
        unsigned long my_physid_mask;
        unsigned long mask = cpus_addr(physid_mask)[0];

        if (mask & ~physids_coerce(phys_cpu_present_map))
                BUG();
        if (ipi_num >= NR_IPIS)
                BUG();

        mask <<= IPI_SHIFT;
        ipilock = &ipi_lock[ipi_num];
        ipicr_addr = (volatile unsigned long *)(M32R_ICU_IPICR_ADDR
                + (ipi_num << 2));
        my_physid_mask = ~(1 << smp_processor_id());

        /*
         * lock ipi_lock[i]
         * check IPICRi == 0
         * write IPICRi (send IPIi)
         * unlock ipi_lock[i]
         */
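        /*
         * The inline assembly below is roughly equivalent to the following C
         * (a readability sketch only; the real check-and-send must stay in
         * one locked sequence):
         *
         *      do {
         *              ipicr_val = *ipicr_addr & my_physid_mask;
         *              if (ipicr_val == 0)
         *                      break;          // previous IPI accepted: send
         *      } while (!try);                 // try != 0: give up instead
         *      if (ipicr_val == 0)
         *              *ipicr_addr = mask;     // raise IPIi on the targets
         */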
        spin_lock(ipilock);
        __asm__ __volatile__ (
                ";; CHECK IPICRi == 0           \n\t"
                ".fillinsn                      \n"
                "1:                             \n\t"
                "ld     %0, @%1                 \n\t"
                "and    %0, %4                  \n\t"
                "beqz   %0, 2f                  \n\t"
                "bnez   %3, 3f                  \n\t"
                "bra    1b                      \n\t"
                ";; WRITE IPICRi (send IPIi)    \n\t"
                ".fillinsn                      \n"
                "2:                             \n\t"
                "st     %2, @%1                 \n\t"
                ".fillinsn                      \n"
                "3:                             \n\t"
                : "=&r"(ipicr_val)
                : "r"(ipicr_addr), "r"(mask), "r"(try), "r"(my_physid_mask)
                : "memory"
        );
        spin_unlock(ipilock);

        return ipicr_val;
}