smp.c

/*
 * linux/arch/m32r/kernel/smp.c
 *
 * M32R SMP support routines.
 *
 * Copyright (c) 2001, 2002  Hitoshi Yamamoto
 *
 * Taken from i386 version.
 *   (c) 1995 Alan Cox, Building #3 <alan@redhat.com>
 *   (c) 1998-99, 2000 Ingo Molnar <mingo@redhat.com>
 *
 * This code is released under the GNU General Public License version 2 or
 * later.
 */

#undef DEBUG_SMP

#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/profile.h>
#include <linux/cpu.h>

#include <asm/cacheflush.h>
#include <asm/pgalloc.h>
#include <asm/atomic.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/m32r.h>

/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
/* Data structures and variables */
/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/

/*
 * For flush_cache_all()
 */
static DEFINE_SPINLOCK(flushcache_lock);
static volatile unsigned long flushcache_cpumask = 0;

/*
 * For flush_tlb_others()
 */
static volatile cpumask_t flush_cpumask;
static struct mm_struct *flush_mm;
static struct vm_area_struct *flush_vma;
static volatile unsigned long flush_va;
static DEFINE_SPINLOCK(tlbstate_lock);
#define FLUSH_ALL 0xffffffff

DECLARE_PER_CPU(int, prof_multiplier);
DECLARE_PER_CPU(int, prof_old_multiplier);
DECLARE_PER_CPU(int, prof_counter);

extern spinlock_t ipi_lock[];

/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
/* Function Prototypes */
/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/

void smp_send_reschedule(int);
void smp_reschedule_interrupt(void);

void smp_flush_cache_all(void);
void smp_flush_cache_all_interrupt(void);

void smp_flush_tlb_all(void);
static void flush_tlb_all_ipi(void *);

void smp_flush_tlb_mm(struct mm_struct *);
void smp_flush_tlb_range(struct vm_area_struct *, unsigned long,
        unsigned long);
void smp_flush_tlb_page(struct vm_area_struct *, unsigned long);
static void flush_tlb_others(cpumask_t, struct mm_struct *,
        struct vm_area_struct *, unsigned long);

void smp_invalidate_interrupt(void);

void smp_send_stop(void);
static void stop_this_cpu(void *);

void smp_send_timer(void);
void smp_ipi_timer_interrupt(struct pt_regs *);
void smp_local_timer_interrupt(void);

static void send_IPI_allbutself(int, int);
static void send_IPI_mask(const struct cpumask *, int, int);
unsigned long send_IPI_mask_phys(cpumask_t, int, int);

/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
/* Rescheduling request Routines */
/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/

/*==========================================================================*
 * Name: smp_send_reschedule
 *
 * Description: This routine requests another CPU to reschedule.
 *     1. Send 'RESCHEDULE_IPI' to the target CPU, requesting it to
 *        execute 'smp_reschedule_interrupt()'.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments: cpu_id - Target CPU ID
 *
 * Returns: void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
void smp_send_reschedule(int cpu_id)
{
        WARN_ON(cpu_is_offline(cpu_id));
        send_IPI_mask(cpumask_of(cpu_id), RESCHEDULE_IPI, 1);
}

/*==========================================================================*
 * Name: smp_reschedule_interrupt
 *
 * Description: This routine executes on the CPU that received
 *     'RESCHEDULE_IPI'.  Rescheduling is processed at the exit of the
 *     interrupt.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments: NONE
 *
 * Returns: void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
void smp_reschedule_interrupt(void)
{
        /* nothing to do */
}

/*==========================================================================*
 * Name: smp_flush_cache_all
 *
 * Description: This routine sends an 'INVALIDATE_CACHE_IPI' to all other
 *     CPUs in the system.
 *
 * Born on Date: 2003-05-28
 *
 * Arguments: NONE
 *
 * Returns: void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
void smp_flush_cache_all(void)
{
        cpumask_t cpumask;
        unsigned long *mask;

        preempt_disable();
        cpumask = cpu_online_map;
        cpu_clear(smp_processor_id(), cpumask);
        spin_lock(&flushcache_lock);
        mask = cpus_addr(cpumask);
        atomic_set_mask(*mask, (atomic_t *)&flushcache_cpumask);
        send_IPI_mask(&cpumask, INVALIDATE_CACHE_IPI, 0);
        _flush_cache_copyback_all();
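        /*
         * Wait until every other CPU has flushed its own cache and cleared
         * its bit in flushcache_cpumask (done in
         * smp_flush_cache_all_interrupt() below).
         */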
        while (flushcache_cpumask)
                mb();
        spin_unlock(&flushcache_lock);
        preempt_enable();
}

void smp_flush_cache_all_interrupt(void)
{
        _flush_cache_copyback_all();
        clear_bit(smp_processor_id(), &flushcache_cpumask);
}

/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
/* TLB flush request Routines */
/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/

/*==========================================================================*
 * Name: smp_flush_tlb_all
 *
 * Description: This routine flushes all processes' TLBs.
 *     1. Request the other CPUs to execute 'flush_tlb_all_ipi()'.
 *     2. Execute 'do_flush_tlb_all_local()'.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments: NONE
 *
 * Returns: void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
void smp_flush_tlb_all(void)
{
        unsigned long flags;

        preempt_disable();
        local_irq_save(flags);
        __flush_tlb_all();
        local_irq_restore(flags);
        smp_call_function(flush_tlb_all_ipi, NULL, 1);
        preempt_enable();
}

/*==========================================================================*
 * Name: flush_tlb_all_ipi
 *
 * Description: This routine flushes all local TLBs.
 *     1. Execute 'do_flush_tlb_all_local()'.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments: *info - not used
 *
 * Returns: void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
static void flush_tlb_all_ipi(void *info)
{
        __flush_tlb_all();
}

/*==========================================================================*
 * Name: smp_flush_tlb_mm
 *
 * Description: This routine flushes the specified mm context's TLB entries.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments: *mm - a pointer to the mm struct whose TLB entries are flushed
 *
 * Returns: void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
void smp_flush_tlb_mm(struct mm_struct *mm)
{
        int cpu_id;
        cpumask_t cpu_mask;
        unsigned long *mmc;
        unsigned long flags;

        preempt_disable();
        cpu_id = smp_processor_id();
        mmc = &mm->context[cpu_id];
        cpu_mask = *mm_cpumask(mm);
        cpu_clear(cpu_id, cpu_mask);
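        /*
         * Invalidate the local context: resetting it to NO_CONTEXT makes
         * the next activation allocate a new ASID, so TLB entries tagged
         * with the old ASID are no longer matched.
         */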
        if (*mmc != NO_CONTEXT) {
                local_irq_save(flags);
                *mmc = NO_CONTEXT;
                if (mm == current->mm)
                        activate_context(mm);
                else
                        cpumask_clear_cpu(cpu_id, mm_cpumask(mm));
                local_irq_restore(flags);
        }
        if (!cpus_empty(cpu_mask))
                flush_tlb_others(cpu_mask, mm, NULL, FLUSH_ALL);

        preempt_enable();
}

/*==========================================================================*
 * Name: smp_flush_tlb_range
 *
 * Description: This routine flushes a range of pages.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments: *vma - a pointer to the vma struct whose range is flushed
 *            start - not used
 *            end - not used
 *
 * Returns: void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
void smp_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
        unsigned long end)
{
        smp_flush_tlb_mm(vma->vm_mm);
}

/*==========================================================================*
 * Name: smp_flush_tlb_page
 *
 * Description: This routine flushes one page.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments: *vma - a pointer to the vma struct that includes va
 *            va - virtual address of the page to flush from the TLB
 *
 * Returns: void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
{
        struct mm_struct *mm = vma->vm_mm;
        int cpu_id;
        cpumask_t cpu_mask;
        unsigned long *mmc;
        unsigned long flags;

        preempt_disable();
        cpu_id = smp_processor_id();
        mmc = &mm->context[cpu_id];
        cpu_mask = *mm_cpumask(mm);
        cpu_clear(cpu_id, cpu_mask);

#ifdef DEBUG_SMP
        if (!mm)
                BUG();
#endif
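        /*
         * TLB entries are tagged with an ASID, so combine the page address
         * with this mm's ASID before flushing.
         */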
        if (*mmc != NO_CONTEXT) {
                local_irq_save(flags);
                va &= PAGE_MASK;
                va |= (*mmc & MMU_CONTEXT_ASID_MASK);
                __flush_tlb_page(va);
                local_irq_restore(flags);
        }
        if (!cpus_empty(cpu_mask))
                flush_tlb_others(cpu_mask, mm, vma, va);

        preempt_enable();
}

/*==========================================================================*
 * Name: flush_tlb_others
 *
 * Description: This routine requests other CPUs to flush their TLBs.
 *     1. Set up the parameters.
 *     2. Send 'INVALIDATE_TLB_IPI' to the other CPUs, requesting them to
 *        execute 'smp_invalidate_interrupt()'.
 *     3. Wait for the other CPUs to finish.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments: cpumask - bitmap of target CPUs
 *            *mm - a pointer to the mm struct whose TLB entries are flushed
 *            *vma - a pointer to the vma struct that includes va
 *            va - virtual address to flush, or FLUSH_ALL
 *
 * Returns: void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
        struct vm_area_struct *vma, unsigned long va)
{
        unsigned long *mask;
#ifdef DEBUG_SMP
        unsigned long flags;
        __save_flags(flags);
        if (!(flags & 0x0040))  /* Interrupt Disable NONONO */
                BUG();
#endif /* DEBUG_SMP */

        /*
         * A couple of (to be removed) sanity checks:
         *
         * - we do not send IPIs to not-yet booted CPUs.
         * - current CPU must not be in mask
         * - mask must exist :)
         */
        BUG_ON(cpus_empty(cpumask));
        BUG_ON(cpu_isset(smp_processor_id(), cpumask));
        BUG_ON(!mm);

        /* If a CPU which we ran on has gone down, OK. */
        cpus_and(cpumask, cpumask, cpu_online_map);
        if (cpus_empty(cpumask))
                return;

        /*
         * i'm not happy about this global shared spinlock in the
         * MM hot path, but we'll see how contended it is.
         * Temporarily this turns IRQs off, so that lockups are
         * detected by the NMI watchdog.
         */
        spin_lock(&tlbstate_lock);
        flush_mm = mm;
        flush_vma = vma;
        flush_va = va;
        mask = cpus_addr(cpumask);
        atomic_set_mask(*mask, (atomic_t *)&flush_cpumask);

        /*
         * We have to send the IPI only to
         * CPUs affected.
         */
        send_IPI_mask(&cpumask, INVALIDATE_TLB_IPI, 0);
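        /*
         * Spin until every target CPU has run smp_invalidate_interrupt()
         * and cleared its bit in flush_cpumask.
         */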
        while (!cpus_empty(flush_cpumask)) {
                /* nothing. lockup detection does not belong here */
                mb();
        }

        flush_mm = NULL;
        flush_vma = NULL;
        flush_va = 0;
        spin_unlock(&tlbstate_lock);
}

/*==========================================================================*
 * Name: smp_invalidate_interrupt
 *
 * Description: This routine executes on the CPU that received
 *     'INVALIDATE_TLB_IPI'.
 *     1. Flush the local TLB.
 *     2. Report that the TLB flush has finished.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments: NONE
 *
 * Returns: void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
void smp_invalidate_interrupt(void)
{
        int cpu_id = smp_processor_id();
        unsigned long *mmc = &flush_mm->context[cpu_id];

        if (!cpu_isset(cpu_id, flush_cpumask))
                return;
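        /*
         * FLUSH_ALL drops the whole context for this mm; otherwise only the
         * single page (tagged with the local ASID) is invalidated.
         */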
        if (flush_va == FLUSH_ALL) {
                *mmc = NO_CONTEXT;
                if (flush_mm == current->active_mm)
                        activate_context(flush_mm);
                else
                        cpumask_clear_cpu(cpu_id, mm_cpumask(flush_mm));
        } else {
                unsigned long va = flush_va;

                if (*mmc != NO_CONTEXT) {
                        va &= PAGE_MASK;
                        va |= (*mmc & MMU_CONTEXT_ASID_MASK);
                        __flush_tlb_page(va);
                }
        }
        cpu_clear(cpu_id, flush_cpumask);
}

/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
/* Stop CPU request Routines */
/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/

/*==========================================================================*
 * Name: smp_send_stop
 *
 * Description: This routine requests all CPUs to stop.
 *     1. Request the other CPUs to execute 'stop_this_cpu()'.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments: NONE
 *
 * Returns: void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
void smp_send_stop(void)
{
        smp_call_function(stop_this_cpu, NULL, 0);
}

/*==========================================================================*
 * Name: stop_this_cpu
 *
 * Description: This routine halts the CPU.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments: NONE
 *
 * Returns: void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
static void stop_this_cpu(void *dummy)
{
        int cpu_id = smp_processor_id();

        /*
         * Remove this CPU:
         */
        cpu_clear(cpu_id, cpu_online_map);

        /*
         * PSW IE = 1;
         * IMASK = 0;
         * goto SLEEP
         */
        local_irq_disable();
        outl(0, M32R_ICU_IMASK_PORTL);
        inl(M32R_ICU_IMASK_PORTL);      /* dummy read */
        local_irq_enable();

        for ( ; ; );
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
        send_IPI_mask(mask, CALL_FUNCTION_IPI, 0);
}

void arch_send_call_function_single_ipi(int cpu)
{
        send_IPI_mask(cpumask_of(cpu), CALL_FUNC_SINGLE_IPI, 0);
}

/*==========================================================================*
 * Name: smp_call_function_interrupt
 *
 * Description: This routine executes on the CPU that received
 *     'CALL_FUNCTION_IPI'.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments: NONE
 *
 * Returns: void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
void smp_call_function_interrupt(void)
{
        irq_enter();
        generic_smp_call_function_interrupt();
        irq_exit();
}

void smp_call_function_single_interrupt(void)
{
        irq_enter();
        generic_smp_call_function_single_interrupt();
        irq_exit();
}

/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
/* Timer Routines */
/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/

/*==========================================================================*
 * Name: smp_send_timer
 *
 * Description: This routine sends a 'LOCAL_TIMER_IPI' to all other CPUs
 *     in the system.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments: NONE
 *
 * Returns: void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
void smp_send_timer(void)
{
        send_IPI_allbutself(LOCAL_TIMER_IPI, 1);
}

/*==========================================================================*
 * Name: smp_ipi_timer_interrupt
 *
 * Description: This routine executes on the CPU that received
 *     'LOCAL_TIMER_IPI'.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments: *regs - a pointer to the saved register info
 *
 * Returns: void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
void smp_ipi_timer_interrupt(struct pt_regs *regs)
{
        struct pt_regs *old_regs;

        old_regs = set_irq_regs(regs);
        irq_enter();
        smp_local_timer_interrupt();
        irq_exit();
        set_irq_regs(old_regs);
}

/*==========================================================================*
 * Name: smp_local_timer_interrupt
 *
 * Description: Local timer interrupt handler. It does both profiling and
 *     process statistics/rescheduling.
 *     We do profiling in every local tick; statistics/rescheduling
 *     happen only every 'profiling multiplier' ticks. The default
 *     multiplier is 1 and it can be changed by writing the new
 *     multiplier value into /proc/profile.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments: *regs - a pointer to the saved register info
 *
 * Returns: void (cannot fail)
 *
 * Original: arch/i386/kernel/apic.c
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 * 2003-06-24 hy  use per_cpu structure.
 *==========================================================================*/
void smp_local_timer_interrupt(void)
{
        int user = user_mode(get_irq_regs());
        int cpu_id = smp_processor_id();

        /*
         * The profiling function is SMP safe. (nothing can mess
         * around with "current", and the profiling counters are
         * updated with atomic operations). This is especially
         * useful with a profiling multiplier != 1
         */
        profile_tick(CPU_PROFILING);

        if (--per_cpu(prof_counter, cpu_id) <= 0) {
                /*
                 * The multiplier may have changed since the last time we got
                 * to this point as a result of the user writing to
                 * /proc/profile. In this case we need to adjust the APIC
                 * timer accordingly.
                 *
                 * Interrupts are already masked off at this point.
                 */
                per_cpu(prof_counter, cpu_id)
                        = per_cpu(prof_multiplier, cpu_id);
                if (per_cpu(prof_counter, cpu_id)
                    != per_cpu(prof_old_multiplier, cpu_id)) {
                        per_cpu(prof_old_multiplier, cpu_id)
                                = per_cpu(prof_counter, cpu_id);
                }

                update_process_times(user);
        }
}

/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
/* Send IPI Routines */
/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/

/*==========================================================================*
 * Name: send_IPI_allbutself
 *
 * Description: This routine sends an IPI to all other CPUs in the system.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments: ipi_num - Number of the IPI
 *            try - 0 : Always send the IPI.
 *                 !0 : Do not send the IPI if the target CPU has not yet
 *                      received the previous one.
 *
 * Returns: void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
static void send_IPI_allbutself(int ipi_num, int try)
{
        cpumask_t cpumask;

        cpumask = cpu_online_map;
        cpu_clear(smp_processor_id(), cpumask);

        send_IPI_mask(&cpumask, ipi_num, try);
}

/*==========================================================================*
 * Name: send_IPI_mask
 *
 * Description: This routine sends an IPI to the CPUs in the given mask.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments: cpu_mask - Bitmap of the target CPUs' logical IDs
 *            ipi_num - Number of the IPI
 *            try - 0 : Always send the IPI.
 *                 !0 : Do not send the IPI if the target CPU has not yet
 *                      received the previous one.
 *
 * Returns: void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
static void send_IPI_mask(const struct cpumask *cpumask, int ipi_num, int try)
{
        cpumask_t physid_mask, tmp;
        int cpu_id, phys_id;
        int num_cpus = num_online_cpus();

        if (num_cpus <= 1)      /* NO MP */
                return;

        cpumask_and(&tmp, cpumask, cpu_online_mask);
        BUG_ON(!cpumask_equal(cpumask, &tmp));

        physid_mask = CPU_MASK_NONE;
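        /* Translate each logical CPU ID in the mask to its physical ID. */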
        for_each_cpu(cpu_id, cpumask) {
                if ((phys_id = cpu_to_physid(cpu_id)) != -1)
                        cpu_set(phys_id, physid_mask);
        }

        send_IPI_mask_phys(physid_mask, ipi_num, try);
}

/*==========================================================================*
 * Name: send_IPI_mask_phys
 *
 * Description: This routine sends an IPI to other CPUs in the system.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments: cpu_mask - Bitmap of the target CPUs' physical IDs
 *            ipi_num - Number of the IPI
 *            try - 0 : Always send the IPI.
 *                 !0 : Do not send the IPI if the target CPU has not yet
 *                      received the previous one.
 *
 * Returns: IPICRi register value.
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
unsigned long send_IPI_mask_phys(cpumask_t physid_mask, int ipi_num,
        int try)
{
        spinlock_t *ipilock;
        volatile unsigned long *ipicr_addr;
        unsigned long ipicr_val;
        unsigned long my_physid_mask;
        unsigned long mask = cpus_addr(physid_mask)[0];

        if (mask & ~physids_coerce(phys_cpu_present_map))
                BUG();
        if (ipi_num >= NR_IPIS || ipi_num < 0)
                BUG();

        mask <<= IPI_SHIFT;
        ipilock = &ipi_lock[ipi_num];
        ipicr_addr = (volatile unsigned long *)(M32R_ICU_IPICR_ADDR
                + (ipi_num << 2));
        my_physid_mask = ~(1 << smp_processor_id());

        /*
         * lock ipi_lock[i]
         * check IPICRi == 0
         * write IPICRi (send IPIi)
         * unlock ipi_lock[i]
         */
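        /*
         * The asm below re-reads IPICRi until no IPI is pending for the
         * other CPUs; if 'try' is non-zero and a previous IPI is still
         * pending, it gives up (label 3) without sending.
         */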
        spin_lock(ipilock);
        __asm__ __volatile__ (
                ";; CHECK IPICRi == 0           \n\t"
                ".fillinsn                      \n"
                "1:                             \n\t"
                "ld     %0, @%1                 \n\t"
                "and    %0, %4                  \n\t"
                "beqz   %0, 2f                  \n\t"
                "bnez   %3, 3f                  \n\t"
                "bra    1b                      \n\t"
                ";; WRITE IPICRi (send IPIi)    \n\t"
                ".fillinsn                      \n"
                "2:                             \n\t"
                "st     %2, @%1                 \n\t"
                ".fillinsn                      \n"
                "3:                             \n\t"
                : "=&r"(ipicr_val)
                : "r"(ipicr_addr), "r"(mask), "r"(try), "r"(my_physid_mask)
                : "memory"
        );
        spin_unlock(ipilock);

        return ipicr_val;
}