/*
 *  linux/arch/m32r/kernel/smp.c
 *
 *  M32R SMP support routines.
 *
 *  Copyright (c) 2001, 2002  Hitoshi Yamamoto
 *
 *  Taken from i386 version.
 *    (c) 1995 Alan Cox, Building #3 <alan@redhat.com>
 *    (c) 1998-99, 2000 Ingo Molnar <mingo@redhat.com>
 *
 *  This code is released under the GNU General Public License version 2 or
 *  later.
 */

#undef DEBUG_SMP

#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/profile.h>
#include <linux/cpu.h>

#include <asm/cacheflush.h>
#include <asm/pgalloc.h>
#include <asm/atomic.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/m32r.h>
/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
/* Data structures and variables                                             */
/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/

/*
 * For flush_cache_all()
 */
static DEFINE_SPINLOCK(flushcache_lock);
static volatile unsigned long flushcache_cpumask = 0;

/*
 * For flush_tlb_others()
 */
static volatile cpumask_t flush_cpumask;
static struct mm_struct *flush_mm;
static struct vm_area_struct *flush_vma;
static volatile unsigned long flush_va;
static DEFINE_SPINLOCK(tlbstate_lock);
#define FLUSH_ALL 0xffffffff

DECLARE_PER_CPU(int, prof_multiplier);
DECLARE_PER_CPU(int, prof_old_multiplier);
DECLARE_PER_CPU(int, prof_counter);

extern spinlock_t ipi_lock[];
/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
/* Function Prototypes                                                        */
/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/

void smp_send_reschedule(int);
void smp_reschedule_interrupt(void);

void smp_flush_cache_all(void);
void smp_flush_cache_all_interrupt(void);

void smp_flush_tlb_all(void);
static void flush_tlb_all_ipi(void *);

void smp_flush_tlb_mm(struct mm_struct *);
void smp_flush_tlb_range(struct vm_area_struct *, unsigned long,
        unsigned long);
void smp_flush_tlb_page(struct vm_area_struct *, unsigned long);
static void flush_tlb_others(cpumask_t, struct mm_struct *,
        struct vm_area_struct *, unsigned long);
void smp_invalidate_interrupt(void);

void smp_send_stop(void);
static void stop_this_cpu(void *);

void smp_send_timer(void);
void smp_ipi_timer_interrupt(struct pt_regs *);
void smp_local_timer_interrupt(void);

static void send_IPI_allbutself(int, int);
static void send_IPI_mask(const struct cpumask *, int, int);
unsigned long send_IPI_mask_phys(cpumask_t, int, int);
/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
/* Rescheduling request Routines                                              */
/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/

/*==========================================================================*
 * Name:         smp_send_reschedule
 *
 * Description:  This routine requests another CPU to execute rescheduling.
 *               1. Send 'RESCHEDULE_IPI' to the target CPU, requesting it
 *                  to execute 'smp_reschedule_interrupt()'.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments:    cpu_id - Target CPU ID
 *
 * Returns:      void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
void smp_send_reschedule(int cpu_id)
{
        WARN_ON(cpu_is_offline(cpu_id));
        send_IPI_mask(cpumask_of(cpu_id), RESCHEDULE_IPI, 1);
}
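
/*
 * send_IPI_mask() is called with try == 1 here: per the 'try' convention of
 * the send_IPI_* helpers below, the new IPI is simply not sent if a previous
 * RESCHEDULE_IPI is still shown as pending by the IPI control register.
 */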
/*==========================================================================*
 * Name:         smp_reschedule_interrupt
 *
 * Description:  This routine executes on the CPU which received
 *               'RESCHEDULE_IPI'.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments:    NONE
 *
 * Returns:      void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
void smp_reschedule_interrupt(void)
{
        scheduler_ipi();
}
/*==========================================================================*
 * Name:         smp_flush_cache_all
 *
 * Description:  This routine sends an 'INVALIDATE_CACHE_IPI' to all other
 *               CPUs in the system.
 *
 * Born on Date: 2003-05-28
 *
 * Arguments:    NONE
 *
 * Returns:      void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
void smp_flush_cache_all(void)
{
        cpumask_t cpumask;
        unsigned long *mask;

        preempt_disable();
        cpumask = cpu_online_map;
        cpu_clear(smp_processor_id(), cpumask);
        spin_lock(&flushcache_lock);
        mask = cpus_addr(cpumask);
        atomic_set_mask(*mask, (atomic_t *)&flushcache_cpumask);
        send_IPI_mask(&cpumask, INVALIDATE_CACHE_IPI, 0);
        _flush_cache_copyback_all();
        while (flushcache_cpumask)
                mb();
        spin_unlock(&flushcache_lock);
        preempt_enable();
}
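
/*
 * flushcache_cpumask carries the handshake for the flush above: the sender
 * sets one bit per target CPU before raising INVALIDATE_CACHE_IPI, flushes
 * its own cache, and then spins until every target has cleared its bit in
 * smp_flush_cache_all_interrupt() below.
 */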
void smp_flush_cache_all_interrupt(void)
{
        _flush_cache_copyback_all();
        clear_bit(smp_processor_id(), &flushcache_cpumask);
}
/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
/* TLB flush request Routines                                                 */
/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/

/*==========================================================================*
 * Name:         smp_flush_tlb_all
 *
 * Description:  This routine flushes all processes' TLBs.
 *               1. Flush the local TLB ('__flush_tlb_all()').
 *               2. Request the other CPUs to execute 'flush_tlb_all_ipi()'.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments:    NONE
 *
 * Returns:      void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
void smp_flush_tlb_all(void)
{
        unsigned long flags;

        preempt_disable();
        local_irq_save(flags);
        __flush_tlb_all();
        local_irq_restore(flags);
        smp_call_function(flush_tlb_all_ipi, NULL, 1);
        preempt_enable();
}
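
/*
 * The local flush is done with IRQs disabled, while the other CPUs are
 * reached through smp_call_function() with wait == 1, so this routine does
 * not return until every online CPU has flushed its TLB.
 */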
/*==========================================================================*
 * Name:         flush_tlb_all_ipi
 *
 * Description:  This routine flushes the local TLB on the receiving CPU.
 *               1. Execute '__flush_tlb_all()'.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments:    *info - not used
 *
 * Returns:      void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
static void flush_tlb_all_ipi(void *info)
{
        __flush_tlb_all();
}
/*==========================================================================*
 * Name:         smp_flush_tlb_mm
 *
 * Description:  This routine flushes the specified mm context's TLB entries.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments:    *mm - a pointer to the mm struct whose TLB is to be flushed
 *
 * Returns:      void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
void smp_flush_tlb_mm(struct mm_struct *mm)
{
        int cpu_id;
        cpumask_t cpu_mask;
        unsigned long *mmc;
        unsigned long flags;

        preempt_disable();
        cpu_id = smp_processor_id();
        mmc = &mm->context[cpu_id];
        cpu_mask = *mm_cpumask(mm);
        cpu_clear(cpu_id, cpu_mask);

        if (*mmc != NO_CONTEXT) {
                local_irq_save(flags);
                *mmc = NO_CONTEXT;
                if (mm == current->mm)
                        activate_context(mm);
                else
                        cpumask_clear_cpu(cpu_id, mm_cpumask(mm));
                local_irq_restore(flags);
        }
        if (!cpus_empty(cpu_mask))
                flush_tlb_others(cpu_mask, mm, NULL, FLUSH_ALL);

        preempt_enable();
}
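
/*
 * The local flush works by invalidating the per-CPU context recorded in
 * mm->context[cpu_id]: writing NO_CONTEXT forces a new ASID to be taken the
 * next time this mm is activated.  If 'mm' is the current mm,
 * activate_context() re-establishes a context immediately; otherwise this
 * CPU is simply dropped from mm_cpumask().  Other CPUs that have used the
 * mm are reached through flush_tlb_others().
 */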
/*==========================================================================*
 * Name:         smp_flush_tlb_range
 *
 * Description:  This routine flushes a range of pages.  On this
 *               architecture it simply flushes the whole mm context.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments:    *vma  - a pointer to the vma that covers the range
 *               start - not used
 *               end   - not used
 *
 * Returns:      void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
void smp_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
        unsigned long end)
{
        smp_flush_tlb_mm(vma->vm_mm);
}
/*==========================================================================*
 * Name:         smp_flush_tlb_page
 *
 * Description:  This routine flushes one page.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments:    *vma - a pointer to the vma struct that contains va
 *               va   - virtual address to flush from the TLB
 *
 * Returns:      void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
{
        struct mm_struct *mm = vma->vm_mm;
        int cpu_id;
        cpumask_t cpu_mask;
        unsigned long *mmc;
        unsigned long flags;

        preempt_disable();
        cpu_id = smp_processor_id();
        mmc = &mm->context[cpu_id];
        cpu_mask = *mm_cpumask(mm);
        cpu_clear(cpu_id, cpu_mask);

#ifdef DEBUG_SMP
        if (!mm)
                BUG();
#endif

        if (*mmc != NO_CONTEXT) {
                local_irq_save(flags);
                va &= PAGE_MASK;
                va |= (*mmc & MMU_CONTEXT_ASID_MASK);
                __flush_tlb_page(va);
                local_irq_restore(flags);
        }
        if (!cpus_empty(cpu_mask))
                flush_tlb_others(cpu_mask, mm, vma, va);

        preempt_enable();
}
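
/*
 * __flush_tlb_page() is handed the page-aligned virtual address with the
 * context's ASID folded into the low bits (PAGE_MASK / MMU_CONTEXT_ASID_MASK
 * above), which appears to match the way the m32r TLB tags its entries per
 * address space.
 */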
/*==========================================================================*
 * Name:         flush_tlb_others
 *
 * Description:  This routine requests other CPUs to flush their TLBs.
 *               1. Set up the parameters.
 *               2. Send 'INVALIDATE_TLB_IPI' to the other CPUs, requesting
 *                  them to execute 'smp_invalidate_interrupt()'.
 *               3. Wait until the other CPUs have finished.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments:    cpumask - bitmap of target CPUs
 *               *mm     - a pointer to the mm struct whose TLB is flushed
 *               *vma    - a pointer to the vma struct that contains va
 *               va      - virtual address to flush, or FLUSH_ALL
 *
 * Returns:      void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
        struct vm_area_struct *vma, unsigned long va)
{
        unsigned long *mask;
#ifdef DEBUG_SMP
        unsigned long flags;
        __save_flags(flags);
        if (!(flags & 0x0040))  /* Interrupt Disable NONONO */
                BUG();
#endif /* DEBUG_SMP */

        /*
         * A couple of (to be removed) sanity checks:
         *
         * - we do not send IPIs to not-yet booted CPUs.
         * - current CPU must not be in mask
         * - mask must exist :)
         */
        BUG_ON(cpus_empty(cpumask));
        BUG_ON(cpu_isset(smp_processor_id(), cpumask));
        BUG_ON(!mm);

        /* If a CPU which we ran on has gone down, OK. */
        cpus_and(cpumask, cpumask, cpu_online_map);
        if (cpus_empty(cpumask))
                return;

        /*
         * I'm not happy about this global shared spinlock in the
         * MM hot path, but we'll see how contended it is.
         * Temporarily this turns IRQs off, so that lockups are
         * detected by the NMI watchdog.
         */
        spin_lock(&tlbstate_lock);

        flush_mm = mm;
        flush_vma = vma;
        flush_va = va;
        mask = cpus_addr(cpumask);
        atomic_set_mask(*mask, (atomic_t *)&flush_cpumask);

        /*
         * We have to send the IPI only to
         * CPUs affected.
         */
        send_IPI_mask(&cpumask, INVALIDATE_TLB_IPI, 0);

        while (!cpus_empty(flush_cpumask)) {
                /* nothing. lockup detection does not belong here */
                mb();
        }

        flush_mm = NULL;
        flush_vma = NULL;
        flush_va = 0;
        spin_unlock(&tlbstate_lock);
}
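
/*
 * The request parameters travel through the flush_mm/flush_vma/flush_va
 * globals, which is why tlbstate_lock serializes the whole operation: only
 * one flush request can be in flight at a time.  Each receiver clears its
 * bit in flush_cpumask from smp_invalidate_interrupt(), and the busy-wait
 * above completes once the mask is empty.
 */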
/*==========================================================================*
 * Name:         smp_invalidate_interrupt
 *
 * Description:  This routine executes on the CPU which received
 *               'INVALIDATE_TLB_IPI'.
 *               1. Flush the local TLB.
 *               2. Report that the TLB flush has finished.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments:    NONE
 *
 * Returns:      void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
void smp_invalidate_interrupt(void)
{
        int cpu_id = smp_processor_id();
        unsigned long *mmc = &flush_mm->context[cpu_id];

        if (!cpu_isset(cpu_id, flush_cpumask))
                return;

        if (flush_va == FLUSH_ALL) {
                *mmc = NO_CONTEXT;
                if (flush_mm == current->active_mm)
                        activate_context(flush_mm);
                else
                        cpumask_clear_cpu(cpu_id, mm_cpumask(flush_mm));
        } else {
                unsigned long va = flush_va;

                if (*mmc != NO_CONTEXT) {
                        va &= PAGE_MASK;
                        va |= (*mmc & MMU_CONTEXT_ASID_MASK);
                        __flush_tlb_page(va);
                }
        }
        cpu_clear(cpu_id, flush_cpumask);
}
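
/*
 * FLUSH_ALL is a sentinel value of flush_va (see the #define above) that
 * selects a whole-context flush instead of a single page.  Clearing this
 * CPU's bit in flush_cpumask is done last, because that is what releases
 * the sender spinning in flush_tlb_others().
 */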
/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
/* Stop CPU request Routines                                                  */
/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/

/*==========================================================================*
 * Name:         smp_send_stop
 *
 * Description:  This routine requests all other CPUs to stop.
 *               1. Request the other CPUs to execute 'stop_this_cpu()'.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments:    NONE
 *
 * Returns:      void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
void smp_send_stop(void)
{
        smp_call_function(stop_this_cpu, NULL, 0);
}
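
/*
 * smp_call_function() is used with wait == 0 here, so the caller does not
 * block until the other CPUs have actually parked themselves in
 * stop_this_cpu().
 */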
/*==========================================================================*
 * Name:         stop_this_cpu
 *
 * Description:  This routine halts the CPU.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments:    NONE
 *
 * Returns:      void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
static void stop_this_cpu(void *dummy)
{
        int cpu_id = smp_processor_id();

        /*
         * Remove this CPU:
         */
        cpu_clear(cpu_id, cpu_online_map);

        /*
         * PSW IE = 1;
         * IMASK = 0;
         * goto SLEEP
         */
        local_irq_disable();
        outl(0, M32R_ICU_IMASK_PORTL);
        inl(M32R_ICU_IMASK_PORTL);      /* dummy read */
        local_irq_enable();

        for ( ; ; );
}
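
/*
 * Writing 0 to M32R_ICU_IMASK_PORTL lowers the ICU interrupt mask so no
 * interrupt is delivered (the 'IMASK = 0' note above); the dummy read that
 * follows is presumably there to make sure the write has reached the device
 * before interrupts are re-enabled.  The CPU then spins in the for (;;)
 * loop forever.
 */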
void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
        send_IPI_mask(mask, CALL_FUNCTION_IPI, 0);
}

void arch_send_call_function_single_ipi(int cpu)
{
        send_IPI_mask(cpumask_of(cpu), CALL_FUNC_SINGLE_IPI, 0);
}

/*==========================================================================*
 * Name:         smp_call_function_interrupt
 *
 * Description:  This routine executes on the CPU which received
 *               'CALL_FUNCTION_IPI'.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments:    NONE
 *
 * Returns:      void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
void smp_call_function_interrupt(void)
{
        irq_enter();
        generic_smp_call_function_interrupt();
        irq_exit();
}

void smp_call_function_single_interrupt(void)
{
        irq_enter();
        generic_smp_call_function_single_interrupt();
        irq_exit();
}
/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
/* Timer Routines                                                             */
/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/

/*==========================================================================*
 * Name:         smp_send_timer
 *
 * Description:  This routine sends a 'LOCAL_TIMER_IPI' to all other CPUs
 *               in the system.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments:    NONE
 *
 * Returns:      void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
void smp_send_timer(void)
{
        send_IPI_allbutself(LOCAL_TIMER_IPI, 1);
}
/*==========================================================================*
 * Name:         smp_ipi_timer_interrupt
 *
 * Description:  This routine executes on the CPU which received
 *               'LOCAL_TIMER_IPI'.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments:    *regs - a pointer to the saved register info
 *
 * Returns:      void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
void smp_ipi_timer_interrupt(struct pt_regs *regs)
{
        struct pt_regs *old_regs;
        old_regs = set_irq_regs(regs);
        irq_enter();
        smp_local_timer_interrupt();
        irq_exit();
        set_irq_regs(old_regs);
}
/*==========================================================================*
 * Name:         smp_local_timer_interrupt
 *
 * Description:  Local timer interrupt handler. It does both profiling and
 *               process statistics/rescheduling.
 *               We do profiling in every local tick, statistics/rescheduling
 *               happen only every 'profiling multiplier' ticks. The default
 *               multiplier is 1 and it can be changed by writing the new
 *               multiplier value into /proc/profile.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments:    NONE (the saved register info is taken from get_irq_regs())
 *
 * Returns:      void (cannot fail)
 *
 * Original:     arch/i386/kernel/apic.c
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 * 2003-06-24 hy  use per_cpu structure.
 *==========================================================================*/
void smp_local_timer_interrupt(void)
{
        int user = user_mode(get_irq_regs());
        int cpu_id = smp_processor_id();

        /*
         * The profiling function is SMP safe. (nothing can mess
         * around with "current", and the profiling counters are
         * updated with atomic operations). This is especially
         * useful with a profiling multiplier != 1
         */
        profile_tick(CPU_PROFILING);

        if (--per_cpu(prof_counter, cpu_id) <= 0) {
                /*
                 * The multiplier may have changed since the last time we got
                 * to this point as a result of the user writing to
                 * /proc/profile.  In this case we need to adjust the
                 * profiling counter accordingly.
                 *
                 * Interrupts are already masked off at this point.
                 */
                per_cpu(prof_counter, cpu_id)
                        = per_cpu(prof_multiplier, cpu_id);
                if (per_cpu(prof_counter, cpu_id)
                    != per_cpu(prof_old_multiplier, cpu_id)) {
                        per_cpu(prof_old_multiplier, cpu_id)
                                = per_cpu(prof_counter, cpu_id);
                }

                update_process_times(user);
        }
}
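
/*
 * In other words: profile_tick() runs on every local timer tick, while
 * update_process_times() runs only once every prof_multiplier ticks, with
 * prof_counter acting as the per-CPU countdown that is reloaded from
 * prof_multiplier each time it expires.
 */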
/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
/* Send IPI Routines                                                          */
/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/

/*==========================================================================*
 * Name:         send_IPI_allbutself
 *
 * Description:  This routine sends an IPI to all other CPUs in the system.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments:    ipi_num - Number of IPI
 *               try     -  0: Always send the IPI.
 *                         !0: Skip sending if the target CPU has not yet
 *                             received the previous IPI.
 *
 * Returns:      void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
static void send_IPI_allbutself(int ipi_num, int try)
{
        cpumask_t cpumask;

        cpumask = cpu_online_map;
        cpu_clear(smp_processor_id(), cpumask);

        send_IPI_mask(&cpumask, ipi_num, try);
}
/*==========================================================================*
 * Name:         send_IPI_mask
 *
 * Description:  This routine sends an IPI to the CPUs in the given mask.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments:    cpumask - Bitmap of target CPUs' logical IDs
 *               ipi_num - Number of IPI
 *               try     -  0: Always send the IPI.
 *                         !0: Skip sending if the target CPU has not yet
 *                             received the previous IPI.
 *
 * Returns:      void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
static void send_IPI_mask(const struct cpumask *cpumask, int ipi_num, int try)
{
        cpumask_t physid_mask, tmp;
        int cpu_id, phys_id;
        int num_cpus = num_online_cpus();

        if (num_cpus <= 1)      /* NO MP */
                return;

        cpumask_and(&tmp, cpumask, cpu_online_mask);
        BUG_ON(!cpumask_equal(cpumask, &tmp));

        physid_mask = CPU_MASK_NONE;
        for_each_cpu(cpu_id, cpumask) {
                if ((phys_id = cpu_to_physid(cpu_id)) != -1)
                        cpu_set(phys_id, physid_mask);
        }

        send_IPI_mask_phys(physid_mask, ipi_num, try);
}
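
/*
 * send_IPI_mask() deals in logical CPU IDs: it checks that every requested
 * CPU is online, translates each logical ID to its physical ID with
 * cpu_to_physid() (IDs that do not map are skipped), and hands the physical
 * mask to send_IPI_mask_phys() below.
 */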
/*==========================================================================*
 * Name:         send_IPI_mask_phys
 *
 * Description:  This routine sends an IPI to other CPUs in the system.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments:    physid_mask - Bitmap of target CPUs' physical IDs
 *               ipi_num     - Number of IPI
 *               try         -  0: Always send the IPI.
 *                             !0: Skip sending if the target CPU has not yet
 *                                 received the previous IPI.
 *
 * Returns:      IPICRi register value.
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
unsigned long send_IPI_mask_phys(cpumask_t physid_mask, int ipi_num,
        int try)
{
        spinlock_t *ipilock;
        volatile unsigned long *ipicr_addr;
        unsigned long ipicr_val;
        unsigned long my_physid_mask;
        unsigned long mask = cpus_addr(physid_mask)[0];

        if (mask & ~physids_coerce(phys_cpu_present_map))
                BUG();
        if (ipi_num >= NR_IPIS || ipi_num < 0)
                BUG();

        mask <<= IPI_SHIFT;
        ipilock = &ipi_lock[ipi_num];
        ipicr_addr = (volatile unsigned long *)(M32R_ICU_IPICR_ADDR
                + (ipi_num << 2));
        my_physid_mask = ~(1 << smp_processor_id());

        /*
         * lock ipi_lock[i]
         * check IPICRi == 0
         * write IPICRi (send IPIi)
         * unlock ipi_lock[i]
         */
        spin_lock(ipilock);
        __asm__ __volatile__ (
                ";; CHECK IPICRi == 0 \n\t"
                ".fillinsn \n"
                "1: \n\t"
                "ld %0, @%1 \n\t"
                "and %0, %4 \n\t"
                "beqz %0, 2f \n\t"
                "bnez %3, 3f \n\t"
                "bra 1b \n\t"
                ";; WRITE IPICRi (send IPIi) \n\t"
                ".fillinsn \n"
                "2: \n\t"
                "st %2, @%1 \n\t"
                ".fillinsn \n"
                "3: \n\t"
                : "=&r" (ipicr_val)
                : "r" (ipicr_addr), "r" (mask), "r" (try), "r" (my_physid_mask)
                : "memory"
        );
        spin_unlock(ipilock);

        return ipicr_val;
}