
/*
 *  linux/arch/m32r/kernel/smp.c
 *
 *  M32R SMP support routines.
 *
 *  Copyright (c) 2001, 2002  Hitoshi Yamamoto
 *
 *  Taken from i386 version.
 *    (c) 1995 Alan Cox, Building #3 <alan@redhat.com>
 *    (c) 1998-99, 2000 Ingo Molnar <mingo@redhat.com>
 *
 *  This code is released under the GNU General Public License version 2 or
 *  later.
 */
#undef DEBUG_SMP

#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/profile.h>
#include <linux/cpu.h>

#include <asm/cacheflush.h>
#include <asm/pgalloc.h>
#include <asm/atomic.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/m32r.h>
/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
/* Data structures and variables                                              */
/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/

/*
 * For flush_cache_all()
 */
static DEFINE_SPINLOCK(flushcache_lock);
static volatile unsigned long flushcache_cpumask = 0;

/*
 * For flush_tlb_others()
 */
static volatile cpumask_t flush_cpumask;
static struct mm_struct *flush_mm;
static struct vm_area_struct *flush_vma;
static volatile unsigned long flush_va;
static DEFINE_SPINLOCK(tlbstate_lock);
#define FLUSH_ALL 0xffffffff

DECLARE_PER_CPU(int, prof_multiplier);
DECLARE_PER_CPU(int, prof_old_multiplier);
DECLARE_PER_CPU(int, prof_counter);

extern spinlock_t ipi_lock[];
/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
/* Function Prototypes                                                        */
/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/

void smp_send_reschedule(int);
void smp_reschedule_interrupt(void);

void smp_flush_cache_all(void);
void smp_flush_cache_all_interrupt(void);

void smp_flush_tlb_all(void);
static void flush_tlb_all_ipi(void *);

void smp_flush_tlb_mm(struct mm_struct *);
void smp_flush_tlb_range(struct vm_area_struct *, unsigned long,
        unsigned long);
void smp_flush_tlb_page(struct vm_area_struct *, unsigned long);
static void flush_tlb_others(cpumask_t, struct mm_struct *,
        struct vm_area_struct *, unsigned long);

void smp_invalidate_interrupt(void);

void smp_send_stop(void);
static void stop_this_cpu(void *);

void smp_send_timer(void);
void smp_ipi_timer_interrupt(struct pt_regs *);
void smp_local_timer_interrupt(void);

static void send_IPI_allbutself(int, int);
static void send_IPI_mask(cpumask_t, int, int);
unsigned long send_IPI_mask_phys(cpumask_t, int, int);

/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
/* Rescheduling request Routines                                              */
/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
/*==========================================================================*
 * Name:         smp_send_reschedule
 *
 * Description:  This routine requests another CPU to execute rescheduling.
 *               1.Send 'RESCHEDULE_IPI' to the other CPU.
 *                 Request the other CPU to execute 'smp_reschedule_interrupt()'.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments:    cpu_id - Target CPU ID
 *
 * Returns:      void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
void smp_send_reschedule(int cpu_id)
{
        WARN_ON(cpu_is_offline(cpu_id));
        send_IPI_mask(cpumask_of_cpu(cpu_id), RESCHEDULE_IPI, 1);
}
/*==========================================================================*
 * Name:         smp_reschedule_interrupt
 *
 * Description:  This routine executes on the CPU which received
 *               'RESCHEDULE_IPI'.
 *               Rescheduling is processed at the exit of the interrupt
 *               operation.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments:    NONE
 *
 * Returns:      void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
void smp_reschedule_interrupt(void)
{
        /* nothing to do */
}
/*==========================================================================*
 * Name:         smp_flush_cache_all
 *
 * Description:  This routine sends an 'INVALIDATE_CACHE_IPI' to all other
 *               CPUs in the system.
 *
 * Born on Date: 2003-05-28
 *
 * Arguments:    NONE
 *
 * Returns:      void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
void smp_flush_cache_all(void)
{
        cpumask_t cpumask;
        unsigned long *mask;

        preempt_disable();
        cpumask = cpu_online_map;
        cpu_clear(smp_processor_id(), cpumask);
        spin_lock(&flushcache_lock);
        mask = cpus_addr(cpumask);
        atomic_set_mask(*mask, (atomic_t *)&flushcache_cpumask);
        send_IPI_mask(cpumask, INVALIDATE_CACHE_IPI, 0);
        _flush_cache_copyback_all();
        while (flushcache_cpumask)
                mb();
        spin_unlock(&flushcache_lock);
        preempt_enable();
}

void smp_flush_cache_all_interrupt(void)
{
        _flush_cache_copyback_all();
        clear_bit(smp_processor_id(), &flushcache_cpumask);
}
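
/*
 * Note on the handshake above: smp_flush_cache_all() marks every target CPU
 * in flushcache_cpumask before sending INVALIDATE_CACHE_IPI, then spins on
 * mb() until the mask drains to zero.  Each target acknowledges by clearing
 * its own bit in smp_flush_cache_all_interrupt() once its local copyback
 * flush has completed, so the initiator returns only after all caches have
 * been flushed.
 */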
/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
/* TLB flush request Routines                                                 */
/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/

/*==========================================================================*
 * Name:         smp_flush_tlb_all
 *
 * Description:  This routine flushes all processes' TLBs.
 *               1.Flush the local TLB.
 *               2.Request the other CPUs to execute 'flush_tlb_all_ipi()'.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments:    NONE
 *
 * Returns:      void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
void smp_flush_tlb_all(void)
{
        unsigned long flags;

        preempt_disable();
        local_irq_save(flags);
        __flush_tlb_all();
        local_irq_restore(flags);
        smp_call_function(flush_tlb_all_ipi, NULL, 1);
        preempt_enable();
}

/*==========================================================================*
 * Name:         flush_tlb_all_ipi
 *
 * Description:  This routine flushes all local TLBs.
 *               1.Execute '__flush_tlb_all()'.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments:    *info - not used
 *
 * Returns:      void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
static void flush_tlb_all_ipi(void *info)
{
        __flush_tlb_all();
}
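
/*
 * Note: smp_flush_tlb_all() flushes the local TLB with interrupts disabled
 * and then calls smp_call_function(..., 1) with wait == 1, so it does not
 * return until every other online CPU has run flush_tlb_all_ipi() and
 * flushed its own TLB as well.
 */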
/*==========================================================================*
 * Name:         smp_flush_tlb_mm
 *
 * Description:  This routine flushes the specified mm context's TLBs.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments:    *mm - a pointer to the mm struct whose TLB is to be flushed
 *
 * Returns:      void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
void smp_flush_tlb_mm(struct mm_struct *mm)
{
        int cpu_id;
        cpumask_t cpu_mask;
        unsigned long *mmc;
        unsigned long flags;

        preempt_disable();
        cpu_id = smp_processor_id();
        mmc = &mm->context[cpu_id];
        cpu_mask = mm->cpu_vm_mask;
        cpu_clear(cpu_id, cpu_mask);

        if (*mmc != NO_CONTEXT) {
                local_irq_save(flags);
                *mmc = NO_CONTEXT;
                if (mm == current->mm)
                        activate_context(mm);
                else
                        cpu_clear(cpu_id, mm->cpu_vm_mask);
                local_irq_restore(flags);
        }
        if (!cpus_empty(cpu_mask))
                flush_tlb_others(cpu_mask, mm, NULL, FLUSH_ALL);

        preempt_enable();
}
/*==========================================================================*
 * Name:         smp_flush_tlb_range
 *
 * Description:  This routine flushes a range of pages.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments:    *vma - a pointer to the vma struct for the range
 *               start - not used
 *               end - not used
 *
 * Returns:      void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
void smp_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
        unsigned long end)
{
        smp_flush_tlb_mm(vma->vm_mm);
}
/*==========================================================================*
 * Name:         smp_flush_tlb_page
 *
 * Description:  This routine flushes one page.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments:    *vma - a pointer to the vma struct that contains va
 *               va - virtual address to be flushed from the TLB
 *
 * Returns:      void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
{
        struct mm_struct *mm = vma->vm_mm;
        int cpu_id;
        cpumask_t cpu_mask;
        unsigned long *mmc;
        unsigned long flags;

        preempt_disable();
        cpu_id = smp_processor_id();
        mmc = &mm->context[cpu_id];
        cpu_mask = mm->cpu_vm_mask;
        cpu_clear(cpu_id, cpu_mask);

#ifdef DEBUG_SMP
        if (!mm)
                BUG();
#endif

        if (*mmc != NO_CONTEXT) {
                local_irq_save(flags);
                va &= PAGE_MASK;
                va |= (*mmc & MMU_CONTEXT_ASID_MASK);
                __flush_tlb_page(va);
                local_irq_restore(flags);
        }
        if (!cpus_empty(cpu_mask))
                flush_tlb_others(cpu_mask, mm, vma, va);

        preempt_enable();
}
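
/*
 * Note: the local flush above tags the page address with the current ASID
 * (va = (va & PAGE_MASK) | (*mmc & MMU_CONTEXT_ASID_MASK)) before handing it
 * to __flush_tlb_page(), so only the entry belonging to this mm's context is
 * invalidated.  smp_invalidate_interrupt() performs the same combination on
 * the remote CPUs.
 */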
/*==========================================================================*
 * Name:         flush_tlb_others
 *
 * Description:  This routine requests other CPUs to flush their TLBs.
 *               1.Set up the parameters.
 *               2.Send 'INVALIDATE_TLB_IPI' to the other CPUs.
 *                 Request them to execute 'smp_invalidate_interrupt()'.
 *               3.Wait until the other CPUs have finished.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments:    cpumask - bitmap of target CPUs
 *               *mm - a pointer to the mm struct whose TLB is to be flushed
 *               *vma - a pointer to the vma struct that contains va
 *               va - virtual address to be flushed from the TLB
 *
 * Returns:      void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
        struct vm_area_struct *vma, unsigned long va)
{
        unsigned long *mask;
#ifdef DEBUG_SMP
        unsigned long flags;
        __save_flags(flags);
        if (!(flags & 0x0040))  /* Interrupt Disable NONONO */
                BUG();
#endif /* DEBUG_SMP */

        /*
         * A couple of (to be removed) sanity checks:
         *
         * - we do not send IPIs to not-yet booted CPUs.
         * - current CPU must not be in mask
         * - mask must exist :)
         */
        BUG_ON(cpus_empty(cpumask));
        BUG_ON(cpu_isset(smp_processor_id(), cpumask));
        BUG_ON(!mm);

        /* If a CPU which we ran on has gone down, OK. */
        cpus_and(cpumask, cpumask, cpu_online_map);
        if (cpus_empty(cpumask))
                return;

        /*
         * i'm not happy about this global shared spinlock in the
         * MM hot path, but we'll see how contended it is.
         * Temporarily this turns IRQs off, so that lockups are
         * detected by the NMI watchdog.
         */
        spin_lock(&tlbstate_lock);

        flush_mm = mm;
        flush_vma = vma;
        flush_va = va;
        mask = cpus_addr(cpumask);
        atomic_set_mask(*mask, (atomic_t *)&flush_cpumask);

        /*
         * We have to send the IPI only to
         * CPUs affected.
         */
        send_IPI_mask(cpumask, INVALIDATE_TLB_IPI, 0);

        while (!cpus_empty(flush_cpumask)) {
                /* nothing. lockup detection does not belong here */
                mb();
        }

        flush_mm = NULL;
        flush_vma = NULL;
        flush_va = 0;
        spin_unlock(&tlbstate_lock);
}
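
/*
 * Note on the rendezvous protocol: tlbstate_lock serializes initiators, so at
 * most one remote TLB flush is in flight at a time.  The parameters travel
 * through the shared flush_mm/flush_vma/flush_va variables, the target CPUs
 * are marked in flush_cpumask, and the initiator busy-waits until every
 * target has cleared its bit from smp_invalidate_interrupt() below.
 */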
/*==========================================================================*
 * Name:         smp_invalidate_interrupt
 *
 * Description:  This routine executes on the CPU which received
 *               'INVALIDATE_TLB_IPI'.
 *               1.Flush the local TLB.
 *               2.Report that the TLB flush has finished.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments:    NONE
 *
 * Returns:      void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
void smp_invalidate_interrupt(void)
{
        int cpu_id = smp_processor_id();
        unsigned long *mmc = &flush_mm->context[cpu_id];

        if (!cpu_isset(cpu_id, flush_cpumask))
                return;

        if (flush_va == FLUSH_ALL) {
                *mmc = NO_CONTEXT;
                if (flush_mm == current->active_mm)
                        activate_context(flush_mm);
                else
                        cpu_clear(cpu_id, flush_mm->cpu_vm_mask);
        } else {
                unsigned long va = flush_va;

                if (*mmc != NO_CONTEXT) {
                        va &= PAGE_MASK;
                        va |= (*mmc & MMU_CONTEXT_ASID_MASK);
                        __flush_tlb_page(va);
                }
        }
        cpu_clear(cpu_id, flush_cpumask);
}
/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
/* Stop CPU request Routines                                                  */
/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/

/*==========================================================================*
 * Name:         smp_send_stop
 *
 * Description:  This routine requests all other CPUs to stop.
 *               1.Request the other CPUs to execute 'stop_this_cpu()'.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments:    NONE
 *
 * Returns:      void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
void smp_send_stop(void)
{
        smp_call_function(stop_this_cpu, NULL, 0);
}
/*==========================================================================*
 * Name:         stop_this_cpu
 *
 * Description:  This routine halts the CPU.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments:    NONE
 *
 * Returns:      void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
static void stop_this_cpu(void *dummy)
{
        int cpu_id = smp_processor_id();

        /*
         * Remove this CPU:
         */
        cpu_clear(cpu_id, cpu_online_map);

        /*
         * PSW IE = 1;
         * IMASK = 0;
         * goto SLEEP
         */
        local_irq_disable();
        outl(0, M32R_ICU_IMASK_PORTL);
        inl(M32R_ICU_IMASK_PORTL);      /* dummy read */
        local_irq_enable();

        for ( ; ; );
}
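
/*
 * Note: per the 'PSW IE = 1; IMASK = 0; goto SLEEP' comment above, every
 * interrupt source is masked at the ICU (the write is flushed by the dummy
 * read) before interrupts are re-enabled in the PSW, so the final for (;;)
 * loop spins without servicing further interrupts.
 */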
void arch_send_call_function_ipi(cpumask_t mask)
{
        send_IPI_mask(mask, CALL_FUNCTION_IPI, 0);
}

void arch_send_call_function_single_ipi(int cpu)
{
        send_IPI_mask(cpumask_of_cpu(cpu), CALL_FUNC_SINGLE_IPI, 0);
}
/*==========================================================================*
 * Name:         smp_call_function_interrupt
 *
 * Description:  This routine executes on the CPU which received
 *               'CALL_FUNCTION_IPI'.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments:    NONE
 *
 * Returns:      void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
void smp_call_function_interrupt(void)
{
        irq_enter();
        generic_smp_call_function_interrupt();
        irq_exit();
}

void smp_call_function_single_interrupt(void)
{
        irq_enter();
        generic_smp_call_function_single_interrupt();
        irq_exit();
}
/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
/* Timer Routines                                                             */
/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/

/*==========================================================================*
 * Name:         smp_send_timer
 *
 * Description:  This routine sends a 'LOCAL_TIMER_IPI' to all other CPUs
 *               in the system.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments:    NONE
 *
 * Returns:      void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
void smp_send_timer(void)
{
        send_IPI_allbutself(LOCAL_TIMER_IPI, 1);
}
/*==========================================================================*
 * Name:         smp_ipi_timer_interrupt
 *
 * Description:  This routine executes on the CPU which received
 *               'LOCAL_TIMER_IPI'.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments:    *regs - a pointer to the saved register info
 *
 * Returns:      void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
void smp_ipi_timer_interrupt(struct pt_regs *regs)
{
        struct pt_regs *old_regs;

        old_regs = set_irq_regs(regs);
        irq_enter();
        smp_local_timer_interrupt();
        irq_exit();
        set_irq_regs(old_regs);
}
/*==========================================================================*
 * Name:         smp_local_timer_interrupt
 *
 * Description:  Local timer interrupt handler. It does both profiling and
 *               process statistics/rescheduling.
 *               We do profiling in every local tick, statistics/rescheduling
 *               happen only every 'profiling multiplier' ticks. The default
 *               multiplier is 1 and it can be changed by writing the new
 *               multiplier value into /proc/profile.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments:    NONE (the saved register info is taken from get_irq_regs())
 *
 * Returns:      void (cannot fail)
 *
 * Original:     arch/i386/kernel/apic.c
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 * 2003-06-24 hy  use per_cpu structure.
 *==========================================================================*/
void smp_local_timer_interrupt(void)
{
        int user = user_mode(get_irq_regs());
        int cpu_id = smp_processor_id();

        /*
         * The profiling function is SMP safe. (nothing can mess
         * around with "current", and the profiling counters are
         * updated with atomic operations). This is especially
         * useful with a profiling multiplier != 1
         */
        profile_tick(CPU_PROFILING);

        if (--per_cpu(prof_counter, cpu_id) <= 0) {
                /*
                 * The multiplier may have changed since the last time we got
                 * to this point as a result of the user writing to
                 * /proc/profile. In this case we need to adjust the APIC
                 * timer accordingly.
                 *
                 * Interrupts are already masked off at this point.
                 */
                per_cpu(prof_counter, cpu_id)
                        = per_cpu(prof_multiplier, cpu_id);
                if (per_cpu(prof_counter, cpu_id)
                    != per_cpu(prof_old_multiplier, cpu_id)) {
                        per_cpu(prof_old_multiplier, cpu_id)
                                = per_cpu(prof_counter, cpu_id);
                }

                update_process_times(user);
        }
}
/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
/* Send IPI Routines                                                          */
/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/

/*==========================================================================*
 * Name:         send_IPI_allbutself
 *
 * Description:  This routine sends an IPI to all other CPUs in the system.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments:    ipi_num - Number of IPI
 *               try - 0 : Always send the IPI.
 *                     !0 : Do not send the IPI if the target CPU has not yet
 *                          accepted the previous one.
 *
 * Returns:      void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
static void send_IPI_allbutself(int ipi_num, int try)
{
        cpumask_t cpumask;

        cpumask = cpu_online_map;
        cpu_clear(smp_processor_id(), cpumask);

        send_IPI_mask(cpumask, ipi_num, try);
}
/*==========================================================================*
 * Name:         send_IPI_mask
 *
 * Description:  This routine sends an IPI to the specified CPUs in the
 *               system.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments:    cpu_mask - Bitmap of target CPUs logical ID
 *               ipi_num - Number of IPI
 *               try - 0 : Always send the IPI.
 *                     !0 : Do not send the IPI if the target CPU has not yet
 *                          accepted the previous one.
 *
 * Returns:      void (cannot fail)
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
static void send_IPI_mask(cpumask_t cpumask, int ipi_num, int try)
{
        cpumask_t physid_mask, tmp;
        int cpu_id, phys_id;
        int num_cpus = num_online_cpus();

        if (num_cpus <= 1)      /* NO MP */
                return;

        cpus_and(tmp, cpumask, cpu_online_map);
        BUG_ON(!cpus_equal(cpumask, tmp));

        physid_mask = CPU_MASK_NONE;
        for_each_cpu_mask(cpu_id, cpumask) {
                if ((phys_id = cpu_to_physid(cpu_id)) != -1)
                        cpu_set(phys_id, physid_mask);
        }

        send_IPI_mask_phys(physid_mask, ipi_num, try);
}
/*==========================================================================*
 * Name:         send_IPI_mask_phys
 *
 * Description:  This routine sends an IPI to other CPUs in the system.
 *
 * Born on Date: 2002.02.05
 *
 * Arguments:    cpu_mask - Bitmap of target CPUs physical ID
 *               ipi_num - Number of IPI
 *               try - 0 : Always send the IPI.
 *                     !0 : Do not send the IPI if the target CPU has not yet
 *                          accepted the previous one.
 *
 * Returns:      IPICRi register value.
 *
 * Modification log:
 * Date       Who Description
 * ---------- --- --------------------------------------------------------
 *
 *==========================================================================*/
unsigned long send_IPI_mask_phys(cpumask_t physid_mask, int ipi_num,
        int try)
{
        spinlock_t *ipilock;
        volatile unsigned long *ipicr_addr;
        unsigned long ipicr_val;
        unsigned long my_physid_mask;
        unsigned long mask = cpus_addr(physid_mask)[0];

        if (mask & ~physids_coerce(phys_cpu_present_map))
                BUG();
        if (ipi_num >= NR_IPIS)
                BUG();

        mask <<= IPI_SHIFT;
        ipilock = &ipi_lock[ipi_num];
        ipicr_addr = (volatile unsigned long *)(M32R_ICU_IPICR_ADDR
                + (ipi_num << 2));
        my_physid_mask = ~(1 << smp_processor_id());

        /*
         * lock ipi_lock[i]
         * check IPICRi == 0
         * write IPICRi (send IPIi)
         * unlock ipi_lock[i]
         */
        spin_lock(ipilock);
        __asm__ __volatile__ (
                ";; CHECK IPICRi == 0           \n\t"
                ".fillinsn                      \n"
                "1:                             \n\t"
                "ld     %0, @%1                 \n\t"
                "and    %0, %4                  \n\t"
                "beqz   %0, 2f                  \n\t"
                "bnez   %3, 3f                  \n\t"
                "bra    1b                      \n\t"
                ";; WRITE IPICRi (send IPIi)    \n\t"
                ".fillinsn                      \n"
                "2:                             \n\t"
                "st     %2, @%1                 \n\t"
                ".fillinsn                      \n"
                "3:                             \n\t"
                : "=&r"(ipicr_val)
                : "r"(ipicr_addr), "r"(mask), "r"(try), "r"(my_physid_mask)
                : "memory"
        );
        spin_unlock(ipilock);

        return ipicr_val;
}
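
/*
 * Note on the inline asm above (a reading of the code, not a hardware
 * reference): while holding ipi_lock[ipi_num], the loop at label 1 polls
 * IPICRi, ignoring this CPU's own bit via my_physid_mask (%4).  Once no
 * other request is pending, the store at label 2 writes the shifted
 * physical-ID mask (%2) to raise the IPI.  If requests are still pending
 * and 'try' (%3) is non-zero, the code skips the send by branching to
 * label 3 instead of spinning.  The value sampled by the last read of
 * IPICRi (after masking) is what gets returned.
 */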