/*
 * linux/kernel/time/tick-broadcast.c
 *
 * This file contains functions which emulate a local clock-event
 * device via a broadcast event source.
 *
 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
 *
 * This code is licenced under the GPL version 2. For details see
 * kernel-base/COPYING.
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/profile.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/module.h>

#include "tick-internal.h"

/*
 * Broadcast support for broken x86 hardware, where the local apic
 * timer stops in C3 state.
 */

static struct tick_device tick_broadcast_device;
static cpumask_var_t tick_broadcast_mask;
static cpumask_var_t tmpmask;
static DEFINE_RAW_SPINLOCK(tick_broadcast_lock);
static int tick_broadcast_force;

#ifdef CONFIG_TICK_ONESHOT
static void tick_broadcast_clear_oneshot(int cpu);
#else
static inline void tick_broadcast_clear_oneshot(int cpu) { }
#endif

/*
 * Debugging: see timer_list.c
 */
struct tick_device *tick_get_broadcast_device(void)
{
	return &tick_broadcast_device;
}

struct cpumask *tick_get_broadcast_mask(void)
{
	return tick_broadcast_mask;
}

/*
 * Start the device in periodic mode
 */
static void tick_broadcast_start_periodic(struct clock_event_device *bc)
{
	if (bc)
		tick_setup_periodic(bc, 1);
}

/*
 * Check, if the device can be utilized as broadcast device:
 */
void tick_install_broadcast_device(struct clock_event_device *dev)
{
	struct clock_event_device *cur = tick_broadcast_device.evtdev;

	if ((dev->features & CLOCK_EVT_FEAT_DUMMY) ||
	    (tick_broadcast_device.evtdev &&
	     tick_broadcast_device.evtdev->rating >= dev->rating) ||
	    (dev->features & CLOCK_EVT_FEAT_C3STOP))
		return;

	if (!try_module_get(dev->owner))
		return;

	clockevents_exchange_device(tick_broadcast_device.evtdev, dev);
	if (cur)
		cur->event_handler = clockevents_handle_noop;
	tick_broadcast_device.evtdev = dev;
	if (!cpumask_empty(tick_broadcast_mask))
		tick_broadcast_start_periodic(dev);
	/*
	 * Inform all cpus about this. We might be in a situation
	 * where we did not switch to oneshot mode because the per cpu
	 * devices are affected by CLOCK_EVT_FEAT_C3STOP and the lack
	 * of a oneshot capable broadcast device. Without that
	 * notification the system stays stuck in periodic mode
	 * forever.
	 */
	if (dev->features & CLOCK_EVT_FEAT_ONESHOT)
		tick_clock_notify();
}
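
/*
 * Illustration (not part of the kernel source): a minimal sketch of a
 * clock event device that tick_install_broadcast_device() above would
 * accept. All names and values are made up for the example; only the
 * constraints mirror the checks in the function: no
 * CLOCK_EVT_FEAT_DUMMY, no CLOCK_EVT_FEAT_C3STOP, and a rating that
 * beats the currently installed broadcast device.
 *
 *	static struct clock_event_device sketch_bc_dev = {
 *		.name		= "sketch-timer",
 *		.features	= CLOCK_EVT_FEAT_PERIODIC |
 *				  CLOCK_EVT_FEAT_ONESHOT,
 *		.rating		= 250,
 *		.irq		= -1,
 *		.set_mode	= sketch_set_mode,
 *		.set_next_event	= sketch_set_next_event,
 *	};
 *
 * A driver would hand such a device to the clockevents core (e.g. via
 * clockevents_config_and_register()), which in turn offers it to
 * tick_install_broadcast_device().
 */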

/*
 * Check, if the device is the broadcast device
 */
int tick_is_broadcast_device(struct clock_event_device *dev)
{
	return (dev && tick_broadcast_device.evtdev == dev);
}

static void err_broadcast(const struct cpumask *mask)
{
	pr_crit_once("Failed to broadcast timer tick. Some CPUs may be unresponsive.\n");
}

static void tick_device_setup_broadcast_func(struct clock_event_device *dev)
{
	if (!dev->broadcast)
		dev->broadcast = tick_broadcast;
	/*
	 * dev->broadcast can still be NULL here: tick_broadcast is a
	 * NULL placeholder on architectures without a generic
	 * broadcast IPI.
	 */
	if (!dev->broadcast) {
		pr_warn_once("%s depends on broadcast, but no broadcast function available\n",
			     dev->name);
		dev->broadcast = err_broadcast;
	}
}

/*
 * Check, if the device is dysfunctional and a placeholder, which
 * needs to be handled by the broadcast device.
 */
int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
{
	unsigned long flags;
	int ret = 0;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	/*
	 * Devices might be registered with both periodic and oneshot
	 * mode disabled. This signals, that the device needs to be
	 * operated from the broadcast device and is a placeholder for
	 * the cpu local device.
	 */
	if (!tick_device_is_functional(dev)) {
		dev->event_handler = tick_handle_periodic;
		tick_device_setup_broadcast_func(dev);
		cpumask_set_cpu(cpu, tick_broadcast_mask);
		tick_broadcast_start_periodic(tick_broadcast_device.evtdev);
		ret = 1;
	} else {
		/*
		 * When the new device is not affected by the stop
		 * feature and the cpu is marked in the broadcast mask
		 * then clear the broadcast bit.
		 */
		if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
			int cpu = smp_processor_id();

			cpumask_clear_cpu(cpu, tick_broadcast_mask);
			tick_broadcast_clear_oneshot(cpu);
		} else {
			tick_device_setup_broadcast_func(dev);
		}
	}
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
	return ret;
}
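
/*
 * For reference: "functional" is decided by a helper in
 * tick-internal.h, which in this kernel generation treats a device
 * flagged as dummy as a mere placeholder (sketch from memory; the
 * header is authoritative):
 *
 *	static inline int tick_device_is_functional(struct clock_event_device *dev)
 *	{
 *		return !(dev->features & CLOCK_EVT_FEAT_DUMMY);
 *	}
 *
 * An arch whose local timer is unusable thus registers a dummy device
 * and the broadcast device ticks on its behalf.
 */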

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
int tick_receive_broadcast(void)
{
	struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
	struct clock_event_device *evt = td->evtdev;

	if (!evt)
		return -ENODEV;

	if (!evt->event_handler)
		return -EINVAL;

	evt->event_handler(evt);
	return 0;
}
#endif

/*
 * Broadcast the event to the cpus, which are set in the mask (mangled).
 */
static void tick_do_broadcast(struct cpumask *mask)
{
	int cpu = smp_processor_id();
	struct tick_device *td;

	/*
	 * Check, if the current cpu is in the mask
	 */
	if (cpumask_test_cpu(cpu, mask)) {
		cpumask_clear_cpu(cpu, mask);
		td = &per_cpu(tick_cpu_device, cpu);
		td->evtdev->event_handler(td->evtdev);
	}

	if (!cpumask_empty(mask)) {
		/*
		 * It might be necessary to actually check whether the devices
		 * have different broadcast functions. For now, just use the
		 * one of the first device. This works as long as we have this
		 * misfeature only on x86 (lapic)
		 */
		td = &per_cpu(tick_cpu_device, cpumask_first(mask));
		td->evtdev->broadcast(mask);
	}
}
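
/*
 * Sketch of an arch broadcast callback as invoked through
 * td->evtdev->broadcast(mask) above, loosely modeled on the x86 local
 * apic one: it simply sends the local timer vector as an IPI to all
 * cpus in the mask (the function name here is illustrative):
 *
 *	static void sketch_timer_broadcast(const struct cpumask *mask)
 *	{
 *		apic->send_IPI_mask(mask, LOCAL_TIMER_VECTOR);
 *	}
 *
 * On architectures with CONFIG_GENERIC_CLOCKEVENTS_BROADCAST the
 * receiving cpu runs tick_receive_broadcast() from its IPI handler,
 * which calls the local event_handler as seen above.
 */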

/*
 * Periodic broadcast:
 * - invoke the broadcast handlers
 */
static void tick_do_periodic_broadcast(void)
{
	raw_spin_lock(&tick_broadcast_lock);

	cpumask_and(tmpmask, cpu_online_mask, tick_broadcast_mask);
	tick_do_broadcast(tmpmask);

	raw_spin_unlock(&tick_broadcast_lock);
}

/*
 * Event handler for periodic broadcast ticks
 */
static void tick_handle_periodic_broadcast(struct clock_event_device *dev)
{
	ktime_t next;

	tick_do_periodic_broadcast();

	/*
	 * The device is in periodic mode. No reprogramming necessary:
	 */
	if (dev->mode == CLOCK_EVT_MODE_PERIODIC)
		return;

	/*
	 * Setup the next period for devices, which do not have
	 * periodic mode. We read dev->next_event first and add to it
	 * when the event already expired. clockevents_program_event()
	 * sets dev->next_event only when the event is really
	 * programmed to the device.
	 */
	for (next = dev->next_event; ;) {
		next = ktime_add(next, tick_period);

		if (!clockevents_program_event(dev, next, false))
			return;
		tick_do_periodic_broadcast();
	}
}

/*
 * Powerstate information: The system enters/leaves a state, where
 * affected devices might stop
 */
static void tick_do_broadcast_on_off(unsigned long *reason)
{
	struct clock_event_device *bc, *dev;
	struct tick_device *td;
	unsigned long flags;
	int cpu, bc_stopped;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	cpu = smp_processor_id();
	td = &per_cpu(tick_cpu_device, cpu);
	dev = td->evtdev;
	bc = tick_broadcast_device.evtdev;

	/*
	 * Is the device not affected by the powerstate?
	 */
	if (!dev || !(dev->features & CLOCK_EVT_FEAT_C3STOP))
		goto out;

	if (!tick_device_is_functional(dev))
		goto out;

	bc_stopped = cpumask_empty(tick_broadcast_mask);

	switch (*reason) {
	case CLOCK_EVT_NOTIFY_BROADCAST_ON:
	case CLOCK_EVT_NOTIFY_BROADCAST_FORCE:
		if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_mask)) {
			if (tick_broadcast_device.mode ==
			    TICKDEV_MODE_PERIODIC)
				clockevents_shutdown(dev);
		}
		if (*reason == CLOCK_EVT_NOTIFY_BROADCAST_FORCE)
			tick_broadcast_force = 1;
		break;
	case CLOCK_EVT_NOTIFY_BROADCAST_OFF:
		if (!tick_broadcast_force &&
		    cpumask_test_and_clear_cpu(cpu, tick_broadcast_mask)) {
			if (tick_broadcast_device.mode ==
			    TICKDEV_MODE_PERIODIC)
				tick_setup_periodic(dev, 0);
		}
		break;
	}

	if (cpumask_empty(tick_broadcast_mask)) {
		if (!bc_stopped)
			clockevents_shutdown(bc);
	} else if (bc_stopped) {
		if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
			tick_broadcast_start_periodic(bc);
		else
			tick_broadcast_setup_oneshot(bc);
	}
out:
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

/*
 * Powerstate information: The system enters/leaves a state, where
 * affected devices might stop.
 */
void tick_broadcast_on_off(unsigned long reason, int *oncpu)
{
	if (!cpumask_test_cpu(*oncpu, cpu_online_mask))
		printk(KERN_ERR "tick-broadcast: ignoring broadcast for "
		       "offline CPU #%d\n", *oncpu);
	else
		tick_do_broadcast_on_off(&reason);
}
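
/*
 * Typical call path (sketch): in this kernel generation, cpuidle and
 * arch code do not call tick_broadcast_on_off() directly but go
 * through clockevents_notify(), e.g. when a C-state that stops the
 * local timer becomes eligible on a cpu:
 *
 *	int cpu = get_cpu();
 *
 *	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ON, &cpu);
 *	...
 *	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_OFF, &cpu);
 *	put_cpu();
 */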

/*
 * Set the periodic handler depending on broadcast on/off
 */
void tick_set_periodic_handler(struct clock_event_device *dev, int broadcast)
{
	if (!broadcast)
		dev->event_handler = tick_handle_periodic;
	else
		dev->event_handler = tick_handle_periodic_broadcast;
}

/*
 * Remove a CPU from broadcasting
 */
void tick_shutdown_broadcast(unsigned int *cpup)
{
	struct clock_event_device *bc;
	unsigned long flags;
	unsigned int cpu = *cpup;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	bc = tick_broadcast_device.evtdev;
	cpumask_clear_cpu(cpu, tick_broadcast_mask);

	if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) {
		if (bc && cpumask_empty(tick_broadcast_mask))
			clockevents_shutdown(bc);
	}

	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

void tick_suspend_broadcast(void)
{
	struct clock_event_device *bc;
	unsigned long flags;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	bc = tick_broadcast_device.evtdev;
	if (bc)
		clockevents_shutdown(bc);

	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

int tick_resume_broadcast(void)
{
	struct clock_event_device *bc;
	unsigned long flags;
	int broadcast = 0;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	bc = tick_broadcast_device.evtdev;

	if (bc) {
		clockevents_set_mode(bc, CLOCK_EVT_MODE_RESUME);

		switch (tick_broadcast_device.mode) {
		case TICKDEV_MODE_PERIODIC:
			if (!cpumask_empty(tick_broadcast_mask))
				tick_broadcast_start_periodic(bc);
			broadcast = cpumask_test_cpu(smp_processor_id(),
						     tick_broadcast_mask);
			break;
		case TICKDEV_MODE_ONESHOT:
			if (!cpumask_empty(tick_broadcast_mask))
				broadcast = tick_resume_broadcast_oneshot(bc);
			break;
		}
	}
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);

	return broadcast;
}

#ifdef CONFIG_TICK_ONESHOT

static cpumask_var_t tick_broadcast_oneshot_mask;
static cpumask_var_t tick_broadcast_pending_mask;
static cpumask_var_t tick_broadcast_force_mask;

/*
 * Exposed for debugging: see timer_list.c
 */
struct cpumask *tick_get_broadcast_oneshot_mask(void)
{
	return tick_broadcast_oneshot_mask;
}

/*
 * Called before going idle with interrupts disabled. Checks whether a
 * broadcast event from the other core is about to happen. We detected
 * that in tick_broadcast_oneshot_control(). The callsite can use this
 * to avoid a deep idle transition as we are about to get the
 * broadcast IPI right away.
 */
int tick_check_broadcast_expired(void)
{
	return cpumask_test_cpu(smp_processor_id(), tick_broadcast_force_mask);
}
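
/*
 * Usage sketch, following the arch idle loops of this era: with
 * interrupts disabled, a pending broadcast IPI makes a deep idle
 * transition pointless, so the loop just reenables interrupts and
 * waits for the IPI instead of entering a C-state (illustrative
 * outline, not a verbatim idle loop):
 *
 *	local_irq_disable();
 *	if (tick_check_broadcast_expired())
 *		local_irq_enable();
 *	else
 *		arch_cpu_idle();
 */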

/*
 * Set broadcast interrupt affinity
 */
static void tick_broadcast_set_affinity(struct clock_event_device *bc,
					const struct cpumask *cpumask)
{
	if (!(bc->features & CLOCK_EVT_FEAT_DYNIRQ))
		return;

	if (cpumask_equal(bc->cpumask, cpumask))
		return;

	bc->cpumask = cpumask;
	irq_set_affinity(bc->irq, bc->cpumask);
}

static int tick_broadcast_set_event(struct clock_event_device *bc, int cpu,
				    ktime_t expires, int force)
{
	int ret;

	if (bc->mode != CLOCK_EVT_MODE_ONESHOT)
		clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);

	ret = clockevents_program_event(bc, expires, force);
	if (!ret)
		tick_broadcast_set_affinity(bc, cpumask_of(cpu));
	return ret;
}

int tick_resume_broadcast_oneshot(struct clock_event_device *bc)
{
	clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
	return 0;
}

/*
 * Called from irq_enter() when idle was interrupted to reenable the
 * per cpu device.
 */
void tick_check_oneshot_broadcast(int cpu)
{
	if (cpumask_test_cpu(cpu, tick_broadcast_oneshot_mask)) {
		struct tick_device *td = &per_cpu(tick_cpu_device, cpu);

		clockevents_set_mode(td->evtdev, CLOCK_EVT_MODE_ONESHOT);
	}
}

/*
 * Handle oneshot mode broadcasting
 */
static void tick_handle_oneshot_broadcast(struct clock_event_device *dev)
{
	struct tick_device *td;
	ktime_t now, next_event;
	int cpu, next_cpu = 0;

	raw_spin_lock(&tick_broadcast_lock);
again:
	dev->next_event.tv64 = KTIME_MAX;
	next_event.tv64 = KTIME_MAX;
	cpumask_clear(tmpmask);
	now = ktime_get();
	/* Find all expired events */
	for_each_cpu(cpu, tick_broadcast_oneshot_mask) {
		td = &per_cpu(tick_cpu_device, cpu);
		if (td->evtdev->next_event.tv64 <= now.tv64) {
			cpumask_set_cpu(cpu, tmpmask);
			/*
			 * Mark the remote cpu in the pending mask, so
			 * it can avoid reprogramming the cpu local
			 * timer in tick_broadcast_oneshot_control().
			 */
			cpumask_set_cpu(cpu, tick_broadcast_pending_mask);
		} else if (td->evtdev->next_event.tv64 < next_event.tv64) {
			next_event.tv64 = td->evtdev->next_event.tv64;
			next_cpu = cpu;
		}
	}

	/* Take care of enforced broadcast requests */
	cpumask_or(tmpmask, tmpmask, tick_broadcast_force_mask);
	cpumask_clear(tick_broadcast_force_mask);

	/*
	 * Wakeup the cpus which have an expired event.
	 */
	tick_do_broadcast(tmpmask);

	/*
	 * Two reasons for reprogramming:
	 *
	 * - The global event did not expire any CPU local
	 * events. This happens in dyntick mode, as the maximum PIT
	 * delta is quite small.
	 *
	 * - There are pending events on sleeping CPUs which were not
	 * in the event mask
	 */
	if (next_event.tv64 != KTIME_MAX) {
		/*
		 * Rearm the broadcast device. If event expired,
		 * repeat the above
		 */
		if (tick_broadcast_set_event(dev, next_cpu, next_event, 0))
			goto again;
	}
	raw_spin_unlock(&tick_broadcast_lock);
}

/*
 * Powerstate information: The system enters/leaves a state, where
 * affected devices might stop
 */
void tick_broadcast_oneshot_control(unsigned long reason)
{
	struct clock_event_device *bc, *dev;
	struct tick_device *td;
	unsigned long flags;
	ktime_t now;
	int cpu;

	/*
	 * Periodic mode does not care about the enter/exit of power
	 * states
	 */
	if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
		return;

	/*
	 * We are called with preemption disabled from the depth of the
	 * idle code, so we can't be moved away.
	 */
	cpu = smp_processor_id();
	td = &per_cpu(tick_cpu_device, cpu);
	dev = td->evtdev;

	if (!(dev->features & CLOCK_EVT_FEAT_C3STOP))
		return;

	bc = tick_broadcast_device.evtdev;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
	if (reason == CLOCK_EVT_NOTIFY_BROADCAST_ENTER) {
		WARN_ON_ONCE(cpumask_test_cpu(cpu, tick_broadcast_pending_mask));
		if (!cpumask_test_and_set_cpu(cpu, tick_broadcast_oneshot_mask)) {
			clockevents_set_mode(dev, CLOCK_EVT_MODE_SHUTDOWN);
			/*
			 * We only reprogram the broadcast timer if we
			 * did not mark ourselves in the force mask and
			 * if the cpu local event is earlier than the
			 * broadcast event. If the current CPU is in
			 * the force mask, then we are going to be
			 * woken by the IPI right away.
			 */
			if (!cpumask_test_cpu(cpu, tick_broadcast_force_mask) &&
			    dev->next_event.tv64 < bc->next_event.tv64)
				tick_broadcast_set_event(bc, cpu, dev->next_event, 1);
		}
	} else {
		if (cpumask_test_and_clear_cpu(cpu, tick_broadcast_oneshot_mask)) {
			clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT);
			if (dev->next_event.tv64 == KTIME_MAX)
				goto out;
			/*
			 * The cpu which was handling the broadcast
			 * timer marked this cpu in the broadcast
			 * pending mask and fired the broadcast
			 * IPI. So we are going to handle the expired
			 * event anyway via the broadcast IPI
			 * handler. No need to reprogram the timer
			 * with an already expired event.
			 */
			if (cpumask_test_and_clear_cpu(cpu,
				       tick_broadcast_pending_mask))
				goto out;

			/*
			 * If the pending bit is not set, then we are
			 * either the CPU handling the broadcast
			 * interrupt or we got woken by something else.
			 *
			 * We are no longer in the broadcast mask, so
			 * if the cpu local expiry time is already
			 * reached, we would reprogram the cpu local
			 * timer with an already expired event.
			 *
			 * This can lead to a ping-pong when we return
			 * to idle and therefore rearm the broadcast
			 * timer before the cpu local timer was able
			 * to fire. This happens because the forced
			 * reprogramming makes sure that the event
			 * will happen in the future and depending on
			 * the min_delta setting this might be far
			 * enough out that the ping-pong starts.
			 *
			 * If the cpu local next_event has expired
			 * then we know that the broadcast timer
			 * next_event has expired as well and
			 * broadcast is about to be handled. So we
			 * avoid reprogramming and enforce that the
			 * broadcast handler, which did not run yet,
			 * will invoke the cpu local handler.
			 *
			 * We cannot call the handler directly from
			 * here, because we might be in a NOHZ phase
			 * and we did not go through the irq_enter()
			 * nohz fixups.
			 */
			now = ktime_get();
			if (dev->next_event.tv64 <= now.tv64) {
				cpumask_set_cpu(cpu, tick_broadcast_force_mask);
				goto out;
			}
			/*
			 * We got woken by something else. Reprogram
			 * the cpu local timer device.
			 */
			tick_program_event(dev->next_event, 1);
		}
	}
out:
	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}
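
/*
 * Idle-path usage sketch: the cpuidle core of this kernel generation
 * brackets entry into a timer-stopping C-state with ENTER/EXIT
 * notifications, which reach this function via clockevents_notify().
 * enter_deep_cstate() stands in for the actual low-power entry:
 *
 *	int cpu = smp_processor_id();
 *
 *	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);
 *	enter_deep_cstate();
 *	clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
 */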

/*
 * Reset the one shot broadcast for a cpu
 *
 * Called with tick_broadcast_lock held
 */
static void tick_broadcast_clear_oneshot(int cpu)
{
	cpumask_clear_cpu(cpu, tick_broadcast_oneshot_mask);
}

static void tick_broadcast_init_next_event(struct cpumask *mask,
					   ktime_t expires)
{
	struct tick_device *td;
	int cpu;

	for_each_cpu(cpu, mask) {
		td = &per_cpu(tick_cpu_device, cpu);
		if (td->evtdev)
			td->evtdev->next_event = expires;
	}
}

/**
 * tick_broadcast_setup_oneshot - setup the broadcast device
 */
void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
{
	int cpu = smp_processor_id();

	/* Set it up only once! */
	if (bc->event_handler != tick_handle_oneshot_broadcast) {
		int was_periodic = bc->mode == CLOCK_EVT_MODE_PERIODIC;

		bc->event_handler = tick_handle_oneshot_broadcast;

		/* Take the do_timer update */
		if (!tick_nohz_full_cpu(cpu))
			tick_do_timer_cpu = cpu;

		/*
		 * We must be careful here. There might be other CPUs
		 * waiting for periodic broadcast. We need to set the
		 * oneshot_mask bits for those and program the
		 * broadcast device to fire.
		 */
		cpumask_copy(tmpmask, tick_broadcast_mask);
		cpumask_clear_cpu(cpu, tmpmask);
		cpumask_or(tick_broadcast_oneshot_mask,
			   tick_broadcast_oneshot_mask, tmpmask);

		if (was_periodic && !cpumask_empty(tmpmask)) {
			clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
			tick_broadcast_init_next_event(tmpmask,
						       tick_next_period);
			tick_broadcast_set_event(bc, cpu, tick_next_period, 1);
		} else
			bc->next_event.tv64 = KTIME_MAX;
	} else {
		/*
		 * The first cpu which switches to oneshot mode sets
		 * the bit for all other cpus which are in the general
		 * (periodic) broadcast mask. So the bit is set and
		 * would prevent the first broadcast enter after this
		 * from programming the bc device.
		 */
		tick_broadcast_clear_oneshot(cpu);
	}
}

/*
 * Select oneshot operating mode for the broadcast device
 */
void tick_broadcast_switch_to_oneshot(void)
{
	struct clock_event_device *bc;
	unsigned long flags;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	tick_broadcast_device.mode = TICKDEV_MODE_ONESHOT;
	bc = tick_broadcast_device.evtdev;
	if (bc)
		tick_broadcast_setup_oneshot(bc);

	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

/*
 * Remove a dead CPU from broadcasting
 */
void tick_shutdown_broadcast_oneshot(unsigned int *cpup)
{
	unsigned long flags;
	unsigned int cpu = *cpup;

	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

	/*
	 * Clear the broadcast mask flag for the dead cpu, but do not
	 * stop the broadcast device!
	 */
	cpumask_clear_cpu(cpu, tick_broadcast_oneshot_mask);

	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

/*
 * Check, whether the broadcast device is in one shot mode
 */
int tick_broadcast_oneshot_active(void)
{
	return tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT;
}

/*
 * Check whether the broadcast device supports oneshot.
 */
bool tick_broadcast_oneshot_available(void)
{
	struct clock_event_device *bc = tick_broadcast_device.evtdev;

	return bc ? bc->features & CLOCK_EVT_FEAT_ONESHOT : false;
}

#endif

void __init tick_broadcast_init(void)
{
	zalloc_cpumask_var(&tick_broadcast_mask, GFP_NOWAIT);
	zalloc_cpumask_var(&tmpmask, GFP_NOWAIT);
#ifdef CONFIG_TICK_ONESHOT
	zalloc_cpumask_var(&tick_broadcast_oneshot_mask, GFP_NOWAIT);
	zalloc_cpumask_var(&tick_broadcast_pending_mask, GFP_NOWAIT);
	zalloc_cpumask_var(&tick_broadcast_force_mask, GFP_NOWAIT);
#endif
}
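
/*
 * For orientation: this init runs once at early boot. In this kernel
 * generation it is called from tick_init() in kernel/time/tick-common.c,
 * which start_kernel() invokes before the first clock event device is
 * registered (sketch of the caller):
 *
 *	void __init tick_init(void)
 *	{
 *		tick_broadcast_init();
 *	}
 */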