/*
 * linux/kernel/time/tick-broadcast.c
 *
 * This file contains functions which emulate a local clock-event
 * device via a broadcast event source.
 *
 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
 *
 * This code is licenced under the GPL version 2. For details see
 * kernel-base/COPYING.
 */
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/profile.h>
#include <linux/sched.h>
#include <linux/smp.h>

#include "tick-internal.h"

/*
 * Broadcast support for broken x86 hardware, where the local apic
 * timer stops in C3 state.
 */
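
/*
 * The broadcast device is a clock-event device which keeps running in
 * deep power states. CPUs whose local timer stops register themselves
 * in tick_broadcast_mask and get their ticks delivered through the
 * broadcast device instead (typically via IPI, see tick_do_broadcast()).
 */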

static struct tick_device tick_broadcast_device;
/* FIXME: Use cpumask_var_t. */
static DECLARE_BITMAP(tick_broadcast_mask, NR_CPUS);
static DECLARE_BITMAP(tmpmask, NR_CPUS);
static DEFINE_RAW_SPINLOCK(tick_broadcast_lock);
static int tick_broadcast_force;

#ifdef CONFIG_TICK_ONESHOT
static void tick_broadcast_clear_oneshot(int cpu);
#else
static inline void tick_broadcast_clear_oneshot(int cpu) { }
#endif

/*
 * Debugging: see timer_list.c
 */
struct tick_device *tick_get_broadcast_device(void)
{
        return &tick_broadcast_device;
}

struct cpumask *tick_get_broadcast_mask(void)
{
        return to_cpumask(tick_broadcast_mask);
}

/*
 * Start the device in periodic mode
 */
static void tick_broadcast_start_periodic(struct clock_event_device *bc)
{
        if (bc)
                tick_setup_periodic(bc, 1);
}

/*
 * Check if the device can be utilized as a broadcast device:
 */
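/*
 * A candidate is rejected when it is a dummy device, when it would
 * stop in deep C-states itself (CLOCK_EVT_FEAT_C3STOP), or when the
 * current broadcast device has an equal or better rating.
 */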
int tick_check_broadcast_device(struct clock_event_device *dev)
{
        if ((dev->features & CLOCK_EVT_FEAT_DUMMY) ||
            (tick_broadcast_device.evtdev &&
             tick_broadcast_device.evtdev->rating >= dev->rating) ||
            (dev->features & CLOCK_EVT_FEAT_C3STOP))
                return 0;

        clockevents_exchange_device(tick_broadcast_device.evtdev, dev);
        tick_broadcast_device.evtdev = dev;
        if (!cpumask_empty(tick_get_broadcast_mask()))
                tick_broadcast_start_periodic(dev);
        return 1;
}

/*
 * Check if the device is the broadcast device
 */
int tick_is_broadcast_device(struct clock_event_device *dev)
{
        return (dev && tick_broadcast_device.evtdev == dev);
}

static void err_broadcast(const struct cpumask *mask)
{
        pr_crit_once("Failed to broadcast timer tick. Some CPUs may be unresponsive.\n");
}
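
/*
 * Install a broadcast function for the device. tick_broadcast() is the
 * generic fallback; on configurations where no such function exists
 * (presumably when CONFIG_GENERIC_CLOCKEVENTS_BROADCAST is off and
 * tick_broadcast is NULL), err_broadcast() is installed so the missing
 * broadcast path is at least reported instead of oopsing on a NULL call.
 */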
static void tick_device_setup_broadcast_func(struct clock_event_device *dev)
{
        if (!dev->broadcast)
                dev->broadcast = tick_broadcast;
        if (!dev->broadcast) {
                pr_warn_once("%s depends on broadcast, but no broadcast function available\n",
                             dev->name);
                dev->broadcast = err_broadcast;
        }
}

/*
 * Check if the device is dysfunctional and a placeholder, which
 * needs to be handled by the broadcast device.
 */
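/*
 * Returns 1 when the tick for this device is delivered by the
 * broadcast device, 0 when the cpu local device serves the tick
 * itself.
 */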
int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
{
        unsigned long flags;
        int ret = 0;

        raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

        /*
         * Devices might be registered with both periodic and oneshot
         * mode disabled. This signals that the device needs to be
         * operated from the broadcast device and is a placeholder for
         * the cpu local device.
         */
        if (!tick_device_is_functional(dev)) {
                dev->event_handler = tick_handle_periodic;
                tick_device_setup_broadcast_func(dev);
                cpumask_set_cpu(cpu, tick_get_broadcast_mask());
                tick_broadcast_start_periodic(tick_broadcast_device.evtdev);
                ret = 1;
        } else {
                /*
                 * When the new device is not affected by the stop
                 * feature and the cpu is marked in the broadcast mask,
                 * then clear the broadcast bit.
                 */
                if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
                        int cpu = smp_processor_id();

                        cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
                        tick_broadcast_clear_oneshot(cpu);
                } else {
                        tick_device_setup_broadcast_func(dev);
                }
        }
        raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
        return ret;
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
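/*
 * Called on the target cpu, typically from the architecture's broadcast
 * IPI handler, to deliver the relayed tick to the local device.
 */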
int tick_receive_broadcast(void)
{
        struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
        struct clock_event_device *evt = td->evtdev;

        if (!evt)
                return -ENODEV;

        if (!evt->event_handler)
                return -EINVAL;

        evt->event_handler(evt);
        return 0;
}
#endif

/*
 * Broadcast the event to the cpus which are set in the mask (the mask
 * is modified in the process).
 */
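/*
 * The current cpu handles its own tick by calling its event handler
 * directly instead of sending an IPI to itself; the remaining cpus are
 * woken via the broadcast function of the first device in the mask.
 */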
static void tick_do_broadcast(struct cpumask *mask)
{
        int cpu = smp_processor_id();
        struct tick_device *td;

        /*
         * Check if the current cpu is in the mask
         */
        if (cpumask_test_cpu(cpu, mask)) {
                cpumask_clear_cpu(cpu, mask);
                td = &per_cpu(tick_cpu_device, cpu);
                td->evtdev->event_handler(td->evtdev);
        }

        if (!cpumask_empty(mask)) {
                /*
                 * It might be necessary to actually check whether the devices
                 * have different broadcast functions. For now, just use the
                 * one of the first device. This works as long as we have this
                 * misfeature only on x86 (lapic).
                 */
                td = &per_cpu(tick_cpu_device, cpumask_first(mask));
                td->evtdev->broadcast(mask);
        }
}

/*
 * Periodic broadcast:
 * - invoke the broadcast handlers
 */
static void tick_do_periodic_broadcast(void)
{
        raw_spin_lock(&tick_broadcast_lock);

        cpumask_and(to_cpumask(tmpmask),
                    cpu_online_mask, tick_get_broadcast_mask());
        tick_do_broadcast(to_cpumask(tmpmask));

        raw_spin_unlock(&tick_broadcast_lock);
}

/*
 * Event handler for periodic broadcast ticks
 */
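/*
 * When the broadcast device lacks periodic mode, the handler below
 * reprograms the next period itself and retries in case the computed
 * expiry already lies in the past.
 */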
static void tick_handle_periodic_broadcast(struct clock_event_device *dev)
{
        ktime_t next;

        tick_do_periodic_broadcast();

        /*
         * The device is in periodic mode. No reprogramming necessary:
         */
        if (dev->mode == CLOCK_EVT_MODE_PERIODIC)
                return;

        /*
         * Set up the next period for devices which do not have periodic
         * mode. We read dev->next_event first and add to it when the
         * event already expired. clockevents_program_event() sets
         * dev->next_event only when the event is really programmed to
         * the device.
         */
        for (next = dev->next_event; ;) {
                next = ktime_add(next, tick_period);

                if (!clockevents_program_event(dev, next, false))
                        return;
                tick_do_periodic_broadcast();
        }
}

/*
 * Powerstate information: The system enters/leaves a state where
 * affected devices might stop.
 */
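/*
 * Note that CLOCK_EVT_NOTIFY_BROADCAST_FORCE is sticky: once
 * tick_broadcast_force is set, a later BROADCAST_OFF request does not
 * take the cpu out of the broadcast mask.
 */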
static void tick_do_broadcast_on_off(unsigned long *reason)
{
        struct clock_event_device *bc, *dev;
        struct tick_device *td;
        unsigned long flags;
        int cpu, bc_stopped;

        raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

        cpu = smp_processor_id();
        td = &per_cpu(tick_cpu_device, cpu);
        dev = td->evtdev;
        bc = tick_broadcast_device.evtdev;

        /*
         * Is the device not affected by the powerstate ?
         */
        if (!dev || !(dev->features & CLOCK_EVT_FEAT_C3STOP))
                goto out;

        if (!tick_device_is_functional(dev))
                goto out;

        bc_stopped = cpumask_empty(tick_get_broadcast_mask());

        switch (*reason) {
        case CLOCK_EVT_NOTIFY_BROADCAST_ON:
        case CLOCK_EVT_NOTIFY_BROADCAST_FORCE:
                if (!cpumask_test_cpu(cpu, tick_get_broadcast_mask())) {
                        cpumask_set_cpu(cpu, tick_get_broadcast_mask());
                        if (tick_broadcast_device.mode ==
                            TICKDEV_MODE_PERIODIC)
                                clockevents_shutdown(dev);
                }
                if (*reason == CLOCK_EVT_NOTIFY_BROADCAST_FORCE)
                        tick_broadcast_force = 1;
                break;
        case CLOCK_EVT_NOTIFY_BROADCAST_OFF:
                if (!tick_broadcast_force &&
                    cpumask_test_cpu(cpu, tick_get_broadcast_mask())) {
                        cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
                        if (tick_broadcast_device.mode ==
                            TICKDEV_MODE_PERIODIC)
                                tick_setup_periodic(dev, 0);
                }
                break;
        }

        if (cpumask_empty(tick_get_broadcast_mask())) {
                if (!bc_stopped)
                        clockevents_shutdown(bc);
        } else if (bc_stopped) {
                if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
                        tick_broadcast_start_periodic(bc);
                else
                        tick_broadcast_setup_oneshot(bc);
        }
out:
        raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

/*
 * Powerstate information: The system enters/leaves a state where
 * affected devices might stop.
 */
void tick_broadcast_on_off(unsigned long reason, int *oncpu)
{
        if (!cpumask_test_cpu(*oncpu, cpu_online_mask))
                printk(KERN_ERR "tick-broadcast: ignoring broadcast for "
                       "offline CPU #%d\n", *oncpu);
        else
                tick_do_broadcast_on_off(&reason);
}

/*
 * Set the periodic handler depending on broadcast on/off
 */
void tick_set_periodic_handler(struct clock_event_device *dev, int broadcast)
{
        if (!broadcast)
                dev->event_handler = tick_handle_periodic;
        else
                dev->event_handler = tick_handle_periodic_broadcast;
}

/*
 * Remove a CPU from broadcasting
 */
void tick_shutdown_broadcast(unsigned int *cpup)
{
        struct clock_event_device *bc;
        unsigned long flags;
        unsigned int cpu = *cpup;

        raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

        bc = tick_broadcast_device.evtdev;
        cpumask_clear_cpu(cpu, tick_get_broadcast_mask());

        if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) {
                if (bc && cpumask_empty(tick_get_broadcast_mask()))
                        clockevents_shutdown(bc);
        }

        raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

void tick_suspend_broadcast(void)
{
        struct clock_event_device *bc;
        unsigned long flags;

        raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

        bc = tick_broadcast_device.evtdev;
        if (bc)
                clockevents_shutdown(bc);

        raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}
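
/*
 * Resume the broadcast device. The return value presumably tells the
 * caller whether the resuming cpu still gets its ticks from the
 * broadcast device, so that reprogramming the local device can be
 * skipped.
 */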
int tick_resume_broadcast(void)
{
        struct clock_event_device *bc;
        unsigned long flags;
        int broadcast = 0;

        raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

        bc = tick_broadcast_device.evtdev;

        if (bc) {
                clockevents_set_mode(bc, CLOCK_EVT_MODE_RESUME);

                switch (tick_broadcast_device.mode) {
                case TICKDEV_MODE_PERIODIC:
                        if (!cpumask_empty(tick_get_broadcast_mask()))
                                tick_broadcast_start_periodic(bc);
                        broadcast = cpumask_test_cpu(smp_processor_id(),
                                                     tick_get_broadcast_mask());
                        break;
                case TICKDEV_MODE_ONESHOT:
                        if (!cpumask_empty(tick_get_broadcast_mask()))
                                broadcast = tick_resume_broadcast_oneshot(bc);
                        break;
                }
        }
        raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);

        return broadcast;
}

#ifdef CONFIG_TICK_ONESHOT

/* FIXME: use cpumask_var_t. */
static DECLARE_BITMAP(tick_broadcast_oneshot_mask, NR_CPUS);

/*
 * Exposed for debugging: see timer_list.c
 */
struct cpumask *tick_get_broadcast_oneshot_mask(void)
{
        return to_cpumask(tick_broadcast_oneshot_mask);
}
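
/*
 * Program the broadcast device for the next event, switching it to
 * oneshot mode first when necessary.
 */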
static int tick_broadcast_set_event(ktime_t expires, int force)
{
        struct clock_event_device *bc = tick_broadcast_device.evtdev;

        if (bc->mode != CLOCK_EVT_MODE_ONESHOT)
                clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);

        return clockevents_program_event(bc, expires, force);
}

int tick_resume_broadcast_oneshot(struct clock_event_device *bc)
{
        clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
        return 0;
}

/*
 * Called from irq_enter() when idle was interrupted to re-enable the
 * per cpu device.
 */
void tick_check_oneshot_broadcast(int cpu)
{
        if (cpumask_test_cpu(cpu, to_cpumask(tick_broadcast_oneshot_mask))) {
                struct tick_device *td = &per_cpu(tick_cpu_device, cpu);

                clockevents_set_mode(td->evtdev, CLOCK_EVT_MODE_ONESHOT);
        }
}

/*
 * Handle oneshot mode broadcasting
 */
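/*
 * Each broadcast event expires the local events of all sleeping cpus
 * whose deadline has passed and rearms the broadcast device for the
 * earliest remaining deadline; the "again" label retries when that
 * deadline has itself expired in the meantime.
 */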
static void tick_handle_oneshot_broadcast(struct clock_event_device *dev)
{
        struct tick_device *td;
        ktime_t now, next_event;
        int cpu;

        raw_spin_lock(&tick_broadcast_lock);
again:
        dev->next_event.tv64 = KTIME_MAX;
        next_event.tv64 = KTIME_MAX;
        cpumask_clear(to_cpumask(tmpmask));
        now = ktime_get();
        /* Find all expired events */
        for_each_cpu(cpu, tick_get_broadcast_oneshot_mask()) {
                td = &per_cpu(tick_cpu_device, cpu);
                if (td->evtdev->next_event.tv64 <= now.tv64)
                        cpumask_set_cpu(cpu, to_cpumask(tmpmask));
                else if (td->evtdev->next_event.tv64 < next_event.tv64)
                        next_event.tv64 = td->evtdev->next_event.tv64;
        }

        /*
         * Wake up the cpus which have an expired event.
         */
        tick_do_broadcast(to_cpumask(tmpmask));

        /*
         * Two reasons for reprogramming:
         *
         * - The global event did not expire any CPU local
         * events. This happens in dyntick mode, as the maximum PIT
         * delta is quite small.
         *
         * - There are pending events on sleeping CPUs which were not
         * in the event mask.
         */
        if (next_event.tv64 != KTIME_MAX) {
                /*
                 * Rearm the broadcast device. If the event expired,
                 * repeat the above.
                 */
                if (tick_broadcast_set_event(next_event, 0))
                        goto again;
        }
        raw_spin_unlock(&tick_broadcast_lock);
}

/*
 * Powerstate information: The system enters/leaves a state where
 * affected devices might stop.
 */
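/*
 * On CLOCK_EVT_NOTIFY_BROADCAST_ENTER the cpu hands its next expiry
 * over to the broadcast device and shuts down its local device; on
 * exit the local device is switched back to oneshot mode and
 * reprogrammed.
 */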
void tick_broadcast_oneshot_control(unsigned long reason)
{
        struct clock_event_device *bc, *dev;
        struct tick_device *td;
        unsigned long flags;
        int cpu;

        /*
         * Periodic mode does not care about the enter/exit of power
         * states
         */
        if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
                return;

        /*
         * We are called with preemption disabled from the depth of the
         * idle code, so we can't be moved away.
         */
        cpu = smp_processor_id();
        td = &per_cpu(tick_cpu_device, cpu);
        dev = td->evtdev;

        if (!(dev->features & CLOCK_EVT_FEAT_C3STOP))
                return;

        bc = tick_broadcast_device.evtdev;

        raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
        if (reason == CLOCK_EVT_NOTIFY_BROADCAST_ENTER) {
                if (!cpumask_test_cpu(cpu, tick_get_broadcast_oneshot_mask())) {
                        cpumask_set_cpu(cpu, tick_get_broadcast_oneshot_mask());
                        clockevents_set_mode(dev, CLOCK_EVT_MODE_SHUTDOWN);
                        if (dev->next_event.tv64 < bc->next_event.tv64)
                                tick_broadcast_set_event(dev->next_event, 1);
                }
        } else {
                if (cpumask_test_cpu(cpu, tick_get_broadcast_oneshot_mask())) {
                        cpumask_clear_cpu(cpu,
                                          tick_get_broadcast_oneshot_mask());
                        clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT);
                        if (dev->next_event.tv64 != KTIME_MAX)
                                tick_program_event(dev->next_event, 1);
                }
        }
        raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

/*
 * Reset the oneshot broadcast for a cpu
 *
 * Called with tick_broadcast_lock held
 */
static void tick_broadcast_clear_oneshot(int cpu)
{
        cpumask_clear_cpu(cpu, tick_get_broadcast_oneshot_mask());
}

static void tick_broadcast_init_next_event(struct cpumask *mask,
                                           ktime_t expires)
{
        struct tick_device *td;
        int cpu;

        for_each_cpu(cpu, mask) {
                td = &per_cpu(tick_cpu_device, cpu);
                if (td->evtdev)
                        td->evtdev->next_event = expires;
        }
}

/**
 * tick_broadcast_setup_oneshot - setup the broadcast device
 */
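/*
 * If the broadcast device was in periodic mode, other cpus may already
 * be waiting for the periodic broadcast; their oneshot mask bits are
 * set and the device is programmed for the next period so that no
 * wakeup is lost.
 */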
void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
{
        int cpu = smp_processor_id();

        /* Set it up only once! */
        if (bc->event_handler != tick_handle_oneshot_broadcast) {
                int was_periodic = bc->mode == CLOCK_EVT_MODE_PERIODIC;

                bc->event_handler = tick_handle_oneshot_broadcast;

                /* Take the do_timer update */
                tick_do_timer_cpu = cpu;

                /*
                 * We must be careful here. There might be other CPUs
                 * waiting for periodic broadcast. We need to set the
                 * oneshot_mask bits for those and program the
                 * broadcast device to fire.
                 */
                cpumask_copy(to_cpumask(tmpmask), tick_get_broadcast_mask());
                cpumask_clear_cpu(cpu, to_cpumask(tmpmask));
                cpumask_or(tick_get_broadcast_oneshot_mask(),
                           tick_get_broadcast_oneshot_mask(),
                           to_cpumask(tmpmask));

                if (was_periodic && !cpumask_empty(to_cpumask(tmpmask))) {
                        clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
                        tick_broadcast_init_next_event(to_cpumask(tmpmask),
                                                       tick_next_period);
                        tick_broadcast_set_event(tick_next_period, 1);
                } else
                        bc->next_event.tv64 = KTIME_MAX;
        } else {
                /*
                 * The first cpu which switches to oneshot mode sets
                 * the bit for all other cpus which are in the general
                 * (periodic) broadcast mask. So the bit is set and
                 * would prevent the first broadcast enter after this
                 * from programming the bc device.
                 */
                tick_broadcast_clear_oneshot(cpu);
        }
}

/*
 * Select oneshot operating mode for the broadcast device
 */
void tick_broadcast_switch_to_oneshot(void)
{
        struct clock_event_device *bc;
        unsigned long flags;

        raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

        tick_broadcast_device.mode = TICKDEV_MODE_ONESHOT;
        bc = tick_broadcast_device.evtdev;
        if (bc)
                tick_broadcast_setup_oneshot(bc);

        raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

/*
 * Remove a dead CPU from broadcasting
 */
void tick_shutdown_broadcast_oneshot(unsigned int *cpup)
{
        unsigned long flags;
        unsigned int cpu = *cpup;

        raw_spin_lock_irqsave(&tick_broadcast_lock, flags);

        /*
         * Clear the broadcast mask flag for the dead cpu, but do not
         * stop the broadcast device!
         */
        cpumask_clear_cpu(cpu, tick_get_broadcast_oneshot_mask());

        raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}

/*
 * Check whether the broadcast device is in oneshot mode
 */
int tick_broadcast_oneshot_active(void)
{
        return tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT;
}

/*
 * Check whether the broadcast device supports oneshot.
 */
bool tick_broadcast_oneshot_available(void)
{
        struct clock_event_device *bc = tick_broadcast_device.evtdev;

        return bc ? bc->features & CLOCK_EVT_FEAT_ONESHOT : false;
}

#endif