clockevents.c

/*
 * linux/kernel/time/clockevents.c
 *
 * This file contains functions which manage clock event devices.
 *
 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
 *
 * This code is licenced under the GPL version 2. For details see
 * kernel-base/COPYING.
 */

#include <linux/clockchips.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <linux/sysdev.h>
#include <linux/tick.h>

/* The registered clock event devices */
static LIST_HEAD(clockevent_devices);
static LIST_HEAD(clockevents_released);

/* Notification for clock events */
static RAW_NOTIFIER_HEAD(clockevents_chain);

/* Protection for the above */
static DEFINE_SPINLOCK(clockevents_lock);

/**
 * clockevent_delta2ns - Convert a latch value (device ticks) to nanoseconds
 * @latch: value to convert
 * @evt: pointer to clock event device descriptor
 *
 * Math helper, returns latch value converted to nanoseconds (bound checked)
 */
unsigned long clockevent_delta2ns(unsigned long latch,
                                  struct clock_event_device *evt)
{
        u64 clc = ((u64) latch << evt->shift);

        if (unlikely(!evt->mult)) {
                evt->mult = 1;
                WARN_ON(1);
        }

        do_div(clc, evt->mult);
        if (clc < 1000)
                clc = 1000;
        if (clc > LONG_MAX)
                clc = LONG_MAX;

        return (unsigned long) clc;
}
EXPORT_SYMBOL_GPL(clockevent_delta2ns);
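
/*
 * Illustrative usage (not part of the original file): a clock event driver
 * typically calls clockevent_delta2ns() once at setup time to derive the
 * nanosecond programming bounds from the width of its hardware counter,
 * for example:
 *
 *      dev->max_delta_ns = clockevent_delta2ns(0x7fffffff, dev);
 *      dev->min_delta_ns = clockevent_delta2ns(0xf, dev);
 *
 * The latch values above are placeholders; the real limits are device
 * specific.
 */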
  51. /**
  52. * clockevents_set_mode - set the operating mode of a clock event device
  53. * @dev: device to modify
  54. * @mode: new mode
  55. *
  56. * Must be called with interrupts disabled !
  57. */
  58. void clockevents_set_mode(struct clock_event_device *dev,
  59. enum clock_event_mode mode)
  60. {
  61. if (dev->mode != mode) {
  62. dev->set_mode(mode, dev);
  63. dev->mode = mode;
  64. /*
  65. * A nsec2cyc multiplicator of 0 is invalid and we'd crash
  66. * on it, so fix it up and emit a warning:
  67. */
  68. if (mode == CLOCK_EVT_MODE_ONESHOT) {
  69. if (unlikely(!dev->mult)) {
  70. dev->mult = 1;
  71. WARN_ON(1);
  72. }
  73. }
  74. }
  75. }
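
/*
 * Illustrative usage (not part of the original file): the tick layer
 * switches a device between operating modes with calls such as
 *
 *      clockevents_set_mode(dev, CLOCK_EVT_MODE_PERIODIC);
 *      clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT);
 *
 * always with interrupts disabled, as required by the comment above.
 */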

/**
 * clockevents_shutdown - shutdown the device and clear next_event
 * @dev: device to shutdown
 */
void clockevents_shutdown(struct clock_event_device *dev)
{
        clockevents_set_mode(dev, CLOCK_EVT_MODE_SHUTDOWN);
        dev->next_event.tv64 = KTIME_MAX;
}

/**
 * clockevents_program_event - Reprogram the clock event device.
 * @dev: device to program
 * @expires: absolute expiry time (monotonic clock)
 * @now: current time
 *
 * Returns 0 on success, -ETIME when the event is in the past.
 */
int clockevents_program_event(struct clock_event_device *dev, ktime_t expires,
                              ktime_t now)
{
        unsigned long long clc;
        int64_t delta;

        if (unlikely(expires.tv64 < 0)) {
                WARN_ON_ONCE(1);
                return -ETIME;
        }

        delta = ktime_to_ns(ktime_sub(expires, now));
        if (delta <= 0)
                return -ETIME;

        dev->next_event = expires;

        if (dev->mode == CLOCK_EVT_MODE_SHUTDOWN)
                return 0;

        if (delta > dev->max_delta_ns)
                delta = dev->max_delta_ns;
        if (delta < dev->min_delta_ns)
                delta = dev->min_delta_ns;

        clc = delta * dev->mult;
        clc >>= dev->shift;

        return dev->set_next_event((unsigned long) clc, dev);
}
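
/*
 * Illustrative usage (not part of the original file): the oneshot tick code
 * reprograms the device relative to the current time, e.g.
 *
 *      ret = clockevents_program_event(dev, expires, ktime_get());
 *
 * The nanosecond delta is converted back into device cycles with the inverse
 * of the delta2ns math above: cycles = (delta_ns * mult) >> shift.
 */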

/**
 * clockevents_register_notifier - register a clock events change listener
 */
int clockevents_register_notifier(struct notifier_block *nb)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&clockevents_lock, flags);
        ret = raw_notifier_chain_register(&clockevents_chain, nb);
        spin_unlock_irqrestore(&clockevents_lock, flags);

        return ret;
}
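
/*
 * Illustrative usage (not part of the original file): the tick core is the
 * typical listener on this chain. A sketch of such a registration:
 *
 *      static int tick_notify(struct notifier_block *nb,
 *                             unsigned long reason, void *dev);
 *
 *      static struct notifier_block tick_notifier = {
 *              .notifier_call = tick_notify,
 *      };
 *
 *      clockevents_register_notifier(&tick_notifier);
 */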

/*
 * Notify about a clock event change. Called with clockevents_lock
 * held.
 */
static void clockevents_do_notify(unsigned long reason, void *dev)
{
        raw_notifier_call_chain(&clockevents_chain, reason, dev);
}

/*
 * Called after a notify add to make devices available which were
 * released from the notifier call.
 */
static void clockevents_notify_released(void)
{
        struct clock_event_device *dev;

        while (!list_empty(&clockevents_released)) {
                dev = list_entry(clockevents_released.next,
                                 struct clock_event_device, list);
                list_del(&dev->list);
                list_add(&dev->list, &clockevent_devices);
                clockevents_do_notify(CLOCK_EVT_NOTIFY_ADD, dev);
        }
}

/**
 * clockevents_register_device - register a clock event device
 * @dev: device to register
 */
void clockevents_register_device(struct clock_event_device *dev)
{
        unsigned long flags;

        BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED);
        BUG_ON(!dev->cpumask);

        spin_lock_irqsave(&clockevents_lock, flags);

        list_add(&dev->list, &clockevent_devices);
        clockevents_do_notify(CLOCK_EVT_NOTIFY_ADD, dev);
        clockevents_notify_released();

        spin_unlock_irqrestore(&clockevents_lock, flags);
}
EXPORT_SYMBOL_GPL(clockevents_register_device);
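
/*
 * Illustrative usage (not part of the original file): a per-cpu timer driver
 * fills in its clock_event_device and hands it to the core. All names and
 * values below are placeholders, not real driver code:
 *
 *      evt->name           = "example_timer";
 *      evt->features       = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
 *      evt->shift          = 32;
 *      evt->mult           = div_sc(timer_rate, NSEC_PER_SEC, evt->shift);
 *      evt->max_delta_ns   = clockevent_delta2ns(0x7fffffff, evt);
 *      evt->min_delta_ns   = clockevent_delta2ns(0xf, evt);
 *      evt->set_mode       = example_set_mode;
 *      evt->set_next_event = example_set_next_event;
 *      evt->cpumask        = cpumask_of(smp_processor_id());
 *
 *      clockevents_register_device(evt);
 */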

/*
 * Noop handler when we shut down an event device
 */
void clockevents_handle_noop(struct clock_event_device *dev)
{
}

/**
 * clockevents_exchange_device - release and request clock devices
 * @old: device to release (can be NULL)
 * @new: device to request (can be NULL)
 *
 * Called from the notifier chain. clockevents_lock is held already
 */
void clockevents_exchange_device(struct clock_event_device *old,
                                 struct clock_event_device *new)
{
        unsigned long flags;

        local_irq_save(flags);
        /*
         * Caller releases a clock event device. We queue it into the
         * released list and do a notify add later.
         */
        if (old) {
                clockevents_set_mode(old, CLOCK_EVT_MODE_UNUSED);
                list_del(&old->list);
                list_add(&old->list, &clockevents_released);
        }

        if (new) {
                BUG_ON(new->mode != CLOCK_EVT_MODE_UNUSED);
                clockevents_shutdown(new);
        }
        local_irq_restore(flags);
}
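
/*
 * Illustrative usage (not part of the original file): a CLOCK_EVT_NOTIFY_ADD
 * handler in the tick layer typically swaps the current per-cpu tick device
 * for a newly registered, better suited one:
 *
 *      clockevents_exchange_device(curdev, newdev);
 *
 * The old device ends up on clockevents_released and is re-announced by
 * clockevents_notify_released() above.
 */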

#ifdef CONFIG_GENERIC_CLOCKEVENTS
/**
 * clockevents_notify - notification about relevant events
 */
void clockevents_notify(unsigned long reason, void *arg)
{
        struct list_head *node, *tmp;
        unsigned long flags;

        spin_lock_irqsave(&clockevents_lock, flags);
        clockevents_do_notify(reason, arg);

        switch (reason) {
        case CLOCK_EVT_NOTIFY_CPU_DEAD:
                /*
                 * Unregister the clock event devices which were
                 * released from the users in the notify chain.
                 */
                list_for_each_safe(node, tmp, &clockevents_released)
                        list_del(node);
                break;
        default:
                break;
        }
        spin_unlock_irqrestore(&clockevents_lock, flags);
}
EXPORT_SYMBOL_GPL(clockevents_notify);
#endif
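
/*
 * Illustrative usage (not part of the original file): idle and CPU hotplug
 * code funnel events through this entry point, for example:
 *
 *      clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);
 *      ...
 *      clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
 *
 * The reason codes are defined in <linux/clockchips.h>.
 */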