clockevents.c

/*
 * linux/kernel/time/clockevents.c
 *
 * This file contains functions which manage clock event devices.
 *
 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
 *
 * This code is licenced under the GPL version 2. For details see
 * kernel-base/COPYING.
 */

#include <linux/clockchips.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/smp.h>

#include "tick-internal.h"

/* The registered clock event devices */
static LIST_HEAD(clockevent_devices);
static LIST_HEAD(clockevents_released);

/* Notification for clock events */
static RAW_NOTIFIER_HEAD(clockevents_chain);

/* Protection for the above */
static DEFINE_RAW_SPINLOCK(clockevents_lock);

/**
 * clockevent_delta2ns - Convert a latch value (device ticks) to nanoseconds
 * @latch:	value to convert
 * @evt:	pointer to clock event device descriptor
 *
 * Math helper, returns latch value converted to nanoseconds (bound checked)
 */
u64 clockevent_delta2ns(unsigned long latch, struct clock_event_device *evt)
{
	u64 clc = (u64) latch << evt->shift;

	if (unlikely(!evt->mult)) {
		evt->mult = 1;
		WARN_ON(1);
	}

	do_div(clc, evt->mult);
	if (clc < 1000)
		clc = 1000;
	if (clc > KTIME_MAX)
		clc = KTIME_MAX;

	return clc;
}
EXPORT_SYMBOL_GPL(clockevent_delta2ns);
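
/*
 * Illustrative sketch (not part of the original file): with mult/shift
 * set up by clockevents_calc_mult_shift() for a hypothetical 1 MHz timer,
 * mult/2^shift is roughly freq/NSEC_PER_SEC, i.e. one device tick is
 * about 1000 ns, so the conversion above behaves approximately as:
 *
 *	u64 ns = clockevent_delta2ns(5, dev);	// ~5000 ns
 *	u64 lo = clockevent_delta2ns(0, dev);	// clamped up to 1000 ns
 */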

/**
 * clockevents_set_mode - set the operating mode of a clock event device
 * @dev:	device to modify
 * @mode:	new mode
 *
 * Must be called with interrupts disabled !
 */
void clockevents_set_mode(struct clock_event_device *dev,
			  enum clock_event_mode mode)
{
	if (dev->mode != mode) {
		dev->set_mode(mode, dev);
		dev->mode = mode;

		/*
		 * A nsec2cyc multiplicator of 0 is invalid and we'd crash
		 * on it, so fix it up and emit a warning:
		 */
		if (mode == CLOCK_EVT_MODE_ONESHOT) {
			if (unlikely(!dev->mult)) {
				dev->mult = 1;
				WARN_ON(1);
			}
		}
	}
}

/**
 * clockevents_shutdown - shutdown the device and clear next_event
 * @dev:	device to shutdown
 */
void clockevents_shutdown(struct clock_event_device *dev)
{
	clockevents_set_mode(dev, CLOCK_EVT_MODE_SHUTDOWN);
	dev->next_event.tv64 = KTIME_MAX;
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST

/* Limit min_delta to a jiffie */
#define MIN_DELTA_LIMIT		(NSEC_PER_SEC / HZ)

/**
 * clockevents_increase_min_delta - raise minimum delta of a clock event device
 * @dev:	device to increase the minimum delta
 *
 * Returns 0 on success, -ETIME when the minimum delta reached the limit.
 */
static int clockevents_increase_min_delta(struct clock_event_device *dev)
{
	/* Nothing to do if we already reached the limit */
	if (dev->min_delta_ns >= MIN_DELTA_LIMIT) {
		printk(KERN_WARNING "CE: Reprogramming failure. Giving up\n");
		dev->next_event.tv64 = KTIME_MAX;
		return -ETIME;
	}

	if (dev->min_delta_ns < 5000)
		dev->min_delta_ns = 5000;
	else
		dev->min_delta_ns += dev->min_delta_ns >> 1;

	if (dev->min_delta_ns > MIN_DELTA_LIMIT)
		dev->min_delta_ns = MIN_DELTA_LIMIT;

	printk(KERN_WARNING "CE: %s increased min_delta_ns to %llu nsec\n",
	       dev->name ? dev->name : "?",
	       (unsigned long long) dev->min_delta_ns);
	return 0;
}
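
/*
 * Illustrative note (not part of the original file): on successive calls
 * the minimum delta grows geometrically, e.g. 5000 -> 7500 -> 11250 ns
 * and so on, until it hits MIN_DELTA_LIMIT; with a hypothetical HZ=250
 * that limit is NSEC_PER_SEC / HZ = 4,000,000 ns (one jiffy).
 */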

/**
 * clockevents_program_min_delta - Set clock event device to the minimum delay.
 * @dev:	device to program
 *
 * Returns 0 on success, -ETIME when the retry loop failed.
 */
static int clockevents_program_min_delta(struct clock_event_device *dev)
{
	unsigned long long clc;
	int64_t delta;
	int i;

	for (i = 0;;) {
		delta = dev->min_delta_ns;
		dev->next_event = ktime_add_ns(ktime_get(), delta);

		if (dev->mode == CLOCK_EVT_MODE_SHUTDOWN)
			return 0;

		dev->retries++;
		clc = ((unsigned long long) delta * dev->mult) >> dev->shift;
		if (dev->set_next_event((unsigned long) clc, dev) == 0)
			return 0;

		if (++i > 2) {
			/*
			 * We tried 3 times to program the device with the
			 * given min_delta_ns. Try to increase the minimum
			 * delta, if that fails as well get out of here.
			 */
			if (clockevents_increase_min_delta(dev))
				return -ETIME;
			i = 0;
		}
	}
}

#else /* CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST */

/**
 * clockevents_program_min_delta - Set clock event device to the minimum delay.
 * @dev:	device to program
 *
 * Returns 0 on success, -ETIME when the retry loop failed.
 */
static int clockevents_program_min_delta(struct clock_event_device *dev)
{
	unsigned long long clc;
	int64_t delta;

	delta = dev->min_delta_ns;
	dev->next_event = ktime_add_ns(ktime_get(), delta);

	if (dev->mode == CLOCK_EVT_MODE_SHUTDOWN)
		return 0;

	dev->retries++;
	clc = ((unsigned long long) delta * dev->mult) >> dev->shift;
	return dev->set_next_event((unsigned long) clc, dev);
}

#endif /* CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST */

/**
 * clockevents_program_event - Reprogram the clock event device.
 * @dev:	device to program
 * @expires:	absolute expiry time (monotonic clock)
 * @force:	program minimum delay if expires can not be set
 *
 * Returns 0 on success, -ETIME when the event is in the past.
 */
int clockevents_program_event(struct clock_event_device *dev, ktime_t expires,
			      bool force)
{
	unsigned long long clc;
	int64_t delta;
	int rc;

	if (unlikely(expires.tv64 < 0)) {
		WARN_ON_ONCE(1);
		return -ETIME;
	}

	dev->next_event = expires;

	if (dev->mode == CLOCK_EVT_MODE_SHUTDOWN)
		return 0;

	/* Shortcut for clockevent devices that can deal with ktime. */
	if (dev->features & CLOCK_EVT_FEAT_KTIME)
		return dev->set_next_ktime(expires, dev);

	delta = ktime_to_ns(ktime_sub(expires, ktime_get()));
	if (delta <= 0)
		return force ? clockevents_program_min_delta(dev) : -ETIME;

	delta = min(delta, (int64_t) dev->max_delta_ns);
	delta = max(delta, (int64_t) dev->min_delta_ns);

	clc = ((unsigned long long) delta * dev->mult) >> dev->shift;
	rc = dev->set_next_event((unsigned long) clc, dev);

	return (rc && force) ? clockevents_program_min_delta(dev) : rc;
}
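
/*
 * Illustrative sketch (not part of the original file): the tick layer is
 * the typical caller. A per-CPU handler might reprogram the device for
 * the next period and fall back to the minimum delta when the deadline
 * has already passed, roughly:
 *
 *	ktime_t next = ktime_add_ns(ktime_get(), NSEC_PER_SEC / HZ);
 *
 *	if (clockevents_program_event(dev, next, false))
 *		clockevents_program_event(dev, next, true);
 */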

/**
 * clockevents_register_notifier - register a clock events change listener
 */
int clockevents_register_notifier(struct notifier_block *nb)
{
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&clockevents_lock, flags);
	ret = raw_notifier_chain_register(&clockevents_chain, nb);
	raw_spin_unlock_irqrestore(&clockevents_lock, flags);

	return ret;
}

/*
 * Notify about a clock event change. Called with clockevents_lock
 * held.
 */
static void clockevents_do_notify(unsigned long reason, void *dev)
{
	raw_notifier_call_chain(&clockevents_chain, reason, dev);
}

/*
 * Called after a notify add to make devices available which were
 * released from the notifier call.
 */
static void clockevents_notify_released(void)
{
	struct clock_event_device *dev;

	while (!list_empty(&clockevents_released)) {
		dev = list_entry(clockevents_released.next,
				 struct clock_event_device, list);
		list_del(&dev->list);
		list_add(&dev->list, &clockevent_devices);
		clockevents_do_notify(CLOCK_EVT_NOTIFY_ADD, dev);
	}
}

/**
 * clockevents_register_device - register a clock event device
 * @dev:	device to register
 */
void clockevents_register_device(struct clock_event_device *dev)
{
	unsigned long flags;

	BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED);
	if (!dev->cpumask) {
		WARN_ON(num_possible_cpus() > 1);
		dev->cpumask = cpumask_of(smp_processor_id());
	}

	raw_spin_lock_irqsave(&clockevents_lock, flags);

	list_add(&dev->list, &clockevent_devices);
	clockevents_do_notify(CLOCK_EVT_NOTIFY_ADD, dev);
	clockevents_notify_released();

	raw_spin_unlock_irqrestore(&clockevents_lock, flags);
}
EXPORT_SYMBOL_GPL(clockevents_register_device);

static void clockevents_config(struct clock_event_device *dev,
			       u32 freq)
{
	u64 sec;

	if (!(dev->features & CLOCK_EVT_FEAT_ONESHOT))
		return;

	/*
	 * Calculate the maximum number of seconds we can sleep. Limit
	 * to 10 minutes for hardware which can program more than
	 * 32bit ticks so we still get reasonable conversion values.
	 */
	sec = dev->max_delta_ticks;
	do_div(sec, freq);
	if (!sec)
		sec = 1;
	else if (sec > 600 && dev->max_delta_ticks > UINT_MAX)
		sec = 600;

	clockevents_calc_mult_shift(dev, freq, sec);
	dev->min_delta_ns = clockevent_delta2ns(dev->min_delta_ticks, dev);
	dev->max_delta_ns = clockevent_delta2ns(dev->max_delta_ticks, dev);
}

/**
 * clockevents_config_and_register - Configure and register a clock event device
 * @dev:	device to register
 * @freq:	The clock frequency
 * @min_delta:	The minimum clock ticks to program in oneshot mode
 * @max_delta:	The maximum clock ticks to program in oneshot mode
 *
 * min/max_delta can be 0 for devices which do not support oneshot mode.
 */
void clockevents_config_and_register(struct clock_event_device *dev,
				     u32 freq, unsigned long min_delta,
				     unsigned long max_delta)
{
	dev->min_delta_ticks = min_delta;
	dev->max_delta_ticks = max_delta;
	clockevents_config(dev, freq);
	clockevents_register_device(dev);
}
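
/*
 * Illustrative sketch (not part of the original file): a hypothetical
 * timer driver with a 1 MHz clock would typically fill in the mode and
 * next-event callbacks and then hand the device to this helper, roughly:
 *
 *	static struct clock_event_device my_ced = {
 *		.name		= "my-timer",
 *		.features	= CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC,
 *		.rating		= 300,
 *		.set_mode	= my_timer_set_mode,
 *		.set_next_event	= my_timer_set_next_event,
 *		.cpumask	= cpu_possible_mask,
 *	};
 *
 *	clockevents_config_and_register(&my_ced, 1000000, 0xf, 0xffffffff);
 *
 * The names my_ced, my_timer_set_mode and my_timer_set_next_event are
 * placeholders for driver-provided code.
 */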

/**
 * clockevents_update_freq - Update frequency and reprogram a clock event device.
 * @dev:	device to modify
 * @freq:	new device frequency
 *
 * Reconfigure and reprogram a clock event device in oneshot
 * mode. Must be called on the cpu for which the device delivers per
 * cpu timer events with interrupts disabled! Returns 0 on success,
 * -ETIME when the event is in the past.
 */
int clockevents_update_freq(struct clock_event_device *dev, u32 freq)
{
	clockevents_config(dev, freq);

	if (dev->mode != CLOCK_EVT_MODE_ONESHOT)
		return 0;

	return clockevents_program_event(dev, dev->next_event, false);
}
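
/*
 * Illustrative sketch (not part of the original file): a driver whose
 * timer ticks off a scaling input clock might call this on the affected
 * CPU from a rate-change notifier, with interrupts disabled, roughly:
 *
 *	local_irq_save(flags);
 *	clockevents_update_freq(this_cpu_ced, new_rate);
 *	local_irq_restore(flags);
 *
 * Here this_cpu_ced and new_rate are placeholder names for the per-CPU
 * device and its new clock rate.
 */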

/*
 * Noop handler when we shut down an event device
 */
void clockevents_handle_noop(struct clock_event_device *dev)
{
}

/**
 * clockevents_exchange_device - release and request clock devices
 * @old:	device to release (can be NULL)
 * @new:	device to request (can be NULL)
 *
 * Called from the notifier chain. clockevents_lock is held already
 */
void clockevents_exchange_device(struct clock_event_device *old,
				 struct clock_event_device *new)
{
	unsigned long flags;

	local_irq_save(flags);
	/*
	 * Caller releases a clock event device. We queue it into the
	 * released list and do a notify add later.
	 */
	if (old) {
		clockevents_set_mode(old, CLOCK_EVT_MODE_UNUSED);
		list_del(&old->list);
		list_add(&old->list, &clockevents_released);
	}

	if (new) {
		BUG_ON(new->mode != CLOCK_EVT_MODE_UNUSED);
		clockevents_shutdown(new);
	}
	local_irq_restore(flags);
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS
/**
 * clockevents_notify - notification about relevant events
 */
void clockevents_notify(unsigned long reason, void *arg)
{
	struct clock_event_device *dev, *tmp;
	unsigned long flags;
	int cpu;

	raw_spin_lock_irqsave(&clockevents_lock, flags);
	clockevents_do_notify(reason, arg);

	switch (reason) {
	case CLOCK_EVT_NOTIFY_CPU_DEAD:
		/*
		 * Unregister the clock event devices which were
		 * released from the users in the notify chain.
		 */
		list_for_each_entry_safe(dev, tmp, &clockevents_released, list)
			list_del(&dev->list);
		/*
		 * Now check whether the CPU has left unused per cpu devices
		 */
		cpu = *((int *)arg);
		list_for_each_entry_safe(dev, tmp, &clockevent_devices, list) {
			if (cpumask_test_cpu(cpu, dev->cpumask) &&
			    cpumask_weight(dev->cpumask) == 1 &&
			    !tick_is_broadcast_device(dev)) {
				BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED);
				list_del(&dev->list);
			}
		}
		break;
	default:
		break;
	}
	raw_spin_unlock_irqrestore(&clockevents_lock, flags);
}
EXPORT_SYMBOL_GPL(clockevents_notify);
#endif