/*
 * linux/kernel/time/clockevents.c
 *
 * This file contains functions which manage clock event devices.
 *
 * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
 *
 * This code is licenced under the GPL version 2. For details see
 * kernel-base/COPYING.
 */

#include <linux/clockchips.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/smp.h>

#include "tick-internal.h"

/* The registered clock event devices */
static LIST_HEAD(clockevent_devices);
static LIST_HEAD(clockevents_released);

/* Protection for the above */
static DEFINE_RAW_SPINLOCK(clockevents_lock);

/**
 * clockevent_delta2ns - Convert a latch value (device ticks) to nanoseconds
 * @latch:	value to convert
 * @evt:	pointer to clock event device descriptor
 *
 * Math helper, returns latch value converted to nanoseconds (bound checked)
 */
u64 clockevent_delta2ns(unsigned long latch, struct clock_event_device *evt)
{
	u64 clc = (u64) latch << evt->shift;

	if (unlikely(!evt->mult)) {
		evt->mult = 1;
		WARN_ON(1);
	}

	do_div(clc, evt->mult);
	if (clc < 1000)
		clc = 1000;
	if (clc > KTIME_MAX)
		clc = KTIME_MAX;

	return clc;
}
EXPORT_SYMBOL_GPL(clockevent_delta2ns);
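
/*
 * Worked example (editorial addition, not part of the upstream file):
 * assume clockevents_calc_mult_shift() has set up mult/shift so that one
 * device tick corresponds to 1000 ns (a 1 MHz timer).  Then
 *
 *	clc = (latch << shift) / mult  ~=  latch * 1000 ns
 *
 * so a latch of 0x7fffffff converts to roughly 2147 seconds.  The result
 * is clamped to at least 1000 ns and at most KTIME_MAX, so the returned
 * value is always usable as a ktime delta.
 */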

/**
 * clockevents_set_mode - set the operating mode of a clock event device
 * @dev:	device to modify
 * @mode:	new mode
 *
 * Must be called with interrupts disabled !
 */
void clockevents_set_mode(struct clock_event_device *dev,
			  enum clock_event_mode mode)
{
	if (dev->mode != mode) {
		dev->set_mode(mode, dev);
		dev->mode = mode;

		/*
		 * A nsec2cyc multiplicator of 0 is invalid and we'd crash
		 * on it, so fix it up and emit a warning:
		 */
		if (mode == CLOCK_EVT_MODE_ONESHOT) {
			if (unlikely(!dev->mult)) {
				dev->mult = 1;
				WARN_ON(1);
			}
		}
	}
}

/**
 * clockevents_shutdown - shutdown the device and clear next_event
 * @dev:	device to shutdown
 */
void clockevents_shutdown(struct clock_event_device *dev)
{
	clockevents_set_mode(dev, CLOCK_EVT_MODE_SHUTDOWN);
	dev->next_event.tv64 = KTIME_MAX;
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST

/* Limit min_delta to a jiffie */
#define MIN_DELTA_LIMIT		(NSEC_PER_SEC / HZ)

/**
 * clockevents_increase_min_delta - raise minimum delta of a clock event device
 * @dev:	device to increase the minimum delta
 *
 * Returns 0 on success, -ETIME when the minimum delta reached the limit.
 */
static int clockevents_increase_min_delta(struct clock_event_device *dev)
{
	/* Nothing to do if we already reached the limit */
	if (dev->min_delta_ns >= MIN_DELTA_LIMIT) {
		printk(KERN_WARNING "CE: Reprogramming failure. Giving up\n");
		dev->next_event.tv64 = KTIME_MAX;
		return -ETIME;
	}

	if (dev->min_delta_ns < 5000)
		dev->min_delta_ns = 5000;
	else
		dev->min_delta_ns += dev->min_delta_ns >> 1;

	if (dev->min_delta_ns > MIN_DELTA_LIMIT)
		dev->min_delta_ns = MIN_DELTA_LIMIT;

	printk(KERN_WARNING "CE: %s increased min_delta_ns to %llu nsec\n",
	       dev->name ? dev->name : "?",
	       (unsigned long long) dev->min_delta_ns);
	return 0;
}

/**
 * clockevents_program_min_delta - Set clock event device to the minimum delay.
 * @dev:	device to program
 *
 * Returns 0 on success, -ETIME when the retry loop failed.
 */
static int clockevents_program_min_delta(struct clock_event_device *dev)
{
	unsigned long long clc;
	int64_t delta;
	int i;

	for (i = 0;;) {
		delta = dev->min_delta_ns;
		dev->next_event = ktime_add_ns(ktime_get(), delta);

		if (dev->mode == CLOCK_EVT_MODE_SHUTDOWN)
			return 0;

		dev->retries++;
		clc = ((unsigned long long) delta * dev->mult) >> dev->shift;
		if (dev->set_next_event((unsigned long) clc, dev) == 0)
			return 0;

		if (++i > 2) {
			/*
			 * We tried 3 times to program the device with the
			 * given min_delta_ns. Try to increase the minimum
			 * delta, if that fails as well get out of here.
			 */
			if (clockevents_increase_min_delta(dev))
				return -ETIME;
			i = 0;
		}
	}
}

#else  /* CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST */

/**
 * clockevents_program_min_delta - Set clock event device to the minimum delay.
 * @dev:	device to program
 *
 * Returns 0 on success, -ETIME when the retry loop failed.
 */
static int clockevents_program_min_delta(struct clock_event_device *dev)
{
	unsigned long long clc;
	int64_t delta;

	delta = dev->min_delta_ns;
	dev->next_event = ktime_add_ns(ktime_get(), delta);

	if (dev->mode == CLOCK_EVT_MODE_SHUTDOWN)
		return 0;

	dev->retries++;
	clc = ((unsigned long long) delta * dev->mult) >> dev->shift;
	return dev->set_next_event((unsigned long) clc, dev);
}

#endif /* CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST */
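
/*
 * Worked example (editorial addition, not part of the upstream file):
 * with CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST, every third consecutive
 * programming failure grows min_delta_ns by 50%.  Starting below 5000 ns
 * the sequence is 5000, 7500, 11250, 16875, ... capped at MIN_DELTA_LIMIT
 * (NSEC_PER_SEC / HZ, i.e. 4 ms at HZ=250).  Once min_delta_ns has already
 * reached that limit, a further failure makes
 * clockevents_increase_min_delta() give up and return -ETIME.
 */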

/**
 * clockevents_program_event - Reprogram the clock event device.
 * @dev:	device to program
 * @expires:	absolute expiry time (monotonic clock)
 * @force:	program minimum delay if expires can not be set
 *
 * Returns 0 on success, -ETIME when the event is in the past.
 */
int clockevents_program_event(struct clock_event_device *dev, ktime_t expires,
			      bool force)
{
	unsigned long long clc;
	int64_t delta;
	int rc;

	if (unlikely(expires.tv64 < 0)) {
		WARN_ON_ONCE(1);
		return -ETIME;
	}

	dev->next_event = expires;

	if (dev->mode == CLOCK_EVT_MODE_SHUTDOWN)
		return 0;

	/* Shortcut for clockevent devices that can deal with ktime. */
	if (dev->features & CLOCK_EVT_FEAT_KTIME)
		return dev->set_next_ktime(expires, dev);

	delta = ktime_to_ns(ktime_sub(expires, ktime_get()));
	if (delta <= 0)
		return force ? clockevents_program_min_delta(dev) : -ETIME;

	delta = min(delta, (int64_t) dev->max_delta_ns);
	delta = max(delta, (int64_t) dev->min_delta_ns);

	clc = ((unsigned long long) delta * dev->mult) >> dev->shift;
	rc = dev->set_next_event((unsigned long) clc, dev);

	return (rc && force) ? clockevents_program_min_delta(dev) : rc;
}
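
/*
 * Worked example (editorial addition, not part of the upstream file):
 * with the 1 MHz device from the clockevent_delta2ns() example
 * (min_delta_ns == 1000, max_delta_ns of roughly 2147 s), a request
 * expiring 500 ns from now is clamped up to min_delta_ns before the
 * conversion:
 *
 *	clc = (1000 * dev->mult) >> dev->shift	== ~1 device tick
 *
 * while a request further away than max_delta_ns is clamped down, so the
 * value handed to ->set_next_event() always fits the hardware counter.
 */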

/*
 * Called after a notify add to make devices available which were
 * released from the notifier call.
 */
static void clockevents_notify_released(void)
{
	struct clock_event_device *dev;

	while (!list_empty(&clockevents_released)) {
		dev = list_entry(clockevents_released.next,
				 struct clock_event_device, list);
		list_del(&dev->list);
		list_add(&dev->list, &clockevent_devices);
		tick_check_new_device(dev);
	}
}

/**
 * clockevents_register_device - register a clock event device
 * @dev:	device to register
 */
void clockevents_register_device(struct clock_event_device *dev)
{
	unsigned long flags;

	BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED);
	if (!dev->cpumask) {
		WARN_ON(num_possible_cpus() > 1);
		dev->cpumask = cpumask_of(smp_processor_id());
	}

	raw_spin_lock_irqsave(&clockevents_lock, flags);

	list_add(&dev->list, &clockevent_devices);
	tick_check_new_device(dev);
	clockevents_notify_released();

	raw_spin_unlock_irqrestore(&clockevents_lock, flags);
}
EXPORT_SYMBOL_GPL(clockevents_register_device);

void clockevents_config(struct clock_event_device *dev, u32 freq)
{
	u64 sec;

	if (!(dev->features & CLOCK_EVT_FEAT_ONESHOT))
		return;

	/*
	 * Calculate the maximum number of seconds we can sleep. Limit
	 * to 10 minutes for hardware which can program more than
	 * 32bit ticks so we still get reasonable conversion values.
	 */
	sec = dev->max_delta_ticks;
	do_div(sec, freq);
	if (!sec)
		sec = 1;
	else if (sec > 600 && dev->max_delta_ticks > UINT_MAX)
		sec = 600;

	clockevents_calc_mult_shift(dev, freq, sec);
	dev->min_delta_ns = clockevent_delta2ns(dev->min_delta_ticks, dev);
	dev->max_delta_ns = clockevent_delta2ns(dev->max_delta_ticks, dev);
}

/**
 * clockevents_config_and_register - Configure and register a clock event device
 * @dev:	device to register
 * @freq:	The clock frequency
 * @min_delta:	The minimum clock ticks to program in oneshot mode
 * @max_delta:	The maximum clock ticks to program in oneshot mode
 *
 * min/max_delta can be 0 for devices which do not support oneshot mode.
 */
void clockevents_config_and_register(struct clock_event_device *dev,
				     u32 freq, unsigned long min_delta,
				     unsigned long max_delta)
{
	dev->min_delta_ticks = min_delta;
	dev->max_delta_ticks = max_delta;
	clockevents_config(dev, freq);
	clockevents_register_device(dev);
}
EXPORT_SYMBOL_GPL(clockevents_config_and_register);
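
/*
 * Illustrative usage sketch (editorial addition, not part of the upstream
 * file): how a hypothetical per-cpu timer driver might use this interface.
 * The names foo_set_next_event(), foo_set_mode() and FOO_TIMER_FREQ are
 * assumptions made for the example only.
 *
 *	static struct clock_event_device foo_clockevent = {
 *		.name		= "foo-timer",
 *		.features	= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
 *		.rating		= 300,
 *		.set_next_event	= foo_set_next_event,
 *		.set_mode	= foo_set_mode,
 *	};
 *
 *	static void __init foo_timer_init(void)
 *	{
 *		foo_clockevent.cpumask = cpumask_of(smp_processor_id());
 *		clockevents_config_and_register(&foo_clockevent, FOO_TIMER_FREQ,
 *						0xf, 0x7fffffff);
 *	}
 *
 * The min/max arguments (0xf and 0x7fffffff device ticks here) bound what
 * clockevents_config() converts into min_delta_ns/max_delta_ns.
 */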

/**
 * clockevents_update_freq - Update frequency and reprogram a clock event device.
 * @dev:	device to modify
 * @freq:	new device frequency
 *
 * Reconfigure and reprogram a clock event device in oneshot
 * mode. Must be called on the cpu for which the device delivers per
 * cpu timer events with interrupts disabled! Returns 0 on success,
 * -ETIME when the event is in the past.
 */
int clockevents_update_freq(struct clock_event_device *dev, u32 freq)
{
	clockevents_config(dev, freq);

	if (dev->mode != CLOCK_EVT_MODE_ONESHOT)
		return 0;

	return clockevents_program_event(dev, dev->next_event, false);
}
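
/*
 * Illustrative usage sketch (editorial addition, not part of the upstream
 * file): a driver whose timer input clock changes at runtime could
 * reprogram the per-cpu device from code running on that cpu with
 * interrupts disabled.  foo_clockevent and new_rate are assumptions made
 * for the example only.
 *
 *	unsigned long flags;
 *
 *	local_irq_save(flags);
 *	clockevents_update_freq(&foo_clockevent, new_rate);
 *	local_irq_restore(flags);
 */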

/*
 * Noop handler when we shut down an event device
 */
void clockevents_handle_noop(struct clock_event_device *dev)
{
}

/**
 * clockevents_exchange_device - release and request clock devices
 * @old:	device to release (can be NULL)
 * @new:	device to request (can be NULL)
 *
 * Called from the notifier chain. clockevents_lock is held already
 */
void clockevents_exchange_device(struct clock_event_device *old,
				 struct clock_event_device *new)
{
	unsigned long flags;

	local_irq_save(flags);
	/*
	 * Caller releases a clock event device. We queue it into the
	 * released list and do a notify add later.
	 */
	if (old) {
		clockevents_set_mode(old, CLOCK_EVT_MODE_UNUSED);
		list_del(&old->list);
		list_add(&old->list, &clockevents_released);
	}

	if (new) {
		BUG_ON(new->mode != CLOCK_EVT_MODE_UNUSED);
		clockevents_shutdown(new);
	}
	local_irq_restore(flags);
}

/**
 * clockevents_suspend - suspend clock devices
 */
void clockevents_suspend(void)
{
	struct clock_event_device *dev;

	list_for_each_entry_reverse(dev, &clockevent_devices, list)
		if (dev->suspend)
			dev->suspend(dev);
}

/**
 * clockevents_resume - resume clock devices
 */
void clockevents_resume(void)
{
	struct clock_event_device *dev;

	list_for_each_entry(dev, &clockevent_devices, list)
		if (dev->resume)
			dev->resume(dev);
}

#ifdef CONFIG_GENERIC_CLOCKEVENTS
/**
 * clockevents_notify - notification about relevant events
 */
void clockevents_notify(unsigned long reason, void *arg)
{
	struct clock_event_device *dev, *tmp;
	unsigned long flags;
	int cpu;

	raw_spin_lock_irqsave(&clockevents_lock, flags);
	tick_notify(reason, arg);

	switch (reason) {
	case CLOCK_EVT_NOTIFY_CPU_DEAD:
		/*
		 * Unregister the clock event devices which were
		 * released from the users in the notify chain.
		 */
		list_for_each_entry_safe(dev, tmp, &clockevents_released, list)
			list_del(&dev->list);
		/*
		 * Now check whether the CPU has left unused per cpu devices
		 */
		cpu = *((int *)arg);
		list_for_each_entry_safe(dev, tmp, &clockevent_devices, list) {
			if (cpumask_test_cpu(cpu, dev->cpumask) &&
			    cpumask_weight(dev->cpumask) == 1 &&
			    !tick_is_broadcast_device(dev)) {
				BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED);
				list_del(&dev->list);
			}
		}
		break;
	default:
		break;
	}
	raw_spin_unlock_irqrestore(&clockevents_lock, flags);
}
EXPORT_SYMBOL_GPL(clockevents_notify);
#endif