  1. /*
  2. * linux/kernel/time/tick-common.c
  3. *
  4. * This file contains the base functions to manage periodic tick
  5. * related events.
  6. *
  7. * Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
  8. * Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
  9. * Copyright(C) 2006-2007, Timesys Corp., Thomas Gleixner
  10. *
  11. * This code is licenced under the GPL version 2. For details see
  12. * kernel-base/COPYING.
  13. */
  14. #include <linux/cpu.h>
  15. #include <linux/err.h>
  16. #include <linux/hrtimer.h>
  17. #include <linux/irq.h>
  18. #include <linux/percpu.h>
  19. #include <linux/profile.h>
  20. #include <linux/sched.h>
  21. #include <linux/tick.h>
  22. #include "tick-internal.h"
/*
 * Tick devices: one tick_device per CPU, pointing at that CPU's
 * current clock event device.
 */
DEFINE_PER_CPU(struct tick_device, tick_cpu_device);
/*
 * Tick next event: keeps track of the tick time
 */
ktime_t tick_next_period;
ktime_t tick_period;
/*
 * CPU that owns the do_timer()/jiffies update duty. -1 until the
 * first tick device is set up (see tick_setup_device), which claims it.
 */
static int tick_do_timer_cpu = -1;
/*
 * Protects the per-cpu tick devices against concurrent registration
 * and shutdown (taken in tick_check_new_device and tick_shutdown).
 * Not static: shared with the broadcast code via tick-internal.h.
 */
DEFINE_SPINLOCK(tick_device_lock);
  34. /**
  35. * tick_is_oneshot_available - check for a oneshot capable event device
  36. */
  37. int tick_is_oneshot_available(void)
  38. {
  39. struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev;
  40. return dev && (dev->features & CLOCK_EVT_FEAT_ONESHOT);
  41. }
/*
 * Periodic tick: runs on every CPU each tick. Only the CPU that owns
 * the do_timer() duty advances jiffies and the global tick bookkeeping;
 * all CPUs do their local process-time accounting and profiling.
 */
static void tick_periodic(int cpu)
{
	if (tick_do_timer_cpu == cpu) {
		/* Global time update is serialized by the xtime seqlock */
		write_seqlock(&xtime_lock);

		/* Keep track of the next tick event */
		tick_next_period = ktime_add(tick_next_period, tick_period);

		do_timer(1);
		write_sequnlock(&xtime_lock);
	}

	update_process_times(user_mode(get_irq_regs()));
	profile_tick(CPU_PROFILING);
}
/*
 * Event handler for periodic ticks
 */
void tick_handle_periodic(struct clock_event_device *dev)
{
	int cpu = smp_processor_id();

	tick_periodic(cpu);

	/*
	 * Devices in true periodic mode fire by themselves; nothing to
	 * reprogram here.
	 */
	if (dev->mode != CLOCK_EVT_MODE_ONESHOT)
		return;
	/*
	 * Setup the next period for devices, which do not have
	 * periodic mode:
	 */
	for (;;) {
		ktime_t next = ktime_add(dev->next_event, tick_period);

		if (!clockevents_program_event(dev, next, ktime_get()))
			return;
		/*
		 * Programming failed — presumably "next" already lies in
		 * the past. Run the tick work for the missed period and
		 * try the one after it.
		 */
		tick_periodic(cpu);
	}
}
/*
 * Setup the device for a periodic tick.
 *
 * @dev:	the clock event device to set up
 * @broadcast:	selects the broadcast variant of the periodic handler
 *		(passed through to tick_set_periodic_handler)
 */
void tick_setup_periodic(struct clock_event_device *dev, int broadcast)
{
	tick_set_periodic_handler(dev, broadcast);

	/* Broadcast setup ? */
	if (!tick_device_is_functional(dev))
		return;

	if (dev->features & CLOCK_EVT_FEAT_PERIODIC) {
		/* Hardware reprograms itself; just switch the mode */
		clockevents_set_mode(dev, CLOCK_EVT_MODE_PERIODIC);
	} else {
		unsigned long seq;
		ktime_t next;

		/*
		 * Snapshot tick_next_period under the xtime seqlock so
		 * this device starts in sync with the global tick.
		 */
		do {
			seq = read_seqbegin(&xtime_lock);
			next = tick_next_period;
		} while (read_seqretry(&xtime_lock, seq));

		clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT);

		/*
		 * Keep advancing by one period until the event can be
		 * programmed (a non-zero return means "next" could not
		 * be set — presumably already elapsed).
		 */
		for (;;) {
			if (!clockevents_program_event(dev, next, ktime_get()))
				return;
			next = ktime_add(next, tick_period);
		}
	}
}
/*
 * Setup the tick device: install @newdev as @cpu's tick device, either
 * as the first device (start in periodic mode) or as a replacement
 * (carry over the old device's handler and next event).
 *
 * Called with tick_device_lock held (from tick_check_new_device).
 */
static void tick_setup_device(struct tick_device *td,
			      struct clock_event_device *newdev, int cpu,
			      cpumask_t cpumask)
{
	/*
	 * next_event/handler stay unset on the first-setup path below;
	 * that path forces TICKDEV_MODE_PERIODIC, so the oneshot branch
	 * at the bottom — their only consumer — is never taken then.
	 */
	ktime_t next_event;
	void (*handler)(struct clock_event_device *) = NULL;

	/*
	 * First device setup ?
	 */
	if (!td->evtdev) {
		/*
		 * If no cpu took the do_timer update, assign it to
		 * this cpu:
		 */
		if (tick_do_timer_cpu == -1) {
			tick_do_timer_cpu = cpu;
			tick_next_period = ktime_get();
			tick_period = ktime_set(0, NSEC_PER_SEC / HZ);
		}

		/*
		 * Startup in periodic mode first.
		 */
		td->mode = TICKDEV_MODE_PERIODIC;
	} else {
		/* Replacement: hand the old device's state to the new one */
		handler = td->evtdev->event_handler;
		next_event = td->evtdev->next_event;
	}

	td->evtdev = newdev;

	/*
	 * When the device is not per cpu, pin the interrupt to the
	 * current cpu:
	 */
	if (!cpus_equal(newdev->cpumask, cpumask))
		irq_set_affinity(newdev->irq, cpumask);

	/*
	 * When global broadcasting is active, check if the current
	 * device is registered as a placeholder for broadcast mode.
	 * This allows us to handle this x86 misfeature in a generic
	 * way.
	 */
	if (tick_device_uses_broadcast(newdev, cpu))
		return;

	if (td->mode == TICKDEV_MODE_PERIODIC)
		tick_setup_periodic(newdev, 0);
	else
		tick_setup_oneshot(newdev, handler, next_event);
}
/*
 * Check, if the new registered device should be used.
 *
 * Returns NOTIFY_STOP when @newdev was adopted (as tick device or as
 * broadcast device), NOTIFY_OK otherwise.
 */
static int tick_check_new_device(struct clock_event_device *newdev)
{
	struct clock_event_device *curdev;
	struct tick_device *td;
	int cpu, ret = NOTIFY_OK;
	unsigned long flags;
	cpumask_t cpumask;

	spin_lock_irqsave(&tick_device_lock, flags);

	cpu = smp_processor_id();
	/* Device must be able to fire on this CPU at all */
	if (!cpu_isset(cpu, newdev->cpumask))
		goto out;

	td = &per_cpu(tick_cpu_device, cpu);
	curdev = td->evtdev;
	cpumask = cpumask_of_cpu(cpu);

	/* cpu local device ? */
	if (!cpus_equal(newdev->cpumask, cpumask)) {
		/*
		 * If the cpu affinity of the device interrupt can not
		 * be set, ignore it.
		 */
		if (!irq_can_set_affinity(newdev->irq))
			goto out_bc;

		/*
		 * If we have a cpu local device already, do not replace it
		 * by a non cpu local device
		 */
		if (curdev && cpus_equal(curdev->cpumask, cpumask))
			goto out_bc;
	}

	/*
	 * If we have an active device, then check the rating and the oneshot
	 * feature.
	 */
	if (curdev) {
		/*
		 * Prefer one shot capable devices !
		 */
		if ((curdev->features & CLOCK_EVT_FEAT_ONESHOT) &&
		    !(newdev->features & CLOCK_EVT_FEAT_ONESHOT))
			goto out_bc;
		/*
		 * Check the rating
		 */
		if (curdev->rating >= newdev->rating)
			goto out_bc;
	}

	/*
	 * Replace the eventually existing device by the new
	 * device. If the current device is the broadcast device, do
	 * not give it back to the clockevents layer !
	 */
	if (tick_is_broadcast_device(curdev)) {
		clockevents_set_mode(curdev, CLOCK_EVT_MODE_SHUTDOWN);
		curdev = NULL;
	}
	clockevents_exchange_device(curdev, newdev);
	tick_setup_device(td, newdev, cpu, cpumask);

	if (newdev->features & CLOCK_EVT_FEAT_ONESHOT)
		tick_oneshot_notify();

	spin_unlock_irqrestore(&tick_device_lock, flags);
	return NOTIFY_STOP;

out_bc:
	/*
	 * Can the new device be used as a broadcast device ?
	 */
	if (tick_check_broadcast_device(newdev))
		ret = NOTIFY_STOP;
out:
	spin_unlock_irqrestore(&tick_device_lock, flags);
	return ret;
}
/*
 * Shutdown an event device on a given cpu:
 *
 * This is called on a life CPU, when a CPU is dead. So we cannot
 * access the hardware device itself.
 * We just set the mode and remove it from the lists.
 */
static void tick_shutdown(unsigned int *cpup)
{
	struct tick_device *td = &per_cpu(tick_cpu_device, *cpup);
	struct clock_event_device *dev = td->evtdev;
	unsigned long flags;

	spin_lock_irqsave(&tick_device_lock, flags);

	/* Next setup on this CPU starts from periodic mode again */
	td->mode = TICKDEV_MODE_PERIODIC;
	if (dev) {
		/*
		 * Prevent that the clock events layer tries to call
		 * the set mode function!
		 */
		dev->mode = CLOCK_EVT_MODE_UNUSED;
		clockevents_exchange_device(dev, NULL);
		td->evtdev = NULL;
	}

	spin_unlock_irqrestore(&tick_device_lock, flags);
}
  252. /*
  253. * Notification about clock event devices
  254. */
  255. static int tick_notify(struct notifier_block *nb, unsigned long reason,
  256. void *dev)
  257. {
  258. switch (reason) {
  259. case CLOCK_EVT_NOTIFY_ADD:
  260. return tick_check_new_device(dev);
  261. case CLOCK_EVT_NOTIFY_BROADCAST_ON:
  262. case CLOCK_EVT_NOTIFY_BROADCAST_OFF:
  263. tick_broadcast_on_off(reason, dev);
  264. break;
  265. case CLOCK_EVT_NOTIFY_BROADCAST_ENTER:
  266. case CLOCK_EVT_NOTIFY_BROADCAST_EXIT:
  267. tick_broadcast_oneshot_control(reason);
  268. break;
  269. case CLOCK_EVT_NOTIFY_CPU_DEAD:
  270. tick_shutdown_broadcast_oneshot(dev);
  271. tick_shutdown_broadcast(dev);
  272. tick_shutdown(dev);
  273. break;
  274. default:
  275. break;
  276. }
  277. return NOTIFY_OK;
  278. }
/* Notifier block hooked into the clockevents chain by tick_init() */
static struct notifier_block tick_notifier = {
	.notifier_call = tick_notify,
};
/**
 * tick_init - initialize the tick control
 *
 * Register the notifier with the clockevents framework
 */
void __init tick_init(void)
{
	clockevents_register_notifier(&tick_notifier);
}