/*
 *  linux/kernel/hrtimer.c
 *
 *  Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 *  Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 *  Copyright(C) 2006-2007  Timesys Corp., Thomas Gleixner
 *
 *  High-resolution kernel timers
 *
 *  In contrast to the low-resolution timeout API implemented in
 *  kernel/timer.c, hrtimers provide finer resolution and accuracy
 *  depending on system configuration and capabilities.
 *
 *  These timers are currently used for:
 *   - itimers
 *   - POSIX timers
 *   - nanosleep
 *   - precise in-kernel timing
 *
 *  Started by: Thomas Gleixner and Ingo Molnar
 *
 *  Credits:
 *	based on kernel/timer.c
 *
 *	Help, testing, suggestions, bugfixes, improvements were
 *	provided by:
 *
 *	George Anzinger, Andrew Morton, Steven Rostedt, Roman Zippel
 *	et. al.
 *
 *  For licensing details see kernel-base/COPYING
 */

#include <linux/cpu.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/hrtimer.h>
#include <linux/notifier.h>
#include <linux/syscalls.h>
#include <linux/kallsyms.h>
#include <linux/interrupt.h>
#include <linux/tick.h>
#include <linux/seq_file.h>
#include <linux/err.h>
#include <linux/debugobjects.h>

#include <asm/uaccess.h>
/**
 * ktime_get - get the monotonic time in ktime_t format
 *
 * returns the time in ktime_t format
 */
ktime_t ktime_get(void)
{
	struct timespec now;

	ktime_get_ts(&now);

	return timespec_to_ktime(now);
}
EXPORT_SYMBOL_GPL(ktime_get);

/**
 * ktime_get_real - get the real (wall-) time in ktime_t format
 *
 * returns the time in ktime_t format
 */
ktime_t ktime_get_real(void)
{
	struct timespec now;

	getnstimeofday(&now);

	return timespec_to_ktime(now);
}
EXPORT_SYMBOL_GPL(ktime_get_real);
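
/*
 * Illustrative sketch (added for clarity, not part of the original
 * file): measuring an elapsed interval with the monotonic clock.
 * ktime_get() is the right choice here because, unlike
 * ktime_get_real(), it is unaffected by settimeofday() adjustments.
 * do_something() is a hypothetical workload.
 *
 *	ktime_t start, delta;
 *
 *	start = ktime_get();
 *	do_something();
 *	delta = ktime_sub(ktime_get(), start);
 *	printk(KERN_DEBUG "took %lld ns\n", (long long)ktime_to_ns(delta));
 */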
/*
 * The timer bases:
 *
 * Note: If we want to add new timer bases, we have to skip the two
 * clock ids captured by the cpu-timers. We do this by holding empty
 * entries rather than doing math adjustment of the clock ids.
 * This ensures that we capture erroneous accesses to these clock ids
 * rather than moving them into the range of valid clock ids.
 */
DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) =
{
	.clock_base =
	{
		{
			.index = CLOCK_REALTIME,
			.get_time = &ktime_get_real,
			.resolution = KTIME_LOW_RES,
		},
		{
			.index = CLOCK_MONOTONIC,
			.get_time = &ktime_get,
			.resolution = KTIME_LOW_RES,
		},
	}
};

/**
 * ktime_get_ts - get the monotonic clock in timespec format
 * @ts: pointer to timespec variable
 *
 * The function calculates the monotonic clock from the realtime
 * clock and the wall_to_monotonic offset and stores the result
 * in normalized timespec format in the variable pointed to by @ts.
 */
void ktime_get_ts(struct timespec *ts)
{
	struct timespec tomono;
	unsigned long seq;

	do {
		seq = read_seqbegin(&xtime_lock);
		getnstimeofday(ts);
		tomono = wall_to_monotonic;

	} while (read_seqretry(&xtime_lock, seq));

	set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec,
				ts->tv_nsec + tomono.tv_nsec);
}
EXPORT_SYMBOL_GPL(ktime_get_ts);
/*
 * Get the coarse-grained time at the softirq, based on xtime and
 * wall_to_monotonic.
 */
static void hrtimer_get_softirq_time(struct hrtimer_cpu_base *base)
{
	ktime_t xtim, tomono;
	struct timespec xts, tom;
	unsigned long seq;

	do {
		seq = read_seqbegin(&xtime_lock);
		xts = current_kernel_time();
		tom = wall_to_monotonic;
	} while (read_seqretry(&xtime_lock, seq));

	xtim = timespec_to_ktime(xts);
	tomono = timespec_to_ktime(tom);
	base->clock_base[CLOCK_REALTIME].softirq_time = xtim;
	base->clock_base[CLOCK_MONOTONIC].softirq_time =
		ktime_add(xtim, tomono);
}

/*
 * Functions and macros which are different for UP/SMP systems are kept in a
 * single place
 */
#ifdef CONFIG_SMP

/*
 * We are using hashed locking: holding per_cpu(hrtimer_bases)[n].lock
 * means that all timers which are tied to this base via timer->base are
 * locked, and the base itself is locked too.
 *
 * So __run_timers/migrate_timers can safely modify all timers which could
 * be found on the lists/queues.
 *
 * When the timer's base is locked, and the timer is removed from the list,
 * it is possible to set timer->base = NULL and drop the lock: the timer
 * remains locked.
 */
static
struct hrtimer_clock_base *lock_hrtimer_base(const struct hrtimer *timer,
					     unsigned long *flags)
{
	struct hrtimer_clock_base *base;

	for (;;) {
		base = timer->base;
		if (likely(base != NULL)) {
			spin_lock_irqsave(&base->cpu_base->lock, *flags);
			if (likely(base == timer->base))
				return base;
			/* The timer has migrated to another CPU: */
			spin_unlock_irqrestore(&base->cpu_base->lock, *flags);
		}
		cpu_relax();
	}
}
/*
 * Switch the timer base to the current CPU when possible.
 */
static inline struct hrtimer_clock_base *
switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_clock_base *base)
{
	struct hrtimer_clock_base *new_base;
	struct hrtimer_cpu_base *new_cpu_base;

	new_cpu_base = &__get_cpu_var(hrtimer_bases);
	new_base = &new_cpu_base->clock_base[base->index];

	if (base != new_base) {
		/*
		 * We are trying to schedule the timer on the local CPU.
		 * However we can't change timer's base while it is running,
		 * so we keep it on the same CPU. No hassle vs. reprogramming
		 * the event source in the high resolution case. The softirq
		 * code will take care of this when the timer function has
		 * completed. There is no conflict as we hold the lock until
		 * the timer is enqueued.
		 */
		if (unlikely(hrtimer_callback_running(timer)))
			return base;

		/* See the comment in lock_timer_base() */
		timer->base = NULL;
		spin_unlock(&base->cpu_base->lock);
		spin_lock(&new_base->cpu_base->lock);
		timer->base = new_base;
	}
	return new_base;
}

#else /* CONFIG_SMP */

static inline struct hrtimer_clock_base *
lock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
{
	struct hrtimer_clock_base *base = timer->base;

	spin_lock_irqsave(&base->cpu_base->lock, *flags);

	return base;
}

# define switch_hrtimer_base(t, b)	(b)

#endif	/* !CONFIG_SMP */
/*
 * Functions for the union type storage format of ktime_t which are
 * too large for inlining:
 */
#if BITS_PER_LONG < 64
# ifndef CONFIG_KTIME_SCALAR
/**
 * ktime_add_ns - Add a scalar nanoseconds value to a ktime_t variable
 * @kt: addend
 * @nsec: the scalar nsec value to add
 *
 * Returns the sum of kt and nsec in ktime_t format
 */
ktime_t ktime_add_ns(const ktime_t kt, u64 nsec)
{
	ktime_t tmp;

	if (likely(nsec < NSEC_PER_SEC)) {
		tmp.tv64 = nsec;
	} else {
		unsigned long rem = do_div(nsec, NSEC_PER_SEC);

		tmp = ktime_set((long)nsec, rem);
	}

	return ktime_add(kt, tmp);
}

EXPORT_SYMBOL_GPL(ktime_add_ns);
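
/*
 * Illustrative sketch (added for clarity, not part of the original
 * file): on a 32-bit, non-KTIME_SCALAR build the nsec argument is
 * split into whole seconds and a nanosecond remainder before the add,
 * so adding 2,500,000,000 ns behaves like adding ktime_set(2, 500000000):
 *
 *	ktime_t t = ktime_set(10, 0);		// 10 s
 *
 *	t = ktime_add_ns(t, 2500000000ULL);	// now 12.5 s
 */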
/**
 * ktime_sub_ns - Subtract a scalar nanoseconds value from a ktime_t variable
 * @kt: minuend
 * @nsec: the scalar nsec value to subtract
 *
 * Returns the subtraction of @nsec from @kt in ktime_t format
 */
ktime_t ktime_sub_ns(const ktime_t kt, u64 nsec)
{
	ktime_t tmp;

	if (likely(nsec < NSEC_PER_SEC)) {
		tmp.tv64 = nsec;
	} else {
		unsigned long rem = do_div(nsec, NSEC_PER_SEC);

		tmp = ktime_set((long)nsec, rem);
	}

	return ktime_sub(kt, tmp);
}

EXPORT_SYMBOL_GPL(ktime_sub_ns);
# endif /* !CONFIG_KTIME_SCALAR */

/*
 * Divide a ktime value by a nanosecond value
 */
u64 ktime_divns(const ktime_t kt, s64 div)
{
	u64 dclc;
	int sft = 0;

	dclc = ktime_to_ns(kt);
	/* Make sure the divisor is less than 2^32: */
	while (div >> 32) {
		sft++;
		div >>= 1;
	}
	dclc >>= sft;
	do_div(dclc, (unsigned long) div);

	return dclc;
}
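
/*
 * Worked example (added for clarity, not in the original file): for a
 * divisor of 2^33 the loop above shifts divisor and dividend right by
 * one bit each, so that do_div(), which only takes a 32-bit divisor,
 * can still be used. Shifting both operands by the same amount leaves
 * the quotient essentially unchanged; only the sft low bits of the
 * dividend are discarded, a small rounding error in the result.
 */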
#endif /* BITS_PER_LONG < 64 */

/*
 * Add two ktime values and do a safety check for overflow:
 */
ktime_t ktime_add_safe(const ktime_t lhs, const ktime_t rhs)
{
	ktime_t res = ktime_add(lhs, rhs);

	/*
	 * We use KTIME_SEC_MAX here, the maximum timeout which we can
	 * return to user space in a timespec:
	 */
	if (res.tv64 < 0 || res.tv64 < lhs.tv64 || res.tv64 < rhs.tv64)
		res = ktime_set(KTIME_SEC_MAX, 0);

	return res;
}
#ifdef CONFIG_DEBUG_OBJECTS_TIMERS

static struct debug_obj_descr hrtimer_debug_descr;

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static int hrtimer_fixup_init(void *addr, enum debug_obj_state state)
{
	struct hrtimer *timer = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		hrtimer_cancel(timer);
		debug_object_init(timer, &hrtimer_debug_descr);
		return 1;
	default:
		return 0;
	}
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown object is activated (might be a statically initialized object)
 */
static int hrtimer_fixup_activate(void *addr, enum debug_obj_state state)
{
	switch (state) {

	case ODEBUG_STATE_NOTAVAILABLE:
		WARN_ON_ONCE(1);
		return 0;

	case ODEBUG_STATE_ACTIVE:
		WARN_ON(1);
		/* fall through */
	default:
		return 0;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static int hrtimer_fixup_free(void *addr, enum debug_obj_state state)
{
	struct hrtimer *timer = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		hrtimer_cancel(timer);
		debug_object_free(timer, &hrtimer_debug_descr);
		return 1;
	default:
		return 0;
	}
}

static struct debug_obj_descr hrtimer_debug_descr = {
	.name		= "hrtimer",
	.fixup_init	= hrtimer_fixup_init,
	.fixup_activate	= hrtimer_fixup_activate,
	.fixup_free	= hrtimer_fixup_free,
};

static inline void debug_hrtimer_init(struct hrtimer *timer)
{
	debug_object_init(timer, &hrtimer_debug_descr);
}

static inline void debug_hrtimer_activate(struct hrtimer *timer)
{
	debug_object_activate(timer, &hrtimer_debug_descr);
}

static inline void debug_hrtimer_deactivate(struct hrtimer *timer)
{
	debug_object_deactivate(timer, &hrtimer_debug_descr);
}

static inline void debug_hrtimer_free(struct hrtimer *timer)
{
	debug_object_free(timer, &hrtimer_debug_descr);
}

static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
			   enum hrtimer_mode mode);

void hrtimer_init_on_stack(struct hrtimer *timer, clockid_t clock_id,
			   enum hrtimer_mode mode)
{
	debug_object_init_on_stack(timer, &hrtimer_debug_descr);
	__hrtimer_init(timer, clock_id, mode);
}

void destroy_hrtimer_on_stack(struct hrtimer *timer)
{
	debug_object_free(timer, &hrtimer_debug_descr);
}

#else
static inline void debug_hrtimer_init(struct hrtimer *timer) { }
static inline void debug_hrtimer_activate(struct hrtimer *timer) { }
static inline void debug_hrtimer_deactivate(struct hrtimer *timer) { }
#endif

/*
 * Check whether the timer is on the callback pending list
 */
static inline int hrtimer_cb_pending(const struct hrtimer *timer)
{
	return timer->state & HRTIMER_STATE_PENDING;
}

/*
 * Remove a timer from the callback pending list
 */
static inline void hrtimer_remove_cb_pending(struct hrtimer *timer)
{
	list_del_init(&timer->cb_entry);
}

/* High resolution timer related functions */
#ifdef CONFIG_HIGH_RES_TIMERS

/*
 * High resolution timer enabled ?
 */
static int hrtimer_hres_enabled __read_mostly = 1;

/*
 * Enable / Disable high resolution mode
 */
static int __init setup_hrtimer_hres(char *str)
{
	if (!strcmp(str, "off"))
		hrtimer_hres_enabled = 0;
	else if (!strcmp(str, "on"))
		hrtimer_hres_enabled = 1;
	else
		return 0;
	return 1;
}

__setup("highres=", setup_hrtimer_hres);
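
/*
 * Note (added for clarity): the __setup() above wires up the "highres="
 * kernel boot parameter, so high resolution mode can be disabled at
 * boot by putting e.g.
 *
 *	highres=off
 *
 * on the kernel command line. Any value other than "on" or "off" is
 * rejected by setup_hrtimer_hres().
 */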

/*
 * hrtimer_is_hres_enabled - query whether highres mode is enabled
 */
static inline int hrtimer_is_hres_enabled(void)
{
	return hrtimer_hres_enabled;
}

/*
 * Is the high resolution mode active ?
 */
static inline int hrtimer_hres_active(void)
{
	return __get_cpu_var(hrtimer_bases).hres_active;
}

/*
 * Reprogram the event source, checking both queues for the next event.
 *
 * Called with interrupts disabled and base->lock held
 */
static void hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base)
{
	int i;
	struct hrtimer_clock_base *base = cpu_base->clock_base;
	ktime_t expires;

	cpu_base->expires_next.tv64 = KTIME_MAX;

	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) {
		struct hrtimer *timer;

		if (!base->first)
			continue;
		timer = rb_entry(base->first, struct hrtimer, node);
		expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
		if (expires.tv64 < cpu_base->expires_next.tv64)
			cpu_base->expires_next = expires;
	}

	if (cpu_base->expires_next.tv64 != KTIME_MAX)
		tick_program_event(cpu_base->expires_next, 1);
}

/*
 * Shared reprogramming for clock_realtime and clock_monotonic
 *
 * When a timer is enqueued and expires earlier than the already enqueued
 * timers, we have to check whether it expires earlier than the timer for
 * which the clock event device was armed.
 *
 * Called with interrupts disabled and base->cpu_base.lock held
 */
static int hrtimer_reprogram(struct hrtimer *timer,
			     struct hrtimer_clock_base *base)
{
	ktime_t *expires_next = &__get_cpu_var(hrtimer_bases).expires_next;
	ktime_t expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
	int res;

	WARN_ON_ONCE(hrtimer_get_expires_tv64(timer) < 0);

	/*
	 * When the callback is running, we do not reprogram the clock event
	 * device. The timer callback is either running on a different CPU or
	 * the callback is executed in the hrtimer_interrupt context. The
	 * reprogramming is handled either by the softirq, which called the
	 * callback, or at the end of the hrtimer_interrupt.
	 */
	if (hrtimer_callback_running(timer))
		return 0;

	/*
	 * A CLOCK_REALTIME timer might be requested with an absolute
	 * expiry time which is less than base->offset. Nothing wrong
	 * with that, just avoid calling into the tick code, which now
	 * objects to negative expiry values.
	 */
	if (expires.tv64 < 0)
		return -ETIME;

	if (expires.tv64 >= expires_next->tv64)
		return 0;

	/*
	 * Clockevents returns -ETIME when the event was in the past.
	 */
	res = tick_program_event(expires, 0);
	if (!IS_ERR_VALUE(res))
		*expires_next = expires;
	return res;
}

/*
 * Retrigger next event is called after clock was set
 *
 * Called with interrupts disabled via on_each_cpu()
 */
static void retrigger_next_event(void *arg)
{
	struct hrtimer_cpu_base *base;
	struct timespec realtime_offset;
	unsigned long seq;

	if (!hrtimer_hres_active())
		return;

	do {
		seq = read_seqbegin(&xtime_lock);
		set_normalized_timespec(&realtime_offset,
					-wall_to_monotonic.tv_sec,
					-wall_to_monotonic.tv_nsec);
	} while (read_seqretry(&xtime_lock, seq));

	base = &__get_cpu_var(hrtimer_bases);

	/* Adjust CLOCK_REALTIME offset */
	spin_lock(&base->lock);
	base->clock_base[CLOCK_REALTIME].offset =
		timespec_to_ktime(realtime_offset);

	hrtimer_force_reprogram(base);
	spin_unlock(&base->lock);
}

/*
 * Clock realtime was set
 *
 * Change the offset of the realtime clock vs. the monotonic
 * clock.
 *
 * We might have to reprogram the high resolution timer interrupt. On
 * SMP we call the architecture specific code to retrigger _all_ high
 * resolution timer interrupts. On UP we just disable interrupts and
 * call the high resolution interrupt code.
 */
void clock_was_set(void)
{
	/* Retrigger the CPU local events everywhere */
	on_each_cpu(retrigger_next_event, NULL, 1);
}

/*
 * During resume we might have to reprogram the high resolution timer
 * interrupt (on the local CPU):
 */
void hres_timers_resume(void)
{
	/* Retrigger the CPU local events: */
	retrigger_next_event(NULL);
}

/*
 * Initialize the high resolution related parts of cpu_base
 */
static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base)
{
	base->expires_next.tv64 = KTIME_MAX;
	base->hres_active = 0;
}

/*
 * Initialize the high resolution related parts of a hrtimer
 */
static inline void hrtimer_init_timer_hres(struct hrtimer *timer)
{
}

/*
 * When high resolution timers are active, try to reprogram. Note that in
 * case the state has HRTIMER_STATE_CALLBACK set, no reprogramming and no
 * expiry check happens. The timer gets enqueued into the rbtree and the
 * reprogramming and expiry check is done in the hrtimer_interrupt or in
 * the softirq.
 */
static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
					    struct hrtimer_clock_base *base)
{
	if (base->cpu_base->hres_active && hrtimer_reprogram(timer, base)) {

		/* Timer is expired, act upon the callback mode */
		switch (timer->cb_mode) {
		case HRTIMER_CB_IRQSAFE_NO_RESTART:
			debug_hrtimer_deactivate(timer);
			/*
			 * We can call the callback from here. No restart
			 * happens, so no danger of recursion
			 */
			BUG_ON(timer->function(timer) != HRTIMER_NORESTART);
			return 1;
		case HRTIMER_CB_IRQSAFE_PERCPU:
		case HRTIMER_CB_IRQSAFE_UNLOCKED:
			/*
			 * This is solely for the sched tick emulation with
			 * dynamic tick support to ensure that we do not
			 * restart the tick right on the edge and end up with
			 * the tick timer in the softirq! The calling site
			 * takes care of this. Also used for the hrtimer
			 * sleeper!
			 */
			debug_hrtimer_deactivate(timer);
			return 1;
		case HRTIMER_CB_IRQSAFE:
		case HRTIMER_CB_SOFTIRQ:
			/*
			 * Move everything else into the softirq pending list!
			 */
			list_add_tail(&timer->cb_entry,
				      &base->cpu_base->cb_pending);
			timer->state = HRTIMER_STATE_PENDING;
			return 1;
		default:
			BUG();
		}
	}
	return 0;
}

/*
 * Switch to high resolution mode
 */
static int hrtimer_switch_to_hres(void)
{
	int cpu = smp_processor_id();
	struct hrtimer_cpu_base *base = &per_cpu(hrtimer_bases, cpu);
	unsigned long flags;

	if (base->hres_active)
		return 1;

	local_irq_save(flags);

	if (tick_init_highres()) {
		local_irq_restore(flags);
		printk(KERN_WARNING "Could not switch to high resolution "
				    "mode on CPU %d\n", cpu);
		return 0;
	}
	base->hres_active = 1;
	base->clock_base[CLOCK_REALTIME].resolution = KTIME_HIGH_RES;
	base->clock_base[CLOCK_MONOTONIC].resolution = KTIME_HIGH_RES;

	tick_setup_sched_timer();

	/* "Retrigger" the interrupt to get things going */
	retrigger_next_event(NULL);
	local_irq_restore(flags);
	printk(KERN_DEBUG "Switched to high resolution mode on CPU %d\n",
	       smp_processor_id());
	return 1;
}

static inline void hrtimer_raise_softirq(void)
{
	raise_softirq(HRTIMER_SOFTIRQ);
}

#else

static inline int hrtimer_hres_active(void) { return 0; }
static inline int hrtimer_is_hres_enabled(void) { return 0; }
static inline int hrtimer_switch_to_hres(void) { return 0; }
static inline void hrtimer_force_reprogram(struct hrtimer_cpu_base *base) { }
static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
					    struct hrtimer_clock_base *base)
{
	return 0;
}
static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base) { }
static inline void hrtimer_init_timer_hres(struct hrtimer *timer) { }
static inline int hrtimer_reprogram(struct hrtimer *timer,
				    struct hrtimer_clock_base *base)
{
	return 0;
}
static inline void hrtimer_raise_softirq(void) { }

#endif /* CONFIG_HIGH_RES_TIMERS */

#ifdef CONFIG_TIMER_STATS
void __timer_stats_hrtimer_set_start_info(struct hrtimer *timer, void *addr)
{
	if (timer->start_site)
		return;

	timer->start_site = addr;
	memcpy(timer->start_comm, current->comm, TASK_COMM_LEN);
	timer->start_pid = current->pid;
}
#endif

/*
 * Counterpart to lock_hrtimer_base above:
 */
static inline
void unlock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
{
	spin_unlock_irqrestore(&timer->base->cpu_base->lock, *flags);
}

/**
 * hrtimer_forward - forward the timer expiry
 * @timer: hrtimer to forward
 * @now: forward past this time
 * @interval: the interval to forward
 *
 * Forward the timer expiry so it will expire in the future.
 * Returns the number of overruns.
 */
u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
{
	u64 orun = 1;
	ktime_t delta;

	delta = ktime_sub(now, hrtimer_get_expires(timer));

	if (delta.tv64 < 0)
		return 0;

	if (interval.tv64 < timer->base->resolution.tv64)
		interval.tv64 = timer->base->resolution.tv64;

	if (unlikely(delta.tv64 >= interval.tv64)) {
		s64 incr = ktime_to_ns(interval);

		orun = ktime_divns(delta, incr);
		hrtimer_add_expires_ns(timer, incr * orun);
		if (hrtimer_get_expires_tv64(timer) > now.tv64)
			return orun;
		/*
		 * This (and the ktime_add() below) is the
		 * correction for exact:
		 */
		orun++;
	}
	hrtimer_add_expires(timer, interval);

	return orun;
}
EXPORT_SYMBOL_GPL(hrtimer_forward);
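
/*
 * Illustrative sketch (added for clarity, not part of the original
 * file): the canonical use of hrtimer_forward() is a periodic timer
 * that re-arms itself from its own callback. my_period is a
 * hypothetical ktime_t interval.
 *
 *	static enum hrtimer_restart my_periodic_cb(struct hrtimer *timer)
 *	{
 *		// Push the expiry past "now"; the return value counts
 *		// how many periods were missed if the callback ran late.
 *		u64 overruns = hrtimer_forward(timer,
 *					       hrtimer_cb_get_time(timer),
 *					       my_period);
 *
 *		return HRTIMER_RESTART;	// re-enqueue with the new expiry
 *	}
 */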

/*
 * enqueue_hrtimer - internal function to (re)start a timer
 *
 * The timer is inserted in expiry order. Insertion into the
 * red black tree is O(log(n)). Must hold the base lock.
 */
static void enqueue_hrtimer(struct hrtimer *timer,
			    struct hrtimer_clock_base *base, int reprogram)
{
	struct rb_node **link = &base->active.rb_node;
	struct rb_node *parent = NULL;
	struct hrtimer *entry;
	int leftmost = 1;

	debug_hrtimer_activate(timer);

	/*
	 * Find the right place in the rbtree:
	 */
	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct hrtimer, node);
		/*
		 * We don't care about collisions. Nodes with
		 * the same expiry time stay together.
		 */
		if (hrtimer_get_expires_tv64(timer) <
				hrtimer_get_expires_tv64(entry)) {
			link = &(*link)->rb_left;
		} else {
			link = &(*link)->rb_right;
			leftmost = 0;
		}
	}

	/*
	 * Insert the timer to the rbtree and check whether it
	 * replaces the first pending timer
	 */
	if (leftmost) {
		/*
		 * Reprogram the clock event device. When the timer is already
		 * expired hrtimer_enqueue_reprogram has either called the
		 * callback or added it to the pending list and raised the
		 * softirq.
		 *
		 * This is a NOP for !HIGHRES
		 */
		if (reprogram && hrtimer_enqueue_reprogram(timer, base))
			return;

		base->first = &timer->node;
	}

	rb_link_node(&timer->node, parent, link);
	rb_insert_color(&timer->node, &base->active);
	/*
	 * HRTIMER_STATE_ENQUEUED is or'ed to the current state to preserve the
	 * state of a possibly running callback.
	 */
	timer->state |= HRTIMER_STATE_ENQUEUED;
}

/*
 * __remove_hrtimer - internal function to remove a timer
 *
 * Caller must hold the base lock.
 *
 * High resolution timer mode reprograms the clock event device when the
 * timer is the one which expires next. The caller can disable this by setting
 * reprogram to zero. This is useful when the context does a reprogramming
 * anyway (e.g. timer interrupt)
 */
static void __remove_hrtimer(struct hrtimer *timer,
			     struct hrtimer_clock_base *base,
			     unsigned long newstate, int reprogram)
{
	/* High res. callback list. NOP for !HIGHRES */
	if (hrtimer_cb_pending(timer))
		hrtimer_remove_cb_pending(timer);
	else {
		/*
		 * Remove the timer from the rbtree and replace the
		 * first entry pointer if necessary.
		 */
		if (base->first == &timer->node) {
			base->first = rb_next(&timer->node);
			/* Reprogram the clock event device, if enabled */
			if (reprogram && hrtimer_hres_active())
				hrtimer_force_reprogram(base->cpu_base);
		}
		rb_erase(&timer->node, &base->active);
	}
	timer->state = newstate;
}

/*
 * remove hrtimer, called with base lock held
 */
static inline int
remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base)
{
	if (hrtimer_is_queued(timer)) {
		int reprogram;

		/*
		 * Remove the timer and force reprogramming when high
		 * resolution mode is active and the timer is on the current
		 * CPU. If we remove a timer on another CPU, reprogramming is
		 * skipped. The interrupt event on this CPU is fired and
		 * reprogramming happens in the interrupt handler. This is a
		 * rare case and less expensive than a smp call.
		 */
		debug_hrtimer_deactivate(timer);
		timer_stats_hrtimer_clear_start_info(timer);
		reprogram = base->cpu_base == &__get_cpu_var(hrtimer_bases);
		__remove_hrtimer(timer, base, HRTIMER_STATE_INACTIVE,
				 reprogram);
		return 1;
	}
	return 0;
}

/**
 * hrtimer_start_range_ns - (re)start an hrtimer on the current CPU
 * @timer: the timer to be added
 * @tim: expiry time
 * @delta_ns: "slack" range for the timer
 * @mode: expiry mode: absolute (HRTIMER_MODE_ABS) or relative (HRTIMER_MODE_REL)
 *
 * Returns:
 *  0 on success
 *  1 when the timer was active
 */
int
hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, unsigned long delta_ns,
			const enum hrtimer_mode mode)
{
	struct hrtimer_clock_base *base, *new_base;
	unsigned long flags;
	int ret, raise;

	base = lock_hrtimer_base(timer, &flags);

	/* Remove an active timer from the queue: */
	ret = remove_hrtimer(timer, base);

	/* Switch the timer base, if necessary: */
	new_base = switch_hrtimer_base(timer, base);

	if (mode == HRTIMER_MODE_REL) {
		tim = ktime_add_safe(tim, new_base->get_time());
		/*
		 * CONFIG_TIME_LOW_RES is a temporary way for architectures
		 * to signal that they simply return xtime in
		 * do_gettimeoffset(). In this case we want to round up by
		 * resolution when starting a relative timer, to avoid short
		 * timeouts. This will go away with the GTOD framework.
		 */
#ifdef CONFIG_TIME_LOW_RES
		tim = ktime_add_safe(tim, base->resolution);
#endif
	}

	hrtimer_set_expires_range_ns(timer, tim, delta_ns);

	timer_stats_hrtimer_set_start_info(timer);

	/*
	 * Only allow reprogramming if the new base is on this CPU.
	 * (it might still be on another CPU if the timer was pending)
	 */
	enqueue_hrtimer(timer, new_base,
			new_base->cpu_base == &__get_cpu_var(hrtimer_bases));

	/*
	 * The timer may be expired and moved to the cb_pending
	 * list. We can not raise the softirq with the base lock held
	 * due to a possible deadlock with the runqueue lock.
	 */
	raise = timer->state == HRTIMER_STATE_PENDING;

	/*
	 * We use preempt_disable to prevent this task from migrating after
	 * setting up the softirq and raising it. Otherwise, if we migrate
	 * we will raise the softirq on the wrong CPU.
	 */
	preempt_disable();

	unlock_hrtimer_base(timer, &flags);

	if (raise)
		hrtimer_raise_softirq();
	preempt_enable();

	return ret;
}
EXPORT_SYMBOL_GPL(hrtimer_start_range_ns);

/**
 * hrtimer_start - (re)start an hrtimer on the current CPU
 * @timer: the timer to be added
 * @tim: expiry time
 * @mode: expiry mode: absolute (HRTIMER_MODE_ABS) or relative (HRTIMER_MODE_REL)
 *
 * Returns:
 *  0 on success
 *  1 when the timer was active
 */
int
hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode)
{
	return hrtimer_start_range_ns(timer, tim, 0, mode);
}
EXPORT_SYMBOL_GPL(hrtimer_start);
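
/*
 * Illustrative sketch (added for clarity, not part of the original
 * file): arming a one-shot relative timer. All names prefixed my_ are
 * hypothetical.
 *
 *	static enum hrtimer_restart my_oneshot_cb(struct hrtimer *timer)
 *	{
 *		return HRTIMER_NORESTART;	// one-shot: do not re-arm
 *	}
 *
 *	static struct hrtimer my_timer;
 *
 *	hrtimer_init(&my_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 *	my_timer.function = my_oneshot_cb;
 *	hrtimer_start(&my_timer, ktime_set(0, 100 * NSEC_PER_USEC),
 *		      HRTIMER_MODE_REL);	// fire in ~100us
 */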

/**
 * hrtimer_try_to_cancel - try to deactivate a timer
 * @timer: hrtimer to stop
 *
 * Returns:
 *  0 when the timer was not active
 *  1 when the timer was active
 * -1 when the timer is currently executing the callback function and
 *    cannot be stopped
 */
int hrtimer_try_to_cancel(struct hrtimer *timer)
{
	struct hrtimer_clock_base *base;
	unsigned long flags;
	int ret = -1;

	base = lock_hrtimer_base(timer, &flags);

	if (!hrtimer_callback_running(timer))
		ret = remove_hrtimer(timer, base);

	unlock_hrtimer_base(timer, &flags);

	return ret;
}
EXPORT_SYMBOL_GPL(hrtimer_try_to_cancel);

/**
 * hrtimer_cancel - cancel a timer and wait for the handler to finish.
 * @timer: the timer to be cancelled
 *
 * Returns:
 *  0 when the timer was not active
 *  1 when the timer was active
 */
int hrtimer_cancel(struct hrtimer *timer)
{
	for (;;) {
		int ret = hrtimer_try_to_cancel(timer);

		if (ret >= 0)
			return ret;
		cpu_relax();
	}
}
EXPORT_SYMBOL_GPL(hrtimer_cancel);

/**
 * hrtimer_get_remaining - get remaining time for the timer
 * @timer: the timer to read
 */
ktime_t hrtimer_get_remaining(const struct hrtimer *timer)
{
	struct hrtimer_clock_base *base;
	unsigned long flags;
	ktime_t rem;

	base = lock_hrtimer_base(timer, &flags);
	rem = hrtimer_expires_remaining(timer);
	unlock_hrtimer_base(timer, &flags);

	return rem;
}
EXPORT_SYMBOL_GPL(hrtimer_get_remaining);

#ifdef CONFIG_NO_HZ
/**
 * hrtimer_get_next_event - get the time until next expiry event
 *
 * Returns the delta to the next expiry event or KTIME_MAX if no timer
 * is pending.
 */
ktime_t hrtimer_get_next_event(void)
{
	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
	struct hrtimer_clock_base *base = cpu_base->clock_base;
	ktime_t delta, mindelta = { .tv64 = KTIME_MAX };
	unsigned long flags;
	int i;

	spin_lock_irqsave(&cpu_base->lock, flags);

	if (!hrtimer_hres_active()) {
		for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) {
			struct hrtimer *timer;

			if (!base->first)
				continue;

			timer = rb_entry(base->first, struct hrtimer, node);
			delta.tv64 = hrtimer_get_expires_tv64(timer);
			delta = ktime_sub(delta, base->get_time());
			if (delta.tv64 < mindelta.tv64)
				mindelta.tv64 = delta.tv64;
		}
	}

	spin_unlock_irqrestore(&cpu_base->lock, flags);

	if (mindelta.tv64 < 0)
		mindelta.tv64 = 0;
	return mindelta;
}
#endif

static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
			   enum hrtimer_mode mode)
{
	struct hrtimer_cpu_base *cpu_base;

	memset(timer, 0, sizeof(struct hrtimer));

	cpu_base = &__raw_get_cpu_var(hrtimer_bases);

	if (clock_id == CLOCK_REALTIME && mode != HRTIMER_MODE_ABS)
		clock_id = CLOCK_MONOTONIC;

	timer->base = &cpu_base->clock_base[clock_id];
	INIT_LIST_HEAD(&timer->cb_entry);
	hrtimer_init_timer_hres(timer);

#ifdef CONFIG_TIMER_STATS
	timer->start_site = NULL;
	timer->start_pid = -1;
	memset(timer->start_comm, 0, TASK_COMM_LEN);
#endif
}

/**
 * hrtimer_init - initialize a timer to the given clock
 * @timer: the timer to be initialized
 * @clock_id: the clock to be used
 * @mode: timer mode abs/rel
 */
void hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
		  enum hrtimer_mode mode)
{
	debug_hrtimer_init(timer);
	__hrtimer_init(timer, clock_id, mode);
}
EXPORT_SYMBOL_GPL(hrtimer_init);

/**
 * hrtimer_get_res - get the timer resolution for a clock
 * @which_clock: which clock to query
 * @tp: pointer to timespec variable to store the resolution
 *
 * Store the resolution of the clock selected by @which_clock in the
 * variable pointed to by @tp.
 */
int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp)
{
	struct hrtimer_cpu_base *cpu_base;

	cpu_base = &__raw_get_cpu_var(hrtimer_bases);
	*tp = ktime_to_timespec(cpu_base->clock_base[which_clock].resolution);

	return 0;
}
EXPORT_SYMBOL_GPL(hrtimer_get_res);

static void run_hrtimer_pending(struct hrtimer_cpu_base *cpu_base)
{
	spin_lock_irq(&cpu_base->lock);

	while (!list_empty(&cpu_base->cb_pending)) {
		enum hrtimer_restart (*fn)(struct hrtimer *);
		struct hrtimer *timer;
		int restart;

		timer = list_entry(cpu_base->cb_pending.next,
				   struct hrtimer, cb_entry);

		debug_hrtimer_deactivate(timer);
		timer_stats_account_hrtimer(timer);

		fn = timer->function;
		__remove_hrtimer(timer, timer->base, HRTIMER_STATE_CALLBACK, 0);
		spin_unlock_irq(&cpu_base->lock);

		restart = fn(timer);

		spin_lock_irq(&cpu_base->lock);

		timer->state &= ~HRTIMER_STATE_CALLBACK;
		if (restart == HRTIMER_RESTART) {
			BUG_ON(hrtimer_active(timer));
			/*
			 * Enqueue the timer, allow reprogramming of the event
			 * device
			 */
			enqueue_hrtimer(timer, timer->base, 1);
		} else if (hrtimer_active(timer)) {
			/*
			 * If the timer was rearmed on another CPU, reprogram
			 * the event device.
			 */
			struct hrtimer_clock_base *base = timer->base;

			if (base->first == &timer->node &&
			    hrtimer_reprogram(timer, base)) {
				/*
				 * Timer is expired. Thus move it from tree to
				 * pending list again.
				 */
				__remove_hrtimer(timer, base,
						 HRTIMER_STATE_PENDING, 0);
				list_add_tail(&timer->cb_entry,
					      &base->cpu_base->cb_pending);
			}
		}
	}
	spin_unlock_irq(&cpu_base->lock);
}

static void __run_hrtimer(struct hrtimer *timer)
{
	struct hrtimer_clock_base *base = timer->base;
	struct hrtimer_cpu_base *cpu_base = base->cpu_base;
	enum hrtimer_restart (*fn)(struct hrtimer *);
	int restart;

	debug_hrtimer_deactivate(timer);
	__remove_hrtimer(timer, base, HRTIMER_STATE_CALLBACK, 0);
	timer_stats_account_hrtimer(timer);

	fn = timer->function;
	if (timer->cb_mode == HRTIMER_CB_IRQSAFE_PERCPU ||
	    timer->cb_mode == HRTIMER_CB_IRQSAFE_UNLOCKED) {
		/*
		 * Used for scheduler timers, avoid lock inversion with
		 * rq->lock and tasklist_lock.
		 *
		 * These timers are required to deal with enqueue expiry
		 * themselves and are not allowed to migrate.
		 */
		spin_unlock(&cpu_base->lock);
		restart = fn(timer);
		spin_lock(&cpu_base->lock);
	} else
		restart = fn(timer);

	/*
	 * Note: We clear the CALLBACK bit after enqueue_hrtimer to avoid
	 * reprogramming of the event hardware. This happens at the end of this
	 * function anyway.
	 */
	if (restart != HRTIMER_NORESTART) {
		BUG_ON(timer->state != HRTIMER_STATE_CALLBACK);
		enqueue_hrtimer(timer, base, 0);
	}
	timer->state &= ~HRTIMER_STATE_CALLBACK;
}

#ifdef CONFIG_HIGH_RES_TIMERS

/*
 * High resolution timer interrupt
 * Called with interrupts disabled
 */
void hrtimer_interrupt(struct clock_event_device *dev)
{
	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
	struct hrtimer_clock_base *base;
	ktime_t expires_next, now;
	int i, raise = 0;

	BUG_ON(!cpu_base->hres_active);
	cpu_base->nr_events++;
	dev->next_event.tv64 = KTIME_MAX;

 retry:
	now = ktime_get();

	expires_next.tv64 = KTIME_MAX;

	base = cpu_base->clock_base;

	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
		ktime_t basenow;
		struct rb_node *node;

		spin_lock(&cpu_base->lock);

		basenow = ktime_add(now, base->offset);

		while ((node = base->first)) {
			struct hrtimer *timer;

			timer = rb_entry(node, struct hrtimer, node);

			/*
			 * The immediate goal for using the softexpires is
			 * minimizing wakeups, not running timers at the
			 * earliest interrupt after their soft expiration.
			 * This allows us to avoid using a Priority Search
			 * Tree, which can answer a stabbing query for
			 * overlapping intervals, and instead use the simple
			 * BST we already have.
			 * We don't add extra wakeups by delaying timers that
			 * are to the right of a not-yet-expired timer,
			 * because that timer will have to trigger a wakeup
			 * anyway.
			 */
			if (basenow.tv64 < hrtimer_get_softexpires_tv64(timer)) {
				ktime_t expires;

				expires = ktime_sub(hrtimer_get_expires(timer),
						    base->offset);
				if (expires.tv64 < expires_next.tv64)
					expires_next = expires;
				break;
			}

			/* Move softirq callbacks to the pending list */
			if (timer->cb_mode == HRTIMER_CB_SOFTIRQ) {
				__remove_hrtimer(timer, base,
						 HRTIMER_STATE_PENDING, 0);
				list_add_tail(&timer->cb_entry,
					      &base->cpu_base->cb_pending);
				raise = 1;
				continue;
			}

			__run_hrtimer(timer);
		}
		spin_unlock(&cpu_base->lock);
		base++;
	}

	cpu_base->expires_next = expires_next;

	/* Reprogramming necessary ? */
	if (expires_next.tv64 != KTIME_MAX) {
		if (tick_program_event(expires_next, 0))
			goto retry;
	}

	/* Raise softirq ? */
	if (raise)
		raise_softirq(HRTIMER_SOFTIRQ);
}

/**
 * hrtimer_peek_ahead_timers - run soft-expired timers now
 *
 * hrtimer_peek_ahead_timers will peek at the timer queue of
 * the current cpu and check if there are any timers for which
 * the soft expires time has passed. If any such timers exist,
 * they are run immediately and then removed from the timer queue.
 */
void hrtimer_peek_ahead_timers(void)
{
	struct tick_device *td;
	unsigned long flags;

	if (!hrtimer_hres_active())
		return;

	local_irq_save(flags);
	td = &__get_cpu_var(tick_cpu_device);
	if (td && td->evtdev)
		hrtimer_interrupt(td->evtdev);
	local_irq_restore(flags);
}

static void run_hrtimer_softirq(struct softirq_action *h)
{
	run_hrtimer_pending(&__get_cpu_var(hrtimer_bases));
}

#endif	/* CONFIG_HIGH_RES_TIMERS */

/*
 * Called from the timer softirq every jiffy, expire hrtimers:
 *
 * For HRT this is the fallback code to run the softirq in the timer
 * softirq context in case the hrtimer initialization failed or has
 * not been done yet.
 */
void hrtimer_run_pending(void)
{
	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);

	if (hrtimer_hres_active())
		return;

	/*
	 * This _is_ ugly: We have to check in the softirq context,
	 * whether we can switch to highres and / or nohz mode. The
	 * clocksource switch happens in the timer interrupt with
	 * xtime_lock held. Notification from there only sets the
	 * check bit in the tick_oneshot code, otherwise we might
	 * deadlock vs. xtime_lock.
	 */
	if (tick_check_oneshot_change(!hrtimer_is_hres_enabled()))
		hrtimer_switch_to_hres();

	run_hrtimer_pending(cpu_base);
}

/*
 * Called from hardirq context every jiffy
 */
void hrtimer_run_queues(void)
{
	struct rb_node *node;
	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
	struct hrtimer_clock_base *base;
	int index, gettime = 1;

	if (hrtimer_hres_active())
		return;

	for (index = 0; index < HRTIMER_MAX_CLOCK_BASES; index++) {
		base = &cpu_base->clock_base[index];

		if (!base->first)
			continue;

		if (gettime) {
			hrtimer_get_softirq_time(cpu_base);
			gettime = 0;
		}

		spin_lock(&cpu_base->lock);

		while ((node = base->first)) {
			struct hrtimer *timer;

			timer = rb_entry(node, struct hrtimer, node);
			if (base->softirq_time.tv64 <=
					hrtimer_get_expires_tv64(timer))
				break;

			if (timer->cb_mode == HRTIMER_CB_SOFTIRQ) {
				__remove_hrtimer(timer, base,
						 HRTIMER_STATE_PENDING, 0);
				list_add_tail(&timer->cb_entry,
					      &base->cpu_base->cb_pending);
				continue;
			}

			__run_hrtimer(timer);
		}
		spin_unlock(&cpu_base->lock);
	}
}

/*
 * Sleep related functions:
 */
static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer)
{
	struct hrtimer_sleeper *t =
		container_of(timer, struct hrtimer_sleeper, timer);
	struct task_struct *task = t->task;

	t->task = NULL;
	if (task)
		wake_up_process(task);

	return HRTIMER_NORESTART;
}

void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *task)
{
	sl->timer.function = hrtimer_wakeup;
	sl->task = task;
#ifdef CONFIG_HIGH_RES_TIMERS
	sl->timer.cb_mode = HRTIMER_CB_IRQSAFE_UNLOCKED;
#endif
}

static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode)
{
	hrtimer_init_sleeper(t, current);

	do {
		set_current_state(TASK_INTERRUPTIBLE);
		hrtimer_start_expires(&t->timer, mode);
		if (!hrtimer_active(&t->timer))
			t->task = NULL;

		if (likely(t->task))
			schedule();

		hrtimer_cancel(&t->timer);
		mode = HRTIMER_MODE_ABS;

	} while (t->task && !signal_pending(current));

	__set_current_state(TASK_RUNNING);

	return t->task == NULL;
}

static int update_rmtp(struct hrtimer *timer, struct timespec __user *rmtp)
{
	struct timespec rmt;
	ktime_t rem;

	rem = hrtimer_expires_remaining(timer);
	if (rem.tv64 <= 0)
		return 0;
	rmt = ktime_to_timespec(rem);

	if (copy_to_user(rmtp, &rmt, sizeof(*rmtp)))
		return -EFAULT;

	return 1;
}

long __sched hrtimer_nanosleep_restart(struct restart_block *restart)
{
	struct hrtimer_sleeper t;
	struct timespec __user *rmtp;
	int ret = 0;

	hrtimer_init_on_stack(&t.timer, restart->nanosleep.index,
				HRTIMER_MODE_ABS);
	hrtimer_set_expires_tv64(&t.timer, restart->nanosleep.expires);

	if (do_nanosleep(&t, HRTIMER_MODE_ABS))
		goto out;

	rmtp = restart->nanosleep.rmtp;
	if (rmtp) {
		ret = update_rmtp(&t.timer, rmtp);
		if (ret <= 0)
			goto out;
	}

	/* The other values in restart are already filled in */
	ret = -ERESTART_RESTARTBLOCK;
out:
	destroy_hrtimer_on_stack(&t.timer);
	return ret;
}

long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
		       const enum hrtimer_mode mode, const clockid_t clockid)
{
	struct restart_block *restart;
	struct hrtimer_sleeper t;
	int ret = 0;
	unsigned long slack;

	slack = current->timer_slack_ns;
	if (rt_task(current))
		slack = 0;

	hrtimer_init_on_stack(&t.timer, clockid, mode);
	hrtimer_set_expires_range_ns(&t.timer, timespec_to_ktime(*rqtp), slack);
	if (do_nanosleep(&t, mode))
		goto out;

	/* Absolute timers do not update the rmtp value and restart: */
	if (mode == HRTIMER_MODE_ABS) {
		ret = -ERESTARTNOHAND;
		goto out;
	}

	if (rmtp) {
		ret = update_rmtp(&t.timer, rmtp);
		if (ret <= 0)
			goto out;
	}

	restart = &current_thread_info()->restart_block;
	restart->fn = hrtimer_nanosleep_restart;
	restart->nanosleep.index = t.timer.base->index;
	restart->nanosleep.rmtp = rmtp;
	restart->nanosleep.expires = hrtimer_get_expires_tv64(&t.timer);

	ret = -ERESTART_RESTARTBLOCK;
out:
	destroy_hrtimer_on_stack(&t.timer);
	return ret;
}

asmlinkage long
sys_nanosleep(struct timespec __user *rqtp, struct timespec __user *rmtp)
{
	struct timespec tu;

	if (copy_from_user(&tu, rqtp, sizeof(tu)))
		return -EFAULT;

	if (!timespec_valid(&tu))
		return -EINVAL;

	return hrtimer_nanosleep(&tu, rmtp, HRTIMER_MODE_REL, CLOCK_MONOTONIC);
}

/*
 * Functions related to boot-time initialization:
 */
static void __cpuinit init_hrtimers_cpu(int cpu)
{
	struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu);
	int i;

	spin_lock_init(&cpu_base->lock);

	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++)
		cpu_base->clock_base[i].cpu_base = cpu_base;

	INIT_LIST_HEAD(&cpu_base->cb_pending);
	hrtimer_init_hres(cpu_base);
}

#ifdef CONFIG_HOTPLUG_CPU

static int migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
				struct hrtimer_clock_base *new_base, int dcpu)
{
	struct hrtimer *timer;
	struct rb_node *node;
	int raise = 0;

	while ((node = rb_first(&old_base->active))) {
		timer = rb_entry(node, struct hrtimer, node);
		BUG_ON(hrtimer_callback_running(timer));
		debug_hrtimer_deactivate(timer);

		/*
		 * Should not happen. Per CPU timers should be
		 * canceled _before_ the migration code is called
		 */
		if (timer->cb_mode == HRTIMER_CB_IRQSAFE_PERCPU) {
			__remove_hrtimer(timer, old_base,
					 HRTIMER_STATE_INACTIVE, 0);
			WARN(1, "hrtimer (%p %p) active but cpu %d dead\n",
			     timer, timer->function, dcpu);
			continue;
		}

		/*
		 * Mark it as STATE_MIGRATE not INACTIVE, otherwise the
		 * timer could be seen as !active and just vanish away
		 * under us on another CPU
		 */
		__remove_hrtimer(timer, old_base, HRTIMER_STATE_MIGRATE, 0);
		timer->base = new_base;
		/*
		 * Enqueue the timer. Allow reprogramming of the event device
		 */
		enqueue_hrtimer(timer, new_base, 1);

#ifdef CONFIG_HIGH_RES_TIMERS
		/*
		 * Happens with high res enabled when the timer was
		 * already expired and the callback mode is
		 * HRTIMER_CB_IRQSAFE_UNLOCKED (hrtimer_sleeper). The
		 * enqueue code does not move them to the soft irq
		 * pending list for performance/latency reasons, but
		 * in the migration state, we need to do that,
		 * otherwise we end up with a stale timer.
		 */
		if (timer->state == HRTIMER_STATE_MIGRATE) {
			timer->state = HRTIMER_STATE_PENDING;
			list_add_tail(&timer->cb_entry,
				      &new_base->cpu_base->cb_pending);
			raise = 1;
		}
#endif
		/* Clear the migration state bit */
		timer->state &= ~HRTIMER_STATE_MIGRATE;
	}
	return raise;
}

#ifdef CONFIG_HIGH_RES_TIMERS
static int migrate_hrtimer_pending(struct hrtimer_cpu_base *old_base,
				   struct hrtimer_cpu_base *new_base)
{
	struct hrtimer *timer;
	int raise = 0;

	while (!list_empty(&old_base->cb_pending)) {
		timer = list_entry(old_base->cb_pending.next,
				   struct hrtimer, cb_entry);

		__remove_hrtimer(timer, timer->base, HRTIMER_STATE_PENDING, 0);
		timer->base = &new_base->clock_base[timer->base->index];
		list_add_tail(&timer->cb_entry, &new_base->cb_pending);
		raise = 1;
	}
	return raise;
}
#else
static int migrate_hrtimer_pending(struct hrtimer_cpu_base *old_base,
				   struct hrtimer_cpu_base *new_base)
{
	return 0;
}
#endif

static void migrate_hrtimers(int cpu)
{
	struct hrtimer_cpu_base *old_base, *new_base;
	int i, raise = 0;

	BUG_ON(cpu_online(cpu));
	old_base = &per_cpu(hrtimer_bases, cpu);
	new_base = &get_cpu_var(hrtimer_bases);

	tick_cancel_sched_timer(cpu);
	/*
	 * The caller is globally serialized and nobody else
	 * takes two locks at once, deadlock is not possible.
	 */
	spin_lock_irq(&new_base->lock);
	spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);

	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
		if (migrate_hrtimer_list(&old_base->clock_base[i],
					 &new_base->clock_base[i], cpu))
			raise = 1;
	}

	if (migrate_hrtimer_pending(old_base, new_base))
		raise = 1;

	spin_unlock(&old_base->lock);
	spin_unlock_irq(&new_base->lock);
	put_cpu_var(hrtimer_bases);

	if (raise)
		hrtimer_raise_softirq();
}
#endif /* CONFIG_HOTPLUG_CPU */

static int __cpuinit hrtimer_cpu_notify(struct notifier_block *self,
					unsigned long action, void *hcpu)
{
	unsigned int cpu = (long)hcpu;

	switch (action) {

	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		init_hrtimers_cpu(cpu);
		break;

#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		clockevents_notify(CLOCK_EVT_NOTIFY_CPU_DEAD, &cpu);
		migrate_hrtimers(cpu);
		break;
#endif

	default:
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata hrtimers_nb = {
	.notifier_call = hrtimer_cpu_notify,
};

void __init hrtimers_init(void)
{
	hrtimer_cpu_notify(&hrtimers_nb, (unsigned long)CPU_UP_PREPARE,
			   (void *)(long)smp_processor_id());
	register_cpu_notifier(&hrtimers_nb);
#ifdef CONFIG_HIGH_RES_TIMERS
	open_softirq(HRTIMER_SOFTIRQ, run_hrtimer_softirq);
#endif
}

/**
 * schedule_hrtimeout_range - sleep until timeout
 * @expires: timeout value (ktime_t)
 * @delta: slack in expires timeout (ktime_t)
 * @mode: timer mode, HRTIMER_MODE_ABS or HRTIMER_MODE_REL
 *
 * Make the current task sleep until the given expiry time has
 * elapsed. The routine will return immediately unless
 * the current task state has been set (see set_current_state()).
 *
 * The @delta argument gives the kernel the freedom to schedule the
 * actual wakeup to a time that is both power and performance friendly.
 * The kernel gives normal best-effort behavior for "@expires+@delta",
 * but may decide to fire the timer earlier, though never earlier
 * than @expires.
 *
 * You can set the task state as follows -
 *
 * %TASK_UNINTERRUPTIBLE - at least @expires time is guaranteed to
 * pass before the routine returns.
 *
 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
 * delivered to the current task.
 *
 * The current task state is guaranteed to be TASK_RUNNING when this
 * routine returns.
 *
 * Returns 0 when the timer has expired, otherwise -EINTR
 */
int __sched schedule_hrtimeout_range(ktime_t *expires, unsigned long delta,
				     const enum hrtimer_mode mode)
{
	struct hrtimer_sleeper t;

	/*
	 * Optimize when a zero timeout value is given. It does not
	 * matter whether this is an absolute or a relative time.
	 */
	if (expires && !expires->tv64) {
		__set_current_state(TASK_RUNNING);
		return 0;
	}

	/*
	 * A NULL parameter means "infinite"
	 */
	if (!expires) {
		schedule();
		__set_current_state(TASK_RUNNING);
		return -EINTR;
	}

	hrtimer_init_on_stack(&t.timer, CLOCK_MONOTONIC, mode);
	hrtimer_set_expires_range_ns(&t.timer, *expires, delta);

	hrtimer_init_sleeper(&t, current);

	hrtimer_start_expires(&t.timer, mode);
	if (!hrtimer_active(&t.timer))
		t.task = NULL;

	if (likely(t.task))
		schedule();

	hrtimer_cancel(&t.timer);
	destroy_hrtimer_on_stack(&t.timer);

	__set_current_state(TASK_RUNNING);

	return !t.task ? 0 : -EINTR;
}
EXPORT_SYMBOL_GPL(schedule_hrtimeout_range);
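
/*
 * Illustrative sketch (added for clarity, not part of the original
 * file): a caller waiting with slack, so the kernel may coalesce the
 * wakeup with other nearby events. The 10ms timeout and 1ms slack are
 * arbitrary example values.
 *
 *	ktime_t timeout = ktime_set(0, 10 * NSEC_PER_MSEC);
 *
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	if (schedule_hrtimeout_range(&timeout, NSEC_PER_MSEC,
 *				     HRTIMER_MODE_REL) == -EINTR)
 *		return -EINTR;	// woken by a signal before expiry
 */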

/**
 * schedule_hrtimeout - sleep until timeout
 * @expires: timeout value (ktime_t)
 * @mode: timer mode, HRTIMER_MODE_ABS or HRTIMER_MODE_REL
 *
 * Make the current task sleep until the given expiry time has
 * elapsed. The routine will return immediately unless
 * the current task state has been set (see set_current_state()).
 *
 * You can set the task state as follows -
 *
 * %TASK_UNINTERRUPTIBLE - at least @expires time is guaranteed to
 * pass before the routine returns.
 *
 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
 * delivered to the current task.
 *
 * The current task state is guaranteed to be TASK_RUNNING when this
 * routine returns.
 *
 * Returns 0 when the timer has expired, otherwise -EINTR
 */
int __sched schedule_hrtimeout(ktime_t *expires,
			       const enum hrtimer_mode mode)
{
	return schedule_hrtimeout_range(expires, 0, mode);
}
EXPORT_SYMBOL_GPL(schedule_hrtimeout);