/*
 *  linux/kernel/hrtimer.c
 *
 *  Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 *  Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 *  Copyright(C) 2006-2007  Timesys Corp., Thomas Gleixner
 *
 *  High-resolution kernel timers
 *
 *  In contrast to the low-resolution timeout API implemented in
 *  kernel/timer.c, hrtimers provide finer resolution and accuracy
 *  depending on system configuration and capabilities.
 *
 *  These timers are currently used for:
 *   - itimers
 *   - POSIX timers
 *   - nanosleep
 *   - precise in-kernel timing
 *
 *  Started by: Thomas Gleixner and Ingo Molnar
 *
 *  Credits:
 *	based on kernel/timer.c
 *
 *	Help, testing, suggestions, bugfixes, improvements were
 *	provided by:
 *
 *	George Anzinger, Andrew Morton, Steven Rostedt, Roman Zippel
 *	et al.
 *
 *  For licensing details see kernel-base/COPYING
 */
#include <linux/cpu.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/hrtimer.h>
#include <linux/notifier.h>
#include <linux/syscalls.h>
#include <linux/kallsyms.h>
#include <linux/interrupt.h>
#include <linux/tick.h>
#include <linux/seq_file.h>
#include <linux/err.h>
#include <linux/debugobjects.h>
#include <linux/sched.h>
#include <linux/sched/sysctl.h>
#include <linux/sched/rt.h>
#include <linux/timer.h>

#include <asm/uaccess.h>

#include <trace/events/timer.h>
/*
 * The timer bases:
 *
 * There are more clockids than hrtimer bases. Thus, we index
 * into the timer bases by the hrtimer_base_type enum. When trying
 * to reach a base using a clockid, hrtimer_clockid_to_base()
 * is used to convert from clockid to the proper hrtimer_base_type.
 */
DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) =
{
	.clock_base =
	{
		{
			.index = HRTIMER_BASE_MONOTONIC,
			.clockid = CLOCK_MONOTONIC,
			.get_time = &ktime_get,
			.resolution = KTIME_LOW_RES,
		},
		{
			.index = HRTIMER_BASE_REALTIME,
			.clockid = CLOCK_REALTIME,
			.get_time = &ktime_get_real,
			.resolution = KTIME_LOW_RES,
		},
		{
			.index = HRTIMER_BASE_BOOTTIME,
			.clockid = CLOCK_BOOTTIME,
			.get_time = &ktime_get_boottime,
			.resolution = KTIME_LOW_RES,
		},
		{
			.index = HRTIMER_BASE_TAI,
			.clockid = CLOCK_TAI,
			.get_time = &ktime_get_clocktai,
			.resolution = KTIME_LOW_RES,
		},
	}
};

static const int hrtimer_clock_to_base_table[MAX_CLOCKS] = {
	[CLOCK_REALTIME]	= HRTIMER_BASE_REALTIME,
	[CLOCK_MONOTONIC]	= HRTIMER_BASE_MONOTONIC,
	[CLOCK_BOOTTIME]	= HRTIMER_BASE_BOOTTIME,
	[CLOCK_TAI]		= HRTIMER_BASE_TAI,
};

static inline int hrtimer_clockid_to_base(clockid_t clock_id)
{
	return hrtimer_clock_to_base_table[clock_id];
}
/*
 * Get the coarse grained time at the softirq based on xtime and
 * wall_to_monotonic.
 */
static void hrtimer_get_softirq_time(struct hrtimer_cpu_base *base)
{
	ktime_t xtim, mono, boot;
	struct timespec xts, tom, slp;
	s32 tai_offset;

	get_xtime_and_monotonic_and_sleep_offset(&xts, &tom, &slp);
	tai_offset = timekeeping_get_tai_offset();

	xtim = timespec_to_ktime(xts);
	mono = ktime_add(xtim, timespec_to_ktime(tom));
	boot = ktime_add(mono, timespec_to_ktime(slp));
	base->clock_base[HRTIMER_BASE_REALTIME].softirq_time = xtim;
	base->clock_base[HRTIMER_BASE_MONOTONIC].softirq_time = mono;
	base->clock_base[HRTIMER_BASE_BOOTTIME].softirq_time = boot;
	base->clock_base[HRTIMER_BASE_TAI].softirq_time =
				ktime_add(xtim, ktime_set(tai_offset, 0));
}
/*
 * Functions and macros which are different for UP/SMP systems are kept in a
 * single place
 */
#ifdef CONFIG_SMP

/*
 * We are using hashed locking: holding per_cpu(hrtimer_bases)[n].lock
 * means that all timers which are tied to this base via timer->base are
 * locked, and the base itself is locked too.
 *
 * So __run_timers/migrate_timers can safely modify all timers which could
 * be found on the lists/queues.
 *
 * When the timer's base is locked, and the timer removed from list, it is
 * possible to set timer->base = NULL and drop the lock: the timer remains
 * locked.
 */
static
struct hrtimer_clock_base *lock_hrtimer_base(const struct hrtimer *timer,
					     unsigned long *flags)
{
	struct hrtimer_clock_base *base;

	for (;;) {
		base = timer->base;
		if (likely(base != NULL)) {
			raw_spin_lock_irqsave(&base->cpu_base->lock, *flags);
			if (likely(base == timer->base))
				return base;
			/* The timer has migrated to another CPU: */
			raw_spin_unlock_irqrestore(&base->cpu_base->lock, *flags);
		}
		cpu_relax();
	}
}

/*
 * Get the preferred target CPU for NOHZ
 */
static int hrtimer_get_target(int this_cpu, int pinned)
{
#ifdef CONFIG_NO_HZ
	if (!pinned && get_sysctl_timer_migration() && idle_cpu(this_cpu))
		return get_nohz_timer_target();
#endif
	return this_cpu;
}

/*
 * With HIGHRES=y we do not migrate the timer when it is expiring
 * before the next event on the target cpu because we cannot reprogram
 * the target cpu hardware and we would cause it to fire late.
 *
 * Called with cpu_base->lock of target cpu held.
 */
static int
hrtimer_check_target(struct hrtimer *timer, struct hrtimer_clock_base *new_base)
{
#ifdef CONFIG_HIGH_RES_TIMERS
	ktime_t expires;

	if (!new_base->cpu_base->hres_active)
		return 0;

	expires = ktime_sub(hrtimer_get_expires(timer), new_base->offset);
	return expires.tv64 <= new_base->cpu_base->expires_next.tv64;
#else
	return 0;
#endif
}

/*
 * Switch the timer base to the current CPU when possible.
 */
static inline struct hrtimer_clock_base *
switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_clock_base *base,
		    int pinned)
{
	struct hrtimer_clock_base *new_base;
	struct hrtimer_cpu_base *new_cpu_base;
	int this_cpu = smp_processor_id();
	int cpu = hrtimer_get_target(this_cpu, pinned);
	int basenum = base->index;

again:
	new_cpu_base = &per_cpu(hrtimer_bases, cpu);
	new_base = &new_cpu_base->clock_base[basenum];

	if (base != new_base) {
		/*
		 * We are trying to move timer to new_base.
		 * However we can't change timer's base while it is running,
		 * so we keep it on the same CPU. No hassle vs. reprogramming
		 * the event source in the high resolution case. The softirq
		 * code will take care of this when the timer function has
		 * completed. There is no conflict as we hold the lock until
		 * the timer is enqueued.
		 */
		if (unlikely(hrtimer_callback_running(timer)))
			return base;

		/* See the comment in lock_timer_base() */
		timer->base = NULL;
		raw_spin_unlock(&base->cpu_base->lock);
		raw_spin_lock(&new_base->cpu_base->lock);

		if (cpu != this_cpu && hrtimer_check_target(timer, new_base)) {
			cpu = this_cpu;
			raw_spin_unlock(&new_base->cpu_base->lock);
			raw_spin_lock(&base->cpu_base->lock);
			timer->base = base;
			goto again;
		}
		timer->base = new_base;
	}
	return new_base;
}

#else /* CONFIG_SMP */

static inline struct hrtimer_clock_base *
lock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
{
	struct hrtimer_clock_base *base = timer->base;

	raw_spin_lock_irqsave(&base->cpu_base->lock, *flags);

	return base;
}

# define switch_hrtimer_base(t, b, p)	(b)

#endif	/* !CONFIG_SMP */
/*
 * Functions for the union type storage format of ktime_t which are
 * too large for inlining:
 */
#if BITS_PER_LONG < 64
# ifndef CONFIG_KTIME_SCALAR
/**
 * ktime_add_ns - Add a scalar nanoseconds value to a ktime_t variable
 * @kt:		addend
 * @nsec:	the scalar nsec value to add
 *
 * Returns the sum of kt and nsec in ktime_t format
 */
ktime_t ktime_add_ns(const ktime_t kt, u64 nsec)
{
	ktime_t tmp;

	if (likely(nsec < NSEC_PER_SEC)) {
		tmp.tv64 = nsec;
	} else {
		unsigned long rem = do_div(nsec, NSEC_PER_SEC);

		tmp = ktime_set((long)nsec, rem);
	}

	return ktime_add(kt, tmp);
}

EXPORT_SYMBOL_GPL(ktime_add_ns);
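
/*
 * Worked example (illustrative annotation, not part of the original
 * source): adding 1500000000 ns takes the slow path above; do_div()
 * splits the value into 1 s + 500000000 ns, so
 *
 *	kt = ktime_add_ns(ktime_set(2, 0), 1500000000ULL);
 *
 * leaves kt holding 3.5 s in normalized sec/nsec form.
 */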
/**
 * ktime_sub_ns - Subtract a scalar nanoseconds value from a ktime_t variable
 * @kt:		minuend
 * @nsec:	the scalar nsec value to subtract
 *
 * Returns the subtraction of @nsec from @kt in ktime_t format
 */
ktime_t ktime_sub_ns(const ktime_t kt, u64 nsec)
{
	ktime_t tmp;

	if (likely(nsec < NSEC_PER_SEC)) {
		tmp.tv64 = nsec;
	} else {
		unsigned long rem = do_div(nsec, NSEC_PER_SEC);

		tmp = ktime_set((long)nsec, rem);
	}

	return ktime_sub(kt, tmp);
}

EXPORT_SYMBOL_GPL(ktime_sub_ns);
# endif /* !CONFIG_KTIME_SCALAR */

/*
 * Divide a ktime value by a nanosecond value
 */
u64 ktime_divns(const ktime_t kt, s64 div)
{
	u64 dclc;
	int sft = 0;

	dclc = ktime_to_ns(kt);
	/* Make sure the divisor is less than 2^32: */
	while (div >> 32) {
		sft++;
		div >>= 1;
	}
	dclc >>= sft;
	do_div(dclc, (unsigned long) div);

	return dclc;
}
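
/*
 * Illustrative note (not part of the original source): do_div() only
 * accepts a 32-bit divisor, so ktime_divns() pre-shifts both operands.
 * E.g. for div = 2^33 the loop runs twice (sft = 2), leaving div = 2^31;
 * (dclc >> 2) / (div >> 2) then approximates dclc / div, at the cost of
 * up to sft bits of remainder precision.
 */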
#endif /* BITS_PER_LONG < 64 */
/*
 * Add two ktime values and do a safety check for overflow:
 */
ktime_t ktime_add_safe(const ktime_t lhs, const ktime_t rhs)
{
	ktime_t res = ktime_add(lhs, rhs);

	/*
	 * We use KTIME_SEC_MAX here, the maximum timeout which we can
	 * return to user space in a timespec:
	 */
	if (res.tv64 < 0 || res.tv64 < lhs.tv64 || res.tv64 < rhs.tv64)
		res = ktime_set(KTIME_SEC_MAX, 0);

	return res;
}

EXPORT_SYMBOL_GPL(ktime_add_safe);
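
/*
 * Illustrative note (not part of the original source): the check above
 * catches signed 64-bit wraparound. E.g. adding one second to a value
 * already near KTIME_SEC_MAX would wrap negative; ktime_add_safe()
 * instead clamps the result to ktime_set(KTIME_SEC_MAX, 0), the largest
 * timeout that can be returned to user space in a timespec.
 */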
#ifdef CONFIG_DEBUG_OBJECTS_TIMERS

static struct debug_obj_descr hrtimer_debug_descr;

static void *hrtimer_debug_hint(void *addr)
{
	return ((struct hrtimer *) addr)->function;
}

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static int hrtimer_fixup_init(void *addr, enum debug_obj_state state)
{
	struct hrtimer *timer = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		hrtimer_cancel(timer);
		debug_object_init(timer, &hrtimer_debug_descr);
		return 1;
	default:
		return 0;
	}
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown object is activated (might be a statically initialized object)
 */
static int hrtimer_fixup_activate(void *addr, enum debug_obj_state state)
{
	switch (state) {
	case ODEBUG_STATE_NOTAVAILABLE:
		WARN_ON_ONCE(1);
		return 0;

	case ODEBUG_STATE_ACTIVE:
		WARN_ON(1);

	default:
		return 0;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static int hrtimer_fixup_free(void *addr, enum debug_obj_state state)
{
	struct hrtimer *timer = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		hrtimer_cancel(timer);
		debug_object_free(timer, &hrtimer_debug_descr);
		return 1;
	default:
		return 0;
	}
}

static struct debug_obj_descr hrtimer_debug_descr = {
	.name		= "hrtimer",
	.debug_hint	= hrtimer_debug_hint,
	.fixup_init	= hrtimer_fixup_init,
	.fixup_activate	= hrtimer_fixup_activate,
	.fixup_free	= hrtimer_fixup_free,
};

static inline void debug_hrtimer_init(struct hrtimer *timer)
{
	debug_object_init(timer, &hrtimer_debug_descr);
}

static inline void debug_hrtimer_activate(struct hrtimer *timer)
{
	debug_object_activate(timer, &hrtimer_debug_descr);
}

static inline void debug_hrtimer_deactivate(struct hrtimer *timer)
{
	debug_object_deactivate(timer, &hrtimer_debug_descr);
}

static inline void debug_hrtimer_free(struct hrtimer *timer)
{
	debug_object_free(timer, &hrtimer_debug_descr);
}

static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
			   enum hrtimer_mode mode);

void hrtimer_init_on_stack(struct hrtimer *timer, clockid_t clock_id,
			   enum hrtimer_mode mode)
{
	debug_object_init_on_stack(timer, &hrtimer_debug_descr);
	__hrtimer_init(timer, clock_id, mode);
}
EXPORT_SYMBOL_GPL(hrtimer_init_on_stack);

void destroy_hrtimer_on_stack(struct hrtimer *timer)
{
	debug_object_free(timer, &hrtimer_debug_descr);
}

#else
static inline void debug_hrtimer_init(struct hrtimer *timer) { }
static inline void debug_hrtimer_activate(struct hrtimer *timer) { }
static inline void debug_hrtimer_deactivate(struct hrtimer *timer) { }
#endif

static inline void
debug_init(struct hrtimer *timer, clockid_t clockid,
	   enum hrtimer_mode mode)
{
	debug_hrtimer_init(timer);
	trace_hrtimer_init(timer, clockid, mode);
}

static inline void debug_activate(struct hrtimer *timer)
{
	debug_hrtimer_activate(timer);
	trace_hrtimer_start(timer);
}

static inline void debug_deactivate(struct hrtimer *timer)
{
	debug_hrtimer_deactivate(timer);
	trace_hrtimer_cancel(timer);
}
/* High resolution timer related functions */
#ifdef CONFIG_HIGH_RES_TIMERS

/*
 * High resolution timer enabled ?
 */
static int hrtimer_hres_enabled __read_mostly = 1;

/*
 * Enable / Disable high resolution mode
 */
static int __init setup_hrtimer_hres(char *str)
{
	if (!strcmp(str, "off"))
		hrtimer_hres_enabled = 0;
	else if (!strcmp(str, "on"))
		hrtimer_hres_enabled = 1;
	else
		return 0;
	return 1;
}

__setup("highres=", setup_hrtimer_hres);

/*
 * hrtimer_is_hres_enabled - query, if the highres mode is enabled
 */
static inline int hrtimer_is_hres_enabled(void)
{
	return hrtimer_hres_enabled;
}
/*
 * Is the high resolution mode active ?
 */
static inline int hrtimer_hres_active(void)
{
	return __this_cpu_read(hrtimer_bases.hres_active);
}

/*
 * Reprogram the event source with checking both queues for the
 * next event
 * Called with interrupts disabled and base->lock held
 */
static void
hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)
{
	int i;
	struct hrtimer_clock_base *base = cpu_base->clock_base;
	ktime_t expires, expires_next;

	expires_next.tv64 = KTIME_MAX;

	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) {
		struct hrtimer *timer;
		struct timerqueue_node *next;

		next = timerqueue_getnext(&base->active);
		if (!next)
			continue;
		timer = container_of(next, struct hrtimer, node);

		expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
		/*
		 * clock_was_set() has changed base->offset so the
		 * result might be negative. Fix it up to prevent a
		 * false positive in clockevents_program_event()
		 */
		if (expires.tv64 < 0)
			expires.tv64 = 0;
		if (expires.tv64 < expires_next.tv64)
			expires_next = expires;
	}

	if (skip_equal && expires_next.tv64 == cpu_base->expires_next.tv64)
		return;

	cpu_base->expires_next.tv64 = expires_next.tv64;

	if (cpu_base->expires_next.tv64 != KTIME_MAX)
		tick_program_event(cpu_base->expires_next, 1);
}
/*
 * Shared reprogramming for clock_realtime and clock_monotonic
 *
 * When a timer is enqueued and expires earlier than the already enqueued
 * timers, we have to check, whether it expires earlier than the timer for
 * which the clock event device was armed.
 *
 * Called with interrupts disabled and base->cpu_base.lock held
 */
static int hrtimer_reprogram(struct hrtimer *timer,
			     struct hrtimer_clock_base *base)
{
	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
	ktime_t expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
	int res;

	WARN_ON_ONCE(hrtimer_get_expires_tv64(timer) < 0);

	/*
	 * When the callback is running, we do not reprogram the clock event
	 * device. The timer callback is either running on a different CPU or
	 * the callback is executed in the hrtimer_interrupt context. The
	 * reprogramming is handled either by the softirq, which called the
	 * callback or at the end of the hrtimer_interrupt.
	 */
	if (hrtimer_callback_running(timer))
		return 0;

	/*
	 * CLOCK_REALTIME timer might be requested with an absolute
	 * expiry time which is less than base->offset. Nothing wrong
	 * about that, just avoid calling into the tick code, which
	 * now objects to negative expiry values.
	 */
	if (expires.tv64 < 0)
		return -ETIME;

	if (expires.tv64 >= cpu_base->expires_next.tv64)
		return 0;

	/*
	 * If a hang was detected in the last timer interrupt then we
	 * do not schedule a timer which is earlier than the expiry
	 * which we enforced in the hang detection. We want the system
	 * to make progress.
	 */
	if (cpu_base->hang_detected)
		return 0;

	/*
	 * Clockevents returns -ETIME, when the event was in the past.
	 */
	res = tick_program_event(expires, 0);
	if (!IS_ERR_VALUE(res))
		cpu_base->expires_next = expires;
	return res;
}
/*
 * Initialize the high resolution related parts of cpu_base
 */
static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base)
{
	base->expires_next.tv64 = KTIME_MAX;
	base->hres_active = 0;
}

/*
 * When High resolution timers are active, try to reprogram. Note, that in case
 * the state has HRTIMER_STATE_CALLBACK set, no reprogramming and no expiry
 * check happens. The timer gets enqueued into the rbtree. The reprogramming
 * and expiry check is done in the hrtimer_interrupt or in the softirq.
 */
static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
					    struct hrtimer_clock_base *base)
{
	return base->cpu_base->hres_active && hrtimer_reprogram(timer, base);
}

static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base)
{
	ktime_t *offs_real = &base->clock_base[HRTIMER_BASE_REALTIME].offset;
	ktime_t *offs_boot = &base->clock_base[HRTIMER_BASE_BOOTTIME].offset;
	ktime_t *offs_tai = &base->clock_base[HRTIMER_BASE_TAI].offset;

	return ktime_get_update_offsets(offs_real, offs_boot, offs_tai);
}

/*
 * Retrigger next event is called after clock was set
 *
 * Called with interrupts disabled via on_each_cpu()
 */
static void retrigger_next_event(void *arg)
{
	struct hrtimer_cpu_base *base = &__get_cpu_var(hrtimer_bases);

	if (!hrtimer_hres_active())
		return;

	raw_spin_lock(&base->lock);
	hrtimer_update_base(base);
	hrtimer_force_reprogram(base, 0);
	raw_spin_unlock(&base->lock);
}

/*
 * Switch to high resolution mode
 */
static int hrtimer_switch_to_hres(void)
{
	int i, cpu = smp_processor_id();
	struct hrtimer_cpu_base *base = &per_cpu(hrtimer_bases, cpu);
	unsigned long flags;

	if (base->hres_active)
		return 1;

	local_irq_save(flags);

	if (tick_init_highres()) {
		local_irq_restore(flags);
		printk(KERN_WARNING "Could not switch to high resolution "
				    "mode on CPU %d\n", cpu);
		return 0;
	}
	base->hres_active = 1;
	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++)
		base->clock_base[i].resolution = KTIME_HIGH_RES;

	tick_setup_sched_timer();
	/* "Retrigger" the interrupt to get things going */
	retrigger_next_event(NULL);
	local_irq_restore(flags);
	return 1;
}
/*
 * Called from timekeeping code to reprogram the hrtimer interrupt
 * device. If called from the timer interrupt context we defer it to
 * softirq context.
 */
void clock_was_set_delayed(void)
{
	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);

	cpu_base->clock_was_set = 1;
	__raise_softirq_irqoff(HRTIMER_SOFTIRQ);
}

#else

static inline int hrtimer_hres_active(void) { return 0; }
static inline int hrtimer_is_hres_enabled(void) { return 0; }
static inline int hrtimer_switch_to_hres(void) { return 0; }
static inline void
hrtimer_force_reprogram(struct hrtimer_cpu_base *base, int skip_equal) { }
static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
					    struct hrtimer_clock_base *base)
{
	return 0;
}
static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base) { }
static inline void retrigger_next_event(void *arg) { }

#endif /* CONFIG_HIGH_RES_TIMERS */
/*
 * Clock realtime was set
 *
 * Change the offset of the realtime clock vs. the monotonic
 * clock.
 *
 * We might have to reprogram the high resolution timer interrupt. On
 * SMP we call the architecture specific code to retrigger _all_ high
 * resolution timer interrupts. On UP we just disable interrupts and
 * call the high resolution interrupt code.
 */
void clock_was_set(void)
{
#ifdef CONFIG_HIGH_RES_TIMERS
	/* Retrigger the CPU local events everywhere */
	on_each_cpu(retrigger_next_event, NULL, 1);
#endif
	timerfd_clock_was_set();
}

/*
 * During resume we might have to reprogram the high resolution timer
 * interrupt (on the local CPU):
 */
void hrtimers_resume(void)
{
	WARN_ONCE(!irqs_disabled(),
		  KERN_INFO "hrtimers_resume() called with IRQs enabled!");

	retrigger_next_event(NULL);
	timerfd_clock_was_set();
}

static inline void timer_stats_hrtimer_set_start_info(struct hrtimer *timer)
{
#ifdef CONFIG_TIMER_STATS
	if (timer->start_site)
		return;
	timer->start_site = __builtin_return_address(0);
	memcpy(timer->start_comm, current->comm, TASK_COMM_LEN);
	timer->start_pid = current->pid;
#endif
}

static inline void timer_stats_hrtimer_clear_start_info(struct hrtimer *timer)
{
#ifdef CONFIG_TIMER_STATS
	timer->start_site = NULL;
#endif
}

static inline void timer_stats_account_hrtimer(struct hrtimer *timer)
{
#ifdef CONFIG_TIMER_STATS
	if (likely(!timer_stats_active))
		return;
	timer_stats_update_stats(timer, timer->start_pid, timer->start_site,
				 timer->function, timer->start_comm, 0);
#endif
}
/*
 * Counterpart to lock_hrtimer_base above:
 */
static inline
void unlock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
{
	raw_spin_unlock_irqrestore(&timer->base->cpu_base->lock, *flags);
}

/**
 * hrtimer_forward - forward the timer expiry
 * @timer:	hrtimer to forward
 * @now:	forward past this time
 * @interval:	the interval to forward
 *
 * Forward the timer expiry so it will expire in the future.
 * Returns the number of overruns.
 */
u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
{
	u64 orun = 1;
	ktime_t delta;

	delta = ktime_sub(now, hrtimer_get_expires(timer));

	if (delta.tv64 < 0)
		return 0;

	if (interval.tv64 < timer->base->resolution.tv64)
		interval.tv64 = timer->base->resolution.tv64;

	if (unlikely(delta.tv64 >= interval.tv64)) {
		s64 incr = ktime_to_ns(interval);

		orun = ktime_divns(delta, incr);
		hrtimer_add_expires_ns(timer, incr * orun);
		if (hrtimer_get_expires_tv64(timer) > now.tv64)
			return orun;
		/*
		 * This (and the ktime_add() below) is the
		 * correction for exact:
		 */
		orun++;
	}
	hrtimer_add_expires(timer, interval);

	return orun;
}
EXPORT_SYMBOL_GPL(hrtimer_forward);
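
/*
 * Usage sketch (illustrative, not part of this file; my_period is a
 * hypothetical ktime_t interval): a periodic callback typically forwards
 * its own expiry past the current time and asks to be restarted:
 *
 *	static enum hrtimer_restart my_callback(struct hrtimer *timer)
 *	{
 *		hrtimer_forward(timer, timer->base->get_time(), my_period);
 *		return HRTIMER_RESTART;
 *	}
 *
 * The returned overrun count tells callers such as the POSIX timer code
 * how many intervals were missed.
 */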
/*
 * enqueue_hrtimer - internal function to (re)start a timer
 *
 * The timer is inserted in expiry order. Insertion into the
 * red black tree is O(log(n)). Must hold the base lock.
 *
 * Returns 1 when the new timer is the leftmost timer in the tree.
 */
static int enqueue_hrtimer(struct hrtimer *timer,
			   struct hrtimer_clock_base *base)
{
	debug_activate(timer);

	timerqueue_add(&base->active, &timer->node);
	base->cpu_base->active_bases |= 1 << base->index;

	/*
	 * HRTIMER_STATE_ENQUEUED is or'ed to the current state to preserve the
	 * state of a possibly running callback.
	 */
	timer->state |= HRTIMER_STATE_ENQUEUED;

	return (&timer->node == base->active.next);
}

/*
 * __remove_hrtimer - internal function to remove a timer
 *
 * Caller must hold the base lock.
 *
 * High resolution timer mode reprograms the clock event device when the
 * timer is the one which expires next. The caller can disable this by setting
 * reprogram to zero. This is useful, when the context does a reprogramming
 * anyway (e.g. timer interrupt)
 */
static void __remove_hrtimer(struct hrtimer *timer,
			     struct hrtimer_clock_base *base,
			     unsigned long newstate, int reprogram)
{
	struct timerqueue_node *next_timer;

	if (!(timer->state & HRTIMER_STATE_ENQUEUED))
		goto out;

	next_timer = timerqueue_getnext(&base->active);
	timerqueue_del(&base->active, &timer->node);
	if (&timer->node == next_timer) {
#ifdef CONFIG_HIGH_RES_TIMERS
		/* Reprogram the clock event device, if enabled */
		if (reprogram && hrtimer_hres_active()) {
			ktime_t expires;

			expires = ktime_sub(hrtimer_get_expires(timer),
					    base->offset);
			if (base->cpu_base->expires_next.tv64 == expires.tv64)
				hrtimer_force_reprogram(base->cpu_base, 1);
		}
#endif
	}
	if (!timerqueue_getnext(&base->active))
		base->cpu_base->active_bases &= ~(1 << base->index);
out:
	timer->state = newstate;
}

/*
 * remove hrtimer, called with base lock held
 */
static inline int
remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base)
{
	if (hrtimer_is_queued(timer)) {
		unsigned long state;
		int reprogram;

		/*
		 * Remove the timer and force reprogramming when high
		 * resolution mode is active and the timer is on the current
		 * CPU. If we remove a timer on another CPU, reprogramming is
		 * skipped. The interrupt event on this CPU is fired and
		 * reprogramming happens in the interrupt handler. This is a
		 * rare case and less expensive than a smp call.
		 */
		debug_deactivate(timer);
		timer_stats_hrtimer_clear_start_info(timer);
		reprogram = base->cpu_base == &__get_cpu_var(hrtimer_bases);
		/*
		 * We must preserve the CALLBACK state flag here,
		 * otherwise we could move the timer base in
		 * switch_hrtimer_base.
		 */
		state = timer->state & HRTIMER_STATE_CALLBACK;
		__remove_hrtimer(timer, base, state, reprogram);
		return 1;
	}
	return 0;
}
int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
		unsigned long delta_ns, const enum hrtimer_mode mode,
		int wakeup)
{
	struct hrtimer_clock_base *base, *new_base;
	unsigned long flags;
	int ret, leftmost;

	base = lock_hrtimer_base(timer, &flags);

	/* Remove an active timer from the queue: */
	ret = remove_hrtimer(timer, base);

	/* Switch the timer base, if necessary: */
	new_base = switch_hrtimer_base(timer, base, mode & HRTIMER_MODE_PINNED);

	if (mode & HRTIMER_MODE_REL) {
		tim = ktime_add_safe(tim, new_base->get_time());
		/*
		 * CONFIG_TIME_LOW_RES is a temporary way for architectures
		 * to signal that they simply return xtime in
		 * do_gettimeoffset(). In this case we want to round up by
		 * resolution when starting a relative timer, to avoid short
		 * timeouts. This will go away with the GTOD framework.
		 */
#ifdef CONFIG_TIME_LOW_RES
		tim = ktime_add_safe(tim, base->resolution);
#endif
	}

	hrtimer_set_expires_range_ns(timer, tim, delta_ns);

	timer_stats_hrtimer_set_start_info(timer);

	leftmost = enqueue_hrtimer(timer, new_base);

	/*
	 * Only allow reprogramming if the new base is on this CPU.
	 * (it might still be on another CPU if the timer was pending)
	 *
	 * XXX send_remote_softirq() ?
	 */
	if (leftmost && new_base->cpu_base == &__get_cpu_var(hrtimer_bases)
		&& hrtimer_enqueue_reprogram(timer, new_base)) {
		if (wakeup) {
			/*
			 * We need to drop cpu_base->lock to avoid a
			 * lock ordering issue vs. rq->lock.
			 */
			raw_spin_unlock(&new_base->cpu_base->lock);
			raise_softirq_irqoff(HRTIMER_SOFTIRQ);
			local_irq_restore(flags);
			return ret;
		} else {
			__raise_softirq_irqoff(HRTIMER_SOFTIRQ);
		}
	}

	unlock_hrtimer_base(timer, &flags);

	return ret;
}
/**
 * hrtimer_start_range_ns - (re)start an hrtimer on the current CPU
 * @timer:	the timer to be added
 * @tim:	expiry time
 * @delta_ns:	"slack" range for the timer
 * @mode:	expiry mode: absolute (HRTIMER_MODE_ABS) or
 *		relative (HRTIMER_MODE_REL)
 *
 * Returns:
 *  0 on success
 *  1 when the timer was active
 */
int hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
		unsigned long delta_ns, const enum hrtimer_mode mode)
{
	return __hrtimer_start_range_ns(timer, tim, delta_ns, mode, 1);
}
EXPORT_SYMBOL_GPL(hrtimer_start_range_ns);

/**
 * hrtimer_start - (re)start an hrtimer on the current CPU
 * @timer:	the timer to be added
 * @tim:	expiry time
 * @mode:	expiry mode: absolute (HRTIMER_MODE_ABS) or
 *		relative (HRTIMER_MODE_REL)
 *
 * Returns:
 *  0 on success
 *  1 when the timer was active
 */
int
hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode)
{
	return __hrtimer_start_range_ns(timer, tim, 0, mode, 1);
}
EXPORT_SYMBOL_GPL(hrtimer_start);
/**
 * hrtimer_try_to_cancel - try to deactivate a timer
 * @timer:	hrtimer to stop
 *
 * Returns:
 *  0 when the timer was not active
 *  1 when the timer was active
 * -1 when the timer is currently executing the callback function and
 *    cannot be stopped
 */
int hrtimer_try_to_cancel(struct hrtimer *timer)
{
	struct hrtimer_clock_base *base;
	unsigned long flags;
	int ret = -1;

	base = lock_hrtimer_base(timer, &flags);

	if (!hrtimer_callback_running(timer))
		ret = remove_hrtimer(timer, base);

	unlock_hrtimer_base(timer, &flags);

	return ret;
}
EXPORT_SYMBOL_GPL(hrtimer_try_to_cancel);
/**
 * hrtimer_cancel - cancel a timer and wait for the handler to finish.
 * @timer:	the timer to be cancelled
 *
 * Returns:
 *  0 when the timer was not active
 *  1 when the timer was active
 */
int hrtimer_cancel(struct hrtimer *timer)
{
	for (;;) {
		int ret = hrtimer_try_to_cancel(timer);

		if (ret >= 0)
			return ret;
		cpu_relax();
	}
}
EXPORT_SYMBOL_GPL(hrtimer_cancel);
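
/*
 * Usage note (illustrative annotation, not part of this file):
 * hrtimer_cancel() spins until a concurrently running callback has
 * finished, which makes it the safe call before freeing the structure
 * that embeds the timer. For the same reason it must not be called from
 * the timer's own callback, where the wait could never end;
 * hrtimer_try_to_cancel() is the non-blocking variant.
 */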
/**
 * hrtimer_get_remaining - get remaining time for the timer
 * @timer:	the timer to read
 */
ktime_t hrtimer_get_remaining(const struct hrtimer *timer)
{
	unsigned long flags;
	ktime_t rem;

	lock_hrtimer_base(timer, &flags);
	rem = hrtimer_expires_remaining(timer);
	unlock_hrtimer_base(timer, &flags);

	return rem;
}
EXPORT_SYMBOL_GPL(hrtimer_get_remaining);

#ifdef CONFIG_NO_HZ
/**
 * hrtimer_get_next_event - get the time until next expiry event
 *
 * Returns the delta to the next expiry event or KTIME_MAX if no timer
 * is pending.
 */
ktime_t hrtimer_get_next_event(void)
{
	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
	struct hrtimer_clock_base *base = cpu_base->clock_base;
	ktime_t delta, mindelta = { .tv64 = KTIME_MAX };
	unsigned long flags;
	int i;

	raw_spin_lock_irqsave(&cpu_base->lock, flags);

	if (!hrtimer_hres_active()) {
		for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) {
			struct hrtimer *timer;
			struct timerqueue_node *next;

			next = timerqueue_getnext(&base->active);
			if (!next)
				continue;

			timer = container_of(next, struct hrtimer, node);
			delta.tv64 = hrtimer_get_expires_tv64(timer);
			delta = ktime_sub(delta, base->get_time());
			if (delta.tv64 < mindelta.tv64)
				mindelta.tv64 = delta.tv64;
		}
	}

	raw_spin_unlock_irqrestore(&cpu_base->lock, flags);

	if (mindelta.tv64 < 0)
		mindelta.tv64 = 0;
	return mindelta;
}
#endif
static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
			   enum hrtimer_mode mode)
{
	struct hrtimer_cpu_base *cpu_base;
	int base;

	memset(timer, 0, sizeof(struct hrtimer));

	cpu_base = &__raw_get_cpu_var(hrtimer_bases);

	if (clock_id == CLOCK_REALTIME && mode != HRTIMER_MODE_ABS)
		clock_id = CLOCK_MONOTONIC;

	base = hrtimer_clockid_to_base(clock_id);
	timer->base = &cpu_base->clock_base[base];
	timerqueue_init(&timer->node);

#ifdef CONFIG_TIMER_STATS
	timer->start_site = NULL;
	timer->start_pid = -1;
	memset(timer->start_comm, 0, TASK_COMM_LEN);
#endif
}

/**
 * hrtimer_init - initialize a timer to the given clock
 * @timer:	the timer to be initialized
 * @clock_id:	the clock to be used
 * @mode:	timer mode abs/rel
 */
void hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
		  enum hrtimer_mode mode)
{
	debug_init(timer, clock_id, mode);
	__hrtimer_init(timer, clock_id, mode);
}
EXPORT_SYMBOL_GPL(hrtimer_init);
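
/*
 * Usage sketch (illustrative, not part of this file; my_timer and
 * my_callback are hypothetical):
 *
 *	static struct hrtimer my_timer;
 *
 *	hrtimer_init(&my_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 *	my_timer.function = my_callback;
 *	hrtimer_start(&my_timer, ktime_set(0, 100 * NSEC_PER_USEC),
 *		      HRTIMER_MODE_REL);
 *
 * arms a one-shot timer 100 us from now; returning HRTIMER_RESTART from
 * the callback (after forwarding the expiry) makes it periodic.
 */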
/**
 * hrtimer_get_res - get the timer resolution for a clock
 * @which_clock: which clock to query
 * @tp:		 pointer to timespec variable to store the resolution
 *
 * Store the resolution of the clock selected by @which_clock in the
 * variable pointed to by @tp.
 */
int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp)
{
	struct hrtimer_cpu_base *cpu_base;
	int base = hrtimer_clockid_to_base(which_clock);

	cpu_base = &__raw_get_cpu_var(hrtimer_bases);
	*tp = ktime_to_timespec(cpu_base->clock_base[base].resolution);

	return 0;
}
EXPORT_SYMBOL_GPL(hrtimer_get_res);
static void __run_hrtimer(struct hrtimer *timer, ktime_t *now)
{
	struct hrtimer_clock_base *base = timer->base;
	struct hrtimer_cpu_base *cpu_base = base->cpu_base;
	enum hrtimer_restart (*fn)(struct hrtimer *);
	int restart;

	WARN_ON(!irqs_disabled());

	debug_deactivate(timer);
	__remove_hrtimer(timer, base, HRTIMER_STATE_CALLBACK, 0);
	timer_stats_account_hrtimer(timer);
	fn = timer->function;

	/*
	 * Because we run timers from hardirq context, there is no chance
	 * they get migrated to another cpu, therefore it's safe to unlock
	 * the timer base.
	 */
	raw_spin_unlock(&cpu_base->lock);
	trace_hrtimer_expire_entry(timer, now);
	restart = fn(timer);
	trace_hrtimer_expire_exit(timer);
	raw_spin_lock(&cpu_base->lock);

	/*
	 * Note: We clear the CALLBACK bit after enqueue_hrtimer and
	 * we do not reprogram the event hardware. Happens either in
	 * hrtimer_start_range_ns() or in hrtimer_interrupt()
	 */
	if (restart != HRTIMER_NORESTART) {
		BUG_ON(timer->state != HRTIMER_STATE_CALLBACK);
		enqueue_hrtimer(timer, base);
	}

	WARN_ON_ONCE(!(timer->state & HRTIMER_STATE_CALLBACK));

	timer->state &= ~HRTIMER_STATE_CALLBACK;
}
#ifdef CONFIG_HIGH_RES_TIMERS

/*
 * High resolution timer interrupt
 * Called with interrupts disabled
 */
void hrtimer_interrupt(struct clock_event_device *dev)
{
	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
	ktime_t expires_next, now, entry_time, delta;
	int i, retries = 0;

	BUG_ON(!cpu_base->hres_active);
	cpu_base->nr_events++;
	dev->next_event.tv64 = KTIME_MAX;

	raw_spin_lock(&cpu_base->lock);
	entry_time = now = hrtimer_update_base(cpu_base);
retry:
	expires_next.tv64 = KTIME_MAX;
	/*
	 * We set expires_next to KTIME_MAX here with cpu_base->lock
	 * held to prevent that a timer is enqueued in our queue via
	 * the migration code. This does not affect enqueueing of
	 * timers which run their callback and need to be requeued on
	 * this CPU.
	 */
	cpu_base->expires_next.tv64 = KTIME_MAX;

	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
		struct hrtimer_clock_base *base;
		struct timerqueue_node *node;
		ktime_t basenow;

		if (!(cpu_base->active_bases & (1 << i)))
			continue;

		base = cpu_base->clock_base + i;
		basenow = ktime_add(now, base->offset);

		while ((node = timerqueue_getnext(&base->active))) {
			struct hrtimer *timer;

			timer = container_of(node, struct hrtimer, node);

			/*
			 * The immediate goal for using the softexpires is
			 * minimizing wakeups, not running timers at the
			 * earliest interrupt after their soft expiration.
			 * This allows us to avoid using a Priority Search
			 * Tree, which can answer a stabbing query for
			 * overlapping intervals and instead use the simple
			 * BST we already have.
			 * We don't add extra wakeups by delaying timers that
			 * are right-of a not yet expired timer, because that
			 * timer will have to trigger a wakeup anyway.
			 */
			if (basenow.tv64 < hrtimer_get_softexpires_tv64(timer)) {
				ktime_t expires;

				expires = ktime_sub(hrtimer_get_expires(timer),
						    base->offset);
				if (expires.tv64 < 0)
					expires.tv64 = KTIME_MAX;
				if (expires.tv64 < expires_next.tv64)
					expires_next = expires;
				break;
			}

			__run_hrtimer(timer, &basenow);
		}
	}

	/*
	 * Store the new expiry value so the migration code can verify
	 * against it.
	 */
	cpu_base->expires_next = expires_next;
	raw_spin_unlock(&cpu_base->lock);

	/* Reprogramming necessary ? */
	if (expires_next.tv64 == KTIME_MAX ||
	    !tick_program_event(expires_next, 0)) {
		cpu_base->hang_detected = 0;
		return;
	}

	/*
	 * The next timer was already expired due to:
	 * - tracing
	 * - long lasting callbacks
	 * - being scheduled away when running in a VM
	 *
	 * We need to prevent that we loop forever in the hrtimer
	 * interrupt routine. We give it 3 attempts to avoid
	 * overreacting on some spurious event.
	 *
	 * Acquire base lock for updating the offsets and retrieving
	 * the current time.
	 */
	raw_spin_lock(&cpu_base->lock);
	now = hrtimer_update_base(cpu_base);
	cpu_base->nr_retries++;
	if (++retries < 3)
		goto retry;
	/*
	 * Give the system a chance to do something else than looping
	 * here. We stored the entry time, so we know exactly how long
	 * we spent here. We schedule the next event this amount of
	 * time away.
	 */
	cpu_base->nr_hangs++;
	cpu_base->hang_detected = 1;
	raw_spin_unlock(&cpu_base->lock);
	delta = ktime_sub(now, entry_time);
	if (delta.tv64 > cpu_base->max_hang_time.tv64)
		cpu_base->max_hang_time = delta;
	/*
	 * Limit it to a sensible value as we enforce a longer
	 * delay. Give the CPU at least 100ms to catch up.
	 */
	if (delta.tv64 > 100 * NSEC_PER_MSEC)
		expires_next = ktime_add_ns(now, 100 * NSEC_PER_MSEC);
	else
		expires_next = ktime_add(now, delta);
	tick_program_event(expires_next, 1);
	printk_once(KERN_WARNING "hrtimer: interrupt took %llu ns\n",
		    ktime_to_ns(delta));
}
/*
 * local version of hrtimer_peek_ahead_timers() called with interrupts
 * disabled.
 */
static void __hrtimer_peek_ahead_timers(void)
{
	struct tick_device *td;

	if (!hrtimer_hres_active())
		return;

	td = &__get_cpu_var(tick_cpu_device);
	if (td && td->evtdev)
		hrtimer_interrupt(td->evtdev);
}

/**
 * hrtimer_peek_ahead_timers -- run soft-expired timers now
 *
 * hrtimer_peek_ahead_timers will peek at the timer queue of
 * the current cpu and check if there are any timers for which
 * the soft expires time has passed. If any such timers exist,
 * they are run immediately and then removed from the timer queue.
 */
void hrtimer_peek_ahead_timers(void)
{
	unsigned long flags;

	local_irq_save(flags);
	__hrtimer_peek_ahead_timers();
	local_irq_restore(flags);
}

static void run_hrtimer_softirq(struct softirq_action *h)
{
	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);

	if (cpu_base->clock_was_set) {
		cpu_base->clock_was_set = 0;
		clock_was_set();
	}

	hrtimer_peek_ahead_timers();
}

#else /* CONFIG_HIGH_RES_TIMERS */

static inline void __hrtimer_peek_ahead_timers(void) { }

#endif	/* !CONFIG_HIGH_RES_TIMERS */
/*
 * Called from timer softirq every jiffy, expire hrtimers:
 *
 * For HRT it's the fallback code to run the softirq in the timer
 * softirq context in case the hrtimer initialization failed or has
 * not been done yet.
 */
void hrtimer_run_pending(void)
{
	if (hrtimer_hres_active())
		return;

	/*
	 * This _is_ ugly: We have to check in the softirq context,
	 * whether we can switch to highres and / or nohz mode. The
	 * clocksource switch happens in the timer interrupt with
	 * xtime_lock held. Notification from there only sets the
	 * check bit in the tick_oneshot code, otherwise we might
	 * deadlock vs. xtime_lock.
	 */
	if (tick_check_oneshot_change(!hrtimer_is_hres_enabled()))
		hrtimer_switch_to_hres();
}
/*
 * Called from hardirq context every jiffy
 */
void hrtimer_run_queues(void)
{
	struct timerqueue_node *node;
	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
	struct hrtimer_clock_base *base;
	int index, gettime = 1;

	if (hrtimer_hres_active())
		return;

	for (index = 0; index < HRTIMER_MAX_CLOCK_BASES; index++) {
		base = &cpu_base->clock_base[index];
		if (!timerqueue_getnext(&base->active))
			continue;

		if (gettime) {
			hrtimer_get_softirq_time(cpu_base);
			gettime = 0;
		}

		raw_spin_lock(&cpu_base->lock);

		while ((node = timerqueue_getnext(&base->active))) {
			struct hrtimer *timer;

			timer = container_of(node, struct hrtimer, node);
			if (base->softirq_time.tv64 <=
					hrtimer_get_expires_tv64(timer))
				break;

			__run_hrtimer(timer, &base->softirq_time);
		}
		raw_spin_unlock(&cpu_base->lock);
	}
}
/*
 * Sleep related functions:
 */
static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer)
{
	struct hrtimer_sleeper *t =
		container_of(timer, struct hrtimer_sleeper, timer);
	struct task_struct *task = t->task;

	t->task = NULL;
	if (task)
		wake_up_process(task);

	return HRTIMER_NORESTART;
}

void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *task)
{
	sl->timer.function = hrtimer_wakeup;
	sl->task = task;
}
EXPORT_SYMBOL_GPL(hrtimer_init_sleeper);
static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode)
{
	hrtimer_init_sleeper(t, current);

	do {
		set_current_state(TASK_INTERRUPTIBLE);
		hrtimer_start_expires(&t->timer, mode);
		if (!hrtimer_active(&t->timer))
			t->task = NULL;

		if (likely(t->task))
			schedule();

		hrtimer_cancel(&t->timer);
		mode = HRTIMER_MODE_ABS;

	} while (t->task && !signal_pending(current));

	__set_current_state(TASK_RUNNING);

	return t->task == NULL;
}

static int update_rmtp(struct hrtimer *timer, struct timespec __user *rmtp)
{
	struct timespec rmt;
	ktime_t rem;

	rem = hrtimer_expires_remaining(timer);
	if (rem.tv64 <= 0)
		return 0;
	rmt = ktime_to_timespec(rem);

	if (copy_to_user(rmtp, &rmt, sizeof(*rmtp)))
		return -EFAULT;

	return 1;
}

long __sched hrtimer_nanosleep_restart(struct restart_block *restart)
{
	struct hrtimer_sleeper t;
	struct timespec __user *rmtp;
	int ret = 0;

	hrtimer_init_on_stack(&t.timer, restart->nanosleep.clockid,
				HRTIMER_MODE_ABS);
	hrtimer_set_expires_tv64(&t.timer, restart->nanosleep.expires);

	if (do_nanosleep(&t, HRTIMER_MODE_ABS))
		goto out;

	rmtp = restart->nanosleep.rmtp;
	if (rmtp) {
		ret = update_rmtp(&t.timer, rmtp);
		if (ret <= 0)
			goto out;
	}

	/* The other values in restart are already filled in */
	ret = -ERESTART_RESTARTBLOCK;
out:
	destroy_hrtimer_on_stack(&t.timer);
	return ret;
}

long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
		       const enum hrtimer_mode mode, const clockid_t clockid)
{
	struct restart_block *restart;
	struct hrtimer_sleeper t;
	int ret = 0;
	unsigned long slack;

	slack = current->timer_slack_ns;
	if (rt_task(current))
		slack = 0;

	hrtimer_init_on_stack(&t.timer, clockid, mode);
	hrtimer_set_expires_range_ns(&t.timer, timespec_to_ktime(*rqtp), slack);
	if (do_nanosleep(&t, mode))
		goto out;

	/* Absolute timers do not update the rmtp value and restart: */
	if (mode == HRTIMER_MODE_ABS) {
		ret = -ERESTARTNOHAND;
		goto out;
	}

	if (rmtp) {
		ret = update_rmtp(&t.timer, rmtp);
		if (ret <= 0)
			goto out;
	}

	restart = &current_thread_info()->restart_block;
	restart->fn = hrtimer_nanosleep_restart;
	restart->nanosleep.clockid = t.timer.base->clockid;
	restart->nanosleep.rmtp = rmtp;
	restart->nanosleep.expires = hrtimer_get_expires_tv64(&t.timer);

	ret = -ERESTART_RESTARTBLOCK;
out:
	destroy_hrtimer_on_stack(&t.timer);
	return ret;
}

SYSCALL_DEFINE2(nanosleep, struct timespec __user *, rqtp,
		struct timespec __user *, rmtp)
{
	struct timespec tu;

	if (copy_from_user(&tu, rqtp, sizeof(tu)))
		return -EFAULT;

	if (!timespec_valid(&tu))
		return -EINVAL;

	return hrtimer_nanosleep(&tu, rmtp, HRTIMER_MODE_REL, CLOCK_MONOTONIC);
}

/*
 * Functions related to boot-time initialization:
 */
static void __cpuinit init_hrtimers_cpu(int cpu)
{
        struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu);
        int i;

        raw_spin_lock_init(&cpu_base->lock);

        for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
                cpu_base->clock_base[i].cpu_base = cpu_base;
                timerqueue_init_head(&cpu_base->clock_base[i].active);
        }

        hrtimer_init_hres(cpu_base);
}

#ifdef CONFIG_HOTPLUG_CPU

static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
                                 struct hrtimer_clock_base *new_base)
{
        struct hrtimer *timer;
        struct timerqueue_node *node;

        while ((node = timerqueue_getnext(&old_base->active))) {
                timer = container_of(node, struct hrtimer, node);
                BUG_ON(hrtimer_callback_running(timer));
                debug_deactivate(timer);

                /*
                 * Mark it as STATE_MIGRATE, not INACTIVE, otherwise the
                 * timer could be seen as !active and just vanish away
                 * under us on another CPU.
                 */
                __remove_hrtimer(timer, old_base, HRTIMER_STATE_MIGRATE, 0);
                timer->base = new_base;
                /*
                 * Enqueue the timers on the new cpu. This does not
                 * reprogram the event device in case the timer
                 * expires before the earliest one on this CPU, but we run
                 * hrtimer_interrupt after we migrated everything to
                 * sort out already expired timers and reprogram the
                 * event device.
                 */
                enqueue_hrtimer(timer, new_base);

                /* Clear the migration state bit */
                timer->state &= ~HRTIMER_STATE_MIGRATE;
        }
}

static void migrate_hrtimers(int scpu)
{
        struct hrtimer_cpu_base *old_base, *new_base;
        int i;

        BUG_ON(cpu_online(scpu));
        tick_cancel_sched_timer(scpu);

        local_irq_disable();
        old_base = &per_cpu(hrtimer_bases, scpu);
        new_base = &__get_cpu_var(hrtimer_bases);
        /*
         * The caller is globally serialized and nobody else
         * takes two locks at once, so deadlock is not possible.
         */
        raw_spin_lock(&new_base->lock);
        raw_spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);

        for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
                migrate_hrtimer_list(&old_base->clock_base[i],
                                     &new_base->clock_base[i]);
        }

        raw_spin_unlock(&old_base->lock);
        raw_spin_unlock(&new_base->lock);

        /* Check if we got expired work to do */
        __hrtimer_peek_ahead_timers();
        local_irq_enable();
}

#endif /* CONFIG_HOTPLUG_CPU */

static int __cpuinit hrtimer_cpu_notify(struct notifier_block *self,
                                        unsigned long action, void *hcpu)
{
        int scpu = (long)hcpu;

        switch (action) {

        case CPU_UP_PREPARE:
        case CPU_UP_PREPARE_FROZEN:
                init_hrtimers_cpu(scpu);
                break;

#ifdef CONFIG_HOTPLUG_CPU
        case CPU_DYING:
        case CPU_DYING_FROZEN:
                clockevents_notify(CLOCK_EVT_NOTIFY_CPU_DYING, &scpu);
                break;
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
        {
                clockevents_notify(CLOCK_EVT_NOTIFY_CPU_DEAD, &scpu);
                migrate_hrtimers(scpu);
                break;
        }
#endif

        default:
                break;
        }

        return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata hrtimers_nb = {
        .notifier_call = hrtimer_cpu_notify,
};

void __init hrtimers_init(void)
{
        hrtimer_cpu_notify(&hrtimers_nb, (unsigned long)CPU_UP_PREPARE,
                           (void *)(long)smp_processor_id());
        register_cpu_notifier(&hrtimers_nb);
#ifdef CONFIG_HIGH_RES_TIMERS
        open_softirq(HRTIMER_SOFTIRQ, run_hrtimer_softirq);
#endif
}

/**
 * schedule_hrtimeout_range_clock - sleep until timeout
 * @expires:    timeout value (ktime_t)
 * @delta:      slack in expires timeout (ktime_t)
 * @mode:       timer mode, HRTIMER_MODE_ABS or HRTIMER_MODE_REL
 * @clock:      timer clock, CLOCK_MONOTONIC or CLOCK_REALTIME
 */
int __sched
schedule_hrtimeout_range_clock(ktime_t *expires, unsigned long delta,
                               const enum hrtimer_mode mode, int clock)
{
        struct hrtimer_sleeper t;

        /*
         * Optimize when a zero timeout value is given. It does not
         * matter whether this is an absolute or a relative time.
         */
        if (expires && !expires->tv64) {
                __set_current_state(TASK_RUNNING);
                return 0;
        }

        /*
         * A NULL parameter means "infinite"
         */
        if (!expires) {
                schedule();
                __set_current_state(TASK_RUNNING);
                return -EINTR;
        }

        hrtimer_init_on_stack(&t.timer, clock, mode);
        hrtimer_set_expires_range_ns(&t.timer, *expires, delta);

        hrtimer_init_sleeper(&t, current);

        hrtimer_start_expires(&t.timer, mode);
        if (!hrtimer_active(&t.timer))
                t.task = NULL;

        if (likely(t.task))
                schedule();

        hrtimer_cancel(&t.timer);
        destroy_hrtimer_on_stack(&t.timer);

        __set_current_state(TASK_RUNNING);

        return !t.task ? 0 : -EINTR;
}
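
/*
 * Illustrative sketch (not part of this file; the function name is
 * hypothetical): waiting for an absolute wall-clock deadline with
 * 100us of slack via the clock-selectable variant above.  As with the
 * other schedule_hrtimeout*() helpers, the caller sets the task state
 * first.
 *
 *      static int wait_until_realtime(ktime_t deadline)
 *      {
 *              set_current_state(TASK_INTERRUPTIBLE);
 *              return schedule_hrtimeout_range_clock(&deadline,
 *                                                    100 * NSEC_PER_USEC,
 *                                                    HRTIMER_MODE_ABS,
 *                                                    CLOCK_REALTIME);
 *      }
 */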

/**
 * schedule_hrtimeout_range - sleep until timeout
 * @expires:    timeout value (ktime_t)
 * @delta:      slack in expires timeout (ktime_t)
 * @mode:       timer mode, HRTIMER_MODE_ABS or HRTIMER_MODE_REL
 *
 * Make the current task sleep until the given expiry time has
 * elapsed. The routine will return immediately unless
 * the current task state has been set (see set_current_state()).
 *
 * The @delta argument gives the kernel the freedom to schedule the
 * actual wakeup to a time that is both power and performance friendly.
 * The kernel gives the normal best effort behavior for "@expires+@delta",
 * but may decide to fire the timer earlier, though never earlier than
 * @expires.
 *
 * You can set the task state as follows -
 *
 * %TASK_UNINTERRUPTIBLE - at least @expires time is guaranteed to
 * pass before the routine returns.
 *
 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
 * delivered to the current task.
 *
 * The current task state is guaranteed to be TASK_RUNNING when this
 * routine returns.
 *
 * Returns 0 when the timer has expired, otherwise -EINTR.
 */
int __sched schedule_hrtimeout_range(ktime_t *expires, unsigned long delta,
                                     const enum hrtimer_mode mode)
{
        return schedule_hrtimeout_range_clock(expires, delta, mode,
                                              CLOCK_MONOTONIC);
}
EXPORT_SYMBOL_GPL(schedule_hrtimeout_range);
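
/*
 * Illustrative sketch (not part of this file; the function name is
 * hypothetical): a driver sleeping for roughly 10ms while granting the
 * kernel 1ms of slack to coalesce the wakeup with other timers.
 *
 *      static int my_coalesced_sleep(void)
 *      {
 *              ktime_t to = ktime_set(0, 10 * NSEC_PER_MSEC);
 *
 *              set_current_state(TASK_INTERRUPTIBLE);
 *              return schedule_hrtimeout_range(&to, NSEC_PER_MSEC,
 *                                              HRTIMER_MODE_REL);
 *      }
 */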

/**
 * schedule_hrtimeout - sleep until timeout
 * @expires:    timeout value (ktime_t)
 * @mode:       timer mode, HRTIMER_MODE_ABS or HRTIMER_MODE_REL
 *
 * Make the current task sleep until the given expiry time has
 * elapsed. The routine will return immediately unless
 * the current task state has been set (see set_current_state()).
 *
 * You can set the task state as follows -
 *
 * %TASK_UNINTERRUPTIBLE - at least @expires time is guaranteed to
 * pass before the routine returns.
 *
 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
 * delivered to the current task.
 *
 * The current task state is guaranteed to be TASK_RUNNING when this
 * routine returns.
 *
 * Returns 0 when the timer has expired, otherwise -EINTR.
 */
int __sched schedule_hrtimeout(ktime_t *expires,
                               const enum hrtimer_mode mode)
{
        return schedule_hrtimeout_range(expires, 0, mode);
}
EXPORT_SYMBOL_GPL(schedule_hrtimeout);
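
/*
 * Illustrative sketch (not part of this file; the function name is
 * hypothetical): schedule_hrtimeout() with HRTIMER_MODE_ABS sleeps
 * until an absolute point on CLOCK_MONOTONIC, here one second from
 * "now".
 *
 *      static int sleep_until_one_second_from_now(void)
 *      {
 *              ktime_t deadline = ktime_add_ns(ktime_get(), NSEC_PER_SEC);
 *
 *              set_current_state(TASK_INTERRUPTIBLE);
 *              return schedule_hrtimeout(&deadline, HRTIMER_MODE_ABS);
 *      }
 */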