
/*
 *  linux/kernel/hrtimer.c
 *
 *  Copyright(C) 2005-2006, Thomas Gleixner <tglx@linutronix.de>
 *  Copyright(C) 2005-2007, Red Hat, Inc., Ingo Molnar
 *  Copyright(C) 2006-2007  Timesys Corp., Thomas Gleixner
 *
 *  High-resolution kernel timers
 *
 *  In contrast to the low-resolution timeout API implemented in
 *  kernel/timer.c, hrtimers provide finer resolution and accuracy
 *  depending on system configuration and capabilities.
 *
 *  These timers are currently used for:
 *   - itimers
 *   - POSIX timers
 *   - nanosleep
 *   - precise in-kernel timing
 *
 *  Started by: Thomas Gleixner and Ingo Molnar
 *
 *  Credits:
 *      based on kernel/timer.c
 *
 *      Help, testing, suggestions, bugfixes, improvements were
 *      provided by:
 *
 *      George Anzinger, Andrew Morton, Steven Rostedt, Roman Zippel
 *      et al.
 *
 *  For licencing details see kernel-base/COPYING
 */

#include <linux/cpu.h>
#include <linux/export.h>
#include <linux/percpu.h>
#include <linux/hrtimer.h>
#include <linux/notifier.h>
#include <linux/syscalls.h>
#include <linux/kallsyms.h>
#include <linux/interrupt.h>
#include <linux/tick.h>
#include <linux/seq_file.h>
#include <linux/err.h>
#include <linux/debugobjects.h>
#include <linux/sched.h>
#include <linux/sched/sysctl.h>
#include <linux/sched/rt.h>
#include <linux/timer.h>

#include <asm/uaccess.h>

#include <trace/events/timer.h>
/*
 * The timer bases:
 *
 * There are more clockids than hrtimer bases. Thus, we index
 * into the timer bases by the hrtimer_base_type enum. When trying
 * to reach a base using a clockid, hrtimer_clockid_to_base()
 * is used to convert from clockid to the proper hrtimer_base_type.
 */
DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) =
{
        .clock_base =
        {
                {
                        .index = HRTIMER_BASE_MONOTONIC,
                        .clockid = CLOCK_MONOTONIC,
                        .get_time = &ktime_get,
                        .resolution = KTIME_LOW_RES,
                },
                {
                        .index = HRTIMER_BASE_REALTIME,
                        .clockid = CLOCK_REALTIME,
                        .get_time = &ktime_get_real,
                        .resolution = KTIME_LOW_RES,
                },
                {
                        .index = HRTIMER_BASE_BOOTTIME,
                        .clockid = CLOCK_BOOTTIME,
                        .get_time = &ktime_get_boottime,
                        .resolution = KTIME_LOW_RES,
                },
        }
};

static const int hrtimer_clock_to_base_table[MAX_CLOCKS] = {
        [CLOCK_REALTIME]        = HRTIMER_BASE_REALTIME,
        [CLOCK_MONOTONIC]       = HRTIMER_BASE_MONOTONIC,
        [CLOCK_BOOTTIME]        = HRTIMER_BASE_BOOTTIME,
};

static inline int hrtimer_clockid_to_base(clockid_t clock_id)
{
        return hrtimer_clock_to_base_table[clock_id];
}
/*
 * Get the coarse grained time at the softirq based on xtime and
 * wall_to_monotonic.
 */
static void hrtimer_get_softirq_time(struct hrtimer_cpu_base *base)
{
        ktime_t xtim, mono, boot;
        struct timespec xts, tom, slp;

        get_xtime_and_monotonic_and_sleep_offset(&xts, &tom, &slp);

        xtim = timespec_to_ktime(xts);
        mono = ktime_add(xtim, timespec_to_ktime(tom));
        boot = ktime_add(mono, timespec_to_ktime(slp));
        base->clock_base[HRTIMER_BASE_REALTIME].softirq_time = xtim;
        base->clock_base[HRTIMER_BASE_MONOTONIC].softirq_time = mono;
        base->clock_base[HRTIMER_BASE_BOOTTIME].softirq_time = boot;
}
/*
 * Functions and macros which are different for UP/SMP systems are kept in a
 * single place
 */
#ifdef CONFIG_SMP

/*
 * We are using hashed locking: holding per_cpu(hrtimer_bases)[n].lock
 * means that all timers which are tied to this base via timer->base are
 * locked, and the base itself is locked too.
 *
 * So __run_timers/migrate_timers can safely modify all timers which could
 * be found on the lists/queues.
 *
 * When the timer's base is locked, and the timer removed from list, it is
 * possible to set timer->base = NULL and drop the lock: the timer remains
 * locked.
 */
static
struct hrtimer_clock_base *lock_hrtimer_base(const struct hrtimer *timer,
                                             unsigned long *flags)
{
        struct hrtimer_clock_base *base;

        for (;;) {
                base = timer->base;
                if (likely(base != NULL)) {
                        raw_spin_lock_irqsave(&base->cpu_base->lock, *flags);
                        if (likely(base == timer->base))
                                return base;
                        /* The timer has migrated to another CPU: */
                        raw_spin_unlock_irqrestore(&base->cpu_base->lock, *flags);
                }
                cpu_relax();
        }
}
/*
 * Get the preferred target CPU for NOHZ
 */
static int hrtimer_get_target(int this_cpu, int pinned)
{
#ifdef CONFIG_NO_HZ
        if (!pinned && get_sysctl_timer_migration() && idle_cpu(this_cpu))
                return get_nohz_timer_target();
#endif
        return this_cpu;
}

/*
 * With HIGHRES=y we do not migrate the timer when it is expiring
 * before the next event on the target cpu because we cannot reprogram
 * the target cpu hardware and we would cause it to fire late.
 *
 * Called with cpu_base->lock of target cpu held.
 */
static int
hrtimer_check_target(struct hrtimer *timer, struct hrtimer_clock_base *new_base)
{
#ifdef CONFIG_HIGH_RES_TIMERS
        ktime_t expires;

        if (!new_base->cpu_base->hres_active)
                return 0;

        expires = ktime_sub(hrtimer_get_expires(timer), new_base->offset);
        return expires.tv64 <= new_base->cpu_base->expires_next.tv64;
#else
        return 0;
#endif
}

/*
 * Switch the timer base to the current CPU when possible.
 */
static inline struct hrtimer_clock_base *
switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_clock_base *base,
                    int pinned)
{
        struct hrtimer_clock_base *new_base;
        struct hrtimer_cpu_base *new_cpu_base;
        int this_cpu = smp_processor_id();
        int cpu = hrtimer_get_target(this_cpu, pinned);
        int basenum = base->index;

again:
        new_cpu_base = &per_cpu(hrtimer_bases, cpu);
        new_base = &new_cpu_base->clock_base[basenum];

        if (base != new_base) {
                /*
                 * We are trying to move timer to new_base.
                 * However we can't change timer's base while it is running,
                 * so we keep it on the same CPU. No hassle vs. reprogramming
                 * the event source in the high resolution case. The softirq
                 * code will take care of this when the timer function has
                 * completed. There is no conflict as we hold the lock until
                 * the timer is enqueued.
                 */
                if (unlikely(hrtimer_callback_running(timer)))
                        return base;

                /* See the comment in lock_timer_base() */
                timer->base = NULL;
                raw_spin_unlock(&base->cpu_base->lock);
                raw_spin_lock(&new_base->cpu_base->lock);

                if (cpu != this_cpu && hrtimer_check_target(timer, new_base)) {
                        cpu = this_cpu;
                        raw_spin_unlock(&new_base->cpu_base->lock);
                        raw_spin_lock(&base->cpu_base->lock);
                        timer->base = base;
                        goto again;
                }
                timer->base = new_base;
        }
        return new_base;
}

#else /* CONFIG_SMP */

static inline struct hrtimer_clock_base *
lock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
{
        struct hrtimer_clock_base *base = timer->base;

        raw_spin_lock_irqsave(&base->cpu_base->lock, *flags);

        return base;
}

# define switch_hrtimer_base(t, b, p)   (b)

#endif  /* !CONFIG_SMP */
/*
 * Functions for the union type storage format of ktime_t which are
 * too large for inlining:
 */
#if BITS_PER_LONG < 64
# ifndef CONFIG_KTIME_SCALAR
/**
 * ktime_add_ns - Add a scalar nanoseconds value to a ktime_t variable
 * @kt:         addend
 * @nsec:       the scalar nsec value to add
 *
 * Returns the sum of kt and nsec in ktime_t format
 */
ktime_t ktime_add_ns(const ktime_t kt, u64 nsec)
{
        ktime_t tmp;

        if (likely(nsec < NSEC_PER_SEC)) {
                tmp.tv64 = nsec;
        } else {
                unsigned long rem = do_div(nsec, NSEC_PER_SEC);

                tmp = ktime_set((long)nsec, rem);
        }

        return ktime_add(kt, tmp);
}

EXPORT_SYMBOL_GPL(ktime_add_ns);

/**
 * ktime_sub_ns - Subtract a scalar nanoseconds value from a ktime_t variable
 * @kt:         minuend
 * @nsec:       the scalar nsec value to subtract
 *
 * Returns the subtraction of @nsec from @kt in ktime_t format
 */
ktime_t ktime_sub_ns(const ktime_t kt, u64 nsec)
{
        ktime_t tmp;

        if (likely(nsec < NSEC_PER_SEC)) {
                tmp.tv64 = nsec;
        } else {
                unsigned long rem = do_div(nsec, NSEC_PER_SEC);

                tmp = ktime_set((long)nsec, rem);
        }

        return ktime_sub(kt, tmp);
}

EXPORT_SYMBOL_GPL(ktime_sub_ns);
# endif /* !CONFIG_KTIME_SCALAR */

/*
 * Divide a ktime value by a nanosecond value
 */
u64 ktime_divns(const ktime_t kt, s64 div)
{
        u64 dclc;
        int sft = 0;

        dclc = ktime_to_ns(kt);
        /* Make sure the divisor is less than 2^32: */
        while (div >> 32) {
                sft++;
                div >>= 1;
        }
        dclc >>= sft;
        do_div(dclc, (unsigned long) div);

        return dclc;
}
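
/*
 * Worked example for the shift trick above (illustrative, not from the
 * original source): with kt = 10 s (10,000,000,000 ns) and
 * div = 5,000,000,000, "div >> 32" is nonzero exactly once, so both div
 * and dclc are shifted right by one bit (sft = 1). do_div() then divides
 * 5,000,000,000 by the now 32-bit divisor 2,500,000,000 and yields 2.
 * Shifting drops low bits of the dividend, so the result can be slightly
 * inexact for very large divisors; callers of ktime_divns() tolerate that.
 */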
#endif /* BITS_PER_LONG < 64 */

/*
 * Add two ktime values and do a safety check for overflow:
 */
ktime_t ktime_add_safe(const ktime_t lhs, const ktime_t rhs)
{
        ktime_t res = ktime_add(lhs, rhs);

        /*
         * We use KTIME_SEC_MAX here, the maximum timeout which we can
         * return to user space in a timespec:
         */
        if (res.tv64 < 0 || res.tv64 < lhs.tv64 || res.tv64 < rhs.tv64)
                res = ktime_set(KTIME_SEC_MAX, 0);

        return res;
}

EXPORT_SYMBOL_GPL(ktime_add_safe);
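
/*
 * Usage sketch (illustrative only): adding a large user-supplied timeout
 * to the current time must not wrap into the past, which is why the
 * relative-mode path in __hrtimer_start_range_ns() below uses
 * ktime_add_safe() rather than ktime_add():
 *
 *      ktime_t now  = ktime_get();
 *      ktime_t huge = ktime_set(KTIME_SEC_MAX, 0);
 *      ktime_t t    = ktime_add_safe(now, huge); // clamped to KTIME_SEC_MAX
 */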

#ifdef CONFIG_DEBUG_OBJECTS_TIMERS

static struct debug_obj_descr hrtimer_debug_descr;

static void *hrtimer_debug_hint(void *addr)
{
        return ((struct hrtimer *) addr)->function;
}

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static int hrtimer_fixup_init(void *addr, enum debug_obj_state state)
{
        struct hrtimer *timer = addr;

        switch (state) {
        case ODEBUG_STATE_ACTIVE:
                hrtimer_cancel(timer);
                debug_object_init(timer, &hrtimer_debug_descr);
                return 1;
        default:
                return 0;
        }
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown object is activated (might be a statically initialized object)
 */
static int hrtimer_fixup_activate(void *addr, enum debug_obj_state state)
{
        switch (state) {
        case ODEBUG_STATE_NOTAVAILABLE:
                WARN_ON_ONCE(1);
                return 0;

        case ODEBUG_STATE_ACTIVE:
                WARN_ON(1);

        default:
                return 0;
        }
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static int hrtimer_fixup_free(void *addr, enum debug_obj_state state)
{
        struct hrtimer *timer = addr;

        switch (state) {
        case ODEBUG_STATE_ACTIVE:
                hrtimer_cancel(timer);
                debug_object_free(timer, &hrtimer_debug_descr);
                return 1;
        default:
                return 0;
        }
}

static struct debug_obj_descr hrtimer_debug_descr = {
        .name           = "hrtimer",
        .debug_hint     = hrtimer_debug_hint,
        .fixup_init     = hrtimer_fixup_init,
        .fixup_activate = hrtimer_fixup_activate,
        .fixup_free     = hrtimer_fixup_free,
};

static inline void debug_hrtimer_init(struct hrtimer *timer)
{
        debug_object_init(timer, &hrtimer_debug_descr);
}

static inline void debug_hrtimer_activate(struct hrtimer *timer)
{
        debug_object_activate(timer, &hrtimer_debug_descr);
}

static inline void debug_hrtimer_deactivate(struct hrtimer *timer)
{
        debug_object_deactivate(timer, &hrtimer_debug_descr);
}

static inline void debug_hrtimer_free(struct hrtimer *timer)
{
        debug_object_free(timer, &hrtimer_debug_descr);
}

static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
                           enum hrtimer_mode mode);

void hrtimer_init_on_stack(struct hrtimer *timer, clockid_t clock_id,
                           enum hrtimer_mode mode)
{
        debug_object_init_on_stack(timer, &hrtimer_debug_descr);
        __hrtimer_init(timer, clock_id, mode);
}
EXPORT_SYMBOL_GPL(hrtimer_init_on_stack);

void destroy_hrtimer_on_stack(struct hrtimer *timer)
{
        debug_object_free(timer, &hrtimer_debug_descr);
}

#else
static inline void debug_hrtimer_init(struct hrtimer *timer) { }
static inline void debug_hrtimer_activate(struct hrtimer *timer) { }
static inline void debug_hrtimer_deactivate(struct hrtimer *timer) { }
#endif

static inline void
debug_init(struct hrtimer *timer, clockid_t clockid,
           enum hrtimer_mode mode)
{
        debug_hrtimer_init(timer);
        trace_hrtimer_init(timer, clockid, mode);
}

static inline void debug_activate(struct hrtimer *timer)
{
        debug_hrtimer_activate(timer);
        trace_hrtimer_start(timer);
}

static inline void debug_deactivate(struct hrtimer *timer)
{
        debug_hrtimer_deactivate(timer);
        trace_hrtimer_cancel(timer);
}
/* High resolution timer related functions */
#ifdef CONFIG_HIGH_RES_TIMERS

/*
 * High resolution timer enabled ?
 */
static int hrtimer_hres_enabled __read_mostly = 1;

/*
 * Enable / Disable high resolution mode
 */
static int __init setup_hrtimer_hres(char *str)
{
        if (!strcmp(str, "off"))
                hrtimer_hres_enabled = 0;
        else if (!strcmp(str, "on"))
                hrtimer_hres_enabled = 1;
        else
                return 0;
        return 1;
}

__setup("highres=", setup_hrtimer_hres);

/*
 * hrtimer_is_hres_enabled - query, if the highres mode is enabled
 */
static inline int hrtimer_is_hres_enabled(void)
{
        return hrtimer_hres_enabled;
}

/*
 * Is the high resolution mode active ?
 */
static inline int hrtimer_hres_active(void)
{
        return __this_cpu_read(hrtimer_bases.hres_active);
}
/*
 * Reprogram the event source with checking both queues for the
 * next event
 * Called with interrupts disabled and base->lock held
 */
static void
hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)
{
        int i;
        struct hrtimer_clock_base *base = cpu_base->clock_base;
        ktime_t expires, expires_next;

        expires_next.tv64 = KTIME_MAX;

        for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) {
                struct hrtimer *timer;
                struct timerqueue_node *next;

                next = timerqueue_getnext(&base->active);
                if (!next)
                        continue;
                timer = container_of(next, struct hrtimer, node);

                expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
                /*
                 * clock_was_set() has changed base->offset so the
                 * result might be negative. Fix it up to prevent a
                 * false positive in clockevents_program_event()
                 */
                if (expires.tv64 < 0)
                        expires.tv64 = 0;
                if (expires.tv64 < expires_next.tv64)
                        expires_next = expires;
        }

        if (skip_equal && expires_next.tv64 == cpu_base->expires_next.tv64)
                return;

        cpu_base->expires_next.tv64 = expires_next.tv64;

        if (cpu_base->expires_next.tv64 != KTIME_MAX)
                tick_program_event(cpu_base->expires_next, 1);
}
/*
 * Shared reprogramming for clock_realtime and clock_monotonic
 *
 * When a timer is enqueued and expires earlier than the already enqueued
 * timers, we have to check whether it expires earlier than the timer for
 * which the clock event device was armed.
 *
 * Called with interrupts disabled and base->cpu_base.lock held
 */
static int hrtimer_reprogram(struct hrtimer *timer,
                             struct hrtimer_clock_base *base)
{
        struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
        ktime_t expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
        int res;

        WARN_ON_ONCE(hrtimer_get_expires_tv64(timer) < 0);

        /*
         * When the callback is running, we do not reprogram the clock event
         * device. The timer callback is either running on a different CPU or
         * the callback is executed in the hrtimer_interrupt context. The
         * reprogramming is handled either by the softirq, which called the
         * callback or at the end of the hrtimer_interrupt.
         */
        if (hrtimer_callback_running(timer))
                return 0;

        /*
         * CLOCK_REALTIME timer might be requested with an absolute
         * expiry time which is less than base->offset. Nothing wrong
         * about that, just avoid calling into the tick code, which
         * now objects to negative expiry values.
         */
        if (expires.tv64 < 0)
                return -ETIME;

        if (expires.tv64 >= cpu_base->expires_next.tv64)
                return 0;

        /*
         * If a hang was detected in the last timer interrupt then we
         * do not schedule a timer which is earlier than the expiry
         * which we enforced in the hang detection. We want the system
         * to make progress.
         */
        if (cpu_base->hang_detected)
                return 0;

        /*
         * Clockevents returns -ETIME when the event is in the past.
         */
        res = tick_program_event(expires, 0);
        if (!IS_ERR_VALUE(res))
                cpu_base->expires_next = expires;
        return res;
}
/*
 * Initialize the high resolution related parts of cpu_base
 */
static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base)
{
        base->expires_next.tv64 = KTIME_MAX;
        base->hres_active = 0;
}

/*
 * When high resolution timers are active, try to reprogram. Note that in
 * case the state has HRTIMER_STATE_CALLBACK set, no reprogramming and no
 * expiry check happens. The timer gets enqueued into the rbtree. The
 * reprogramming and expiry check is done in the hrtimer_interrupt or in
 * the softirq.
 */
static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
                                            struct hrtimer_clock_base *base,
                                            int wakeup)
{
        if (base->cpu_base->hres_active && hrtimer_reprogram(timer, base)) {
                if (wakeup) {
                        raw_spin_unlock(&base->cpu_base->lock);
                        raise_softirq_irqoff(HRTIMER_SOFTIRQ);
                        raw_spin_lock(&base->cpu_base->lock);
                } else
                        __raise_softirq_irqoff(HRTIMER_SOFTIRQ);

                return 1;
        }

        return 0;
}
static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base)
{
        ktime_t *offs_real = &base->clock_base[HRTIMER_BASE_REALTIME].offset;
        ktime_t *offs_boot = &base->clock_base[HRTIMER_BASE_BOOTTIME].offset;

        return ktime_get_update_offsets(offs_real, offs_boot);
}

/*
 * Retrigger next event is called after clock was set
 *
 * Called with interrupts disabled via on_each_cpu()
 */
static void retrigger_next_event(void *arg)
{
        struct hrtimer_cpu_base *base = &__get_cpu_var(hrtimer_bases);

        if (!hrtimer_hres_active())
                return;

        raw_spin_lock(&base->lock);
        hrtimer_update_base(base);
        hrtimer_force_reprogram(base, 0);
        raw_spin_unlock(&base->lock);
}

/*
 * Switch to high resolution mode
 */
static int hrtimer_switch_to_hres(void)
{
        int i, cpu = smp_processor_id();
        struct hrtimer_cpu_base *base = &per_cpu(hrtimer_bases, cpu);
        unsigned long flags;

        if (base->hres_active)
                return 1;

        local_irq_save(flags);

        if (tick_init_highres()) {
                local_irq_restore(flags);
                printk(KERN_WARNING "Could not switch to high resolution "
                                    "mode on CPU %d\n", cpu);
                return 0;
        }
        base->hres_active = 1;
        for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++)
                base->clock_base[i].resolution = KTIME_HIGH_RES;

        tick_setup_sched_timer();

        /* "Retrigger" the interrupt to get things going */
        retrigger_next_event(NULL);
        local_irq_restore(flags);
        return 1;
}
/*
 * Called from timekeeping code to reprogram the hrtimer interrupt
 * device. If called from the timer interrupt context we defer it to
 * softirq context.
 */
void clock_was_set_delayed(void)
{
        struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);

        cpu_base->clock_was_set = 1;
        __raise_softirq_irqoff(HRTIMER_SOFTIRQ);
}
#else

static inline int hrtimer_hres_active(void) { return 0; }
static inline int hrtimer_is_hres_enabled(void) { return 0; }
static inline int hrtimer_switch_to_hres(void) { return 0; }
static inline void
hrtimer_force_reprogram(struct hrtimer_cpu_base *base, int skip_equal) { }
static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
                                            struct hrtimer_clock_base *base,
                                            int wakeup)
{
        return 0;
}
static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base) { }
static inline void retrigger_next_event(void *arg) { }

#endif /* CONFIG_HIGH_RES_TIMERS */
/*
 * Clock realtime was set
 *
 * Change the offset of the realtime clock vs. the monotonic
 * clock.
 *
 * We might have to reprogram the high resolution timer interrupt. On
 * SMP we call the architecture specific code to retrigger _all_ high
 * resolution timer interrupts. On UP we just disable interrupts and
 * call the high resolution interrupt code.
 */
void clock_was_set(void)
{
#ifdef CONFIG_HIGH_RES_TIMERS
        /* Retrigger the CPU local events everywhere */
        on_each_cpu(retrigger_next_event, NULL, 1);
#endif
        timerfd_clock_was_set();
}

/*
 * During resume we might have to reprogram the high resolution timer
 * interrupt (on the local CPU):
 */
void hrtimers_resume(void)
{
        WARN_ONCE(!irqs_disabled(),
                  KERN_INFO "hrtimers_resume() called with IRQs enabled!");

        retrigger_next_event(NULL);
        timerfd_clock_was_set();
}
static inline void timer_stats_hrtimer_set_start_info(struct hrtimer *timer)
{
#ifdef CONFIG_TIMER_STATS
        if (timer->start_site)
                return;
        timer->start_site = __builtin_return_address(0);
        memcpy(timer->start_comm, current->comm, TASK_COMM_LEN);
        timer->start_pid = current->pid;
#endif
}

static inline void timer_stats_hrtimer_clear_start_info(struct hrtimer *timer)
{
#ifdef CONFIG_TIMER_STATS
        timer->start_site = NULL;
#endif
}

static inline void timer_stats_account_hrtimer(struct hrtimer *timer)
{
#ifdef CONFIG_TIMER_STATS
        if (likely(!timer_stats_active))
                return;
        timer_stats_update_stats(timer, timer->start_pid, timer->start_site,
                                 timer->function, timer->start_comm, 0);
#endif
}
/*
 * Counterpart to lock_hrtimer_base above:
 */
static inline
void unlock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
{
        raw_spin_unlock_irqrestore(&timer->base->cpu_base->lock, *flags);
}
/**
 * hrtimer_forward - forward the timer expiry
 * @timer:      hrtimer to forward
 * @now:        forward past this time
 * @interval:   the interval to forward
 *
 * Forward the timer expiry so it will expire in the future.
 * Returns the number of overruns.
 */
u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
{
        u64 orun = 1;
        ktime_t delta;

        delta = ktime_sub(now, hrtimer_get_expires(timer));

        if (delta.tv64 < 0)
                return 0;

        if (interval.tv64 < timer->base->resolution.tv64)
                interval.tv64 = timer->base->resolution.tv64;

        if (unlikely(delta.tv64 >= interval.tv64)) {
                s64 incr = ktime_to_ns(interval);

                orun = ktime_divns(delta, incr);
                hrtimer_add_expires_ns(timer, incr * orun);
                if (hrtimer_get_expires_tv64(timer) > now.tv64)
                        return orun;
                /*
                 * This (and the ktime_add() below) is the
                 * correction for exact:
                 */
                orun++;
        }
        hrtimer_add_expires(timer, interval);

        return orun;
}
EXPORT_SYMBOL_GPL(hrtimer_forward);
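
/*
 * Usage sketch for hrtimer_forward() (illustrative; the callback name and
 * period are made up, not taken from in-tree code): a periodic timer
 * re-arms itself from its expiry callback and uses the overrun count to
 * detect missed periods. hrtimer_forward_now() from <linux/hrtimer.h> is
 * the common wrapper that passes the current base time as @now.
 *
 *      static enum hrtimer_restart example_tick(struct hrtimer *t)
 *      {
 *              ktime_t period = ktime_set(0, 100 * NSEC_PER_MSEC);
 *              u64 overruns = hrtimer_forward_now(t, period);
 *
 *              if (overruns > 1)
 *                      pr_debug("missed %llu periods\n", overruns - 1);
 *              return HRTIMER_RESTART;
 *      }
 */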

/*
 * enqueue_hrtimer - internal function to (re)start a timer
 *
 * The timer is inserted in expiry order. Insertion into the
 * red black tree is O(log(n)). Must hold the base lock.
 *
 * Returns 1 when the new timer is the leftmost timer in the tree.
 */
static int enqueue_hrtimer(struct hrtimer *timer,
                           struct hrtimer_clock_base *base)
{
        debug_activate(timer);

        timerqueue_add(&base->active, &timer->node);
        base->cpu_base->active_bases |= 1 << base->index;

        /*
         * HRTIMER_STATE_ENQUEUED is or'ed to the current state to preserve the
         * state of a possibly running callback.
         */
        timer->state |= HRTIMER_STATE_ENQUEUED;

        return (&timer->node == base->active.next);
}
/*
 * __remove_hrtimer - internal function to remove a timer
 *
 * Caller must hold the base lock.
 *
 * High resolution timer mode reprograms the clock event device when the
 * timer is the one which expires next. The caller can disable this by setting
 * reprogram to zero. This is useful when the context does a reprogramming
 * anyway (e.g. timer interrupt)
 */
static void __remove_hrtimer(struct hrtimer *timer,
                             struct hrtimer_clock_base *base,
                             unsigned long newstate, int reprogram)
{
        struct timerqueue_node *next_timer;

        if (!(timer->state & HRTIMER_STATE_ENQUEUED))
                goto out;

        next_timer = timerqueue_getnext(&base->active);
        timerqueue_del(&base->active, &timer->node);
        if (&timer->node == next_timer) {
#ifdef CONFIG_HIGH_RES_TIMERS
                /* Reprogram the clock event device, if enabled */
                if (reprogram && hrtimer_hres_active()) {
                        ktime_t expires;

                        expires = ktime_sub(hrtimer_get_expires(timer),
                                            base->offset);
                        if (base->cpu_base->expires_next.tv64 == expires.tv64)
                                hrtimer_force_reprogram(base->cpu_base, 1);
                }
#endif
        }
        if (!timerqueue_getnext(&base->active))
                base->cpu_base->active_bases &= ~(1 << base->index);
out:
        timer->state = newstate;
}
/*
 * remove hrtimer, called with base lock held
 */
static inline int
remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base)
{
        if (hrtimer_is_queued(timer)) {
                unsigned long state;
                int reprogram;

                /*
                 * Remove the timer and force reprogramming when high
                 * resolution mode is active and the timer is on the current
                 * CPU. If we remove a timer on another CPU, reprogramming is
                 * skipped. The interrupt event on this CPU is fired and
                 * reprogramming happens in the interrupt handler. This is a
                 * rare case and less expensive than a smp call.
                 */
                debug_deactivate(timer);
                timer_stats_hrtimer_clear_start_info(timer);
                reprogram = base->cpu_base == &__get_cpu_var(hrtimer_bases);
                /*
                 * We must preserve the CALLBACK state flag here,
                 * otherwise we could move the timer base in
                 * switch_hrtimer_base.
                 */
                state = timer->state & HRTIMER_STATE_CALLBACK;
                __remove_hrtimer(timer, base, state, reprogram);
                return 1;
        }
        return 0;
}
int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
                unsigned long delta_ns, const enum hrtimer_mode mode,
                int wakeup)
{
        struct hrtimer_clock_base *base, *new_base;
        unsigned long flags;
        int ret, leftmost;

        base = lock_hrtimer_base(timer, &flags);

        /* Remove an active timer from the queue: */
        ret = remove_hrtimer(timer, base);

        /* Switch the timer base, if necessary: */
        new_base = switch_hrtimer_base(timer, base, mode & HRTIMER_MODE_PINNED);

        if (mode & HRTIMER_MODE_REL) {
                tim = ktime_add_safe(tim, new_base->get_time());
                /*
                 * CONFIG_TIME_LOW_RES is a temporary way for architectures
                 * to signal that they simply return xtime in
                 * do_gettimeoffset(). In this case we want to round up by
                 * resolution when starting a relative timer, to avoid short
                 * timeouts. This will go away with the GTOD framework.
                 */
#ifdef CONFIG_TIME_LOW_RES
                tim = ktime_add_safe(tim, base->resolution);
#endif
        }

        hrtimer_set_expires_range_ns(timer, tim, delta_ns);

        timer_stats_hrtimer_set_start_info(timer);

        leftmost = enqueue_hrtimer(timer, new_base);

        /*
         * Only allow reprogramming if the new base is on this CPU.
         * (it might still be on another CPU if the timer was pending)
         *
         * XXX send_remote_softirq() ?
         */
        if (leftmost && new_base->cpu_base == &__get_cpu_var(hrtimer_bases))
                hrtimer_enqueue_reprogram(timer, new_base, wakeup);

        unlock_hrtimer_base(timer, &flags);

        return ret;
}
/**
 * hrtimer_start_range_ns - (re)start an hrtimer on the current CPU
 * @timer:      the timer to be added
 * @tim:        expiry time
 * @delta_ns:   "slack" range for the timer
 * @mode:       expiry mode: absolute (HRTIMER_MODE_ABS) or
 *              relative (HRTIMER_MODE_REL)
 *
 * Returns:
 *  0 on success
 *  1 when the timer was active
 */
int hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
                unsigned long delta_ns, const enum hrtimer_mode mode)
{
        return __hrtimer_start_range_ns(timer, tim, delta_ns, mode, 1);
}
EXPORT_SYMBOL_GPL(hrtimer_start_range_ns);
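
/*
 * Usage sketch (illustrative; "demo_timer" is a made-up caller-owned
 * timer): @delta_ns gives the core a window in which the expiry may be
 * coalesced with other events. Here the timer may fire anywhere between
 * 10 ms and 10 ms + 500 us after now:
 *
 *      hrtimer_start_range_ns(&demo_timer,
 *                             ktime_set(0, 10 * NSEC_PER_MSEC),
 *                             500 * NSEC_PER_USEC, HRTIMER_MODE_REL);
 */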

/**
 * hrtimer_start - (re)start an hrtimer on the current CPU
 * @timer:      the timer to be added
 * @tim:        expiry time
 * @mode:       expiry mode: absolute (HRTIMER_MODE_ABS) or
 *              relative (HRTIMER_MODE_REL)
 *
 * Returns:
 *  0 on success
 *  1 when the timer was active
 */
int
hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode)
{
        return __hrtimer_start_range_ns(timer, tim, 0, mode, 1);
}
EXPORT_SYMBOL_GPL(hrtimer_start);

/**
 * hrtimer_try_to_cancel - try to deactivate a timer
 * @timer:      hrtimer to stop
 *
 * Returns:
 *  0 when the timer was not active
 *  1 when the timer was active
 * -1 when the timer is currently executing the callback function and
 *    cannot be stopped
 */
int hrtimer_try_to_cancel(struct hrtimer *timer)
{
        struct hrtimer_clock_base *base;
        unsigned long flags;
        int ret = -1;

        base = lock_hrtimer_base(timer, &flags);

        if (!hrtimer_callback_running(timer))
                ret = remove_hrtimer(timer, base);

        unlock_hrtimer_base(timer, &flags);

        return ret;
}
EXPORT_SYMBOL_GPL(hrtimer_try_to_cancel);

/**
 * hrtimer_cancel - cancel a timer and wait for the handler to finish.
 * @timer:      the timer to be cancelled
 *
 * Returns:
 *  0 when the timer was not active
 *  1 when the timer was active
 */
int hrtimer_cancel(struct hrtimer *timer)
{
        for (;;) {
                int ret = hrtimer_try_to_cancel(timer);

                if (ret >= 0)
                        return ret;
                cpu_relax();
        }
}
EXPORT_SYMBOL_GPL(hrtimer_cancel);
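
/*
 * Note on choosing between the two cancel variants (a summary of the
 * semantics above, not an additional API): hrtimer_cancel() busy-waits
 * via hrtimer_try_to_cancel()/cpu_relax() until the callback has
 * finished, so it must not be called from the timer's own callback or
 * while holding a lock that the callback takes. In such contexts use
 * hrtimer_try_to_cancel() and handle the -1 "callback running" case.
 */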

/**
 * hrtimer_get_remaining - get remaining time for the timer
 * @timer:      the timer to read
 */
ktime_t hrtimer_get_remaining(const struct hrtimer *timer)
{
        unsigned long flags;
        ktime_t rem;

        lock_hrtimer_base(timer, &flags);
        rem = hrtimer_expires_remaining(timer);
        unlock_hrtimer_base(timer, &flags);

        return rem;
}
EXPORT_SYMBOL_GPL(hrtimer_get_remaining);

#ifdef CONFIG_NO_HZ
/**
 * hrtimer_get_next_event - get the time until next expiry event
 *
 * Returns the delta to the next expiry event or KTIME_MAX if no timer
 * is pending.
 */
ktime_t hrtimer_get_next_event(void)
{
        struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
        struct hrtimer_clock_base *base = cpu_base->clock_base;
        ktime_t delta, mindelta = { .tv64 = KTIME_MAX };
        unsigned long flags;
        int i;

        raw_spin_lock_irqsave(&cpu_base->lock, flags);

        if (!hrtimer_hres_active()) {
                for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) {
                        struct hrtimer *timer;
                        struct timerqueue_node *next;

                        next = timerqueue_getnext(&base->active);
                        if (!next)
                                continue;

                        timer = container_of(next, struct hrtimer, node);
                        delta.tv64 = hrtimer_get_expires_tv64(timer);
                        delta = ktime_sub(delta, base->get_time());
                        if (delta.tv64 < mindelta.tv64)
                                mindelta.tv64 = delta.tv64;
                }
        }

        raw_spin_unlock_irqrestore(&cpu_base->lock, flags);

        if (mindelta.tv64 < 0)
                mindelta.tv64 = 0;
        return mindelta;
}
#endif

static void __hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
                           enum hrtimer_mode mode)
{
        struct hrtimer_cpu_base *cpu_base;
        int base;

        memset(timer, 0, sizeof(struct hrtimer));

        cpu_base = &__raw_get_cpu_var(hrtimer_bases);

        if (clock_id == CLOCK_REALTIME && mode != HRTIMER_MODE_ABS)
                clock_id = CLOCK_MONOTONIC;

        base = hrtimer_clockid_to_base(clock_id);
        timer->base = &cpu_base->clock_base[base];
        timerqueue_init(&timer->node);

#ifdef CONFIG_TIMER_STATS
        timer->start_site = NULL;
        timer->start_pid = -1;
        memset(timer->start_comm, 0, TASK_COMM_LEN);
#endif
}

/**
 * hrtimer_init - initialize a timer to the given clock
 * @timer:      the timer to be initialized
 * @clock_id:   the clock to be used
 * @mode:       timer mode abs/rel
 */
void hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
                  enum hrtimer_mode mode)
{
        debug_init(timer, clock_id, mode);
        __hrtimer_init(timer, clock_id, mode);
}
EXPORT_SYMBOL_GPL(hrtimer_init);
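
/*
 * Usage sketch (illustrative; "demo_timer" and "example_tick" are made-up
 * names, see the hrtimer_forward() sketch above): initialize the timer,
 * set its callback, then arm it. The callback runs with interrupts
 * disabled, so it must not sleep.
 *
 *      static struct hrtimer demo_timer;
 *
 *      hrtimer_init(&demo_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 *      demo_timer.function = example_tick;
 *      hrtimer_start(&demo_timer, ktime_set(0, 100 * NSEC_PER_MSEC),
 *                    HRTIMER_MODE_REL);
 */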

/**
 * hrtimer_get_res - get the timer resolution for a clock
 * @which_clock: which clock to query
 * @tp:          pointer to timespec variable to store the resolution
 *
 * Store the resolution of the clock selected by @which_clock in the
 * variable pointed to by @tp.
 */
int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp)
{
        struct hrtimer_cpu_base *cpu_base;
        int base = hrtimer_clockid_to_base(which_clock);

        cpu_base = &__raw_get_cpu_var(hrtimer_bases);
        *tp = ktime_to_timespec(cpu_base->clock_base[base].resolution);

        return 0;
}
EXPORT_SYMBOL_GPL(hrtimer_get_res);

static void __run_hrtimer(struct hrtimer *timer, ktime_t *now)
{
        struct hrtimer_clock_base *base = timer->base;
        struct hrtimer_cpu_base *cpu_base = base->cpu_base;
        enum hrtimer_restart (*fn)(struct hrtimer *);
        int restart;

        WARN_ON(!irqs_disabled());

        debug_deactivate(timer);
        __remove_hrtimer(timer, base, HRTIMER_STATE_CALLBACK, 0);
        timer_stats_account_hrtimer(timer);
        fn = timer->function;

        /*
         * Because we run timers from hardirq context, there is no chance
         * they get migrated to another cpu, therefore it's safe to unlock
         * the timer base.
         */
        raw_spin_unlock(&cpu_base->lock);
        trace_hrtimer_expire_entry(timer, now);
        restart = fn(timer);
        trace_hrtimer_expire_exit(timer);
        raw_spin_lock(&cpu_base->lock);

        /*
         * Note: We clear the CALLBACK bit after enqueue_hrtimer and
         * we do not reprogram the event hardware. Happens either in
         * hrtimer_start_range_ns() or in hrtimer_interrupt()
         */
        if (restart != HRTIMER_NORESTART) {
                BUG_ON(timer->state != HRTIMER_STATE_CALLBACK);
                enqueue_hrtimer(timer, base);
        }

        WARN_ON_ONCE(!(timer->state & HRTIMER_STATE_CALLBACK));

        timer->state &= ~HRTIMER_STATE_CALLBACK;
}

#ifdef CONFIG_HIGH_RES_TIMERS

/*
 * High resolution timer interrupt
 * Called with interrupts disabled
 */
void hrtimer_interrupt(struct clock_event_device *dev)
{
        struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
        ktime_t expires_next, now, entry_time, delta;
        int i, retries = 0;

        BUG_ON(!cpu_base->hres_active);
        cpu_base->nr_events++;
        dev->next_event.tv64 = KTIME_MAX;

        raw_spin_lock(&cpu_base->lock);
        entry_time = now = hrtimer_update_base(cpu_base);
retry:
        expires_next.tv64 = KTIME_MAX;
        /*
         * We set expires_next to KTIME_MAX here with cpu_base->lock
         * held to prevent that a timer is enqueued in our queue via
         * the migration code. This does not affect enqueueing of
         * timers which run their callback and need to be requeued on
         * this CPU.
         */
        cpu_base->expires_next.tv64 = KTIME_MAX;

        for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
                struct hrtimer_clock_base *base;
                struct timerqueue_node *node;
                ktime_t basenow;

                if (!(cpu_base->active_bases & (1 << i)))
                        continue;

                base = cpu_base->clock_base + i;
                basenow = ktime_add(now, base->offset);

                while ((node = timerqueue_getnext(&base->active))) {
                        struct hrtimer *timer;

                        timer = container_of(node, struct hrtimer, node);

                        /*
                         * The immediate goal for using the softexpires is
                         * minimizing wakeups, not running timers at the
                         * earliest interrupt after their soft expiration.
                         * This allows us to avoid using a Priority Search
                         * Tree, which can answer a stabbing query for
                         * overlapping intervals and instead use the simple
                         * BST we already have.
                         * We don't add extra wakeups by delaying timers that
                         * are right-of a not yet expired timer, because that
                         * timer will have to trigger a wakeup anyway.
                         */
                        if (basenow.tv64 < hrtimer_get_softexpires_tv64(timer)) {
                                ktime_t expires;

                                expires = ktime_sub(hrtimer_get_expires(timer),
                                                    base->offset);
                                if (expires.tv64 < expires_next.tv64)
                                        expires_next = expires;
                                break;
                        }

                        __run_hrtimer(timer, &basenow);
                }
        }

        /*
         * Store the new expiry value so the migration code can verify
         * against it.
         */
        cpu_base->expires_next = expires_next;
        raw_spin_unlock(&cpu_base->lock);

        /* Reprogramming necessary ? */
        if (expires_next.tv64 == KTIME_MAX ||
            !tick_program_event(expires_next, 0)) {
                cpu_base->hang_detected = 0;
                return;
        }

        /*
         * The next timer was already expired due to:
         * - tracing
         * - long lasting callbacks
         * - being scheduled away when running in a VM
         *
         * We need to prevent that we loop forever in the hrtimer
         * interrupt routine. We give it 3 attempts to avoid
         * overreacting on some spurious event.
         *
         * Acquire base lock for updating the offsets and retrieving
         * the current time.
         */
        raw_spin_lock(&cpu_base->lock);
        now = hrtimer_update_base(cpu_base);
        cpu_base->nr_retries++;
        if (++retries < 3)
                goto retry;
        /*
         * Give the system a chance to do something else than looping
         * here. We stored the entry time, so we know exactly how long
         * we spent here. We schedule the next event this amount of
         * time away.
         */
        cpu_base->nr_hangs++;
        cpu_base->hang_detected = 1;
        raw_spin_unlock(&cpu_base->lock);
        delta = ktime_sub(now, entry_time);
        if (delta.tv64 > cpu_base->max_hang_time.tv64)
                cpu_base->max_hang_time = delta;
        /*
         * Limit it to a sensible value as we enforce a longer
         * delay. Give the CPU at least 100ms to catch up.
         */
        if (delta.tv64 > 100 * NSEC_PER_MSEC)
                expires_next = ktime_add_ns(now, 100 * NSEC_PER_MSEC);
        else
                expires_next = ktime_add(now, delta);
        tick_program_event(expires_next, 1);
        printk_once(KERN_WARNING "hrtimer: interrupt took %llu ns\n",
                    ktime_to_ns(delta));
}

/*
 * local version of hrtimer_peek_ahead_timers() called with interrupts
 * disabled.
 */
static void __hrtimer_peek_ahead_timers(void)
{
        struct tick_device *td;

        if (!hrtimer_hres_active())
                return;

        td = &__get_cpu_var(tick_cpu_device);
        if (td && td->evtdev)
                hrtimer_interrupt(td->evtdev);
}

/**
 * hrtimer_peek_ahead_timers - run soft-expired timers now
 *
 * hrtimer_peek_ahead_timers will peek at the timer queue of
 * the current cpu and check if there are any timers for which
 * the soft expires time has passed. If any such timers exist,
 * they are run immediately and then removed from the timer queue.
 */
void hrtimer_peek_ahead_timers(void)
{
        unsigned long flags;

        local_irq_save(flags);
        __hrtimer_peek_ahead_timers();
        local_irq_restore(flags);
}

static void run_hrtimer_softirq(struct softirq_action *h)
{
        struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);

        if (cpu_base->clock_was_set) {
                cpu_base->clock_was_set = 0;
                clock_was_set();
        }

        hrtimer_peek_ahead_timers();
}

#else /* CONFIG_HIGH_RES_TIMERS */

static inline void __hrtimer_peek_ahead_timers(void) { }

#endif  /* !CONFIG_HIGH_RES_TIMERS */

/*
 * Called from timer softirq every jiffy, expire hrtimers:
 *
 * For HRT this is the fallback code to run the softirq in the timer
 * softirq context in case the hrtimer initialization failed or has
 * not been done yet.
 */
void hrtimer_run_pending(void)
{
        if (hrtimer_hres_active())
                return;

        /*
         * This _is_ ugly: We have to check in the softirq context,
         * whether we can switch to highres and / or nohz mode. The
         * clocksource switch happens in the timer interrupt with
         * xtime_lock held. Notification from there only sets the
         * check bit in the tick_oneshot code, otherwise we might
         * deadlock vs. xtime_lock.
         */
        if (tick_check_oneshot_change(!hrtimer_is_hres_enabled()))
                hrtimer_switch_to_hres();
}

/*
 * Called from hardirq context every jiffy
 */
void hrtimer_run_queues(void)
{
        struct timerqueue_node *node;
        struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
        struct hrtimer_clock_base *base;
        int index, gettime = 1;

        if (hrtimer_hres_active())
                return;

        for (index = 0; index < HRTIMER_MAX_CLOCK_BASES; index++) {
                base = &cpu_base->clock_base[index];
                if (!timerqueue_getnext(&base->active))
                        continue;

                if (gettime) {
                        hrtimer_get_softirq_time(cpu_base);
                        gettime = 0;
                }

                raw_spin_lock(&cpu_base->lock);

                while ((node = timerqueue_getnext(&base->active))) {
                        struct hrtimer *timer;

                        timer = container_of(node, struct hrtimer, node);
                        if (base->softirq_time.tv64 <=
                                        hrtimer_get_expires_tv64(timer))
                                break;

                        __run_hrtimer(timer, &base->softirq_time);
                }
                raw_spin_unlock(&cpu_base->lock);
        }
}

/*
 * Sleep related functions:
 */
static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer)
{
        struct hrtimer_sleeper *t =
                container_of(timer, struct hrtimer_sleeper, timer);
        struct task_struct *task = t->task;

        t->task = NULL;
        if (task)
                wake_up_process(task);

        return HRTIMER_NORESTART;
}

void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *task)
{
        sl->timer.function = hrtimer_wakeup;
        sl->task = task;
}
EXPORT_SYMBOL_GPL(hrtimer_init_sleeper);

static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode)
{
        hrtimer_init_sleeper(t, current);

        do {
                set_current_state(TASK_INTERRUPTIBLE);
                hrtimer_start_expires(&t->timer, mode);
                if (!hrtimer_active(&t->timer))
                        t->task = NULL;

                if (likely(t->task))
                        schedule();

                hrtimer_cancel(&t->timer);
                mode = HRTIMER_MODE_ABS;

        } while (t->task && !signal_pending(current));

        __set_current_state(TASK_RUNNING);

        return t->task == NULL;
}

static int update_rmtp(struct hrtimer *timer, struct timespec __user *rmtp)
{
        struct timespec rmt;
        ktime_t rem;

        rem = hrtimer_expires_remaining(timer);
        if (rem.tv64 <= 0)
                return 0;
        rmt = ktime_to_timespec(rem);

        if (copy_to_user(rmtp, &rmt, sizeof(*rmtp)))
                return -EFAULT;

        return 1;
}

long __sched hrtimer_nanosleep_restart(struct restart_block *restart)
{
        struct hrtimer_sleeper t;
        struct timespec __user *rmtp;
        int ret = 0;

        hrtimer_init_on_stack(&t.timer, restart->nanosleep.clockid,
                              HRTIMER_MODE_ABS);
        hrtimer_set_expires_tv64(&t.timer, restart->nanosleep.expires);

        if (do_nanosleep(&t, HRTIMER_MODE_ABS))
                goto out;

        rmtp = restart->nanosleep.rmtp;
        if (rmtp) {
                ret = update_rmtp(&t.timer, rmtp);
                if (ret <= 0)
                        goto out;
        }

        /* The other values in restart are already filled in */
        ret = -ERESTART_RESTARTBLOCK;
out:
        destroy_hrtimer_on_stack(&t.timer);
        return ret;
}

long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
                       const enum hrtimer_mode mode, const clockid_t clockid)
{
        struct restart_block *restart;
        struct hrtimer_sleeper t;
        int ret = 0;
        unsigned long slack;

        slack = current->timer_slack_ns;
        if (rt_task(current))
                slack = 0;

        hrtimer_init_on_stack(&t.timer, clockid, mode);
        hrtimer_set_expires_range_ns(&t.timer, timespec_to_ktime(*rqtp), slack);
        if (do_nanosleep(&t, mode))
                goto out;

        /* Absolute timers do not update the rmtp value and restart: */
        if (mode == HRTIMER_MODE_ABS) {
                ret = -ERESTARTNOHAND;
                goto out;
        }

        if (rmtp) {
                ret = update_rmtp(&t.timer, rmtp);
                if (ret <= 0)
                        goto out;
        }

        restart = &current_thread_info()->restart_block;
        restart->fn = hrtimer_nanosleep_restart;
        restart->nanosleep.clockid = t.timer.base->clockid;
        restart->nanosleep.rmtp = rmtp;
        restart->nanosleep.expires = hrtimer_get_expires_tv64(&t.timer);

        ret = -ERESTART_RESTARTBLOCK;
out:
        destroy_hrtimer_on_stack(&t.timer);
        return ret;
}

SYSCALL_DEFINE2(nanosleep, struct timespec __user *, rqtp,
                struct timespec __user *, rmtp)
{
        struct timespec tu;

        if (copy_from_user(&tu, rqtp, sizeof(tu)))
                return -EFAULT;

        if (!timespec_valid(&tu))
                return -EINVAL;

        return hrtimer_nanosleep(&tu, rmtp, HRTIMER_MODE_REL, CLOCK_MONOTONIC);
}

/*
 * Functions related to boot-time initialization:
 */
static void __cpuinit init_hrtimers_cpu(int cpu)
{
        struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu);
        int i;

        raw_spin_lock_init(&cpu_base->lock);

        for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
                cpu_base->clock_base[i].cpu_base = cpu_base;
                timerqueue_init_head(&cpu_base->clock_base[i].active);
        }

        hrtimer_init_hres(cpu_base);
}

#ifdef CONFIG_HOTPLUG_CPU

static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
                                 struct hrtimer_clock_base *new_base)
{
        struct hrtimer *timer;
        struct timerqueue_node *node;

        while ((node = timerqueue_getnext(&old_base->active))) {
                timer = container_of(node, struct hrtimer, node);
                BUG_ON(hrtimer_callback_running(timer));
                debug_deactivate(timer);

                /*
                 * Mark it as STATE_MIGRATE not INACTIVE otherwise the
                 * timer could be seen as !active and just vanish away
                 * under us on another CPU
                 */
                __remove_hrtimer(timer, old_base, HRTIMER_STATE_MIGRATE, 0);
                timer->base = new_base;
                /*
                 * Enqueue the timers on the new cpu. This does not
                 * reprogram the event device in case the timer
                 * expires before the earliest on this CPU, but we run
                 * hrtimer_interrupt after we migrated everything to
                 * sort out already expired timers and reprogram the
                 * event device.
                 */
                enqueue_hrtimer(timer, new_base);

                /* Clear the migration state bit */
                timer->state &= ~HRTIMER_STATE_MIGRATE;
        }
}

static void migrate_hrtimers(int scpu)
{
        struct hrtimer_cpu_base *old_base, *new_base;
        int i;

        BUG_ON(cpu_online(scpu));
        tick_cancel_sched_timer(scpu);

        local_irq_disable();
        old_base = &per_cpu(hrtimer_bases, scpu);
        new_base = &__get_cpu_var(hrtimer_bases);
        /*
         * The caller is globally serialized and nobody else
         * takes two locks at once, deadlock is not possible.
         */
        raw_spin_lock(&new_base->lock);
        raw_spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);

        for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
                migrate_hrtimer_list(&old_base->clock_base[i],
                                     &new_base->clock_base[i]);
        }

        raw_spin_unlock(&old_base->lock);
        raw_spin_unlock(&new_base->lock);

        /* Check, if we got expired work to do */
        __hrtimer_peek_ahead_timers();
        local_irq_enable();
}

#endif /* CONFIG_HOTPLUG_CPU */
static int __cpuinit hrtimer_cpu_notify(struct notifier_block *self,
					unsigned long action, void *hcpu)
{
	int scpu = (long)hcpu;

	switch (action) {

	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		init_hrtimers_cpu(scpu);
		break;

#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DYING:
	case CPU_DYING_FROZEN:
		clockevents_notify(CLOCK_EVT_NOTIFY_CPU_DYING, &scpu);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
	{
		clockevents_notify(CLOCK_EVT_NOTIFY_CPU_DEAD, &scpu);
		migrate_hrtimers(scpu);
		break;
	}
#endif

	default:
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata hrtimers_nb = {
	.notifier_call = hrtimer_cpu_notify,
};

void __init hrtimers_init(void)
{
	hrtimer_cpu_notify(&hrtimers_nb, (unsigned long)CPU_UP_PREPARE,
			   (void *)(long)smp_processor_id());
	register_cpu_notifier(&hrtimers_nb);
#ifdef CONFIG_HIGH_RES_TIMERS
	open_softirq(HRTIMER_SOFTIRQ, run_hrtimer_softirq);
#endif
}
/**
 * schedule_hrtimeout_range_clock - sleep until timeout
 * @expires: timeout value (ktime_t)
 * @delta: slack in expires timeout (ktime_t)
 * @mode: timer mode, HRTIMER_MODE_ABS or HRTIMER_MODE_REL
 * @clock: timer clock, CLOCK_MONOTONIC or CLOCK_REALTIME
 */
int __sched
schedule_hrtimeout_range_clock(ktime_t *expires, unsigned long delta,
			       const enum hrtimer_mode mode, int clock)
{
	struct hrtimer_sleeper t;

	/*
	 * Optimize when a zero timeout value is given. It does not
	 * matter whether this is an absolute or a relative time.
	 */
	if (expires && !expires->tv64) {
		__set_current_state(TASK_RUNNING);
		return 0;
	}

	/*
	 * A NULL parameter means "infinite"
	 */
	if (!expires) {
		schedule();
		__set_current_state(TASK_RUNNING);
		return -EINTR;
	}

	hrtimer_init_on_stack(&t.timer, clock, mode);
	hrtimer_set_expires_range_ns(&t.timer, *expires, delta);

	hrtimer_init_sleeper(&t, current);

	hrtimer_start_expires(&t.timer, mode);
	if (!hrtimer_active(&t.timer))
		t.task = NULL;

	if (likely(t.task))
		schedule();

	hrtimer_cancel(&t.timer);
	destroy_hrtimer_on_stack(&t.timer);

	__set_current_state(TASK_RUNNING);

	return !t.task ? 0 : -EINTR;
}
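/*
 * Illustrative sketch, not part of this file: waiting until an absolute
 * CLOCK_REALTIME instant with no slack. The helper name and "deadline"
 * parameter are hypothetical; the task state must be set before calling,
 * as documented above.
 */
#if 0
static int example_wait_until(ktime_t deadline)
{
	set_current_state(TASK_INTERRUPTIBLE);
	return schedule_hrtimeout_range_clock(&deadline, 0,
					      HRTIMER_MODE_ABS,
					      CLOCK_REALTIME);
}
#endif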
/**
 * schedule_hrtimeout_range - sleep until timeout
 * @expires: timeout value (ktime_t)
 * @delta: slack in expires timeout (ktime_t)
 * @mode: timer mode, HRTIMER_MODE_ABS or HRTIMER_MODE_REL
 *
 * Make the current task sleep until the given expiry time has
 * elapsed. The routine will return immediately unless
 * the current task state has been set (see set_current_state()).
 *
 * The @delta argument gives the kernel the freedom to schedule the
 * actual wakeup to a time that is both power and performance friendly.
 * The kernel gives the normal best effort behavior for "@expires+@delta",
 * but may decide to fire the timer earlier, though no earlier than @expires.
 *
 * You can set the task state as follows -
 *
 * %TASK_UNINTERRUPTIBLE - at least @expires time is guaranteed to
 * pass before the routine returns.
 *
 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
 * delivered to the current task.
 *
 * The current task state is guaranteed to be TASK_RUNNING when this
 * routine returns.
 *
 * Returns 0 when the timer has expired, otherwise -EINTR.
 */
int __sched schedule_hrtimeout_range(ktime_t *expires, unsigned long delta,
				     const enum hrtimer_mode mode)
{
	return schedule_hrtimeout_range_clock(expires, delta, mode,
					      CLOCK_MONOTONIC);
}
EXPORT_SYMBOL_GPL(schedule_hrtimeout_range);
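/*
 * Illustrative sketch, not part of this file: a coalescing-friendly
 * relative sleep that allows the wakeup to slip by up to one
 * millisecond past the requested time. The helper name is hypothetical.
 */
#if 0
static int example_sleep_with_slack(u64 ns)
{
	ktime_t timeout = ns_to_ktime(ns);

	set_current_state(TASK_INTERRUPTIBLE);
	/* Returns 0 on expiry, -EINTR if woken by a signal */
	return schedule_hrtimeout_range(&timeout, NSEC_PER_MSEC,
					HRTIMER_MODE_REL);
}
#endif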
/**
 * schedule_hrtimeout - sleep until timeout
 * @expires: timeout value (ktime_t)
 * @mode: timer mode, HRTIMER_MODE_ABS or HRTIMER_MODE_REL
 *
 * Make the current task sleep until the given expiry time has
 * elapsed. The routine will return immediately unless
 * the current task state has been set (see set_current_state()).
 *
 * You can set the task state as follows -
 *
 * %TASK_UNINTERRUPTIBLE - at least @expires time is guaranteed to
 * pass before the routine returns.
 *
 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
 * delivered to the current task.
 *
 * The current task state is guaranteed to be TASK_RUNNING when this
 * routine returns.
 *
 * Returns 0 when the timer has expired, otherwise -EINTR.
 */
int __sched schedule_hrtimeout(ktime_t *expires,
			       const enum hrtimer_mode mode)
{
	return schedule_hrtimeout_range(expires, 0, mode);
}
EXPORT_SYMBOL_GPL(schedule_hrtimeout);
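/*
 * Illustrative sketch, not part of this file: sleeping until an
 * absolute deadline on CLOCK_MONOTONIC, here 100ms from now. The
 * function name is hypothetical.
 */
#if 0
static int example_wait_deadline(void)
{
	ktime_t deadline = ktime_add_ns(ktime_get(), 100 * NSEC_PER_MSEC);

	set_current_state(TASK_UNINTERRUPTIBLE);
	return schedule_hrtimeout(&deadline, HRTIMER_MODE_ABS);
}
#endif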