/*
 *  linux/kernel/timer.c
 *
 *  Kernel internal timers, basic process system calls
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-01-28  Modified by Finn Arne Gangstad to make timers scale better.
 *
 *  1997-09-10  Updated NTP code according to technical memorandum Jan '96
 *              "A Kernel Model for Precision Timekeeping" by Dave Mills
 *  1998-12-24  Fixed a xtime SMP race (we need the xtime_lock rw spinlock to
 *              serialize accesses to xtime/lost_ticks).
 *                              Copyright (C) 1998  Andrea Arcangeli
 *  1999-03-10  Improved NTP compatibility by Ulrich Windl
 *  2002-05-31  Move sys_sysinfo here and make its locking sane, Robert Love
 *  2000-10-05  Implemented scalable SMP per-CPU timer handling.
 *                              Copyright (C) 2000, 2001, 2002  Ingo Molnar
 *              Designed by David S. Miller, Alexey Kuznetsov and Ingo Molnar
 */

#include <linux/kernel_stat.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/pid_namespace.h>
#include <linux/notifier.h>
#include <linux/thread_info.h>
#include <linux/time.h>
#include <linux/jiffies.h>
#include <linux/posix-timers.h>
#include <linux/cpu.h>
#include <linux/syscalls.h>
#include <linux/delay.h>
#include <linux/tick.h>
#include <linux/kallsyms.h>
#include <linux/irq_work.h>
#include <linux/sched.h>
#include <linux/sched/sysctl.h>
#include <linux/slab.h>

#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/div64.h>
#include <asm/timex.h>
#include <asm/io.h>

#define CREATE_TRACE_POINTS
#include <trace/events/timer.h>

u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES;

EXPORT_SYMBOL(jiffies_64);

/*
 * per-CPU timer vector definitions:
 */
#define TVN_BITS (CONFIG_BASE_SMALL ? 4 : 6)
#define TVR_BITS (CONFIG_BASE_SMALL ? 6 : 8)
#define TVN_SIZE (1 << TVN_BITS)
#define TVR_SIZE (1 << TVR_BITS)
#define TVN_MASK (TVN_SIZE - 1)
#define TVR_MASK (TVR_SIZE - 1)
#define MAX_TVAL ((unsigned long)((1ULL << (TVR_BITS + 4*TVN_BITS)) - 1))
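
/*
 * For illustration, with the default (!CONFIG_BASE_SMALL) values above:
 * tv1 spans TVR_SIZE = 2^8 = 256 jiffies, and each of tv2..tv5 multiplies
 * the reach by TVN_SIZE = 2^6 = 64, so the wheel covers
 * TVR_BITS + 4 * TVN_BITS = 8 + 24 = 32 bits worth of jiffies and
 * MAX_TVAL = 2^32 - 1.
 */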
struct tvec {
	struct list_head vec[TVN_SIZE];
};

struct tvec_root {
	struct list_head vec[TVR_SIZE];
};

struct tvec_base {
	spinlock_t lock;
	struct timer_list *running_timer;
	unsigned long timer_jiffies;
	unsigned long next_timer;
	unsigned long active_timers;
	struct tvec_root tv1;
	struct tvec tv2;
	struct tvec tv3;
	struct tvec tv4;
	struct tvec tv5;
} ____cacheline_aligned;

struct tvec_base boot_tvec_bases;
EXPORT_SYMBOL(boot_tvec_bases);
static DEFINE_PER_CPU(struct tvec_base *, tvec_bases) = &boot_tvec_bases;

/* Functions below help us manage 'deferrable' flag */
static inline unsigned int tbase_get_deferrable(struct tvec_base *base)
{
	return ((unsigned int)(unsigned long)base & TIMER_DEFERRABLE);
}

static inline unsigned int tbase_get_irqsafe(struct tvec_base *base)
{
	return ((unsigned int)(unsigned long)base & TIMER_IRQSAFE);
}

static inline struct tvec_base *tbase_get_base(struct tvec_base *base)
{
	return ((struct tvec_base *)((unsigned long)base & ~TIMER_FLAG_MASK));
}

static inline void
timer_set_base(struct timer_list *timer, struct tvec_base *new_base)
{
	unsigned long flags = (unsigned long)timer->base & TIMER_FLAG_MASK;

	timer->base = (struct tvec_base *)((unsigned long)(new_base) | flags);
}
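
/*
 * Note on the encoding used by the helpers above: struct tvec_base is
 * ____cacheline_aligned, so the low bits of any base pointer are zero
 * and can carry the timer flags inside timer->base. As a sketch
 * (assuming TIMER_DEFERRABLE and TIMER_IRQSAFE occupy those low bits,
 * covered by TIMER_FLAG_MASK):
 *
 *	timer->base = (struct tvec_base *)
 *			((unsigned long)real_base | TIMER_DEFERRABLE);
 *
 * and tbase_get_base() masks the flags off again via ~TIMER_FLAG_MASK.
 */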
static unsigned long round_jiffies_common(unsigned long j, int cpu,
		bool force_up)
{
	int rem;
	unsigned long original = j;

	/*
	 * We don't want all cpus firing their timers at once hitting the
	 * same lock or cachelines, so we skew each extra cpu with an extra
	 * 3 jiffies. This 3 jiffies came originally from the mm/ code which
	 * already did this.
	 * The skew is done by adding 3*cpunr, then rounding, then subtracting
	 * this extra offset again.
	 */
	j += cpu * 3;

	rem = j % HZ;

	/*
	 * If the target jiffy is just after a whole second (which can happen
	 * due to delays of the timer irq, long irq off times etc.) then
	 * we should round down to the whole second, not up. Use 1/4th second
	 * as cutoff for this rounding as an extreme upper bound for this.
	 * But never round down if @force_up is set.
	 */
	if (rem < HZ/4 && !force_up) /* round down */
		j = j - rem;
	else /* round up */
		j = j - rem + HZ;

	/* now that we have rounded, subtract the extra skew again */
	j -= cpu * 3;

	if (j <= jiffies) /* rounding ate our timeout entirely; */
		return original;
	return j;
}
/**
 * __round_jiffies - function to round jiffies to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * __round_jiffies() rounds an absolute time in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The exact rounding is skewed for each processor to avoid all
 * processors firing at the exact same time, which could lead
 * to lock contention or spurious cache line bouncing.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long __round_jiffies(unsigned long j, int cpu)
{
	return round_jiffies_common(j, cpu, false);
}
EXPORT_SYMBOL_GPL(__round_jiffies);

/**
 * __round_jiffies_relative - function to round jiffies to a full second
 * @j: the time in (relative) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * __round_jiffies_relative() rounds a time delta in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The exact rounding is skewed for each processor to avoid all
 * processors firing at the exact same time, which could lead
 * to lock contention or spurious cache line bouncing.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long __round_jiffies_relative(unsigned long j, int cpu)
{
	unsigned long j0 = jiffies;

	/* Use j0 because jiffies might change while we run */
	return round_jiffies_common(j + j0, cpu, false) - j0;
}
EXPORT_SYMBOL_GPL(__round_jiffies_relative);

/**
 * round_jiffies - function to round jiffies to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 *
 * round_jiffies() rounds an absolute time in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long round_jiffies(unsigned long j)
{
	return round_jiffies_common(j, raw_smp_processor_id(), false);
}
EXPORT_SYMBOL_GPL(round_jiffies);

/**
 * round_jiffies_relative - function to round jiffies to a full second
 * @j: the time in (relative) jiffies that should be rounded
 *
 * round_jiffies_relative() rounds a time delta in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long round_jiffies_relative(unsigned long j)
{
	return __round_jiffies_relative(j, raw_smp_processor_id());
}
EXPORT_SYMBOL_GPL(round_jiffies_relative);
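
/*
 * A minimal usage sketch (hypothetical caller, not part of this file):
 * a watchdog that only needs roughly once-per-second granularity can
 * batch its wakeups with other such timers:
 *
 *	mod_timer(&my_timer, jiffies + round_jiffies_relative(HZ));
 *
 * where my_timer is a previously initialized struct timer_list.
 */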
/**
 * __round_jiffies_up - function to round jiffies up to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * This is the same as __round_jiffies() except that it will never
 * round down. This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long __round_jiffies_up(unsigned long j, int cpu)
{
	return round_jiffies_common(j, cpu, true);
}
EXPORT_SYMBOL_GPL(__round_jiffies_up);

/**
 * __round_jiffies_up_relative - function to round jiffies up to a full second
 * @j: the time in (relative) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * This is the same as __round_jiffies_relative() except that it will never
 * round down. This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long __round_jiffies_up_relative(unsigned long j, int cpu)
{
	unsigned long j0 = jiffies;

	/* Use j0 because jiffies might change while we run */
	return round_jiffies_common(j + j0, cpu, true) - j0;
}
EXPORT_SYMBOL_GPL(__round_jiffies_up_relative);

/**
 * round_jiffies_up - function to round jiffies up to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 *
 * This is the same as round_jiffies() except that it will never
 * round down. This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long round_jiffies_up(unsigned long j)
{
	return round_jiffies_common(j, raw_smp_processor_id(), true);
}
EXPORT_SYMBOL_GPL(round_jiffies_up);

/**
 * round_jiffies_up_relative - function to round jiffies up to a full second
 * @j: the time in (relative) jiffies that should be rounded
 *
 * This is the same as round_jiffies_relative() except that it will never
 * round down. This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long round_jiffies_up_relative(unsigned long j)
{
	return __round_jiffies_up_relative(j, raw_smp_processor_id());
}
EXPORT_SYMBOL_GPL(round_jiffies_up_relative);

/**
 * set_timer_slack - set the allowed slack for a timer
 * @timer: the timer to be modified
 * @slack_hz: the amount of time (in jiffies) allowed for rounding
 *
 * Set the amount of time, in jiffies, that a certain timer has
 * in terms of slack. By setting this value, the timer subsystem
 * will schedule the actual timer somewhere between
 * the time mod_timer() asks for, and that time plus the slack.
 *
 * By setting the slack to -1, a percentage of the delay is used
 * instead.
 */
void set_timer_slack(struct timer_list *timer, int slack_hz)
{
	timer->slack = slack_hz;
}
EXPORT_SYMBOL_GPL(set_timer_slack);
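
/*
 * For example (an illustrative sketch only): a timeout that merely
 * guards against a lost interrupt can tolerate a whole second of
 * rounding, letting the subsystem coalesce it with nearby timers:
 *
 *	set_timer_slack(&my_timer, HZ);
 *	mod_timer(&my_timer, jiffies + 10 * HZ);
 */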
static void
__internal_add_timer(struct tvec_base *base, struct timer_list *timer)
{
	unsigned long expires = timer->expires;
	unsigned long idx = expires - base->timer_jiffies;
	struct list_head *vec;

	if (idx < TVR_SIZE) {
		int i = expires & TVR_MASK;
		vec = base->tv1.vec + i;
	} else if (idx < 1 << (TVR_BITS + TVN_BITS)) {
		int i = (expires >> TVR_BITS) & TVN_MASK;
		vec = base->tv2.vec + i;
	} else if (idx < 1 << (TVR_BITS + 2 * TVN_BITS)) {
		int i = (expires >> (TVR_BITS + TVN_BITS)) & TVN_MASK;
		vec = base->tv3.vec + i;
	} else if (idx < 1 << (TVR_BITS + 3 * TVN_BITS)) {
		int i = (expires >> (TVR_BITS + 2 * TVN_BITS)) & TVN_MASK;
		vec = base->tv4.vec + i;
	} else if ((signed long) idx < 0) {
		/*
		 * Can happen if you add a timer with expires == jiffies,
		 * or you set a timer to go off in the past
		 */
		vec = base->tv1.vec + (base->timer_jiffies & TVR_MASK);
	} else {
		int i;
		/* If the timeout is larger than MAX_TVAL (on 64-bit
		 * architectures or with CONFIG_BASE_SMALL=1) then we
		 * use the maximum timeout.
		 */
		if (idx > MAX_TVAL) {
			idx = MAX_TVAL;
			expires = idx + base->timer_jiffies;
		}
		i = (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK;
		vec = base->tv5.vec + i;
	}
	/*
	 * Timers are FIFO:
	 */
	list_add_tail(&timer->entry, vec);
}

static void internal_add_timer(struct tvec_base *base, struct timer_list *timer)
{
	__internal_add_timer(base, timer);
	/*
	 * Update base->active_timers and base->next_timer
	 */
	if (!tbase_get_deferrable(timer->base)) {
		if (time_before(timer->expires, base->next_timer))
			base->next_timer = timer->expires;
		base->active_timers++;
	}
}
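
/*
 * A worked example of the bucket math above, assuming the default
 * TVR_BITS = 8 / TVN_BITS = 6 and base->timer_jiffies == 0: a timer
 * with expires == 300 has idx == 300, which is >= TVR_SIZE (256) but
 * below 1 << (TVR_BITS + TVN_BITS) (16384), so it lands in
 * tv2.vec[(300 >> 8) & 63] == tv2.vec[1] and is cascaded into tv1
 * once base->timer_jiffies catches up.
 */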
#ifdef CONFIG_TIMER_STATS
void __timer_stats_timer_set_start_info(struct timer_list *timer, void *addr)
{
	if (timer->start_site)
		return;

	timer->start_site = addr;
	memcpy(timer->start_comm, current->comm, TASK_COMM_LEN);
	timer->start_pid = current->pid;
}

static void timer_stats_account_timer(struct timer_list *timer)
{
	unsigned int flag = 0;

	if (likely(!timer->start_site))
		return;
	if (unlikely(tbase_get_deferrable(timer->base)))
		flag |= TIMER_STATS_FLAG_DEFERRABLE;

	timer_stats_update_stats(timer, timer->start_pid, timer->start_site,
				 timer->function, timer->start_comm, flag);
}

#else
static void timer_stats_account_timer(struct timer_list *timer) {}
#endif

#ifdef CONFIG_DEBUG_OBJECTS_TIMERS

static struct debug_obj_descr timer_debug_descr;

static void *timer_debug_hint(void *addr)
{
	return ((struct timer_list *) addr)->function;
}

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static int timer_fixup_init(void *addr, enum debug_obj_state state)
{
	struct timer_list *timer = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		del_timer_sync(timer);
		debug_object_init(timer, &timer_debug_descr);
		return 1;
	default:
		return 0;
	}
}

/* Stub timer callback for improperly used timers. */
static void stub_timer(unsigned long data)
{
	WARN_ON(1);
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown object is activated (might be a statically initialized object)
 */
static int timer_fixup_activate(void *addr, enum debug_obj_state state)
{
	struct timer_list *timer = addr;

	switch (state) {

	case ODEBUG_STATE_NOTAVAILABLE:
		/*
		 * This is not really a fixup. The timer was
		 * statically initialized. We just make sure that it
		 * is tracked in the object tracker.
		 */
		if (timer->entry.next == NULL &&
		    timer->entry.prev == TIMER_ENTRY_STATIC) {
			debug_object_init(timer, &timer_debug_descr);
			debug_object_activate(timer, &timer_debug_descr);
			return 0;
		} else {
			setup_timer(timer, stub_timer, 0);
			return 1;
		}
		return 0;

	case ODEBUG_STATE_ACTIVE:
		WARN_ON(1);

	default:
		return 0;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static int timer_fixup_free(void *addr, enum debug_obj_state state)
{
	struct timer_list *timer = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		del_timer_sync(timer);
		debug_object_free(timer, &timer_debug_descr);
		return 1;
	default:
		return 0;
	}
}

/*
 * fixup_assert_init is called when:
 * - an untracked/uninit-ed object is found
 */
static int timer_fixup_assert_init(void *addr, enum debug_obj_state state)
{
	struct timer_list *timer = addr;

	switch (state) {
	case ODEBUG_STATE_NOTAVAILABLE:
		if (timer->entry.prev == TIMER_ENTRY_STATIC) {
			/*
			 * This is not really a fixup. The timer was
			 * statically initialized. We just make sure that it
			 * is tracked in the object tracker.
			 */
			debug_object_init(timer, &timer_debug_descr);
			return 0;
		} else {
			setup_timer(timer, stub_timer, 0);
			return 1;
		}
	default:
		return 0;
	}
}

static struct debug_obj_descr timer_debug_descr = {
	.name			= "timer_list",
	.debug_hint		= timer_debug_hint,
	.fixup_init		= timer_fixup_init,
	.fixup_activate		= timer_fixup_activate,
	.fixup_free		= timer_fixup_free,
	.fixup_assert_init	= timer_fixup_assert_init,
};

static inline void debug_timer_init(struct timer_list *timer)
{
	debug_object_init(timer, &timer_debug_descr);
}

static inline void debug_timer_activate(struct timer_list *timer)
{
	debug_object_activate(timer, &timer_debug_descr);
}

static inline void debug_timer_deactivate(struct timer_list *timer)
{
	debug_object_deactivate(timer, &timer_debug_descr);
}

static inline void debug_timer_free(struct timer_list *timer)
{
	debug_object_free(timer, &timer_debug_descr);
}

static inline void debug_timer_assert_init(struct timer_list *timer)
{
	debug_object_assert_init(timer, &timer_debug_descr);
}

static void do_init_timer(struct timer_list *timer, unsigned int flags,
			  const char *name, struct lock_class_key *key);

void init_timer_on_stack_key(struct timer_list *timer, unsigned int flags,
			     const char *name, struct lock_class_key *key)
{
	debug_object_init_on_stack(timer, &timer_debug_descr);
	do_init_timer(timer, flags, name, key);
}
EXPORT_SYMBOL_GPL(init_timer_on_stack_key);

void destroy_timer_on_stack(struct timer_list *timer)
{
	debug_object_free(timer, &timer_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_timer_on_stack);

#else
static inline void debug_timer_init(struct timer_list *timer) { }
static inline void debug_timer_activate(struct timer_list *timer) { }
static inline void debug_timer_deactivate(struct timer_list *timer) { }
static inline void debug_timer_assert_init(struct timer_list *timer) { }
#endif
static inline void debug_init(struct timer_list *timer)
{
	debug_timer_init(timer);
	trace_timer_init(timer);
}

static inline void
debug_activate(struct timer_list *timer, unsigned long expires)
{
	debug_timer_activate(timer);
	trace_timer_start(timer, expires);
}

static inline void debug_deactivate(struct timer_list *timer)
{
	debug_timer_deactivate(timer);
	trace_timer_cancel(timer);
}

static inline void debug_assert_init(struct timer_list *timer)
{
	debug_timer_assert_init(timer);
}

static void do_init_timer(struct timer_list *timer, unsigned int flags,
			  const char *name, struct lock_class_key *key)
{
	struct tvec_base *base = __raw_get_cpu_var(tvec_bases);

	timer->entry.next = NULL;
	timer->base = (void *)((unsigned long)base | flags);
	timer->slack = -1;
#ifdef CONFIG_TIMER_STATS
	timer->start_site = NULL;
	timer->start_pid = -1;
	memset(timer->start_comm, 0, TASK_COMM_LEN);
#endif
	lockdep_init_map(&timer->lockdep_map, name, key, 0);
}

/**
 * init_timer_key - initialize a timer
 * @timer: the timer to be initialized
 * @flags: timer flags
 * @name: name of the timer
 * @key: lockdep class key of the fake lock used for tracking timer
 *       sync lock dependencies
 *
 * init_timer_key() must be done to a timer prior to calling *any* of the
 * other timer functions.
 */
void init_timer_key(struct timer_list *timer, unsigned int flags,
		    const char *name, struct lock_class_key *key)
{
	debug_init(timer);
	do_init_timer(timer, flags, name, key);
}
EXPORT_SYMBOL(init_timer_key);
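
/*
 * Typical initialization (a hypothetical example, using the wrappers
 * from <linux/timer.h> that all funnel into init_timer_key()):
 *
 *	static void my_callback(unsigned long data);
 *	static struct timer_list my_timer;
 *
 *	setup_timer(&my_timer, my_callback, 0);
 *	mod_timer(&my_timer, jiffies + HZ);
 */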
static inline void detach_timer(struct timer_list *timer, bool clear_pending)
{
	struct list_head *entry = &timer->entry;

	debug_deactivate(timer);

	__list_del(entry->prev, entry->next);
	if (clear_pending)
		entry->next = NULL;
	entry->prev = LIST_POISON2;
}

static inline void
detach_expired_timer(struct timer_list *timer, struct tvec_base *base)
{
	detach_timer(timer, true);
	if (!tbase_get_deferrable(timer->base))
		base->active_timers--;
}

static int detach_if_pending(struct timer_list *timer, struct tvec_base *base,
			     bool clear_pending)
{
	if (!timer_pending(timer))
		return 0;

	detach_timer(timer, clear_pending);
	if (!tbase_get_deferrable(timer->base)) {
		base->active_timers--;
		if (timer->expires == base->next_timer)
			base->next_timer = base->timer_jiffies;
	}
	return 1;
}

/*
 * We are using hashed locking: holding per_cpu(tvec_bases).lock
 * means that all timers which are tied to this base via timer->base are
 * locked, and the base itself is locked too.
 *
 * So __run_timers/migrate_timers can safely modify all timers which could
 * be found on ->tvX lists.
 *
 * When the timer's base is locked, and the timer removed from list, it is
 * possible to set timer->base = NULL and drop the lock: the timer remains
 * locked.
 */
static struct tvec_base *lock_timer_base(struct timer_list *timer,
					 unsigned long *flags)
	__acquires(timer->base->lock)
{
	struct tvec_base *base;

	for (;;) {
		struct tvec_base *prelock_base = timer->base;
		base = tbase_get_base(prelock_base);
		if (likely(base != NULL)) {
			spin_lock_irqsave(&base->lock, *flags);
			if (likely(prelock_base == timer->base))
				return base;
			/* The timer has migrated to another CPU */
			spin_unlock_irqrestore(&base->lock, *flags);
		}
		cpu_relax();
	}
}
static inline int
__mod_timer(struct timer_list *timer, unsigned long expires,
			bool pending_only, int pinned)
{
	struct tvec_base *base, *new_base;
	unsigned long flags;
	int ret = 0, cpu;

	timer_stats_timer_set_start_info(timer);
	BUG_ON(!timer->function);

	base = lock_timer_base(timer, &flags);

	ret = detach_if_pending(timer, base, false);
	if (!ret && pending_only)
		goto out_unlock;

	debug_activate(timer, expires);

	cpu = smp_processor_id();

#if defined(CONFIG_NO_HZ) && defined(CONFIG_SMP)
	if (!pinned && get_sysctl_timer_migration() && idle_cpu(cpu))
		cpu = get_nohz_timer_target();
#endif
	new_base = per_cpu(tvec_bases, cpu);

	if (base != new_base) {
		/*
		 * We are trying to schedule the timer on the local CPU.
		 * However we can't change timer's base while it is running,
		 * otherwise del_timer_sync() can't detect that the timer's
		 * handler has not yet finished. This also guarantees that
		 * the timer is serialized wrt itself.
		 */
		if (likely(base->running_timer != timer)) {
			/* See the comment in lock_timer_base() */
			timer_set_base(timer, NULL);
			spin_unlock(&base->lock);
			base = new_base;
			spin_lock(&base->lock);
			timer_set_base(timer, base);
		}
	}

	timer->expires = expires;
	internal_add_timer(base, timer);

out_unlock:
	spin_unlock_irqrestore(&base->lock, flags);

	return ret;
}

/**
 * mod_timer_pending - modify a pending timer's timeout
 * @timer: the pending timer to be modified
 * @expires: new timeout in jiffies
 *
 * mod_timer_pending() is the same for pending timers as mod_timer(),
 * but will not re-activate and modify already deleted timers.
 *
 * It is useful for unserialized use of timers.
 */
int mod_timer_pending(struct timer_list *timer, unsigned long expires)
{
	return __mod_timer(timer, expires, true, TIMER_NOT_PINNED);
}
EXPORT_SYMBOL(mod_timer_pending);
/*
 * Decide where to put the timer while taking the slack into account
 *
 * Algorithm:
 *   1) calculate the maximum (absolute) time
 *   2) calculate the highest bit where the expires and new max are different
 *   3) use this bit to make a mask
 *   4) use the bitmask to round down the maximum time, so that all last
 *      bits are zeros
 */
static inline
unsigned long apply_slack(struct timer_list *timer, unsigned long expires)
{
	unsigned long expires_limit, mask;
	int bit;

	if (timer->slack >= 0) {
		expires_limit = expires + timer->slack;
	} else {
		long delta = expires - jiffies;

		if (delta < 256)
			return expires;

		expires_limit = expires + delta / 256;
	}
	mask = expires ^ expires_limit;
	if (mask == 0)
		return expires;

	bit = find_last_bit(&mask, BITS_PER_LONG);

	mask = (1 << bit) - 1;

	expires_limit = expires_limit & ~(mask);

	return expires_limit;
}
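
/*
 * Worked example with illustrative numbers: for timer->slack == -1,
 * jiffies == 0 and expires == 2560 (0xa00), delta is 2560, so
 * expires_limit = 2560 + 2560/256 = 2570 (0xa0a). The highest bit in
 * which expires and expires_limit differ is bit 3, giving
 * mask = (1 << 3) - 1 = 7, and apply_slack() returns 2570 & ~7 = 2568.
 */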
/**
 * mod_timer - modify a timer's timeout
 * @timer: the timer to be modified
 * @expires: new timeout in jiffies
 *
 * mod_timer() is a more efficient way to update the expire field of an
 * active timer (if the timer is inactive it will be activated).
 *
 * mod_timer(timer, expires) is equivalent to:
 *
 *     del_timer(timer); timer->expires = expires; add_timer(timer);
 *
 * Note that if there are multiple unserialized concurrent users of the
 * same timer, then mod_timer() is the only safe way to modify the timeout,
 * since add_timer() cannot modify an already running timer.
 *
 * The function returns whether it has modified a pending timer or not.
 * (i.e. mod_timer() of an inactive timer returns 0, mod_timer() of an
 * active timer returns 1.)
 */
int mod_timer(struct timer_list *timer, unsigned long expires)
{
	expires = apply_slack(timer, expires);

	/*
	 * This is a common optimization triggered by the
	 * networking code - if the timer is re-modified
	 * to be the same thing then just return:
	 */
	if (timer_pending(timer) && timer->expires == expires)
		return 1;

	return __mod_timer(timer, expires, false, TIMER_NOT_PINNED);
}
EXPORT_SYMBOL(mod_timer);
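
/*
 * A common pattern (hypothetical sketch): a periodic callback re-arms
 * itself from within its own handler:
 *
 *	static void my_poll(unsigned long data)
 *	{
 *		do_work();
 *		mod_timer(&my_timer, jiffies + HZ);
 *	}
 *
 * where do_work() and my_timer stand in for the caller's own code.
 */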
/**
 * mod_timer_pinned - modify a timer's timeout
 * @timer: the timer to be modified
 * @expires: new timeout in jiffies
 *
 * mod_timer_pinned() is a way to update the expire field of an
 * active timer (if the timer is inactive it will be activated)
 * and to ensure that the timer is scheduled on the current CPU.
 *
 * Note that this does not prevent the timer from being migrated
 * when the current CPU goes offline. If this is a problem for
 * you, use CPU-hotplug notifiers to handle it correctly, for
 * example, cancelling the timer when the corresponding CPU goes
 * offline.
 *
 * mod_timer_pinned(timer, expires) is equivalent to:
 *
 *     del_timer(timer); timer->expires = expires; add_timer(timer);
 */
int mod_timer_pinned(struct timer_list *timer, unsigned long expires)
{
	if (timer->expires == expires && timer_pending(timer))
		return 1;

	return __mod_timer(timer, expires, false, TIMER_PINNED);
}
EXPORT_SYMBOL(mod_timer_pinned);
/**
 * add_timer - start a timer
 * @timer: the timer to be added
 *
 * The kernel will do a ->function(->data) callback from the
 * timer interrupt at the ->expires point in the future. The
 * current time is 'jiffies'.
 *
 * The timer's ->expires, ->function (and if the handler uses it, ->data)
 * fields must be set prior to calling this function.
 *
 * Timers with an ->expires field in the past will be executed in the next
 * timer tick.
 */
void add_timer(struct timer_list *timer)
{
	BUG_ON(timer_pending(timer));
	mod_timer(timer, timer->expires);
}
EXPORT_SYMBOL(add_timer);
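
/*
 * Illustrative sketch: add_timer() is meant for a timer that is known
 * not to be pending, e.g. right after initialization:
 *
 *	init_timer(&my_timer);
 *	my_timer.function = my_callback;
 *	my_timer.expires = jiffies + 5 * HZ;
 *	add_timer(&my_timer);
 *
 * For a timer that may already be queued, use mod_timer() instead.
 */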
/**
 * add_timer_on - start a timer on a particular CPU
 * @timer: the timer to be added
 * @cpu: the CPU to start it on
 *
 * This is not very scalable on SMP. Double adds are not possible.
 */
void add_timer_on(struct timer_list *timer, int cpu)
{
	struct tvec_base *base = per_cpu(tvec_bases, cpu);
	unsigned long flags;

	timer_stats_timer_set_start_info(timer);
	BUG_ON(timer_pending(timer) || !timer->function);
	spin_lock_irqsave(&base->lock, flags);
	timer_set_base(timer, base);
	debug_activate(timer, timer->expires);
	internal_add_timer(base, timer);
	/*
	 * Check whether the other CPU is idle and needs to be
	 * triggered to reevaluate the timer wheel when nohz is
	 * active. We are protected against the other CPU fiddling
	 * with the timer by holding the timer base lock. This also
	 * makes sure that a CPU on the way to idle can not evaluate
	 * the timer wheel.
	 */
	wake_up_idle_cpu(cpu);
	spin_unlock_irqrestore(&base->lock, flags);
}
EXPORT_SYMBOL_GPL(add_timer_on);
/**
 * del_timer - deactivate a timer.
 * @timer: the timer to be deactivated
 *
 * del_timer() deactivates a timer - this works on both active and inactive
 * timers.
 *
 * The function returns whether it has deactivated a pending timer or not.
 * (i.e. del_timer() of an inactive timer returns 0, del_timer() of an
 * active timer returns 1.)
 */
int del_timer(struct timer_list *timer)
{
	struct tvec_base *base;
	unsigned long flags;
	int ret = 0;

	debug_assert_init(timer);

	timer_stats_timer_clear_start_info(timer);
	if (timer_pending(timer)) {
		base = lock_timer_base(timer, &flags);
		ret = detach_if_pending(timer, base, true);
		spin_unlock_irqrestore(&base->lock, flags);
	}

	return ret;
}
EXPORT_SYMBOL(del_timer);

/**
 * try_to_del_timer_sync - Try to deactivate a timer
 * @timer: the timer to deactivate
 *
 * This function tries to deactivate a timer. Upon successful (ret >= 0)
 * exit the timer is not queued and the handler is not running on any CPU.
 */
int try_to_del_timer_sync(struct timer_list *timer)
{
	struct tvec_base *base;
	unsigned long flags;
	int ret = -1;

	debug_assert_init(timer);

	base = lock_timer_base(timer, &flags);

	if (base->running_timer != timer) {
		timer_stats_timer_clear_start_info(timer);
		ret = detach_if_pending(timer, base, true);
	}
	spin_unlock_irqrestore(&base->lock, flags);

	return ret;
}
EXPORT_SYMBOL(try_to_del_timer_sync);
#ifdef CONFIG_SMP
/**
 * del_timer_sync - deactivate a timer and wait for the handler to finish.
 * @timer: the timer to be deactivated
 *
 * This function only differs from del_timer() on SMP: besides deactivating
 * the timer it also makes sure the handler has finished executing on other
 * CPUs.
 *
 * Synchronization rules: Callers must prevent restarting of the timer,
 * otherwise this function is meaningless. It must not be called from
 * interrupt contexts unless the timer is an irqsafe one. The caller must
 * not hold locks which would prevent completion of the timer's
 * handler. The timer's handler must not call add_timer_on(). Upon exit the
 * timer is not queued and the handler is not running on any CPU.
 *
 * Note: For !irqsafe timers, you must not hold locks that are held in
 *   interrupt context while calling this function. Even if the lock has
 *   nothing to do with the timer in question. Here's why:
 *
 *    CPU0                             CPU1
 *    ----                             ----
 *                                     <SOFTIRQ>
 *                                       call_timer_fn();
 *                                       base->running_timer = mytimer;
 *    spin_lock_irq(somelock);
 *                                     <IRQ>
 *                                       spin_lock(somelock);
 *    del_timer_sync(mytimer);
 *      while (base->running_timer == mytimer);
 *
 * Now del_timer_sync() will never return and never release somelock.
 * The interrupt on the other CPU is waiting to grab somelock but
 * it has interrupted the softirq that CPU0 is waiting to finish.
 *
 * The function returns whether it has deactivated a pending timer or not.
 */
int del_timer_sync(struct timer_list *timer)
{
#ifdef CONFIG_LOCKDEP
	unsigned long flags;

	/*
	 * If lockdep gives a backtrace here, please reference
	 * the synchronization rules above.
	 */
	local_irq_save(flags);
	lock_map_acquire(&timer->lockdep_map);
	lock_map_release(&timer->lockdep_map);
	local_irq_restore(flags);
#endif
	/*
	 * don't use it in hardirq context, because it
	 * could lead to deadlock.
	 */
	WARN_ON(in_irq() && !tbase_get_irqsafe(timer->base));
	for (;;) {
		int ret = try_to_del_timer_sync(timer);
		if (ret >= 0)
			return ret;
		cpu_relax();
	}
}
EXPORT_SYMBOL(del_timer_sync);
#endif
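
/*
 * Typical teardown (hypothetical driver sketch): prevent re-arming
 * first, then wait for a possibly running handler before freeing the
 * structure that embeds the timer:
 *
 *	my_dev->shutting_down = true;
 *	del_timer_sync(&my_dev->poll_timer);
 *	kfree(my_dev);
 *
 * where the timer callback checks my_dev->shutting_down before calling
 * mod_timer() again.
 */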
static int cascade(struct tvec_base *base, struct tvec *tv, int index)
{
	/* cascade all the timers from tv up one level */
	struct timer_list *timer, *tmp;
	struct list_head tv_list;

	list_replace_init(tv->vec + index, &tv_list);

	/*
	 * We are removing _all_ timers from the list, so we
	 * don't have to detach them individually.
	 */
	list_for_each_entry_safe(timer, tmp, &tv_list, entry) {
		BUG_ON(tbase_get_base(timer->base) != base);
		/* No accounting, while moving them */
		__internal_add_timer(base, timer);
	}

	return index;
}

static void call_timer_fn(struct timer_list *timer, void (*fn)(unsigned long),
			  unsigned long data)
{
	int preempt_count = preempt_count();

#ifdef CONFIG_LOCKDEP
	/*
	 * It is permissible to free the timer from inside the
	 * function that is called from it, this we need to take into
	 * account for lockdep too. To avoid bogus "held lock freed"
	 * warnings as well as problems when looking into
	 * timer->lockdep_map, make a copy and use that here.
	 */
	struct lockdep_map lockdep_map;

	lockdep_copy_map(&lockdep_map, &timer->lockdep_map);
#endif
	/*
	 * Couple the lock chain with the lock chain at
	 * del_timer_sync() by acquiring the lock_map around the fn()
	 * call here and in del_timer_sync().
	 */
	lock_map_acquire(&lockdep_map);

	trace_timer_expire_entry(timer);
	fn(data);
	trace_timer_expire_exit(timer);

	lock_map_release(&lockdep_map);

	if (preempt_count != preempt_count()) {
		WARN_ONCE(1, "timer: %pF preempt leak: %08x -> %08x\n",
			  fn, preempt_count, preempt_count());
		/*
		 * Restore the preempt count. That gives us a decent
		 * chance to survive and extract information. If the
		 * callback kept a lock held, bad luck, but not worse
		 * than the BUG() we had.
		 */
		preempt_count() = preempt_count;
	}
}
#define INDEX(N) ((base->timer_jiffies >> (TVR_BITS + (N) * TVN_BITS)) & TVN_MASK)

/**
 * __run_timers - run all expired timers (if any) on this CPU.
 * @base: the timer vector to be processed.
 *
 * This function cascades all vectors and executes all expired timer
 * vectors.
 */
static inline void __run_timers(struct tvec_base *base)
{
	struct timer_list *timer;

	spin_lock_irq(&base->lock);
	while (time_after_eq(jiffies, base->timer_jiffies)) {
		struct list_head work_list;
		struct list_head *head = &work_list;
		int index = base->timer_jiffies & TVR_MASK;

		/*
		 * Cascade timers:
		 */
		if (!index &&
			(!cascade(base, &base->tv2, INDEX(0))) &&
				(!cascade(base, &base->tv3, INDEX(1))) &&
					!cascade(base, &base->tv4, INDEX(2)))
			cascade(base, &base->tv5, INDEX(3));
		++base->timer_jiffies;
		list_replace_init(base->tv1.vec + index, &work_list);
		while (!list_empty(head)) {
			void (*fn)(unsigned long);
			unsigned long data;
			bool irqsafe;

			timer = list_first_entry(head, struct timer_list, entry);
			fn = timer->function;
			data = timer->data;
			irqsafe = tbase_get_irqsafe(timer->base);

			timer_stats_account_timer(timer);

			base->running_timer = timer;
			detach_expired_timer(timer, base);

			if (irqsafe) {
				spin_unlock(&base->lock);
				call_timer_fn(timer, fn, data);
				spin_lock(&base->lock);
			} else {
				spin_unlock_irq(&base->lock);
				call_timer_fn(timer, fn, data);
				spin_lock_irq(&base->lock);
			}
		}
	}
	base->running_timer = NULL;
	spin_unlock_irq(&base->lock);
}
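
/*
 * Cascade timing, by way of example (default geometry): index is zero
 * whenever base->timer_jiffies is a multiple of 256, so tv2 is cascaded
 * at most once every 256 jiffies; tv3 only when INDEX(0) has also
 * wrapped to zero, i.e. every 256 * 64 jiffies, and so on up to tv5.
 */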
#ifdef CONFIG_NO_HZ
/*
 * Find out when the next timer event is due to happen. This
 * is used on S/390 to stop all activity when a CPU is idle.
 * This function needs to be called with interrupts disabled.
 */
static unsigned long __next_timer_interrupt(struct tvec_base *base)
{
	unsigned long timer_jiffies = base->timer_jiffies;
	unsigned long expires = timer_jiffies + NEXT_TIMER_MAX_DELTA;
	int index, slot, array, found = 0;
	struct timer_list *nte;
	struct tvec *varray[4];

	/* Look for timer events in tv1. */
	index = slot = timer_jiffies & TVR_MASK;
	do {
		list_for_each_entry(nte, base->tv1.vec + slot, entry) {
			if (tbase_get_deferrable(nte->base))
				continue;

			found = 1;
			expires = nte->expires;
			/* Look at the cascade bucket(s)? */
			if (!index || slot < index)
				goto cascade;
			return expires;
		}
		slot = (slot + 1) & TVR_MASK;
	} while (slot != index);

cascade:
	/* Calculate the next cascade event */
	if (index)
		timer_jiffies += TVR_SIZE - index;
	timer_jiffies >>= TVR_BITS;

	/* Check tv2-tv5. */
	varray[0] = &base->tv2;
	varray[1] = &base->tv3;
	varray[2] = &base->tv4;
	varray[3] = &base->tv5;

	for (array = 0; array < 4; array++) {
		struct tvec *varp = varray[array];

		index = slot = timer_jiffies & TVN_MASK;
		do {
			list_for_each_entry(nte, varp->vec + slot, entry) {
				if (tbase_get_deferrable(nte->base))
					continue;

				found = 1;
				if (time_before(nte->expires, expires))
					expires = nte->expires;
			}
			/*
			 * Do we still search for the first timer or are
			 * we looking up the cascade buckets?
			 */
			if (found) {
				/* Look at the cascade bucket(s)? */
				if (!index || slot < index)
					break;
				return expires;
			}
			slot = (slot + 1) & TVN_MASK;
		} while (slot != index);

		if (index)
			timer_jiffies += TVN_SIZE - index;
		timer_jiffies >>= TVN_BITS;
	}
	return expires;
}
/*
 * Check if the next hrtimer event is before the next timer wheel
 * event:
 */
static unsigned long cmp_next_hrtimer_event(unsigned long now,
					    unsigned long expires)
{
	ktime_t hr_delta = hrtimer_get_next_event();
	struct timespec tsdelta;
	unsigned long delta;

	if (hr_delta.tv64 == KTIME_MAX)
		return expires;

	/*
	 * Expired timer available, let it expire in the next tick
	 */
	if (hr_delta.tv64 <= 0)
		return now + 1;

	tsdelta = ktime_to_timespec(hr_delta);
	delta = timespec_to_jiffies(&tsdelta);

	/*
	 * Limit the delta to the max value, which is checked in
	 * tick_nohz_stop_sched_tick():
	 */
	if (delta > NEXT_TIMER_MAX_DELTA)
		delta = NEXT_TIMER_MAX_DELTA;

	/*
	 * Take rounding errors into account and make sure that it
	 * expires in the next tick. Otherwise we go into an endless
	 * ping pong due to tick_nohz_stop_sched_tick() retriggering
	 * the timer softirq.
	 */
	if (delta < 1)
		delta = 1;
	now += delta;
	if (time_before(now, expires))
		return now;
	return expires;
}

/**
 * get_next_timer_interrupt - return the jiffy of the next pending timer
 * @now: current time (in jiffies)
 */
unsigned long get_next_timer_interrupt(unsigned long now)
{
	struct tvec_base *base = __this_cpu_read(tvec_bases);
	unsigned long expires = now + NEXT_TIMER_MAX_DELTA;

	/*
	 * Pretend that there is no timer pending if the cpu is offline.
	 * Possible pending timers will be migrated later to an active cpu.
	 */
	if (cpu_is_offline(smp_processor_id()))
		return expires;

	spin_lock(&base->lock);
	if (base->active_timers) {
		if (time_before_eq(base->next_timer, base->timer_jiffies))
			base->next_timer = __next_timer_interrupt(base);
		expires = base->next_timer;
	}
	spin_unlock(&base->lock);

	if (time_before_eq(expires, now))
		return now;

	return cmp_next_hrtimer_event(now, expires);
}
#endif
/*
 * Called from the timer interrupt handler to charge one tick to the current
 * process. user_tick is 1 if the tick is user time, 0 for system.
 */
void update_process_times(int user_tick)
{
	struct task_struct *p = current;
	int cpu = smp_processor_id();

	/* Note: this timer irq context must be accounted for as well. */
	account_process_tick(p, user_tick);
	run_local_timers();
	rcu_check_callbacks(cpu, user_tick);
	printk_tick();
#ifdef CONFIG_IRQ_WORK
	if (in_irq())
		irq_work_run();
#endif
	scheduler_tick();
	run_posix_cpu_timers(p);
}

/*
 * This function runs timers and the timer-tq in bottom half context.
 */
static void run_timer_softirq(struct softirq_action *h)
{
	struct tvec_base *base = __this_cpu_read(tvec_bases);

	hrtimer_run_pending();

	if (time_after_eq(jiffies, base->timer_jiffies))
		__run_timers(base);
}

/*
 * Called by the local, per-CPU timer interrupt on SMP.
 */
void run_local_timers(void)
{
	hrtimer_run_queues();
	raise_softirq(TIMER_SOFTIRQ);
}

#ifdef __ARCH_WANT_SYS_ALARM

/*
 * For backwards compatibility? This can be done in libc so Alpha
 * and all newer ports shouldn't need it.
 */
SYSCALL_DEFINE1(alarm, unsigned int, seconds)
{
	return alarm_setitimer(seconds);
}

#endif
/**
 * sys_getpid - return the thread group id of the current process
 *
 * Note, despite the name, this returns the tgid not the pid. The tgid and
 * the pid are identical unless CLONE_THREAD was specified on clone() in
 * which case the tgid is the same in all threads of the same group.
 *
 * This is SMP safe as current->tgid does not change.
 */
SYSCALL_DEFINE0(getpid)
{
	return task_tgid_vnr(current);
}

/*
 * Accessing ->real_parent is not SMP-safe, it could
 * change from under us. However, we can use a stale
 * value of ->real_parent under rcu_read_lock(), see
 * release_task()->call_rcu(delayed_put_task_struct).
 */
SYSCALL_DEFINE0(getppid)
{
	int pid;

	rcu_read_lock();
	pid = task_tgid_vnr(rcu_dereference(current->real_parent));
	rcu_read_unlock();

	return pid;
}

SYSCALL_DEFINE0(getuid)
{
	/* Only we change this so SMP safe */
	return from_kuid_munged(current_user_ns(), current_uid());
}

SYSCALL_DEFINE0(geteuid)
{
	/* Only we change this so SMP safe */
	return from_kuid_munged(current_user_ns(), current_euid());
}

SYSCALL_DEFINE0(getgid)
{
	/* Only we change this so SMP safe */
	return from_kgid_munged(current_user_ns(), current_gid());
}

SYSCALL_DEFINE0(getegid)
{
	/* Only we change this so SMP safe */
	return from_kgid_munged(current_user_ns(), current_egid());
}

static void process_timeout(unsigned long __data)
{
	wake_up_process((struct task_struct *)__data);
}
  1282. /**
  1283. * schedule_timeout - sleep until timeout
  1284. * @timeout: timeout value in jiffies
  1285. *
  1286. * Make the current task sleep until @timeout jiffies have
  1287. * elapsed. The routine will return immediately unless
  1288. * the current task state has been set (see set_current_state()).
  1289. *
  1290. * You can set the task state as follows -
  1291. *
  1292. * %TASK_UNINTERRUPTIBLE - at least @timeout jiffies are guaranteed to
  1293. * pass before the routine returns. The routine will return 0
  1294. *
  1295. * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
  1296. * delivered to the current task. In this case the remaining time
  1297. * in jiffies will be returned, or 0 if the timer expired in time
  1298. *
  1299. * The current task state is guaranteed to be TASK_RUNNING when this
  1300. * routine returns.
  1301. *
  1302. * Specifying a @timeout value of %MAX_SCHEDULE_TIMEOUT will schedule
  1303. * the CPU away without a bound on the timeout. In this case the return
  1304. * value will be %MAX_SCHEDULE_TIMEOUT.
  1305. *
  1306. * In all cases the return value is guaranteed to be non-negative.
  1307. */
signed long __sched schedule_timeout(signed long timeout)
{
	struct timer_list timer;
	unsigned long expire;

	switch (timeout)
	{
	case MAX_SCHEDULE_TIMEOUT:
		/*
		 * This special case is for the caller's convenience.
		 * Nothing more. We could take MAX_SCHEDULE_TIMEOUT from
		 * one of the negative values, but I'd like to return a
		 * valid offset (>= 0) to allow the caller to do
		 * everything it wants with the retval.
		 */
		schedule();
		goto out;
	default:
		/*
		 * Another bit of paranoia. Note that the retval will be
		 * 0 since no piece of kernel is supposed to do a check
		 * for a negative retval of schedule_timeout() (since it
		 * should never happen anyway). You just have the printk()
		 * that will tell you if something has gone wrong and where.
		 */
		if (timeout < 0) {
			printk(KERN_ERR "schedule_timeout: wrong timeout "
				"value %lx\n", timeout);
			dump_stack();
			current->state = TASK_RUNNING;
			goto out;
		}
	}

	expire = timeout + jiffies;

	setup_timer_on_stack(&timer, process_timeout, (unsigned long)current);
	__mod_timer(&timer, expire, false, TIMER_NOT_PINNED);
	schedule();
	del_singleshot_timer_sync(&timer);

	/* Remove the timer from the object tracker */
	destroy_timer_on_stack(&timer);

	timeout = expire - jiffies;

 out:
	return timeout < 0 ? 0 : timeout;
}
EXPORT_SYMBOL(schedule_timeout);
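
/*
 * Illustrative sketch (editor's addition, not part of the original
 * file): a typical schedule_timeout() caller sets the task state
 * first; without that, the routine returns immediately. The helper
 * name and the 100ms delay are invented for illustration.
 */
static __maybe_unused signed long example_sleep_100ms(void)
{
	/* Mark us sleepable before arming the timeout. */
	set_current_state(TASK_INTERRUPTIBLE);

	/* 0 on full timeout; remaining jiffies if a signal woke us. */
	return schedule_timeout(msecs_to_jiffies(100));
}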

/*
 * We can use __set_current_state() here because schedule_timeout() calls
 * schedule() unconditionally.
 */
signed long __sched schedule_timeout_interruptible(signed long timeout)
{
	__set_current_state(TASK_INTERRUPTIBLE);
	return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_interruptible);

signed long __sched schedule_timeout_killable(signed long timeout)
{
	__set_current_state(TASK_KILLABLE);
	return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_killable);

signed long __sched schedule_timeout_uninterruptible(signed long timeout)
{
	__set_current_state(TASK_UNINTERRUPTIBLE);
	return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_uninterruptible);
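
/*
 * Illustrative sketch (editor's addition): each wrapper above simply
 * folds the state setting into the call, so these two forms behave
 * identically. The helper name is invented.
 */
static __maybe_unused void example_wrapper_equivalence(void)
{
	/* Open-coded ... */
	__set_current_state(TASK_KILLABLE);
	schedule_timeout(HZ);

	/* ... and via the wrapper. */
	schedule_timeout_killable(HZ);
}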

/*
 * Thread ID - the internal kernel "pid". Unlike getpid(), which reports
 * the shared thread-group id, this is unique per thread.
 */
SYSCALL_DEFINE0(gettid)
{
	return task_pid_vnr(current);
}

/**
 * do_sysinfo - fill in sysinfo struct
 * @info: pointer to buffer to fill
 */
int do_sysinfo(struct sysinfo *info)
{
	unsigned long mem_total, sav_total;
	unsigned int mem_unit, bitcount;
	struct timespec tp;

	memset(info, 0, sizeof(struct sysinfo));

	ktime_get_ts(&tp);
	monotonic_to_bootbased(&tp);
	/* Round a partially elapsed second up to a full one. */
	info->uptime = tp.tv_sec + (tp.tv_nsec ? 1 : 0);

	get_avenrun(info->loads, 0, SI_LOAD_SHIFT - FSHIFT);

	info->procs = nr_threads;

	si_meminfo(info);
	si_swapinfo(info);

	/*
	 * If the sum of all the available memory (i.e. ram + swap)
	 * is less than can be stored in a 32 bit unsigned long then
	 * we can be binary compatible with 2.2.x kernels. If not,
	 * well, in that case 2.2.x was broken anyway...
	 *
	 *  -Erik Andersen <andersee@debian.org>
	 */
	mem_total = info->totalram + info->totalswap;
	if (mem_total < info->totalram || mem_total < info->totalswap)
		goto out;
	bitcount = 0;
	mem_unit = info->mem_unit;
	while (mem_unit > 1) {
		/*
		 * Shift mem_total up one bit per bit of mem_unit,
		 * bailing out if the product would overflow.
		 */
		bitcount++;
		mem_unit >>= 1;
		sav_total = mem_total;
		mem_total <<= 1;
		if (mem_total < sav_total)
			goto out;
	}

	/*
	 * If mem_total did not overflow, multiply all memory values by
	 * info->mem_unit and set it to 1. This leaves things compatible
	 * with 2.2.x, and also retains compatibility with earlier 2.4.x
	 * kernels...
	 */
	info->mem_unit = 1;
	info->totalram <<= bitcount;
	info->freeram <<= bitcount;
	info->sharedram <<= bitcount;
	info->bufferram <<= bitcount;
	info->totalswap <<= bitcount;
	info->freeswap <<= bitcount;
	info->totalhigh <<= bitcount;
	info->freehigh <<= bitcount;

out:
	return 0;
}

SYSCALL_DEFINE1(sysinfo, struct sysinfo __user *, info)
{
	struct sysinfo val;

	do_sysinfo(&val);

	if (copy_to_user(info, &val, sizeof(struct sysinfo)))
		return -EFAULT;

	return 0;
}
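
/*
 * Illustrative userspace sketch (editor's addition): how the syscall
 * above is typically consumed. This is not kernel code; build it
 * separately against a libc.
 *
 *	#include <sys/sysinfo.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		struct sysinfo si;
 *
 *		if (sysinfo(&si) == 0)
 *			printf("up %ld s, ram %lu units of %u bytes\n",
 *			       si.uptime, si.totalram, si.mem_unit);
 *		return 0;
 *	}
 */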

static int __cpuinit init_timers_cpu(int cpu)
{
	int j;
	struct tvec_base *base;
	static char __cpuinitdata tvec_base_done[NR_CPUS];

	if (!tvec_base_done[cpu]) {
		static char boot_done;

		if (boot_done) {
			/*
			 * The APs use this path later in boot
			 */
			base = kmalloc_node(sizeof(*base),
					    GFP_KERNEL | __GFP_ZERO,
					    cpu_to_node(cpu));
			if (!base)
				return -ENOMEM;

			/* Make sure that tvec_base is 2 byte aligned */
			if (tbase_get_deferrable(base)) {
				WARN_ON(1);
				kfree(base);
				return -ENOMEM;
			}
			per_cpu(tvec_bases, cpu) = base;
		} else {
			/*
			 * This is for the boot CPU - we use compile-time
			 * static initialisation because per-cpu memory isn't
			 * ready yet and because the memory allocators are not
			 * initialised either.
			 */
			boot_done = 1;
			base = &boot_tvec_bases;
		}
		tvec_base_done[cpu] = 1;
	} else {
		base = per_cpu(tvec_bases, cpu);
	}

	spin_lock_init(&base->lock);
	for (j = 0; j < TVN_SIZE; j++) {
		INIT_LIST_HEAD(base->tv5.vec + j);
		INIT_LIST_HEAD(base->tv4.vec + j);
		INIT_LIST_HEAD(base->tv3.vec + j);
		INIT_LIST_HEAD(base->tv2.vec + j);
	}
	for (j = 0; j < TVR_SIZE; j++)
		INIT_LIST_HEAD(base->tv1.vec + j);

	base->timer_jiffies = jiffies;
	base->next_timer = base->timer_jiffies;
	base->active_timers = 0;
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
static void migrate_timer_list(struct tvec_base *new_base, struct list_head *head)
{
	struct timer_list *timer;

	while (!list_empty(head)) {
		timer = list_first_entry(head, struct timer_list, entry);
		/* We ignore the accounting on the dying cpu */
		detach_timer(timer, false);
		timer_set_base(timer, new_base);
		internal_add_timer(new_base, timer);
	}
}

static void __cpuinit migrate_timers(int cpu)
{
	struct tvec_base *old_base;
	struct tvec_base *new_base;
	int i;

	BUG_ON(cpu_online(cpu));
	old_base = per_cpu(tvec_bases, cpu);
	new_base = get_cpu_var(tvec_bases);
	/*
	 * The caller is globally serialized and nobody else
	 * takes two locks at once, so deadlock is not possible.
	 */
	spin_lock_irq(&new_base->lock);
	spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);

	BUG_ON(old_base->running_timer);

	for (i = 0; i < TVR_SIZE; i++)
		migrate_timer_list(new_base, old_base->tv1.vec + i);
	for (i = 0; i < TVN_SIZE; i++) {
		migrate_timer_list(new_base, old_base->tv2.vec + i);
		migrate_timer_list(new_base, old_base->tv3.vec + i);
		migrate_timer_list(new_base, old_base->tv4.vec + i);
		migrate_timer_list(new_base, old_base->tv5.vec + i);
	}

	spin_unlock(&old_base->lock);
	spin_unlock_irq(&new_base->lock);
	put_cpu_var(tvec_bases);
}
#endif /* CONFIG_HOTPLUG_CPU */

static int __cpuinit timer_cpu_notify(struct notifier_block *self,
				unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;
	int err;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		err = init_timers_cpu(cpu);
		if (err < 0)
			return notifier_from_errno(err);
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		migrate_timers(cpu);
		break;
#endif
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata timers_nb = {
	.notifier_call	= timer_cpu_notify,
};

void __init init_timers(void)
{
	int err;

	/* ensure there are enough low bits for flags in timer->base pointer */
	BUILD_BUG_ON(__alignof__(struct tvec_base) & TIMER_FLAG_MASK);

	err = timer_cpu_notify(&timers_nb, (unsigned long)CPU_UP_PREPARE,
			       (void *)(long)smp_processor_id());
	init_timer_stats();

	BUG_ON(err != NOTIFY_OK);
	register_cpu_notifier(&timers_nb);
	open_softirq(TIMER_SOFTIRQ, run_timer_softirq);
}

/**
 * msleep - sleep safely even with waitqueue interruptions
 * @msecs: Time in milliseconds to sleep for
 */
void msleep(unsigned int msecs)
{
	unsigned long timeout = msecs_to_jiffies(msecs) + 1;

	while (timeout)
		timeout = schedule_timeout_uninterruptible(timeout);
}
EXPORT_SYMBOL(msleep);
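
/*
 * Illustrative sketch (editor's addition): msleep() suits delays of
 * roughly 20ms and up; for shorter sleeps usleep_range() (below) gives
 * the scheduler better wakeup coalescing. The helper name, hardware,
 * and delay are invented.
 */
static __maybe_unused void example_reset_settle(void)
{
	/* Let (hypothetical) hardware settle for 50ms after reset. */
	msleep(50);
}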

/**
 * msleep_interruptible - sleep waiting for signals
 * @msecs: Time in milliseconds to sleep for
 */
unsigned long msleep_interruptible(unsigned int msecs)
{
	unsigned long timeout = msecs_to_jiffies(msecs) + 1;

	while (timeout && !signal_pending(current))
		timeout = schedule_timeout_interruptible(timeout);
	return jiffies_to_msecs(timeout);
}
EXPORT_SYMBOL(msleep_interruptible);
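
/*
 * Illustrative sketch (editor's addition): a nonzero return value is
 * the time left in ms when a signal cut the sleep short. The helper
 * name and 500ms value are invented.
 */
static __maybe_unused int example_interruptible_wait(void)
{
	if (msleep_interruptible(500))
		return -EINTR;	/* woken early by a signal */
	return 0;		/* slept the full 500ms */
}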

static int __sched do_usleep_range(unsigned long min, unsigned long max)
{
	ktime_t kmin;
	unsigned long delta;

	kmin = ktime_set(0, min * NSEC_PER_USEC);
	delta = (max - min) * NSEC_PER_USEC;
	return schedule_hrtimeout_range(&kmin, delta, HRTIMER_MODE_REL);
}

/**
 * usleep_range - Drop-in replacement for udelay where wakeup is flexible
 * @min: Minimum time in usecs to sleep
 * @max: Maximum time in usecs to sleep
 */
void usleep_range(unsigned long min, unsigned long max)
{
	__set_current_state(TASK_UNINTERRUPTIBLE);
	do_usleep_range(min, max);
}
EXPORT_SYMBOL(usleep_range);
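
/*
 * Illustrative sketch (editor's addition): the slack between @min and
 * @max lets the hrtimer subsystem coalesce wakeups. The helper name
 * and the values are invented.
 */
static __maybe_unused void example_short_poll_delay(void)
{
	/* Sleep at least 100us, tolerating up to 200us. */
	usleep_range(100, 200);
}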