/*
 *  linux/kernel/timer.c
 *
 *  Kernel internal timers, basic process system calls
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-01-28  Modified by Finn Arne Gangstad to make timers scale better.
 *
 *  1997-09-10  Updated NTP code according to technical memorandum Jan '96
 *              "A Kernel Model for Precision Timekeeping" by Dave Mills
 *  1998-12-24  Fixed a xtime SMP race (we need the xtime_lock rw spinlock to
 *              serialize accesses to xtime/lost_ticks).
 *                              Copyright (C) 1998  Andrea Arcangeli
 *  1999-03-10  Improved NTP compatibility by Ulrich Windl
 *  2002-05-31  Move sys_sysinfo here and make its locking sane, Robert Love
 *  2000-10-05  Implemented scalable SMP per-CPU timer handling.
 *                              Copyright (C) 2000, 2001, 2002  Ingo Molnar
 *              Designed by David S. Miller, Alexey Kuznetsov and Ingo Molnar
 */

#include <linux/kernel_stat.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/pid_namespace.h>
#include <linux/notifier.h>
#include <linux/thread_info.h>
#include <linux/time.h>
#include <linux/jiffies.h>
#include <linux/posix-timers.h>
#include <linux/cpu.h>
#include <linux/syscalls.h>
#include <linux/delay.h>
#include <linux/tick.h>
#include <linux/kallsyms.h>
#include <linux/irq_work.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/div64.h>
#include <asm/timex.h>
#include <asm/io.h>

#define CREATE_TRACE_POINTS
#include <trace/events/timer.h>

u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES;

EXPORT_SYMBOL(jiffies_64);

/*
 * per-CPU timer vector definitions:
 */
#define TVN_BITS (CONFIG_BASE_SMALL ? 4 : 6)
#define TVR_BITS (CONFIG_BASE_SMALL ? 6 : 8)
#define TVN_SIZE (1 << TVN_BITS)
#define TVR_SIZE (1 << TVR_BITS)
#define TVN_MASK (TVN_SIZE - 1)
#define TVR_MASK (TVR_SIZE - 1)
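
/*
 * Illustrative note (not part of the original source): with
 * CONFIG_BASE_SMALL=0 this gives a 256-slot root vector (tv1) and four
 * 64-slot outer vectors (tv2..tv5), so the wheel can index expiries up to
 * 256 * 64^4 = 2^32 jiffies into the future; anything beyond that is
 * clamped to the last tv5 interval in internal_add_timer() below.
 */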

struct tvec {
        struct list_head vec[TVN_SIZE];
};

struct tvec_root {
        struct list_head vec[TVR_SIZE];
};

struct tvec_base {
        spinlock_t lock;
        struct timer_list *running_timer;
        unsigned long timer_jiffies;
        unsigned long next_timer;
        struct tvec_root tv1;
        struct tvec tv2;
        struct tvec tv3;
        struct tvec tv4;
        struct tvec tv5;
} ____cacheline_aligned;

struct tvec_base boot_tvec_bases;
EXPORT_SYMBOL(boot_tvec_bases);
static DEFINE_PER_CPU(struct tvec_base *, tvec_bases) = &boot_tvec_bases;

/* Functions below help us manage 'deferrable' flag */
static inline unsigned int tbase_get_deferrable(struct tvec_base *base)
{
        return ((unsigned int)(unsigned long)base & TBASE_DEFERRABLE_FLAG);
}

static inline struct tvec_base *tbase_get_base(struct tvec_base *base)
{
        return ((struct tvec_base *)((unsigned long)base & ~TBASE_DEFERRABLE_FLAG));
}

static inline void timer_set_deferrable(struct timer_list *timer)
{
        timer->base = TBASE_MAKE_DEFERRED(timer->base);
}

static inline void
timer_set_base(struct timer_list *timer, struct tvec_base *new_base)
{
        timer->base = (struct tvec_base *)((unsigned long)(new_base) |
                                      tbase_get_deferrable(timer->base));
}

static unsigned long round_jiffies_common(unsigned long j, int cpu,
                bool force_up)
{
        int rem;
        unsigned long original = j;

        /*
         * We don't want all cpus firing their timers at once hitting the
         * same lock or cachelines, so we skew each extra cpu with an extra
         * 3 jiffies. This 3 jiffies came originally from the mm/ code which
         * already did this.
         * The skew is done by adding 3*cpunr, then round, then subtract this
         * extra offset again.
         */
        j += cpu * 3;

        rem = j % HZ;

        /*
         * If the target jiffie is just after a whole second (which can happen
         * due to delays of the timer irq, long irq off times etc etc) then
         * we should round down to the whole second, not up. Use 1/4th second
         * as cutoff for this rounding as an extreme upper bound for this.
         * But never round down if @force_up is set.
         */
        if (rem < HZ/4 && !force_up) /* round down */
                j = j - rem;
        else /* round up */
                j = j - rem + HZ;

        /* now that we have rounded, subtract the extra skew again */
        j -= cpu * 3;

        if (j <= jiffies) /* rounding ate our timeout entirely; */
                return original;
        return j;
}
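
/*
 * Worked example (illustrative, not part of the original source): with
 * HZ=1000, cpu=0 and no forced round-up, j=5120 gives rem=120 < HZ/4, so
 * the expiry is rounded down to 5000; j=5400 gives rem=400 >= HZ/4, so it
 * is rounded up to 6000. On cpu=1 the same math runs on j+3 and the skew
 * is subtracted again at the end.
 */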

/**
 * __round_jiffies - function to round jiffies to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * __round_jiffies() rounds an absolute time in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The exact rounding is skewed for each processor to avoid all
 * processors firing at the exact same time, which could lead
 * to lock contention or spurious cache line bouncing.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long __round_jiffies(unsigned long j, int cpu)
{
        return round_jiffies_common(j, cpu, false);
}
EXPORT_SYMBOL_GPL(__round_jiffies);

/**
 * __round_jiffies_relative - function to round jiffies to a full second
 * @j: the time in (relative) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * __round_jiffies_relative() rounds a time delta in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The exact rounding is skewed for each processor to avoid all
 * processors firing at the exact same time, which could lead
 * to lock contention or spurious cache line bouncing.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long __round_jiffies_relative(unsigned long j, int cpu)
{
        unsigned long j0 = jiffies;

        /* Use j0 because jiffies might change while we run */
        return round_jiffies_common(j + j0, cpu, false) - j0;
}
EXPORT_SYMBOL_GPL(__round_jiffies_relative);

/**
 * round_jiffies - function to round jiffies to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 *
 * round_jiffies() rounds an absolute time in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long round_jiffies(unsigned long j)
{
        return round_jiffies_common(j, raw_smp_processor_id(), false);
}
EXPORT_SYMBOL_GPL(round_jiffies);

/**
 * round_jiffies_relative - function to round jiffies to a full second
 * @j: the time in (relative) jiffies that should be rounded
 *
 * round_jiffies_relative() rounds a time delta in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long round_jiffies_relative(unsigned long j)
{
        return __round_jiffies_relative(j, raw_smp_processor_id());
}
EXPORT_SYMBOL_GPL(round_jiffies_relative);
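
/*
 * Usage sketch (illustrative, not part of the original source): a timer
 * that only needs roughly one-second resolution can batch its wakeup with
 * other such timers across the system:
 *
 *      mod_timer(&my_timer, round_jiffies(jiffies + 5 * HZ));
 *
 * (my_timer is a hypothetical, already initialized struct timer_list.)
 */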

/**
 * __round_jiffies_up - function to round jiffies up to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * This is the same as __round_jiffies() except that it will never
 * round down. This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long __round_jiffies_up(unsigned long j, int cpu)
{
        return round_jiffies_common(j, cpu, true);
}
EXPORT_SYMBOL_GPL(__round_jiffies_up);

/**
 * __round_jiffies_up_relative - function to round jiffies up to a full second
 * @j: the time in (relative) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * This is the same as __round_jiffies_relative() except that it will never
 * round down. This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long __round_jiffies_up_relative(unsigned long j, int cpu)
{
        unsigned long j0 = jiffies;

        /* Use j0 because jiffies might change while we run */
        return round_jiffies_common(j + j0, cpu, true) - j0;
}
EXPORT_SYMBOL_GPL(__round_jiffies_up_relative);

/**
 * round_jiffies_up - function to round jiffies up to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 *
 * This is the same as round_jiffies() except that it will never
 * round down. This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long round_jiffies_up(unsigned long j)
{
        return round_jiffies_common(j, raw_smp_processor_id(), true);
}
EXPORT_SYMBOL_GPL(round_jiffies_up);

/**
 * round_jiffies_up_relative - function to round jiffies up to a full second
 * @j: the time in (relative) jiffies that should be rounded
 *
 * This is the same as round_jiffies_relative() except that it will never
 * round down. This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long round_jiffies_up_relative(unsigned long j)
{
        return __round_jiffies_up_relative(j, raw_smp_processor_id());
}
EXPORT_SYMBOL_GPL(round_jiffies_up_relative);

/**
 * set_timer_slack - set the allowed slack for a timer
 * @timer: the timer to be modified
 * @slack_hz: the amount of time (in jiffies) allowed for rounding
 *
 * Set the amount of time, in jiffies, that a certain timer has
 * in terms of slack. By setting this value, the timer subsystem
 * will schedule the actual timer somewhere between
 * the time mod_timer() asks for, and that time plus the slack.
 *
 * By setting the slack to -1, a percentage of the delay is used
 * instead.
 */
void set_timer_slack(struct timer_list *timer, int slack_hz)
{
        timer->slack = slack_hz;
}
EXPORT_SYMBOL_GPL(set_timer_slack);
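
/*
 * Usage sketch (illustrative, not part of the original source): allow the
 * timer core to fire a hypothetical polling timer anywhere within half a
 * second after the requested expiry:
 *
 *      set_timer_slack(&poll_timer, HZ / 2);
 *      mod_timer(&poll_timer, jiffies + 10 * HZ);
 */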

static void internal_add_timer(struct tvec_base *base, struct timer_list *timer)
{
        unsigned long expires = timer->expires;
        unsigned long idx = expires - base->timer_jiffies;
        struct list_head *vec;

        if (idx < TVR_SIZE) {
                int i = expires & TVR_MASK;
                vec = base->tv1.vec + i;
        } else if (idx < 1 << (TVR_BITS + TVN_BITS)) {
                int i = (expires >> TVR_BITS) & TVN_MASK;
                vec = base->tv2.vec + i;
        } else if (idx < 1 << (TVR_BITS + 2 * TVN_BITS)) {
                int i = (expires >> (TVR_BITS + TVN_BITS)) & TVN_MASK;
                vec = base->tv3.vec + i;
        } else if (idx < 1 << (TVR_BITS + 3 * TVN_BITS)) {
                int i = (expires >> (TVR_BITS + 2 * TVN_BITS)) & TVN_MASK;
                vec = base->tv4.vec + i;
        } else if ((signed long) idx < 0) {
                /*
                 * Can happen if you add a timer with expires == jiffies,
                 * or you set a timer to go off in the past
                 */
                vec = base->tv1.vec + (base->timer_jiffies & TVR_MASK);
        } else {
                int i;
                /* If the timeout is larger than 0xffffffff on 64-bit
                 * architectures then we use the maximum timeout:
                 */
                if (idx > 0xffffffffUL) {
                        idx = 0xffffffffUL;
                        expires = idx + base->timer_jiffies;
                }
                i = (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK;
                vec = base->tv5.vec + i;
        }
        /*
         * Timers are FIFO:
         */
        list_add_tail(&timer->entry, vec);
}
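
/*
 * Illustrative example (not part of the original source), assuming
 * CONFIG_BASE_SMALL=0 (TVR_BITS=8, TVN_BITS=6): a timer whose expiry lies
 * 1000 jiffies past base->timer_jiffies has idx = 1000, which is >= 256
 * and < 1 << 14, so it lands in tv2 at slot (expires >> 8) & 63. A later
 * cascade of that tv2 slot re-hashes it into its precise tv1 slot.
 */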

#ifdef CONFIG_TIMER_STATS
void __timer_stats_timer_set_start_info(struct timer_list *timer, void *addr)
{
        if (timer->start_site)
                return;

        timer->start_site = addr;
        memcpy(timer->start_comm, current->comm, TASK_COMM_LEN);
        timer->start_pid = current->pid;
}

static void timer_stats_account_timer(struct timer_list *timer)
{
        unsigned int flag = 0;

        if (likely(!timer->start_site))
                return;
        if (unlikely(tbase_get_deferrable(timer->base)))
                flag |= TIMER_STATS_FLAG_DEFERRABLE;

        timer_stats_update_stats(timer, timer->start_pid, timer->start_site,
                                 timer->function, timer->start_comm, flag);
}

#else
static void timer_stats_account_timer(struct timer_list *timer) {}
#endif

#ifdef CONFIG_DEBUG_OBJECTS_TIMERS

static struct debug_obj_descr timer_debug_descr;

static void *timer_debug_hint(void *addr)
{
        return ((struct timer_list *) addr)->function;
}

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static int timer_fixup_init(void *addr, enum debug_obj_state state)
{
        struct timer_list *timer = addr;

        switch (state) {
        case ODEBUG_STATE_ACTIVE:
                del_timer_sync(timer);
                debug_object_init(timer, &timer_debug_descr);
                return 1;
        default:
                return 0;
        }
}

/* Stub timer callback for improperly used timers. */
static void stub_timer(unsigned long data)
{
        WARN_ON(1);
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown object is activated (might be a statically initialized object)
 */
static int timer_fixup_activate(void *addr, enum debug_obj_state state)
{
        struct timer_list *timer = addr;

        switch (state) {
        case ODEBUG_STATE_NOTAVAILABLE:
                /*
                 * This is not really a fixup. The timer was
                 * statically initialized. We just make sure that it
                 * is tracked in the object tracker.
                 */
                if (timer->entry.next == NULL &&
                    timer->entry.prev == TIMER_ENTRY_STATIC) {
                        debug_object_init(timer, &timer_debug_descr);
                        debug_object_activate(timer, &timer_debug_descr);
                        return 0;
                } else {
                        setup_timer(timer, stub_timer, 0);
                        return 1;
                }
                return 0;

        case ODEBUG_STATE_ACTIVE:
                WARN_ON(1);

        default:
                return 0;
        }
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static int timer_fixup_free(void *addr, enum debug_obj_state state)
{
        struct timer_list *timer = addr;

        switch (state) {
        case ODEBUG_STATE_ACTIVE:
                del_timer_sync(timer);
                debug_object_free(timer, &timer_debug_descr);
                return 1;
        default:
                return 0;
        }
}

/*
 * fixup_assert_init is called when:
 * - an untracked/uninit-ed object is found
 */
static int timer_fixup_assert_init(void *addr, enum debug_obj_state state)
{
        struct timer_list *timer = addr;

        switch (state) {
        case ODEBUG_STATE_NOTAVAILABLE:
                if (timer->entry.prev == TIMER_ENTRY_STATIC) {
                        /*
                         * This is not really a fixup. The timer was
                         * statically initialized. We just make sure that it
                         * is tracked in the object tracker.
                         */
                        debug_object_init(timer, &timer_debug_descr);
                        return 0;
                } else {
                        setup_timer(timer, stub_timer, 0);
                        return 1;
                }
        default:
                return 0;
        }
}

static struct debug_obj_descr timer_debug_descr = {
        .name                   = "timer_list",
        .debug_hint             = timer_debug_hint,
        .fixup_init             = timer_fixup_init,
        .fixup_activate         = timer_fixup_activate,
        .fixup_free             = timer_fixup_free,
        .fixup_assert_init      = timer_fixup_assert_init,
};

static inline void debug_timer_init(struct timer_list *timer)
{
        debug_object_init(timer, &timer_debug_descr);
}

static inline void debug_timer_activate(struct timer_list *timer)
{
        debug_object_activate(timer, &timer_debug_descr);
}

static inline void debug_timer_deactivate(struct timer_list *timer)
{
        debug_object_deactivate(timer, &timer_debug_descr);
}

static inline void debug_timer_free(struct timer_list *timer)
{
        debug_object_free(timer, &timer_debug_descr);
}

static inline void debug_timer_assert_init(struct timer_list *timer)
{
        debug_object_assert_init(timer, &timer_debug_descr);
}

static void __init_timer(struct timer_list *timer,
                         const char *name,
                         struct lock_class_key *key);

void init_timer_on_stack_key(struct timer_list *timer,
                             const char *name,
                             struct lock_class_key *key)
{
        debug_object_init_on_stack(timer, &timer_debug_descr);
        __init_timer(timer, name, key);
}
EXPORT_SYMBOL_GPL(init_timer_on_stack_key);

void destroy_timer_on_stack(struct timer_list *timer)
{
        debug_object_free(timer, &timer_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_timer_on_stack);

#else
static inline void debug_timer_init(struct timer_list *timer) { }
static inline void debug_timer_activate(struct timer_list *timer) { }
static inline void debug_timer_deactivate(struct timer_list *timer) { }
static inline void debug_timer_assert_init(struct timer_list *timer) { }
#endif

static inline void debug_init(struct timer_list *timer)
{
        debug_timer_init(timer);
        trace_timer_init(timer);
}

static inline void
debug_activate(struct timer_list *timer, unsigned long expires)
{
        debug_timer_activate(timer);
        trace_timer_start(timer, expires);
}

static inline void debug_deactivate(struct timer_list *timer)
{
        debug_timer_deactivate(timer);
        trace_timer_cancel(timer);
}

static inline void debug_assert_init(struct timer_list *timer)
{
        debug_timer_assert_init(timer);
}

static void __init_timer(struct timer_list *timer,
                         const char *name,
                         struct lock_class_key *key)
{
        timer->entry.next = NULL;
        timer->base = __raw_get_cpu_var(tvec_bases);
        timer->slack = -1;
#ifdef CONFIG_TIMER_STATS
        timer->start_site = NULL;
        timer->start_pid = -1;
        memset(timer->start_comm, 0, TASK_COMM_LEN);
#endif
        lockdep_init_map(&timer->lockdep_map, name, key, 0);
}

void setup_deferrable_timer_on_stack_key(struct timer_list *timer,
                                         const char *name,
                                         struct lock_class_key *key,
                                         void (*function)(unsigned long),
                                         unsigned long data)
{
        timer->function = function;
        timer->data = data;
        init_timer_on_stack_key(timer, name, key);
        timer_set_deferrable(timer);
}
EXPORT_SYMBOL_GPL(setup_deferrable_timer_on_stack_key);

/**
 * init_timer_key - initialize a timer
 * @timer: the timer to be initialized
 * @name: name of the timer
 * @key: lockdep class key of the fake lock used for tracking timer
 *       sync lock dependencies
 *
 * init_timer_key() must be done to a timer prior to calling *any* of the
 * other timer functions.
 */
void init_timer_key(struct timer_list *timer,
                    const char *name,
                    struct lock_class_key *key)
{
        debug_init(timer);
        __init_timer(timer, name, key);
}
EXPORT_SYMBOL(init_timer_key);
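
/*
 * Usage sketch (illustrative, not part of the original source): most code
 * initializes timers via the init_timer()/setup_timer() wrappers, which
 * supply this function with a per-callsite lockdep key:
 *
 *      struct timer_list my_timer;             (hypothetical)
 *      setup_timer(&my_timer, my_callback, (unsigned long)my_data);
 *      my_timer.expires = jiffies + HZ;
 *      add_timer(&my_timer);
 */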

void init_timer_deferrable_key(struct timer_list *timer,
                               const char *name,
                               struct lock_class_key *key)
{
        init_timer_key(timer, name, key);
        timer_set_deferrable(timer);
}
EXPORT_SYMBOL(init_timer_deferrable_key);

static inline void detach_timer(struct timer_list *timer,
                                int clear_pending)
{
        struct list_head *entry = &timer->entry;

        debug_deactivate(timer);

        __list_del(entry->prev, entry->next);
        if (clear_pending)
                entry->next = NULL;
        entry->prev = LIST_POISON2;
}

/*
 * We are using hashed locking: holding per_cpu(tvec_bases).lock
 * means that all timers which are tied to this base via timer->base are
 * locked, and the base itself is locked too.
 *
 * So __run_timers/migrate_timers can safely modify all timers which could
 * be found on ->tvX lists.
 *
 * When the timer's base is locked, and the timer removed from list, it is
 * possible to set timer->base = NULL and drop the lock: the timer remains
 * locked.
 */
static struct tvec_base *lock_timer_base(struct timer_list *timer,
                                         unsigned long *flags)
        __acquires(timer->base->lock)
{
        struct tvec_base *base;

        for (;;) {
                struct tvec_base *prelock_base = timer->base;
                base = tbase_get_base(prelock_base);
                if (likely(base != NULL)) {
                        spin_lock_irqsave(&base->lock, *flags);
                        if (likely(prelock_base == timer->base))
                                return base;
                        /* The timer has migrated to another CPU */
                        spin_unlock_irqrestore(&base->lock, *flags);
                }
                cpu_relax();
        }
}

static inline int
__mod_timer(struct timer_list *timer, unsigned long expires,
            bool pending_only, int pinned)
{
        struct tvec_base *base, *new_base;
        unsigned long flags;
        int ret = 0, cpu;

        timer_stats_timer_set_start_info(timer);
        BUG_ON(!timer->function);

        base = lock_timer_base(timer, &flags);

        if (timer_pending(timer)) {
                detach_timer(timer, 0);
                if (timer->expires == base->next_timer &&
                    !tbase_get_deferrable(timer->base))
                        base->next_timer = base->timer_jiffies;
                ret = 1;
        } else {
                if (pending_only)
                        goto out_unlock;
        }

        debug_activate(timer, expires);

        cpu = smp_processor_id();

#if defined(CONFIG_NO_HZ) && defined(CONFIG_SMP)
        if (!pinned && get_sysctl_timer_migration() && idle_cpu(cpu))
                cpu = get_nohz_timer_target();
#endif
        new_base = per_cpu(tvec_bases, cpu);

        if (base != new_base) {
                /*
                 * We are trying to schedule the timer on the local CPU.
                 * However we can't change timer's base while it is running,
                 * otherwise del_timer_sync() can't detect that the timer's
                 * handler has not yet finished. This also guarantees that
                 * the timer is serialized wrt itself.
                 */
                if (likely(base->running_timer != timer)) {
                        /* See the comment in lock_timer_base() */
                        timer_set_base(timer, NULL);
                        spin_unlock(&base->lock);
                        base = new_base;
                        spin_lock(&base->lock);
                        timer_set_base(timer, base);
                }
        }

        timer->expires = expires;
        if (time_before(timer->expires, base->next_timer) &&
            !tbase_get_deferrable(timer->base))
                base->next_timer = timer->expires;
        internal_add_timer(base, timer);

out_unlock:
        spin_unlock_irqrestore(&base->lock, flags);

        return ret;
}

/**
 * mod_timer_pending - modify a pending timer's timeout
 * @timer: the pending timer to be modified
 * @expires: new timeout in jiffies
 *
 * mod_timer_pending() is the same for pending timers as mod_timer(),
 * but will not re-activate and modify already deleted timers.
 *
 * It is useful for unserialized use of timers.
 */
int mod_timer_pending(struct timer_list *timer, unsigned long expires)
{
        return __mod_timer(timer, expires, true, TIMER_NOT_PINNED);
}
EXPORT_SYMBOL(mod_timer_pending);

/*
 * Decide where to put the timer while taking the slack into account
 *
 * Algorithm:
 *   1) calculate the maximum (absolute) time
 *   2) calculate the highest bit where the expires and new max are different
 *   3) use this bit to make a mask
 *   4) use the bitmask to round down the maximum time, so that all last
 *      bits are zeros
 */
static inline
unsigned long apply_slack(struct timer_list *timer, unsigned long expires)
{
        unsigned long expires_limit, mask;
        int bit;

        if (timer->slack >= 0) {
                expires_limit = expires + timer->slack;
        } else {
                long delta = expires - jiffies;

                if (delta < 256)
                        return expires;

                expires_limit = expires + delta / 256;
        }
        mask = expires ^ expires_limit;
        if (mask == 0)
                return expires;

        bit = find_last_bit(&mask, BITS_PER_LONG);

        mask = (1 << bit) - 1;

        expires_limit = expires_limit & ~(mask);

        return expires_limit;
}
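
/*
 * Worked example (illustrative, not part of the original source): with
 * timer->slack == -1, jiffies == 0 and expires == 1024, delta is 1024 and
 * expires_limit becomes 1028. expires ^ expires_limit == 4, so bit == 2
 * and mask == 3; rounding expires_limit down to a multiple of 4 leaves
 * 1028. The timer may thus fire at 1028 instead of 1024, letting timers
 * with nearby expiries share wheel slots.
 */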

/**
 * mod_timer - modify a timer's timeout
 * @timer: the timer to be modified
 * @expires: new timeout in jiffies
 *
 * mod_timer() is a more efficient way to update the expire field of an
 * active timer (if the timer is inactive it will be activated)
 *
 * mod_timer(timer, expires) is equivalent to:
 *
 *     del_timer(timer); timer->expires = expires; add_timer(timer);
 *
 * Note that if there are multiple unserialized concurrent users of the
 * same timer, then mod_timer() is the only safe way to modify the timeout,
 * since add_timer() cannot modify an already running timer.
 *
 * The function returns whether it has modified a pending timer or not.
 * (ie. mod_timer() of an inactive timer returns 0, mod_timer() of an
 * active timer returns 1.)
 */
int mod_timer(struct timer_list *timer, unsigned long expires)
{
        expires = apply_slack(timer, expires);

        /*
         * This is a common optimization triggered by the
         * networking code - if the timer is re-modified
         * to be the same thing then just return:
         */
        if (timer_pending(timer) && timer->expires == expires)
                return 1;

        return __mod_timer(timer, expires, false, TIMER_NOT_PINNED);
}
EXPORT_SYMBOL(mod_timer);
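
/*
 * Usage sketch (illustrative, not part of the original source): a periodic
 * timer commonly re-arms itself from its own callback:
 *
 *      static void my_callback(unsigned long data)     (hypothetical)
 *      {
 *              do_work(data);
 *              mod_timer(&my_timer, jiffies + HZ);
 *      }
 */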

/**
 * mod_timer_pinned - modify a timer's timeout
 * @timer: the timer to be modified
 * @expires: new timeout in jiffies
 *
 * mod_timer_pinned() is a way to update the expire field of an
 * active timer (if the timer is inactive it will be activated)
 * and not allow the timer to be migrated to a different CPU.
 *
 * mod_timer_pinned(timer, expires) is equivalent to:
 *
 *     del_timer(timer); timer->expires = expires; add_timer(timer);
 */
int mod_timer_pinned(struct timer_list *timer, unsigned long expires)
{
        if (timer->expires == expires && timer_pending(timer))
                return 1;

        return __mod_timer(timer, expires, false, TIMER_PINNED);
}
EXPORT_SYMBOL(mod_timer_pinned);

/**
 * add_timer - start a timer
 * @timer: the timer to be added
 *
 * The kernel will do a ->function(->data) callback from the
 * timer interrupt at the ->expires point in the future. The
 * current time is 'jiffies'.
 *
 * The timer's ->expires, ->function (and if the handler uses it, ->data)
 * fields must be set prior to calling this function.
 *
 * Timers with an ->expires field in the past will be executed in the next
 * timer tick.
 */
void add_timer(struct timer_list *timer)
{
        BUG_ON(timer_pending(timer));
        mod_timer(timer, timer->expires);
}
EXPORT_SYMBOL(add_timer);
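
/*
 * Note (illustrative, not part of the original source): add_timer() may
 * only be called on a timer that is not already pending, otherwise the
 * BUG_ON() above fires. To (re)arm a timer that might be pending, use
 * mod_timer(), which handles both cases and returns whether the timer
 * was pending.
 */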

/**
 * add_timer_on - start a timer on a particular CPU
 * @timer: the timer to be added
 * @cpu: the CPU to start it on
 *
 * This is not very scalable on SMP. Double adds are not possible.
 */
void add_timer_on(struct timer_list *timer, int cpu)
{
        struct tvec_base *base = per_cpu(tvec_bases, cpu);
        unsigned long flags;

        timer_stats_timer_set_start_info(timer);
        BUG_ON(timer_pending(timer) || !timer->function);
        spin_lock_irqsave(&base->lock, flags);
        timer_set_base(timer, base);
        debug_activate(timer, timer->expires);
        if (time_before(timer->expires, base->next_timer) &&
            !tbase_get_deferrable(timer->base))
                base->next_timer = timer->expires;
        internal_add_timer(base, timer);
        /*
         * Check whether the other CPU is idle and needs to be
         * triggered to reevaluate the timer wheel when nohz is
         * active. We are protected against the other CPU fiddling
         * with the timer by holding the timer base lock. This also
         * makes sure that a CPU on the way to idle can not evaluate
         * the timer wheel.
         */
        wake_up_idle_cpu(cpu);
        spin_unlock_irqrestore(&base->lock, flags);
}
EXPORT_SYMBOL_GPL(add_timer_on);

/**
 * del_timer - deactivate a timer.
 * @timer: the timer to be deactivated
 *
 * del_timer() deactivates a timer - this works on both active and inactive
 * timers.
 *
 * The function returns whether it has deactivated a pending timer or not.
 * (ie. del_timer() of an inactive timer returns 0, del_timer() of an
 * active timer returns 1.)
 */
int del_timer(struct timer_list *timer)
{
        struct tvec_base *base;
        unsigned long flags;
        int ret = 0;

        debug_assert_init(timer);

        timer_stats_timer_clear_start_info(timer);
        if (timer_pending(timer)) {
                base = lock_timer_base(timer, &flags);
                if (timer_pending(timer)) {
                        detach_timer(timer, 1);
                        if (timer->expires == base->next_timer &&
                            !tbase_get_deferrable(timer->base))
                                base->next_timer = base->timer_jiffies;
                        ret = 1;
                }
                spin_unlock_irqrestore(&base->lock, flags);
        }

        return ret;
}
EXPORT_SYMBOL(del_timer);

/**
 * try_to_del_timer_sync - Try to deactivate a timer
 * @timer: timer to delete
 *
 * This function tries to deactivate a timer. Upon successful (ret >= 0)
 * exit the timer is not queued and the handler is not running on any CPU.
 */
int try_to_del_timer_sync(struct timer_list *timer)
{
        struct tvec_base *base;
        unsigned long flags;
        int ret = -1;

        debug_assert_init(timer);

        base = lock_timer_base(timer, &flags);

        if (base->running_timer == timer)
                goto out;

        timer_stats_timer_clear_start_info(timer);
        ret = 0;
        if (timer_pending(timer)) {
                detach_timer(timer, 1);
                if (timer->expires == base->next_timer &&
                    !tbase_get_deferrable(timer->base))
                        base->next_timer = base->timer_jiffies;
                ret = 1;
        }
out:
        spin_unlock_irqrestore(&base->lock, flags);

        return ret;
}
EXPORT_SYMBOL(try_to_del_timer_sync);

#ifdef CONFIG_SMP
/**
 * del_timer_sync - deactivate a timer and wait for the handler to finish.
 * @timer: the timer to be deactivated
 *
 * This function only differs from del_timer() on SMP: besides deactivating
 * the timer it also makes sure the handler has finished executing on other
 * CPUs.
 *
 * Synchronization rules: Callers must prevent restarting of the timer,
 * otherwise this function is meaningless. It must not be called from
 * interrupt contexts. The caller must not hold locks which would prevent
 * completion of the timer's handler. The timer's handler must not call
 * add_timer_on(). Upon exit the timer is not queued and the handler is
 * not running on any CPU.
 *
 * Note: You must not hold locks that are held in interrupt context
 *   while calling this function. Even if the lock has nothing to do
 *   with the timer in question. Here's why:
 *
 *    CPU0                             CPU1
 *    ----                             ----
 *                                     <SOFTIRQ>
 *                                     call_timer_fn();
 *                                       base->running_timer = mytimer;
 *    spin_lock_irq(somelock);
 *                                     <IRQ>
 *                                     spin_lock(somelock);
 *    del_timer_sync(mytimer);
 *    while (base->running_timer == mytimer);
 *
 * Now del_timer_sync() will never return and never release somelock.
 * The interrupt on the other CPU is waiting to grab somelock but
 * it has interrupted the softirq that CPU0 is waiting to finish.
 *
 * The function returns whether it has deactivated a pending timer or not.
 */
int del_timer_sync(struct timer_list *timer)
{
#ifdef CONFIG_LOCKDEP
        unsigned long flags;

        /*
         * If lockdep gives a backtrace here, please reference
         * the synchronization rules above.
         */
        local_irq_save(flags);
        lock_map_acquire(&timer->lockdep_map);
        lock_map_release(&timer->lockdep_map);
        local_irq_restore(flags);
#endif
        /*
         * don't use it in hardirq context, because it
         * could lead to deadlock.
         */
        WARN_ON(in_irq());
        for (;;) {
                int ret = try_to_del_timer_sync(timer);
                if (ret >= 0)
                        return ret;
                cpu_relax();
        }
}
EXPORT_SYMBOL(del_timer_sync);
#endif
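
/*
 * Usage sketch (illustrative, not part of the original source): a typical
 * driver teardown first prevents the callback from re-arming itself, then
 * waits out any running handler:
 *
 *      my_dev->shutting_down = true;   (hypothetical flag the callback
 *                                       checks before re-arming)
 *      del_timer_sync(&my_dev->timer);
 */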

static int cascade(struct tvec_base *base, struct tvec *tv, int index)
{
        /* cascade all the timers from tv up one level */
        struct timer_list *timer, *tmp;
        struct list_head tv_list;

        list_replace_init(tv->vec + index, &tv_list);

        /*
         * We are removing _all_ timers from the list, so we
         * don't have to detach them individually.
         */
        list_for_each_entry_safe(timer, tmp, &tv_list, entry) {
                BUG_ON(tbase_get_base(timer->base) != base);
                internal_add_timer(base, timer);
        }

        return index;
}

static void call_timer_fn(struct timer_list *timer, void (*fn)(unsigned long),
                          unsigned long data)
{
        int preempt_count = preempt_count();

#ifdef CONFIG_LOCKDEP
        /*
         * It is permissible to free the timer from inside the
         * function that is called from it, this we need to take into
         * account for lockdep too. To avoid bogus "held lock freed"
         * warnings as well as problems when looking into
         * timer->lockdep_map, make a copy and use that here.
         */
        struct lockdep_map lockdep_map = timer->lockdep_map;
#endif
        /*
         * Couple the lock chain with the lock chain at
         * del_timer_sync() by acquiring the lock_map around the fn()
         * call here and in del_timer_sync().
         */
        lock_map_acquire(&lockdep_map);

        trace_timer_expire_entry(timer);
        fn(data);
        trace_timer_expire_exit(timer);

        lock_map_release(&lockdep_map);

        if (preempt_count != preempt_count()) {
                WARN_ONCE(1, "timer: %pF preempt leak: %08x -> %08x\n",
                          fn, preempt_count, preempt_count());
                /*
                 * Restore the preempt count. That gives us a decent
                 * chance to survive and extract information. If the
                 * callback kept a lock held, bad luck, but not worse
                 * than the BUG() we had.
                 */
                preempt_count() = preempt_count;
        }
}

#define INDEX(N) ((base->timer_jiffies >> (TVR_BITS + (N) * TVN_BITS)) & TVN_MASK)

/**
 * __run_timers - run all expired timers (if any) on this CPU.
 * @base: the timer vector to be processed.
 *
 * This function cascades all vectors and executes all expired timer
 * vectors.
 */
static inline void __run_timers(struct tvec_base *base)
{
        struct timer_list *timer;

        spin_lock_irq(&base->lock);
        while (time_after_eq(jiffies, base->timer_jiffies)) {
                struct list_head work_list;
                struct list_head *head = &work_list;
                int index = base->timer_jiffies & TVR_MASK;

                /*
                 * Cascade timers:
                 */
                if (!index &&
                        (!cascade(base, &base->tv2, INDEX(0))) &&
                                (!cascade(base, &base->tv3, INDEX(1))) &&
                                        !cascade(base, &base->tv4, INDEX(2)))
                        cascade(base, &base->tv5, INDEX(3));
                ++base->timer_jiffies;
                list_replace_init(base->tv1.vec + index, &work_list);
                while (!list_empty(head)) {
                        void (*fn)(unsigned long);
                        unsigned long data;

                        timer = list_first_entry(head, struct timer_list, entry);
                        fn = timer->function;
                        data = timer->data;

                        timer_stats_account_timer(timer);

                        base->running_timer = timer;
                        detach_timer(timer, 1);

                        spin_unlock_irq(&base->lock);
                        call_timer_fn(timer, fn, data);
                        spin_lock_irq(&base->lock);
                }
        }
        base->running_timer = NULL;
        spin_unlock_irq(&base->lock);
}
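
/*
 * Illustrative walkthrough (not part of the original source): whenever the
 * low TVR_BITS of base->timer_jiffies wrap to zero, slot INDEX(0) of tv2
 * is emptied and its timers are re-hashed via internal_add_timer(), now
 * landing in precise tv1 slots; the same happens one level up each time an
 * outer index wraps, so a far-future timer migrates tv5 -> tv4 -> tv3 ->
 * tv2 -> tv1 before it finally runs.
 */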

#ifdef CONFIG_NO_HZ
/*
 * Find out when the next timer event is due to happen. This
 * is used on S/390 to stop all activity when a CPU is idle.
 * This function needs to be called with interrupts disabled.
 */
static unsigned long __next_timer_interrupt(struct tvec_base *base)
{
        unsigned long timer_jiffies = base->timer_jiffies;
        unsigned long expires = timer_jiffies + NEXT_TIMER_MAX_DELTA;
        int index, slot, array, found = 0;
        struct timer_list *nte;
        struct tvec *varray[4];

        /* Look for timer events in tv1. */
        index = slot = timer_jiffies & TVR_MASK;
        do {
                list_for_each_entry(nte, base->tv1.vec + slot, entry) {
                        if (tbase_get_deferrable(nte->base))
                                continue;

                        found = 1;
                        expires = nte->expires;
                        /* Look at the cascade bucket(s)? */
                        if (!index || slot < index)
                                goto cascade;
                        return expires;
                }
                slot = (slot + 1) & TVR_MASK;
        } while (slot != index);

cascade:
        /* Calculate the next cascade event */
        if (index)
                timer_jiffies += TVR_SIZE - index;
        timer_jiffies >>= TVR_BITS;

        /* Check tv2-tv5. */
        varray[0] = &base->tv2;
        varray[1] = &base->tv3;
        varray[2] = &base->tv4;
        varray[3] = &base->tv5;

        for (array = 0; array < 4; array++) {
                struct tvec *varp = varray[array];

                index = slot = timer_jiffies & TVN_MASK;
                do {
                        list_for_each_entry(nte, varp->vec + slot, entry) {
                                if (tbase_get_deferrable(nte->base))
                                        continue;

                                found = 1;
                                if (time_before(nte->expires, expires))
                                        expires = nte->expires;
                        }
                        /*
                         * Do we still search for the first timer or are
                         * we looking up the cascade buckets ?
                         */
                        if (found) {
                                /* Look at the cascade bucket(s)? */
                                if (!index || slot < index)
                                        break;
                                return expires;
                        }
                        slot = (slot + 1) & TVN_MASK;
                } while (slot != index);

                if (index)
                        timer_jiffies += TVN_SIZE - index;
                timer_jiffies >>= TVN_BITS;
        }
        return expires;
}

/*
 * Check if the next hrtimer event is before the next timer wheel
 * event:
 */
static unsigned long cmp_next_hrtimer_event(unsigned long now,
                                            unsigned long expires)
{
        ktime_t hr_delta = hrtimer_get_next_event();
        struct timespec tsdelta;
        unsigned long delta;

        if (hr_delta.tv64 == KTIME_MAX)
                return expires;

        /*
         * Expired timer available, let it expire in the next tick
         */
        if (hr_delta.tv64 <= 0)
                return now + 1;

        tsdelta = ktime_to_timespec(hr_delta);
        delta = timespec_to_jiffies(&tsdelta);

        /*
         * Limit the delta to the max value, which is checked in
         * tick_nohz_stop_sched_tick():
         */
        if (delta > NEXT_TIMER_MAX_DELTA)
                delta = NEXT_TIMER_MAX_DELTA;

        /*
         * Take rounding errors into account and make sure that it
         * expires in the next tick. Otherwise we go into an endless
         * ping pong due to tick_nohz_stop_sched_tick() retriggering
         * the timer softirq
         */
        if (delta < 1)
                delta = 1;
        now += delta;
        if (time_before(now, expires))
                return now;
        return expires;
}

/**
 * get_next_timer_interrupt - return the jiffy of the next pending timer
 * @now: current time (in jiffies)
 */
unsigned long get_next_timer_interrupt(unsigned long now)
{
        struct tvec_base *base = __this_cpu_read(tvec_bases);
        unsigned long expires;

        /*
         * Pretend that there is no timer pending if the cpu is offline.
         * Possible pending timers will be migrated later to an active cpu.
         */
        if (cpu_is_offline(smp_processor_id()))
                return now + NEXT_TIMER_MAX_DELTA;
        spin_lock(&base->lock);
        if (time_before_eq(base->next_timer, base->timer_jiffies))
                base->next_timer = __next_timer_interrupt(base);
        expires = base->next_timer;
        spin_unlock(&base->lock);

        if (time_before_eq(expires, now))
                return now;

        return cmp_next_hrtimer_event(now, expires);
}
#endif

/*
 * Called from the timer interrupt handler to charge one tick to the current
 * process. user_tick is 1 if the tick is user time, 0 for system.
 */
void update_process_times(int user_tick)
{
        struct task_struct *p = current;
        int cpu = smp_processor_id();

        /* Note: this timer irq context must be accounted for as well. */
        account_process_tick(p, user_tick);
        run_local_timers();
        rcu_check_callbacks(cpu, user_tick);
        printk_tick();
#ifdef CONFIG_IRQ_WORK
        if (in_irq())
                irq_work_run();
#endif
        scheduler_tick();
        run_posix_cpu_timers(p);
}

/*
 * This function runs timers and the timer-tq in bottom half context.
 */
static void run_timer_softirq(struct softirq_action *h)
{
        struct tvec_base *base = __this_cpu_read(tvec_bases);

        hrtimer_run_pending();

        if (time_after_eq(jiffies, base->timer_jiffies))
                __run_timers(base);
}

/*
 * Called by the local, per-CPU timer interrupt on SMP.
 */
void run_local_timers(void)
{
        hrtimer_run_queues();
        raise_softirq(TIMER_SOFTIRQ);
}

#ifdef __ARCH_WANT_SYS_ALARM

/*
 * For backwards compatibility? This can be done in libc so Alpha
 * and all newer ports shouldn't need it.
 */
SYSCALL_DEFINE1(alarm, unsigned int, seconds)
{
        return alarm_setitimer(seconds);
}

#endif

#ifndef __alpha__

/*
 * The Alpha uses getxpid, getxuid, and getxgid instead. Maybe this
 * should be moved into arch/i386 instead?
 */

/**
 * sys_getpid - return the thread group id of the current process
 *
 * Note, despite the name, this returns the tgid not the pid. The tgid and
 * the pid are identical unless CLONE_THREAD was specified on clone() in
 * which case the tgid is the same in all threads of the same group.
 *
 * This is SMP safe as current->tgid does not change.
 */
SYSCALL_DEFINE0(getpid)
{
        return task_tgid_vnr(current);
}

/*
 * Accessing ->real_parent is not SMP-safe, it could
 * change from under us. However, we can use a stale
 * value of ->real_parent under rcu_read_lock(), see
 * release_task()->call_rcu(delayed_put_task_struct).
 */
SYSCALL_DEFINE0(getppid)
{
        int pid;

        rcu_read_lock();
        pid = task_tgid_vnr(rcu_dereference(current->real_parent));
        rcu_read_unlock();

        return pid;
}

SYSCALL_DEFINE0(getuid)
{
        /* Only we change this so SMP safe */
        return current_uid();
}

SYSCALL_DEFINE0(geteuid)
{
        /* Only we change this so SMP safe */
        return current_euid();
}

SYSCALL_DEFINE0(getgid)
{
        /* Only we change this so SMP safe */
        return current_gid();
}

SYSCALL_DEFINE0(getegid)
{
        /* Only we change this so SMP safe */
        return current_egid();
}

#endif

static void process_timeout(unsigned long __data)
{
        wake_up_process((struct task_struct *)__data);
}

/**
 * schedule_timeout - sleep until timeout
 * @timeout: timeout value in jiffies
 *
 * Make the current task sleep until @timeout jiffies have
 * elapsed. The routine will return immediately unless
 * the current task state has been set (see set_current_state()).
 *
 * You can set the task state as follows -
 *
 * %TASK_UNINTERRUPTIBLE - at least @timeout jiffies are guaranteed to
 * pass before the routine returns. The routine will return 0
 *
 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
 * delivered to the current task. In this case the remaining time
 * in jiffies will be returned, or 0 if the timer expired in time
 *
 * The current task state is guaranteed to be TASK_RUNNING when this
 * routine returns.
 *
 * Specifying a @timeout value of %MAX_SCHEDULE_TIMEOUT will schedule
 * the CPU away without a bound on the timeout. In this case the return
 * value will be %MAX_SCHEDULE_TIMEOUT.
 *
 * In all cases the return value is guaranteed to be non-negative.
 */
signed long __sched schedule_timeout(signed long timeout)
{
        struct timer_list timer;
        unsigned long expire;

        switch (timeout)
        {
        case MAX_SCHEDULE_TIMEOUT:
                /*
                 * These two special cases are useful to be comfortable
                 * in the caller. Nothing more. We could take
                 * MAX_SCHEDULE_TIMEOUT from one of the negative values
                 * but I'd like to return a valid offset (>=0) to allow
                 * the caller to do everything it wants with the retval.
                 */
                schedule();
                goto out;
        default:
                /*
                 * Another bit of PARANOID. Note that the retval will be
                 * 0 since no piece of kernel is supposed to do a check
                 * for a negative retval of schedule_timeout() (since it
                 * should never happen anyway). You just have the printk()
                 * that will tell you if something has gone wrong and where.
                 */
                if (timeout < 0) {
                        printk(KERN_ERR "schedule_timeout: wrong timeout "
                                "value %lx\n", timeout);
                        dump_stack();
                        current->state = TASK_RUNNING;
                        goto out;
                }
        }

        expire = timeout + jiffies;

        setup_timer_on_stack(&timer, process_timeout, (unsigned long)current);
        __mod_timer(&timer, expire, false, TIMER_NOT_PINNED);
        schedule();
        del_singleshot_timer_sync(&timer);

        /* Remove the timer from the object tracker */
        destroy_timer_on_stack(&timer);

        timeout = expire - jiffies;

out:
        return timeout < 0 ? 0 : timeout;
}
EXPORT_SYMBOL(schedule_timeout);
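
/*
 * Usage sketch (illustrative, not part of the original source): the caller
 * sets the task state first, e.g. to sleep ~100ms while remaining wakeable
 * by signals:
 *
 *      set_current_state(TASK_INTERRUPTIBLE);
 *      remaining = schedule_timeout(msecs_to_jiffies(100));
 */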
/*
 * We can use __set_current_state() here because schedule_timeout() calls
 * schedule() unconditionally.
 */
signed long __sched schedule_timeout_interruptible(signed long timeout)
{
	__set_current_state(TASK_INTERRUPTIBLE);
	return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_interruptible);

signed long __sched schedule_timeout_killable(signed long timeout)
{
	__set_current_state(TASK_KILLABLE);
	return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_killable);

signed long __sched schedule_timeout_uninterruptible(signed long timeout)
{
	__set_current_state(TASK_UNINTERRUPTIBLE);
	return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_uninterruptible);
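/*
 * Illustrative sketch (hypothetical helper): with the wrappers above, a
 * caller picks the task state by choosing the helper and only inspects
 * the result. A pending signal is the usual reason an interruptible
 * sleep finishes early.
 */
static __maybe_unused int example_interruptible_wait(void)
{
	if (schedule_timeout_interruptible(HZ) && signal_pending(current))
		return -ERESTARTSYS;	/* a signal cut the sleep short */
	return 0;			/* timed out, or woken without a signal */
}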
/* Thread ID - the internal kernel "pid" */
SYSCALL_DEFINE0(gettid)
{
	return task_pid_vnr(current);
}
/**
 * do_sysinfo - fill in sysinfo struct
 * @info: pointer to buffer to fill
 */
int do_sysinfo(struct sysinfo *info)
{
	unsigned long mem_total, sav_total;
	unsigned int mem_unit, bitcount;
	struct timespec tp;

	memset(info, 0, sizeof(struct sysinfo));

	ktime_get_ts(&tp);
	monotonic_to_bootbased(&tp);
	info->uptime = tp.tv_sec + (tp.tv_nsec ? 1 : 0);

	get_avenrun(info->loads, 0, SI_LOAD_SHIFT - FSHIFT);

	info->procs = nr_threads;

	si_meminfo(info);
	si_swapinfo(info);

	/*
	 * If the sum of all the available memory (i.e. ram + swap)
	 * is less than can be stored in a 32 bit unsigned long then
	 * we can be binary compatible with 2.2.x kernels. If not,
	 * well, in that case 2.2.x was broken anyway...
	 *
	 *  -Erik Andersen <andersee@debian.org>
	 */
	mem_total = info->totalram + info->totalswap;
	if (mem_total < info->totalram || mem_total < info->totalswap)
		goto out;
	bitcount = 0;
	mem_unit = info->mem_unit;
	while (mem_unit > 1) {
		bitcount++;
		mem_unit >>= 1;
		sav_total = mem_total;
		mem_total <<= 1;
		if (mem_total < sav_total)
			goto out;
	}

	/*
	 * If mem_total did not overflow, multiply all memory values by
	 * info->mem_unit and set it to 1. This leaves things compatible
	 * with 2.2.x, and also retains compatibility with earlier 2.4.x
	 * kernels...
	 */
	info->mem_unit = 1;
	info->totalram <<= bitcount;
	info->freeram <<= bitcount;
	info->sharedram <<= bitcount;
	info->bufferram <<= bitcount;
	info->totalswap <<= bitcount;
	info->freeswap <<= bitcount;
	info->totalhigh <<= bitcount;
	info->freehigh <<= bitcount;

out:
	return 0;
}
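/*
 * Worked example of the folding above, with hypothetical numbers: for
 * info->mem_unit == 4096 the loop halves mem_unit twelve times, so
 * bitcount ends up as 12 and every field is shifted left by 12, e.g.
 * totalram == 0x20000 pages becomes 0x20000000 bytes. mem_total is
 * doubled in step with each shift, and the overflow check bails out
 * early if the scaled total would no longer fit in an unsigned long.
 */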
SYSCALL_DEFINE1(sysinfo, struct sysinfo __user *, info)
{
	struct sysinfo val;

	do_sysinfo(&val);

	if (copy_to_user(info, &val, sizeof(struct sysinfo)))
		return -EFAULT;

	return 0;
}
static int __cpuinit init_timers_cpu(int cpu)
{
	int j;
	struct tvec_base *base;
	static char __cpuinitdata tvec_base_done[NR_CPUS];

	if (!tvec_base_done[cpu]) {
		static char boot_done;

		if (boot_done) {
			/*
			 * The APs use this path later in boot
			 */
			base = kmalloc_node(sizeof(*base),
						GFP_KERNEL | __GFP_ZERO,
						cpu_to_node(cpu));
			if (!base)
				return -ENOMEM;

			/* Make sure that tvec_base is 2 byte aligned */
			if (tbase_get_deferrable(base)) {
				WARN_ON(1);
				kfree(base);
				return -ENOMEM;
			}
			per_cpu(tvec_bases, cpu) = base;
		} else {
			/*
			 * This is for the boot CPU - we use compile-time
			 * static initialisation because per-cpu memory isn't
			 * ready yet and because the memory allocators are not
			 * initialised either.
			 */
			boot_done = 1;
			base = &boot_tvec_bases;
		}
		tvec_base_done[cpu] = 1;
	} else {
		base = per_cpu(tvec_bases, cpu);
	}

	spin_lock_init(&base->lock);

	for (j = 0; j < TVN_SIZE; j++) {
		INIT_LIST_HEAD(base->tv5.vec + j);
		INIT_LIST_HEAD(base->tv4.vec + j);
		INIT_LIST_HEAD(base->tv3.vec + j);
		INIT_LIST_HEAD(base->tv2.vec + j);
	}
	for (j = 0; j < TVR_SIZE; j++)
		INIT_LIST_HEAD(base->tv1.vec + j);

	base->timer_jiffies = jiffies;
	base->next_timer = base->timer_jiffies;
	return 0;
}
#ifdef CONFIG_HOTPLUG_CPU
static void migrate_timer_list(struct tvec_base *new_base, struct list_head *head)
{
	struct timer_list *timer;

	while (!list_empty(head)) {
		timer = list_first_entry(head, struct timer_list, entry);
		detach_timer(timer, 0);
		timer_set_base(timer, new_base);
		if (time_before(timer->expires, new_base->next_timer) &&
		    !tbase_get_deferrable(timer->base))
			new_base->next_timer = timer->expires;
		internal_add_timer(new_base, timer);
	}
}
static void __cpuinit migrate_timers(int cpu)
{
	struct tvec_base *old_base;
	struct tvec_base *new_base;
	int i;

	BUG_ON(cpu_online(cpu));
	old_base = per_cpu(tvec_bases, cpu);
	new_base = get_cpu_var(tvec_bases);
	/*
	 * The caller is globally serialized and nobody else
	 * takes two locks at once, so deadlock is not possible.
	 */
	spin_lock_irq(&new_base->lock);
	spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);

	BUG_ON(old_base->running_timer);

	for (i = 0; i < TVR_SIZE; i++)
		migrate_timer_list(new_base, old_base->tv1.vec + i);
	for (i = 0; i < TVN_SIZE; i++) {
		migrate_timer_list(new_base, old_base->tv2.vec + i);
		migrate_timer_list(new_base, old_base->tv3.vec + i);
		migrate_timer_list(new_base, old_base->tv4.vec + i);
		migrate_timer_list(new_base, old_base->tv5.vec + i);
	}

	spin_unlock(&old_base->lock);
	spin_unlock_irq(&new_base->lock);
	put_cpu_var(tvec_bases);
}
#endif /* CONFIG_HOTPLUG_CPU */
static int __cpuinit timer_cpu_notify(struct notifier_block *self,
				unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;
	int err;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		err = init_timers_cpu(cpu);
		if (err < 0)
			return notifier_from_errno(err);
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		migrate_timers(cpu);
		break;
#endif
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata timers_nb = {
	.notifier_call	= timer_cpu_notify,
};
void __init init_timers(void)
{
	int err = timer_cpu_notify(&timers_nb, (unsigned long)CPU_UP_PREPARE,
				(void *)(long)smp_processor_id());

	init_timer_stats();

	BUG_ON(err != NOTIFY_OK);
	register_cpu_notifier(&timers_nb);
	open_softirq(TIMER_SOFTIRQ, run_timer_softirq);
}
/**
 * msleep - sleep safely even with waitqueue interruptions
 * @msecs: Time in milliseconds to sleep for
 */
void msleep(unsigned int msecs)
{
	unsigned long timeout = msecs_to_jiffies(msecs) + 1;

	while (timeout)
		timeout = schedule_timeout_uninterruptible(timeout);
}
EXPORT_SYMBOL(msleep);
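/*
 * Usage sketch (hypothetical helper): msleep() suits non-critical waits
 * of several milliseconds and up. It rounds up to jiffies and cannot
 * wake early, so it may sleep longer than asked, never less.
 */
static __maybe_unused void example_settle_delay(void)
{
	msleep(20);	/* at least ~20ms, possibly a bit more */
}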
/**
 * msleep_interruptible - sleep waiting for signals
 * @msecs: Time in milliseconds to sleep for
 */
unsigned long msleep_interruptible(unsigned int msecs)
{
	unsigned long timeout = msecs_to_jiffies(msecs) + 1;

	while (timeout && !signal_pending(current))
		timeout = schedule_timeout_interruptible(timeout);
	return jiffies_to_msecs(timeout);
}
EXPORT_SYMBOL(msleep_interruptible);
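/*
 * Usage sketch (hypothetical helper): the return value is the portion
 * of the requested time that was *not* slept, in milliseconds, so a
 * nonzero result means a signal ended the sleep early.
 */
static __maybe_unused int example_interruptible_delay(void)
{
	unsigned long left = msleep_interruptible(1000);

	return left ? -EINTR : 0;	/* ~left ms were still pending */
}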
static int __sched do_usleep_range(unsigned long min, unsigned long max)
{
	ktime_t kmin;
	unsigned long delta;

	kmin = ktime_set(0, min * NSEC_PER_USEC);
	delta = (max - min) * NSEC_PER_USEC;
	return schedule_hrtimeout_range(&kmin, delta, HRTIMER_MODE_REL);
}

/**
 * usleep_range - Drop-in replacement for udelay where wakeup is flexible
 * @min: Minimum time in usecs to sleep
 * @max: Maximum time in usecs to sleep
 */
void usleep_range(unsigned long min, unsigned long max)
{
	__set_current_state(TASK_UNINTERRUPTIBLE);
	do_usleep_range(min, max);
}
EXPORT_SYMBOL(usleep_range);
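/*
 * Usage sketch (hypothetical helper): usleep_range() fits waits in
 * roughly the 10us-20ms band, where udelay() would busy-wait and
 * msleep() is too coarse. The min/max window gives the hrtimer code
 * room to coalesce wakeups; the bounds below are arbitrary examples.
 */
static __maybe_unused void example_short_wait(void)
{
	usleep_range(100, 200);	/* at least 100us, ideally under ~200us */
}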