/*
 *  linux/kernel/timer.c
 *
 *  Kernel internal timers, basic process system calls
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-01-28  Modified by Finn Arne Gangstad to make timers scale better.
 *
 *  1997-09-10  Updated NTP code according to technical memorandum Jan '96
 *              "A Kernel Model for Precision Timekeeping" by Dave Mills
 *  1998-12-24  Fixed a xtime SMP race (we need the xtime_lock rw spinlock to
 *              serialize accesses to xtime/lost_ticks).
 *              Copyright (C) 1998  Andrea Arcangeli
 *  1999-03-10  Improved NTP compatibility by Ulrich Windl
 *  2000-10-05  Implemented scalable SMP per-CPU timer handling.
 *              Copyright (C) 2000, 2001, 2002  Ingo Molnar
 *              Designed by David S. Miller, Alexey Kuznetsov and Ingo Molnar
 *  2002-05-31  Moved sys_sysinfo here and made its locking sane, Robert Love
 */
#include <linux/kernel_stat.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/pid_namespace.h>
#include <linux/notifier.h>
#include <linux/thread_info.h>
#include <linux/time.h>
#include <linux/jiffies.h>
#include <linux/posix-timers.h>
#include <linux/cpu.h>
#include <linux/syscalls.h>
#include <linux/delay.h>
#include <linux/tick.h>
#include <linux/kallsyms.h>
#include <linux/irq_work.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/div64.h>
#include <asm/timex.h>
#include <asm/io.h>

#define CREATE_TRACE_POINTS
#include <trace/events/timer.h>
u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES;

EXPORT_SYMBOL(jiffies_64);

/*
 * per-CPU timer vector definitions:
 */
#define TVN_BITS (CONFIG_BASE_SMALL ? 4 : 6)
#define TVR_BITS (CONFIG_BASE_SMALL ? 6 : 8)
#define TVN_SIZE (1 << TVN_BITS)
#define TVR_SIZE (1 << TVR_BITS)
#define TVN_MASK (TVN_SIZE - 1)
#define TVR_MASK (TVR_SIZE - 1)
#define MAX_TVAL ((unsigned long)((1ULL << (TVR_BITS + 4*TVN_BITS)) - 1))
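
/*
 * Worked example (illustrative note, not from the original source): with
 * the default CONFIG_BASE_SMALL=0 values, TVR_BITS=8 and TVN_BITS=6, so
 * tv1 holds 256 one-jiffy buckets and tv2..tv5 hold 64 buckets each. The
 * wheel spans 8 + 4*6 = 32 bits of jiffies, giving MAX_TVAL = 2^32 - 1,
 * i.e. roughly 49.7 days at HZ=1000.
 */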
struct tvec {
	struct list_head vec[TVN_SIZE];
};

struct tvec_root {
	struct list_head vec[TVR_SIZE];
};

struct tvec_base {
	spinlock_t lock;
	struct timer_list *running_timer;
	unsigned long timer_jiffies;
	unsigned long next_timer;
	unsigned long active_timers;
	struct tvec_root tv1;
	struct tvec tv2;
	struct tvec tv3;
	struct tvec tv4;
	struct tvec tv5;
} ____cacheline_aligned;

struct tvec_base boot_tvec_bases;
EXPORT_SYMBOL(boot_tvec_bases);
static DEFINE_PER_CPU(struct tvec_base *, tvec_bases) = &boot_tvec_bases;

/* Functions below help us manage 'deferrable' flag */
static inline unsigned int tbase_get_deferrable(struct tvec_base *base)
{
	return ((unsigned int)(unsigned long)base & TIMER_DEFERRABLE);
}

static inline unsigned int tbase_get_irqsafe(struct tvec_base *base)
{
	return ((unsigned int)(unsigned long)base & TIMER_IRQSAFE);
}

static inline struct tvec_base *tbase_get_base(struct tvec_base *base)
{
	return ((struct tvec_base *)((unsigned long)base & ~TIMER_FLAG_MASK));
}

static inline void
timer_set_base(struct timer_list *timer, struct tvec_base *new_base)
{
	unsigned long flags = (unsigned long)timer->base & TIMER_FLAG_MASK;

	timer->base = (struct tvec_base *)((unsigned long)(new_base) | flags);
}
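
/*
 * Illustration (editorial sketch, not from the original source): since
 * struct tvec_base is ____cacheline_aligned, the low bits of a pointer to
 * it are always zero, so timer->base doubles as flag storage. Assuming
 * TIMER_DEFERRABLE and TIMER_IRQSAFE occupy the two lowest bits (see
 * <linux/timer.h>), a deferrable irqsafe timer whose base lives at
 * 0xffff880000010000 would store 0xffff880000010003, and tbase_get_base()
 * masks the flags back out via ~TIMER_FLAG_MASK.
 */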
static unsigned long round_jiffies_common(unsigned long j, int cpu,
		bool force_up)
{
	int rem;
	unsigned long original = j;

	/*
	 * We don't want all cpus firing their timers at once hitting the
	 * same lock or cachelines, so we skew each extra cpu with an extra
	 * 3 jiffies. This 3 jiffies came originally from the mm/ code which
	 * already did this.
	 * The skew is done by adding 3*cpunr, then rounding, then subtracting
	 * this extra offset again.
	 */
	j += cpu * 3;

	rem = j % HZ;

	/*
	 * If the target jiffy is just after a whole second (which can happen
	 * due to delays of the timer irq, long irq off times etc.) then
	 * we should round down to the whole second, not up. Use 1/4th second
	 * as cutoff for this rounding as an extreme upper bound for this.
	 * But never round down if @force_up is set.
	 */
	if (rem < HZ/4 && !force_up) /* round down */
		j = j - rem;
	else /* round up */
		j = j - rem + HZ;

	/* now that we have rounded, subtract the extra skew again */
	j -= cpu * 3;

	if (j <= jiffies) /* rounding ate our timeout entirely; */
		return original;
	return j;
}
/**
 * __round_jiffies - function to round jiffies to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * __round_jiffies() rounds an absolute time in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The exact rounding is skewed for each processor to avoid all
 * processors firing at the exact same time, which could lead
 * to lock contention or spurious cache line bouncing.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long __round_jiffies(unsigned long j, int cpu)
{
	return round_jiffies_common(j, cpu, false);
}
EXPORT_SYMBOL_GPL(__round_jiffies);

/**
 * __round_jiffies_relative - function to round jiffies to a full second
 * @j: the time in (relative) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * __round_jiffies_relative() rounds a time delta in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The exact rounding is skewed for each processor to avoid all
 * processors firing at the exact same time, which could lead
 * to lock contention or spurious cache line bouncing.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long __round_jiffies_relative(unsigned long j, int cpu)
{
	unsigned long j0 = jiffies;

	/* Use j0 because jiffies might change while we run */
	return round_jiffies_common(j + j0, cpu, false) - j0;
}
EXPORT_SYMBOL_GPL(__round_jiffies_relative);

/**
 * round_jiffies - function to round jiffies to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 *
 * round_jiffies() rounds an absolute time in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long round_jiffies(unsigned long j)
{
	return round_jiffies_common(j, raw_smp_processor_id(), false);
}
EXPORT_SYMBOL_GPL(round_jiffies);

/**
 * round_jiffies_relative - function to round jiffies to a full second
 * @j: the time in (relative) jiffies that should be rounded
 *
 * round_jiffies_relative() rounds a time delta in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long round_jiffies_relative(unsigned long j)
{
	return __round_jiffies_relative(j, raw_smp_processor_id());
}
EXPORT_SYMBOL_GPL(round_jiffies_relative);

/**
 * __round_jiffies_up - function to round jiffies up to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * This is the same as __round_jiffies() except that it will never
 * round down. This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long __round_jiffies_up(unsigned long j, int cpu)
{
	return round_jiffies_common(j, cpu, true);
}
EXPORT_SYMBOL_GPL(__round_jiffies_up);

/**
 * __round_jiffies_up_relative - function to round jiffies up to a full second
 * @j: the time in (relative) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * This is the same as __round_jiffies_relative() except that it will never
 * round down. This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long __round_jiffies_up_relative(unsigned long j, int cpu)
{
	unsigned long j0 = jiffies;

	/* Use j0 because jiffies might change while we run */
	return round_jiffies_common(j + j0, cpu, true) - j0;
}
EXPORT_SYMBOL_GPL(__round_jiffies_up_relative);

/**
 * round_jiffies_up - function to round jiffies up to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 *
 * This is the same as round_jiffies() except that it will never
 * round down. This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long round_jiffies_up(unsigned long j)
{
	return round_jiffies_common(j, raw_smp_processor_id(), true);
}
EXPORT_SYMBOL_GPL(round_jiffies_up);

/**
 * round_jiffies_up_relative - function to round jiffies up to a full second
 * @j: the time in (relative) jiffies that should be rounded
 *
 * This is the same as round_jiffies_relative() except that it will never
 * round down. This is useful for timeouts for which the exact time
 * of firing does not matter too much, as long as they don't fire too
 * early.
 */
unsigned long round_jiffies_up_relative(unsigned long j)
{
	return __round_jiffies_up_relative(j, raw_smp_processor_id());
}
EXPORT_SYMBOL_GPL(round_jiffies_up_relative);
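
/*
 * Usage sketch (illustrative; my_dev, my_poll_fn and the 10 s period are
 * hypothetical, not from this file): a driver with coarse periodic
 * housekeeping can rearm on rounded jiffies so that many such timers
 * coalesce on second boundaries:
 *
 *	static void my_poll_fn(unsigned long data)
 *	{
 *		struct my_dev *dev = (struct my_dev *)data;
 *
 *		my_dev_poll_hw(dev);
 *		mod_timer(&dev->poll_timer, round_jiffies(jiffies + 10 * HZ));
 *	}
 */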
/**
 * set_timer_slack - set the allowed slack for a timer
 * @timer: the timer to be modified
 * @slack_hz: the amount of time (in jiffies) allowed for rounding
 *
 * Set the amount of time, in jiffies, that a certain timer has
 * in terms of slack. By setting this value, the timer subsystem
 * will schedule the actual timer somewhere between
 * the time mod_timer() asks for, and that time plus the slack.
 *
 * By setting the slack to -1, a percentage of the delay is used
 * instead.
 */
void set_timer_slack(struct timer_list *timer, int slack_hz)
{
	timer->slack = slack_hz;
}
EXPORT_SYMBOL_GPL(set_timer_slack);
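
/*
 * Usage sketch (illustrative; dev, my_watchdog_fn and the periods are
 * hypothetical, not from this file): a timeout that merely guards against
 * a lost interrupt can tolerate a lot of slack, letting it coalesce with
 * other timers:
 *
 *	setup_timer(&dev->watchdog, my_watchdog_fn, (unsigned long)dev);
 *	set_timer_slack(&dev->watchdog, HZ);
 *	mod_timer(&dev->watchdog, jiffies + 30 * HZ);
 */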
static void
__internal_add_timer(struct tvec_base *base, struct timer_list *timer)
{
	unsigned long expires = timer->expires;
	unsigned long idx = expires - base->timer_jiffies;
	struct list_head *vec;

	if (idx < TVR_SIZE) {
		int i = expires & TVR_MASK;
		vec = base->tv1.vec + i;
	} else if (idx < 1 << (TVR_BITS + TVN_BITS)) {
		int i = (expires >> TVR_BITS) & TVN_MASK;
		vec = base->tv2.vec + i;
	} else if (idx < 1 << (TVR_BITS + 2 * TVN_BITS)) {
		int i = (expires >> (TVR_BITS + TVN_BITS)) & TVN_MASK;
		vec = base->tv3.vec + i;
	} else if (idx < 1 << (TVR_BITS + 3 * TVN_BITS)) {
		int i = (expires >> (TVR_BITS + 2 * TVN_BITS)) & TVN_MASK;
		vec = base->tv4.vec + i;
	} else if ((signed long) idx < 0) {
		/*
		 * Can happen if you add a timer with expires == jiffies,
		 * or you set a timer to go off in the past
		 */
		vec = base->tv1.vec + (base->timer_jiffies & TVR_MASK);
	} else {
		int i;
		/* If the timeout is larger than MAX_TVAL (on 64-bit
		 * architectures or with CONFIG_BASE_SMALL=1) then we
		 * use the maximum timeout.
		 */
		if (idx > MAX_TVAL) {
			idx = MAX_TVAL;
			expires = idx + base->timer_jiffies;
		}
		i = (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK;
		vec = base->tv5.vec + i;
	}
	/*
	 * Timers are FIFO:
	 */
	list_add_tail(&timer->entry, vec);
}
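
/*
 * Worked example (illustrative note, not from the original source): with
 * TVR_BITS=8 and TVN_BITS=6, suppose base->timer_jiffies == 1000 and
 * timer->expires == 1300. Then idx == 300, which is >= TVR_SIZE (256) but
 * < 1 << 14, so the timer lands in tv2 at slot (1300 >> 8) & 63 == 5. It
 * is cascaded back down into tv1 when the wheel reaches that tv2 bucket,
 * and fires once tv1 reaches the expiry jiffy.
 */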
static void internal_add_timer(struct tvec_base *base, struct timer_list *timer)
{
	__internal_add_timer(base, timer);
	/*
	 * Update base->active_timers and base->next_timer
	 */
	if (!tbase_get_deferrable(timer->base)) {
		if (time_before(timer->expires, base->next_timer))
			base->next_timer = timer->expires;
		base->active_timers++;
	}
}
#ifdef CONFIG_TIMER_STATS
void __timer_stats_timer_set_start_info(struct timer_list *timer, void *addr)
{
	if (timer->start_site)
		return;

	timer->start_site = addr;
	memcpy(timer->start_comm, current->comm, TASK_COMM_LEN);
	timer->start_pid = current->pid;
}

static void timer_stats_account_timer(struct timer_list *timer)
{
	unsigned int flag = 0;

	if (likely(!timer->start_site))
		return;
	if (unlikely(tbase_get_deferrable(timer->base)))
		flag |= TIMER_STATS_FLAG_DEFERRABLE;

	timer_stats_update_stats(timer, timer->start_pid, timer->start_site,
				 timer->function, timer->start_comm, flag);
}

#else
static void timer_stats_account_timer(struct timer_list *timer) {}
#endif
#ifdef CONFIG_DEBUG_OBJECTS_TIMERS

static struct debug_obj_descr timer_debug_descr;

static void *timer_debug_hint(void *addr)
{
	return ((struct timer_list *) addr)->function;
}

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static int timer_fixup_init(void *addr, enum debug_obj_state state)
{
	struct timer_list *timer = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		del_timer_sync(timer);
		debug_object_init(timer, &timer_debug_descr);
		return 1;
	default:
		return 0;
	}
}

/* Stub timer callback for improperly used timers. */
static void stub_timer(unsigned long data)
{
	WARN_ON(1);
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown object is activated (might be a statically initialized object)
 */
static int timer_fixup_activate(void *addr, enum debug_obj_state state)
{
	struct timer_list *timer = addr;

	switch (state) {
	case ODEBUG_STATE_NOTAVAILABLE:
		/*
		 * This is not really a fixup. The timer was
		 * statically initialized. We just make sure that it
		 * is tracked in the object tracker.
		 */
		if (timer->entry.next == NULL &&
		    timer->entry.prev == TIMER_ENTRY_STATIC) {
			debug_object_init(timer, &timer_debug_descr);
			debug_object_activate(timer, &timer_debug_descr);
			return 0;
		} else {
			setup_timer(timer, stub_timer, 0);
			return 1;
		}

	case ODEBUG_STATE_ACTIVE:
		WARN_ON(1);
	default:
		return 0;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static int timer_fixup_free(void *addr, enum debug_obj_state state)
{
	struct timer_list *timer = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		del_timer_sync(timer);
		debug_object_free(timer, &timer_debug_descr);
		return 1;
	default:
		return 0;
	}
}

/*
 * fixup_assert_init is called when:
 * - an untracked/uninit-ed object is found
 */
static int timer_fixup_assert_init(void *addr, enum debug_obj_state state)
{
	struct timer_list *timer = addr;

	switch (state) {
	case ODEBUG_STATE_NOTAVAILABLE:
		if (timer->entry.prev == TIMER_ENTRY_STATIC) {
			/*
			 * This is not really a fixup. The timer was
			 * statically initialized. We just make sure that it
			 * is tracked in the object tracker.
			 */
			debug_object_init(timer, &timer_debug_descr);
			return 0;
		} else {
			setup_timer(timer, stub_timer, 0);
			return 1;
		}
	default:
		return 0;
	}
}

static struct debug_obj_descr timer_debug_descr = {
	.name			= "timer_list",
	.debug_hint		= timer_debug_hint,
	.fixup_init		= timer_fixup_init,
	.fixup_activate		= timer_fixup_activate,
	.fixup_free		= timer_fixup_free,
	.fixup_assert_init	= timer_fixup_assert_init,
};

static inline void debug_timer_init(struct timer_list *timer)
{
	debug_object_init(timer, &timer_debug_descr);
}

static inline void debug_timer_activate(struct timer_list *timer)
{
	debug_object_activate(timer, &timer_debug_descr);
}

static inline void debug_timer_deactivate(struct timer_list *timer)
{
	debug_object_deactivate(timer, &timer_debug_descr);
}

static inline void debug_timer_free(struct timer_list *timer)
{
	debug_object_free(timer, &timer_debug_descr);
}

static inline void debug_timer_assert_init(struct timer_list *timer)
{
	debug_object_assert_init(timer, &timer_debug_descr);
}

static void do_init_timer(struct timer_list *timer, unsigned int flags,
			  const char *name, struct lock_class_key *key);

void init_timer_on_stack_key(struct timer_list *timer, unsigned int flags,
			     const char *name, struct lock_class_key *key)
{
	debug_object_init_on_stack(timer, &timer_debug_descr);
	do_init_timer(timer, flags, name, key);
}
EXPORT_SYMBOL_GPL(init_timer_on_stack_key);

void destroy_timer_on_stack(struct timer_list *timer)
{
	debug_object_free(timer, &timer_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_timer_on_stack);

#else
static inline void debug_timer_init(struct timer_list *timer) { }
static inline void debug_timer_activate(struct timer_list *timer) { }
static inline void debug_timer_deactivate(struct timer_list *timer) { }
static inline void debug_timer_assert_init(struct timer_list *timer) { }
#endif
static inline void debug_init(struct timer_list *timer)
{
	debug_timer_init(timer);
	trace_timer_init(timer);
}

static inline void
debug_activate(struct timer_list *timer, unsigned long expires)
{
	debug_timer_activate(timer);
	trace_timer_start(timer, expires);
}

static inline void debug_deactivate(struct timer_list *timer)
{
	debug_timer_deactivate(timer);
	trace_timer_cancel(timer);
}

static inline void debug_assert_init(struct timer_list *timer)
{
	debug_timer_assert_init(timer);
}
static void do_init_timer(struct timer_list *timer, unsigned int flags,
			  const char *name, struct lock_class_key *key)
{
	struct tvec_base *base = __raw_get_cpu_var(tvec_bases);

	timer->entry.next = NULL;
	timer->base = (void *)((unsigned long)base | flags);
	timer->slack = -1;
#ifdef CONFIG_TIMER_STATS
	timer->start_site = NULL;
	timer->start_pid = -1;
	memset(timer->start_comm, 0, TASK_COMM_LEN);
#endif
	lockdep_init_map(&timer->lockdep_map, name, key, 0);
}

/**
 * init_timer_key - initialize a timer
 * @timer: the timer to be initialized
 * @flags: timer flags
 * @name: name of the timer
 * @key: lockdep class key of the fake lock used for tracking timer
 *	sync lock dependencies
 *
 * init_timer_key() must be called on a timer before calling *any* of the
 * other timer functions.
 */
void init_timer_key(struct timer_list *timer, unsigned int flags,
		    const char *name, struct lock_class_key *key)
{
	debug_init(timer);
	do_init_timer(timer, flags, name, key);
}
EXPORT_SYMBOL(init_timer_key);
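
/*
 * Usage sketch (illustrative; my_fn and my_data are hypothetical, not from
 * this file): drivers rarely call init_timer_key() directly - the
 * init_timer()/setup_timer() macros in <linux/timer.h> expand to it. A
 * typical initialization looks like:
 *
 *	struct timer_list my_timer;
 *
 *	setup_timer(&my_timer, my_fn, (unsigned long)my_data);
 *	mod_timer(&my_timer, jiffies + HZ);
 */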
static inline void detach_timer(struct timer_list *timer, bool clear_pending)
{
	struct list_head *entry = &timer->entry;

	debug_deactivate(timer);

	__list_del(entry->prev, entry->next);
	if (clear_pending)
		entry->next = NULL;
	entry->prev = LIST_POISON2;
}

static inline void
detach_expired_timer(struct timer_list *timer, struct tvec_base *base)
{
	detach_timer(timer, true);
	if (!tbase_get_deferrable(timer->base))
		base->active_timers--;
}

static int detach_if_pending(struct timer_list *timer, struct tvec_base *base,
			     bool clear_pending)
{
	if (!timer_pending(timer))
		return 0;

	detach_timer(timer, clear_pending);
	if (!tbase_get_deferrable(timer->base)) {
		base->active_timers--;
		if (timer->expires == base->next_timer)
			base->next_timer = base->timer_jiffies;
	}
	return 1;
}
/*
 * We are using hashed locking: holding per_cpu(tvec_bases).lock
 * means that all timers which are tied to this base via timer->base are
 * locked, and the base itself is locked too.
 *
 * So __run_timers/migrate_timers can safely modify all timers which could
 * be found on ->tvX lists.
 *
 * When the timer's base is locked, and the timer is removed from the list,
 * it is possible to set timer->base = NULL and drop the lock: the timer
 * remains locked.
 */
static struct tvec_base *lock_timer_base(struct timer_list *timer,
					 unsigned long *flags)
	__acquires(timer->base->lock)
{
	struct tvec_base *base;

	for (;;) {
		struct tvec_base *prelock_base = timer->base;
		base = tbase_get_base(prelock_base);
		if (likely(base != NULL)) {
			spin_lock_irqsave(&base->lock, *flags);
			if (likely(prelock_base == timer->base))
				return base;
			/* The timer has migrated to another CPU */
			spin_unlock_irqrestore(&base->lock, *flags);
		}
		cpu_relax();
	}
}
static inline int
__mod_timer(struct timer_list *timer, unsigned long expires,
	    bool pending_only, int pinned)
{
	struct tvec_base *base, *new_base;
	unsigned long flags;
	int ret = 0, cpu;

	timer_stats_timer_set_start_info(timer);
	BUG_ON(!timer->function);

	base = lock_timer_base(timer, &flags);

	ret = detach_if_pending(timer, base, false);
	if (!ret && pending_only)
		goto out_unlock;

	debug_activate(timer, expires);

	cpu = smp_processor_id();

#if defined(CONFIG_NO_HZ) && defined(CONFIG_SMP)
	if (!pinned && get_sysctl_timer_migration() && idle_cpu(cpu))
		cpu = get_nohz_timer_target();
#endif
	new_base = per_cpu(tvec_bases, cpu);

	if (base != new_base) {
		/*
		 * We are trying to schedule the timer on the local CPU.
		 * However we can't change the timer's base while it is
		 * running, otherwise del_timer_sync() can't detect that the
		 * timer's handler has not finished yet. This also guarantees
		 * that the timer is serialized wrt itself.
		 */
		if (likely(base->running_timer != timer)) {
			/* See the comment in lock_timer_base() */
			timer_set_base(timer, NULL);
			spin_unlock(&base->lock);
			base = new_base;
			spin_lock(&base->lock);
			timer_set_base(timer, base);
		}
	}

	timer->expires = expires;
	internal_add_timer(base, timer);

out_unlock:
	spin_unlock_irqrestore(&base->lock, flags);

	return ret;
}
/**
 * mod_timer_pending - modify a pending timer's timeout
 * @timer: the pending timer to be modified
 * @expires: new timeout in jiffies
 *
 * mod_timer_pending() is the same for pending timers as mod_timer(),
 * but will not re-activate and modify already deleted timers.
 *
 * It is useful for unserialized use of timers.
 */
int mod_timer_pending(struct timer_list *timer, unsigned long expires)
{
	return __mod_timer(timer, expires, true, TIMER_NOT_PINNED);
}
EXPORT_SYMBOL(mod_timer_pending);
/*
 * Decide where to put the timer while taking the slack into account
 *
 * Algorithm:
 * 1) calculate the maximum (absolute) time
 * 2) calculate the highest bit where the expires and new max are different
 * 3) use this bit to make a mask
 * 4) use the bitmask to round down the maximum time, so that all last
 *    bits are zeros
 */
static inline
unsigned long apply_slack(struct timer_list *timer, unsigned long expires)
{
	unsigned long expires_limit, mask;
	int bit;

	if (timer->slack >= 0) {
		expires_limit = expires + timer->slack;
	} else {
		long delta = expires - jiffies;

		if (delta < 256)
			return expires;

		expires_limit = expires + delta / 256;
	}
	mask = expires ^ expires_limit;
	if (mask == 0)
		return expires;

	bit = find_last_bit(&mask, BITS_PER_LONG);

	mask = (1UL << bit) - 1;

	expires_limit = expires_limit & ~(mask);

	return expires_limit;
}
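
/*
 * Worked example (illustrative note, not from the original source): with
 * the default slack of -1 and jiffies == 0, a timer asking for
 * expires == 1024 gets delta == 1024, so expires_limit == 1024 + 4 == 1028.
 * Then mask == 1024 ^ 1028 == 0b100, find_last_bit() returns bit 2, and
 * the final mask of 0b11 rounds expires_limit down to a multiple of 4.
 * Timers whose allowed windows overlap thus tend to land on the same
 * rounded jiffy and expire together.
 */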
/**
 * mod_timer - modify a timer's timeout
 * @timer: the timer to be modified
 * @expires: new timeout in jiffies
 *
 * mod_timer() is a more efficient way to update the expires field of an
 * active timer (if the timer is inactive it will be activated)
 *
 * mod_timer(timer, expires) is equivalent to:
 *
 *     del_timer(timer); timer->expires = expires; add_timer(timer);
 *
 * Note that if there are multiple unserialized concurrent users of the
 * same timer, then mod_timer() is the only safe way to modify the timeout,
 * since add_timer() cannot modify an already running timer.
 *
 * The function returns whether it has modified a pending timer or not.
 * (ie. mod_timer() of an inactive timer returns 0, mod_timer() of an
 * active timer returns 1.)
 */
int mod_timer(struct timer_list *timer, unsigned long expires)
{
	expires = apply_slack(timer, expires);

	/*
	 * This is a common optimization triggered by the
	 * networking code - if the timer is re-modified
	 * to have the same timeout then just return:
	 */
	if (timer_pending(timer) && timer->expires == expires)
		return 1;

	return __mod_timer(timer, expires, false, TIMER_NOT_PINNED);
}
EXPORT_SYMBOL(mod_timer);
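
/*
 * Usage sketch (illustrative; my_dev and my_tick are hypothetical, not
 * from this file): a self-rearming periodic timer typically calls
 * mod_timer() from its own callback:
 *
 *	static void my_tick(unsigned long data)
 *	{
 *		struct my_dev *dev = (struct my_dev *)data;
 *
 *		my_dev_do_work(dev);
 *		mod_timer(&dev->tick_timer, jiffies + HZ / 10);
 *	}
 */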
/**
 * mod_timer_pinned - modify a timer's timeout
 * @timer: the timer to be modified
 * @expires: new timeout in jiffies
 *
 * mod_timer_pinned() is a way to update the expires field of an
 * active timer (if the timer is inactive it will be activated)
 * and to ensure that the timer is scheduled on the current CPU.
 *
 * Note that this does not prevent the timer from being migrated
 * when the current CPU goes offline. If this is a problem for
 * you, use CPU-hotplug notifiers to handle it correctly, for
 * example, cancelling the timer when the corresponding CPU goes
 * offline.
 *
 * mod_timer_pinned(timer, expires) is equivalent to:
 *
 *     del_timer(timer); timer->expires = expires; add_timer(timer);
 */
int mod_timer_pinned(struct timer_list *timer, unsigned long expires)
{
	if (timer->expires == expires && timer_pending(timer))
		return 1;

	return __mod_timer(timer, expires, false, TIMER_PINNED);
}
EXPORT_SYMBOL(mod_timer_pinned);
/**
 * add_timer - start a timer
 * @timer: the timer to be added
 *
 * The kernel will do a ->function(->data) callback from the
 * timer interrupt at the ->expires point in the future. The
 * current time is 'jiffies'.
 *
 * The timer's ->expires, ->function (and if the handler uses it, ->data)
 * fields must be set prior to calling this function.
 *
 * Timers with an ->expires field in the past will be executed in the next
 * timer tick.
 */
void add_timer(struct timer_list *timer)
{
	BUG_ON(timer_pending(timer));
	mod_timer(timer, timer->expires);
}
EXPORT_SYMBOL(add_timer);
/**
 * add_timer_on - start a timer on a particular CPU
 * @timer: the timer to be added
 * @cpu: the CPU to start it on
 *
 * This is not very scalable on SMP. Double adds are not possible.
 */
void add_timer_on(struct timer_list *timer, int cpu)
{
	struct tvec_base *base = per_cpu(tvec_bases, cpu);
	unsigned long flags;

	timer_stats_timer_set_start_info(timer);
	BUG_ON(timer_pending(timer) || !timer->function);
	spin_lock_irqsave(&base->lock, flags);
	timer_set_base(timer, base);
	debug_activate(timer, timer->expires);
	internal_add_timer(base, timer);
	/*
	 * Check whether the other CPU is idle and needs to be
	 * triggered to reevaluate the timer wheel when nohz is
	 * active. We are protected against the other CPU fiddling
	 * with the timer by holding the timer base lock. This also
	 * makes sure that a CPU on the way to idle can not evaluate
	 * the timer wheel.
	 */
	wake_up_idle_cpu(cpu);
	spin_unlock_irqrestore(&base->lock, flags);
}
EXPORT_SYMBOL_GPL(add_timer_on);
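
/*
 * Usage sketch (illustrative; my_beat_timers and my_beat_fn are
 * hypothetical, not from this file): arming a per-CPU heartbeat on each
 * online CPU:
 *
 *	int cpu;
 *
 *	for_each_online_cpu(cpu) {
 *		struct timer_list *t = &per_cpu(my_beat_timers, cpu);
 *
 *		setup_timer(t, my_beat_fn, cpu);
 *		t->expires = jiffies + HZ;
 *		add_timer_on(t, cpu);
 *	}
 */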
/**
 * del_timer - deactivate a timer.
 * @timer: the timer to be deactivated
 *
 * del_timer() deactivates a timer - this works on both active and inactive
 * timers.
 *
 * The function returns whether it has deactivated a pending timer or not.
 * (ie. del_timer() of an inactive timer returns 0, del_timer() of an
 * active timer returns 1.)
 */
int del_timer(struct timer_list *timer)
{
	struct tvec_base *base;
	unsigned long flags;
	int ret = 0;

	debug_assert_init(timer);

	timer_stats_timer_clear_start_info(timer);
	if (timer_pending(timer)) {
		base = lock_timer_base(timer, &flags);
		ret = detach_if_pending(timer, base, true);
		spin_unlock_irqrestore(&base->lock, flags);
	}

	return ret;
}
EXPORT_SYMBOL(del_timer);
/**
 * try_to_del_timer_sync - Try to deactivate a timer
 * @timer: the timer to be deactivated
 *
 * This function tries to deactivate a timer. Upon successful (ret >= 0)
 * exit the timer is not queued and the handler is not running on any CPU.
 */
int try_to_del_timer_sync(struct timer_list *timer)
{
	struct tvec_base *base;
	unsigned long flags;
	int ret = -1;

	debug_assert_init(timer);

	base = lock_timer_base(timer, &flags);

	if (base->running_timer != timer) {
		timer_stats_timer_clear_start_info(timer);
		ret = detach_if_pending(timer, base, true);
	}
	spin_unlock_irqrestore(&base->lock, flags);

	return ret;
}
EXPORT_SYMBOL(try_to_del_timer_sync);
#ifdef CONFIG_SMP
/**
 * del_timer_sync - deactivate a timer and wait for the handler to finish.
 * @timer: the timer to be deactivated
 *
 * This function only differs from del_timer() on SMP: besides deactivating
 * the timer it also makes sure the handler has finished executing on other
 * CPUs.
 *
 * Synchronization rules: Callers must prevent restarting of the timer,
 * otherwise this function is meaningless. It must not be called from
 * interrupt contexts unless the timer is an irqsafe one. The caller must
 * not hold locks which would prevent completion of the timer's
 * handler. The timer's handler must not call add_timer_on(). Upon exit the
 * timer is not queued and the handler is not running on any CPU.
 *
 * Note: For !irqsafe timers, you must not hold locks that are held in
 * interrupt context while calling this function. Even if the lock has
 * nothing to do with the timer in question. Here's why:
 *
 *    CPU0                             CPU1
 *    ----                             ----
 *                                     <SOFTIRQ>
 *                                     call_timer_fn();
 *                                       base->running_timer = mytimer;
 *    spin_lock_irq(somelock);
 *                                     <IRQ>
 *                                       spin_lock(somelock);
 *    del_timer_sync(mytimer);
 *      while (base->running_timer == mytimer);
 *
 * Now del_timer_sync() will never return and never release somelock.
 * The interrupt on the other CPU is waiting to grab somelock but
 * it has interrupted the softirq that CPU0 is waiting to finish.
 *
 * The function returns whether it has deactivated a pending timer or not.
 */
int del_timer_sync(struct timer_list *timer)
{
#ifdef CONFIG_LOCKDEP
	unsigned long flags;

	/*
	 * If lockdep gives a backtrace here, please reference
	 * the synchronization rules above.
	 */
	local_irq_save(flags);
	lock_map_acquire(&timer->lockdep_map);
	lock_map_release(&timer->lockdep_map);
	local_irq_restore(flags);
#endif
	/*
	 * don't use it in hardirq context, because it
	 * could lead to deadlock.
	 */
	WARN_ON(in_irq() && !tbase_get_irqsafe(timer->base));
	for (;;) {
		int ret = try_to_del_timer_sync(timer);
		if (ret >= 0)
			return ret;
		cpu_relax();
	}
}
EXPORT_SYMBOL(del_timer_sync);
#endif
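
/*
 * Usage sketch (illustrative; my_dev and its fields are hypothetical, not
 * from this file): the canonical teardown pattern stops the source of
 * re-arming first, then synchronously kills the timer:
 *
 *	static void my_dev_shutdown(struct my_dev *dev)
 *	{
 *		dev->stopping = true;
 *		del_timer_sync(&dev->tick_timer);
 *	}
 *
 * where the timer callback checks dev->stopping before calling mod_timer()
 * again.
 */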
static int cascade(struct tvec_base *base, struct tvec *tv, int index)
{
	/* cascade all the timers from tv up one level */
	struct timer_list *timer, *tmp;
	struct list_head tv_list;

	list_replace_init(tv->vec + index, &tv_list);

	/*
	 * We are removing _all_ timers from the list, so we
	 * don't have to detach them individually.
	 */
	list_for_each_entry_safe(timer, tmp, &tv_list, entry) {
		BUG_ON(tbase_get_base(timer->base) != base);
		/* No accounting, while moving them */
		__internal_add_timer(base, timer);
	}

	return index;
}
static void call_timer_fn(struct timer_list *timer, void (*fn)(unsigned long),
			  unsigned long data)
{
	int preempt_count = preempt_count();

#ifdef CONFIG_LOCKDEP
	/*
	 * It is permissible to free the timer from inside the
	 * function that is called from it, this we need to take into
	 * account for lockdep too. To avoid bogus "held lock freed"
	 * warnings as well as problems when looking into
	 * timer->lockdep_map, make a copy and use that here.
	 */
	struct lockdep_map lockdep_map;

	lockdep_copy_map(&lockdep_map, &timer->lockdep_map);
#endif
	/*
	 * Couple the lock chain with the lock chain at
	 * del_timer_sync() by acquiring the lock_map around the fn()
	 * call here and in del_timer_sync().
	 */
	lock_map_acquire(&lockdep_map);

	trace_timer_expire_entry(timer);
	fn(data);
	trace_timer_expire_exit(timer);

	lock_map_release(&lockdep_map);

	if (preempt_count != preempt_count()) {
		WARN_ONCE(1, "timer: %pF preempt leak: %08x -> %08x\n",
			  fn, preempt_count, preempt_count());
		/*
		 * Restore the preempt count. That gives us a decent
		 * chance to survive and extract information. If the
		 * callback kept a lock held, bad luck, but not worse
		 * than the BUG() we had.
		 */
		preempt_count() = preempt_count;
	}
}
#define INDEX(N) ((base->timer_jiffies >> (TVR_BITS + (N) * TVN_BITS)) & TVN_MASK)

/**
 * __run_timers - run all expired timers (if any) on this CPU.
 * @base: the timer vector to be processed.
 *
 * This function cascades all vectors and executes all expired timer
 * vectors.
 */
static inline void __run_timers(struct tvec_base *base)
{
	struct timer_list *timer;

	spin_lock_irq(&base->lock);
	while (time_after_eq(jiffies, base->timer_jiffies)) {
		struct list_head work_list;
		struct list_head *head = &work_list;
		int index = base->timer_jiffies & TVR_MASK;

		/*
		 * Cascade timers:
		 */
		if (!index &&
			(!cascade(base, &base->tv2, INDEX(0))) &&
				(!cascade(base, &base->tv3, INDEX(1))) &&
					!cascade(base, &base->tv4, INDEX(2)))
			cascade(base, &base->tv5, INDEX(3));
		++base->timer_jiffies;
		list_replace_init(base->tv1.vec + index, &work_list);
		while (!list_empty(head)) {
			void (*fn)(unsigned long);
			unsigned long data;
			bool irqsafe;

			timer = list_first_entry(head, struct timer_list, entry);
			fn = timer->function;
			data = timer->data;
			irqsafe = tbase_get_irqsafe(timer->base);

			timer_stats_account_timer(timer);

			base->running_timer = timer;
			detach_expired_timer(timer, base);

			if (irqsafe) {
				spin_unlock(&base->lock);
				call_timer_fn(timer, fn, data);
				spin_lock(&base->lock);
			} else {
				spin_unlock_irq(&base->lock);
				call_timer_fn(timer, fn, data);
				spin_lock_irq(&base->lock);
			}
		}
	}
	base->running_timer = NULL;
	spin_unlock_irq(&base->lock);
}
#ifdef CONFIG_NO_HZ
/*
 * Find out when the next timer event is due to happen. This
 * is used on S/390 to stop all activity when a CPU is idle.
 * This function needs to be called with interrupts disabled.
 */
static unsigned long __next_timer_interrupt(struct tvec_base *base)
{
	unsigned long timer_jiffies = base->timer_jiffies;
	unsigned long expires = timer_jiffies + NEXT_TIMER_MAX_DELTA;
	int index, slot, array, found = 0;
	struct timer_list *nte;
	struct tvec *varray[4];

	/* Look for timer events in tv1. */
	index = slot = timer_jiffies & TVR_MASK;
	do {
		list_for_each_entry(nte, base->tv1.vec + slot, entry) {
			if (tbase_get_deferrable(nte->base))
				continue;

			found = 1;
			expires = nte->expires;
			/* Look at the cascade bucket(s)? */
			if (!index || slot < index)
				goto cascade;
			return expires;
		}
		slot = (slot + 1) & TVR_MASK;
	} while (slot != index);

cascade:
	/* Calculate the next cascade event */
	if (index)
		timer_jiffies += TVR_SIZE - index;
	timer_jiffies >>= TVR_BITS;

	/* Check tv2-tv5. */
	varray[0] = &base->tv2;
	varray[1] = &base->tv3;
	varray[2] = &base->tv4;
	varray[3] = &base->tv5;

	for (array = 0; array < 4; array++) {
		struct tvec *varp = varray[array];

		index = slot = timer_jiffies & TVN_MASK;
		do {
			list_for_each_entry(nte, varp->vec + slot, entry) {
				if (tbase_get_deferrable(nte->base))
					continue;

				found = 1;
				if (time_before(nte->expires, expires))
					expires = nte->expires;
			}
			/*
			 * Are we still searching for the first timer, or are
			 * we looking up the cascade buckets?
			 */
			if (found) {
				/* Look at the cascade bucket(s)? */
				if (!index || slot < index)
					break;
				return expires;
			}
			slot = (slot + 1) & TVN_MASK;
		} while (slot != index);

		if (index)
			timer_jiffies += TVN_SIZE - index;
		timer_jiffies >>= TVN_BITS;
	}
	return expires;
}
/*
 * Check if the next hrtimer event is before the next timer wheel
 * event:
 */
static unsigned long cmp_next_hrtimer_event(unsigned long now,
					    unsigned long expires)
{
	ktime_t hr_delta = hrtimer_get_next_event();
	struct timespec tsdelta;
	unsigned long delta;

	if (hr_delta.tv64 == KTIME_MAX)
		return expires;

	/*
	 * Expired timer available, let it expire in the next tick
	 */
	if (hr_delta.tv64 <= 0)
		return now + 1;

	tsdelta = ktime_to_timespec(hr_delta);
	delta = timespec_to_jiffies(&tsdelta);

	/*
	 * Limit the delta to the max value, which is checked in
	 * tick_nohz_stop_sched_tick():
	 */
	if (delta > NEXT_TIMER_MAX_DELTA)
		delta = NEXT_TIMER_MAX_DELTA;

	/*
	 * Take rounding errors into account and make sure that it
	 * expires in the next tick. Otherwise we go into an endless
	 * ping pong due to tick_nohz_stop_sched_tick() retriggering
	 * the timer softirq.
	 */
	if (delta < 1)
		delta = 1;
	now += delta;
	if (time_before(now, expires))
		return now;
	return expires;
}
/**
 * get_next_timer_interrupt - return the jiffy of the next pending timer
 * @now: current time (in jiffies)
 */
unsigned long get_next_timer_interrupt(unsigned long now)
{
	struct tvec_base *base = __this_cpu_read(tvec_bases);
	unsigned long expires = now + NEXT_TIMER_MAX_DELTA;

	/*
	 * Pretend that there is no timer pending if the cpu is offline.
	 * Possible pending timers will be migrated later to an active cpu.
	 */
	if (cpu_is_offline(smp_processor_id()))
		return expires;

	spin_lock(&base->lock);
	if (base->active_timers) {
		if (time_before_eq(base->next_timer, base->timer_jiffies))
			base->next_timer = __next_timer_interrupt(base);
		expires = base->next_timer;
	}
	spin_unlock(&base->lock);

	if (time_before_eq(expires, now))
		return now;

	return cmp_next_hrtimer_event(now, expires);
}
#endif
/*
 * Called from the timer interrupt handler to charge one tick to the current
 * process. user_tick is 1 if the tick is user time, 0 for system.
 */
void update_process_times(int user_tick)
{
	struct task_struct *p = current;
	int cpu = smp_processor_id();

	/* Note: this timer irq context must be accounted for as well. */
	account_process_tick(p, user_tick);
	run_local_timers();
	rcu_check_callbacks(cpu, user_tick);
#ifdef CONFIG_IRQ_WORK
	if (in_irq())
		irq_work_run();
#endif
	scheduler_tick();
	run_posix_cpu_timers(p);
}
/*
 * This function runs timers and the timer-tq in bottom half context.
 */
static void run_timer_softirq(struct softirq_action *h)
{
	struct tvec_base *base = __this_cpu_read(tvec_bases);

	hrtimer_run_pending();

	if (time_after_eq(jiffies, base->timer_jiffies))
		__run_timers(base);
}

/*
 * Called by the local, per-CPU timer interrupt on SMP.
 */
void run_local_timers(void)
{
	hrtimer_run_queues();
	raise_softirq(TIMER_SOFTIRQ);
}
#ifdef __ARCH_WANT_SYS_ALARM

/*
 * For backwards compatibility? This can be done in libc so Alpha
 * and all newer ports shouldn't need it.
 */
SYSCALL_DEFINE1(alarm, unsigned int, seconds)
{
	return alarm_setitimer(seconds);
}

#endif
/**
 * sys_getpid - return the thread group id of the current process
 *
 * Note, despite the name, this returns the tgid not the pid. The tgid and
 * the pid are identical unless CLONE_THREAD was specified on clone() in
 * which case the tgid is the same in all threads of the same group.
 *
 * This is SMP safe as current->tgid does not change.
 */
SYSCALL_DEFINE0(getpid)
{
	return task_tgid_vnr(current);
}

/*
 * Accessing ->real_parent is not SMP-safe, it could
 * change from under us. However, we can use a stale
 * value of ->real_parent under rcu_read_lock(), see
 * release_task()->call_rcu(delayed_put_task_struct).
 */
SYSCALL_DEFINE0(getppid)
{
	int pid;

	rcu_read_lock();
	pid = task_tgid_vnr(rcu_dereference(current->real_parent));
	rcu_read_unlock();

	return pid;
}

SYSCALL_DEFINE0(getuid)
{
	/* Only we change this so SMP safe */
	return from_kuid_munged(current_user_ns(), current_uid());
}

SYSCALL_DEFINE0(geteuid)
{
	/* Only we change this so SMP safe */
	return from_kuid_munged(current_user_ns(), current_euid());
}

SYSCALL_DEFINE0(getgid)
{
	/* Only we change this so SMP safe */
	return from_kgid_munged(current_user_ns(), current_gid());
}

SYSCALL_DEFINE0(getegid)
{
	/* Only we change this so SMP safe */
	return from_kgid_munged(current_user_ns(), current_egid());
}
  1276. static void process_timeout(unsigned long __data)
  1277. {
  1278. wake_up_process((struct task_struct *)__data);
  1279. }
  1280. /**
  1281. * schedule_timeout - sleep until timeout
  1282. * @timeout: timeout value in jiffies
  1283. *
  1284. * Make the current task sleep until @timeout jiffies have
  1285. * elapsed. The routine will return immediately unless
  1286. * the current task state has been set (see set_current_state()).
  1287. *
  1288. * You can set the task state as follows -
  1289. *
  1290. * %TASK_UNINTERRUPTIBLE - at least @timeout jiffies are guaranteed to
  1291. * pass before the routine returns. The routine will return 0
  1292. *
  1293. * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
  1294. * delivered to the current task. In this case the remaining time
  1295. * in jiffies will be returned, or 0 if the timer expired in time
  1296. *
  1297. * The current task state is guaranteed to be TASK_RUNNING when this
  1298. * routine returns.
  1299. *
  1300. * Specifying a @timeout value of %MAX_SCHEDULE_TIMEOUT will schedule
  1301. * the CPU away without a bound on the timeout. In this case the return
  1302. * value will be %MAX_SCHEDULE_TIMEOUT.
  1303. *
  1304. * In all cases the return value is guaranteed to be non-negative.
  1305. */
signed long __sched schedule_timeout(signed long timeout)
{
	struct timer_list timer;
	unsigned long expire;

	switch (timeout)
	{
	case MAX_SCHEDULE_TIMEOUT:
		/*
		 * These two special cases are useful to be comfortable
		 * in the caller. Nothing more. We could take
		 * MAX_SCHEDULE_TIMEOUT from one of the negative values
		 * but I'd like to return a valid offset (>= 0) to allow
		 * the caller to do whatever it wants with the retval.
		 */
		schedule();
		goto out;
	default:
		/*
		 * Another bit of PARANOID. Note that the retval will be
		 * 0 since no piece of kernel is supposed to do a check
		 * for a negative retval of schedule_timeout() (since it
		 * should never happen anyway). You just have the printk()
		 * that will tell you if something has gone wrong and where.
		 */
		if (timeout < 0) {
			printk(KERN_ERR "schedule_timeout: wrong timeout "
				"value %lx\n", timeout);
			dump_stack();
			current->state = TASK_RUNNING;
			goto out;
		}
	}

	expire = timeout + jiffies;

	setup_timer_on_stack(&timer, process_timeout, (unsigned long)current);
	__mod_timer(&timer, expire, false, TIMER_NOT_PINNED);
	schedule();
	del_singleshot_timer_sync(&timer);

	/* Remove the timer from the object tracker */
	destroy_timer_on_stack(&timer);

	timeout = expire - jiffies;

out:
	return timeout < 0 ? 0 : timeout;
}
EXPORT_SYMBOL(schedule_timeout);
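
/*
 * Example (illustrative sketch, not part of this file): the protocol
 * documented above is "set the task state, then call schedule_timeout()".
 * A caller waiting up to one second while staying responsive to signals
 * might do:
 *
 *	signed long remaining;
 *
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	remaining = schedule_timeout(HZ);
 *	if (remaining)
 *		handle_early_wakeup();	(signal or explicit wake_up_process())
 *	else
 *		handle_timeout();	(the full second elapsed)
 *
 * handle_early_wakeup() and handle_timeout() are hypothetical stand-ins
 * for the caller's own logic.
 */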

/*
 * We can use __set_current_state() here because schedule_timeout() calls
 * schedule() unconditionally.
 */
signed long __sched schedule_timeout_interruptible(signed long timeout)
{
	__set_current_state(TASK_INTERRUPTIBLE);
	return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_interruptible);

signed long __sched schedule_timeout_killable(signed long timeout)
{
	__set_current_state(TASK_KILLABLE);
	return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_killable);

signed long __sched schedule_timeout_uninterruptible(signed long timeout)
{
	__set_current_state(TASK_UNINTERRUPTIBLE);
	return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_uninterruptible);
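
/*
 * Example (illustrative, not from this file): a kthread main loop often
 * pairs one of these wrappers with kthread_should_stop(), e.g.:
 *
 *	while (!kthread_should_stop()) {
 *		do_periodic_work();		(hypothetical helper)
 *		schedule_timeout_interruptible(5 * HZ);
 *	}
 *
 * The wrapper sets the task state itself, so the caller does not need
 * its own set_current_state() before each iteration.
 */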

/* Thread ID - the internal kernel "pid" */
SYSCALL_DEFINE0(gettid)
{
	return task_pid_vnr(current);
}
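
/*
 * Illustrative note (not part of this file): glibc historically shipped
 * no gettid() wrapper, so userspace commonly invokes the raw syscall:
 *
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *
 *	pid_t tid = syscall(SYS_gettid);
 *
 * For the main thread this equals getpid(); every other thread gets its
 * own kernel task id.
 */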

/**
 * do_sysinfo - fill in sysinfo struct
 * @info: pointer to buffer to fill
 */
int do_sysinfo(struct sysinfo *info)
{
	unsigned long mem_total, sav_total;
	unsigned int mem_unit, bitcount;
	struct timespec tp;

	memset(info, 0, sizeof(struct sysinfo));

	ktime_get_ts(&tp);
	monotonic_to_bootbased(&tp);
	info->uptime = tp.tv_sec + (tp.tv_nsec ? 1 : 0);

	get_avenrun(info->loads, 0, SI_LOAD_SHIFT - FSHIFT);

	info->procs = nr_threads;

	si_meminfo(info);
	si_swapinfo(info);

	/*
	 * If the sum of all the available memory (i.e. ram + swap)
	 * is less than can be stored in a 32 bit unsigned long then
	 * we can be binary compatible with 2.2.x kernels. If not,
	 * well, in that case 2.2.x was broken anyways...
	 *
	 *  -Erik Andersen <andersee@debian.org>
	 */
	mem_total = info->totalram + info->totalswap;
	if (mem_total < info->totalram || mem_total < info->totalswap)
		goto out;
	bitcount = 0;
	mem_unit = info->mem_unit;
	while (mem_unit > 1) {
		bitcount++;
		mem_unit >>= 1;
		sav_total = mem_total;
		mem_total <<= 1;
		if (mem_total < sav_total)
			goto out;
	}

	/*
	 * If mem_total did not overflow, multiply all memory values by
	 * info->mem_unit and set it to 1. This leaves things compatible
	 * with 2.2.x, and also retains compatibility with earlier 2.4.x
	 * kernels...
	 */

	info->mem_unit = 1;
	info->totalram <<= bitcount;
	info->freeram <<= bitcount;
	info->sharedram <<= bitcount;
	info->bufferram <<= bitcount;
	info->totalswap <<= bitcount;
	info->freeswap <<= bitcount;
	info->totalhigh <<= bitcount;
	info->freehigh <<= bitcount;

out:
	return 0;
}

SYSCALL_DEFINE1(sysinfo, struct sysinfo __user *, info)
{
	struct sysinfo val;

	do_sysinfo(&val);

	if (copy_to_user(info, &val, sizeof(struct sysinfo)))
		return -EFAULT;

	return 0;
}
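
/*
 * Example (illustrative userspace sketch): whichever path do_sysinfo()
 * takes above, a portable consumer must still scale by mem_unit, since
 * it is only normalized to 1 when the totals fit in an unsigned long:
 *
 *	#include <sys/sysinfo.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		struct sysinfo si;
 *
 *		if (sysinfo(&si) == 0)
 *			printf("total ram: %llu bytes, uptime: %ld s\n",
 *			       (unsigned long long)si.totalram * si.mem_unit,
 *			       si.uptime);
 *		return 0;
 *	}
 */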

static int __cpuinit init_timers_cpu(int cpu)
{
	int j;
	struct tvec_base *base;
	static char __cpuinitdata tvec_base_done[NR_CPUS];

	if (!tvec_base_done[cpu]) {
		static char boot_done;

		if (boot_done) {
			/*
			 * The APs use this path later in boot
			 */
			base = kmalloc_node(sizeof(*base),
					    GFP_KERNEL | __GFP_ZERO,
					    cpu_to_node(cpu));
			if (!base)
				return -ENOMEM;

			/* Make sure that tvec_base is 2 byte aligned */
			if (tbase_get_deferrable(base)) {
				WARN_ON(1);
				kfree(base);
				return -ENOMEM;
			}
			per_cpu(tvec_bases, cpu) = base;
		} else {
			/*
			 * This is for the boot CPU - we use compile-time
			 * static initialisation because per-cpu memory isn't
			 * ready yet and because the memory allocators are not
			 * initialised either.
			 */
			boot_done = 1;
			base = &boot_tvec_bases;
		}
		tvec_base_done[cpu] = 1;
	} else {
		base = per_cpu(tvec_bases, cpu);
	}

	spin_lock_init(&base->lock);
	for (j = 0; j < TVN_SIZE; j++) {
		INIT_LIST_HEAD(base->tv5.vec + j);
		INIT_LIST_HEAD(base->tv4.vec + j);
		INIT_LIST_HEAD(base->tv3.vec + j);
		INIT_LIST_HEAD(base->tv2.vec + j);
	}
	for (j = 0; j < TVR_SIZE; j++)
		INIT_LIST_HEAD(base->tv1.vec + j);

	base->timer_jiffies = jiffies;
	base->next_timer = base->timer_jiffies;
	base->active_timers = 0;
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
static void migrate_timer_list(struct tvec_base *new_base, struct list_head *head)
{
	struct timer_list *timer;

	while (!list_empty(head)) {
		timer = list_first_entry(head, struct timer_list, entry);
		/* We ignore the accounting on the dying cpu */
		detach_timer(timer, false);
		timer_set_base(timer, new_base);
		internal_add_timer(new_base, timer);
	}
}

static void __cpuinit migrate_timers(int cpu)
{
	struct tvec_base *old_base;
	struct tvec_base *new_base;
	int i;

	BUG_ON(cpu_online(cpu));
	old_base = per_cpu(tvec_bases, cpu);
	new_base = get_cpu_var(tvec_bases);
	/*
	 * The caller is globally serialized and nobody else
	 * takes two locks at once, so deadlock is not possible.
	 */
	spin_lock_irq(&new_base->lock);
	spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);

	BUG_ON(old_base->running_timer);

	for (i = 0; i < TVR_SIZE; i++)
		migrate_timer_list(new_base, old_base->tv1.vec + i);
	for (i = 0; i < TVN_SIZE; i++) {
		migrate_timer_list(new_base, old_base->tv2.vec + i);
		migrate_timer_list(new_base, old_base->tv3.vec + i);
		migrate_timer_list(new_base, old_base->tv4.vec + i);
		migrate_timer_list(new_base, old_base->tv5.vec + i);
	}

	spin_unlock(&old_base->lock);
	spin_unlock_irq(&new_base->lock);
	put_cpu_var(tvec_bases);
}
#endif /* CONFIG_HOTPLUG_CPU */

static int __cpuinit timer_cpu_notify(struct notifier_block *self,
				      unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;
	int err;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		err = init_timers_cpu(cpu);
		if (err < 0)
			return notifier_from_errno(err);
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		migrate_timers(cpu);
		break;
#endif
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata timers_nb = {
	.notifier_call = timer_cpu_notify,
};

void __init init_timers(void)
{
	int err;

	/* ensure there are enough low bits for flags in timer->base pointer */
	BUILD_BUG_ON(__alignof__(struct tvec_base) & TIMER_FLAG_MASK);

	err = timer_cpu_notify(&timers_nb, (unsigned long)CPU_UP_PREPARE,
			       (void *)(long)smp_processor_id());
	init_timer_stats();

	BUG_ON(err != NOTIFY_OK);
	register_cpu_notifier(&timers_nb);
	open_softirq(TIMER_SOFTIRQ, run_timer_softirq);
}

/**
 * msleep - sleep safely even with waitqueue interruptions
 * @msecs: Time in milliseconds to sleep for
 */
void msleep(unsigned int msecs)
{
	unsigned long timeout = msecs_to_jiffies(msecs) + 1;

	while (timeout)
		timeout = schedule_timeout_uninterruptible(timeout);
}
EXPORT_SYMBOL(msleep);
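
/*
 * Illustrative usage note (not part of this file): the "+ 1" jiffy and
 * the retry loop guarantee at least @msecs of sleep even if a stray
 * wakeup arrives, which is what makes msleep() safe on shared
 * waitqueues. A driver waiting for hardware to settle might simply do:
 *
 *	writel(CTRL_RESET, regs + CTRL_REG);	(hypothetical registers)
 *	msleep(10);
 *	status = readl(regs + STATUS_REG);
 */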

/**
 * msleep_interruptible - sleep waiting for signals
 * @msecs: Time in milliseconds to sleep for
 */
unsigned long msleep_interruptible(unsigned int msecs)
{
	unsigned long timeout = msecs_to_jiffies(msecs) + 1;

	while (timeout && !signal_pending(current))
		timeout = schedule_timeout_interruptible(timeout);
	return jiffies_to_msecs(timeout);
}
EXPORT_SYMBOL(msleep_interruptible);
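
/*
 * Example (illustrative sketch): the return value is the time left in
 * milliseconds, so a caller can tell a completed sleep from a signal:
 *
 *	unsigned long left = msleep_interruptible(1000);
 *
 *	if (left)
 *		return -EINTR;	(interrupted with roughly `left' ms to go)
 */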

static int __sched do_usleep_range(unsigned long min, unsigned long max)
{
	ktime_t kmin;
	unsigned long delta;

	kmin = ktime_set(0, min * NSEC_PER_USEC);
	delta = (max - min) * NSEC_PER_USEC;
	return schedule_hrtimeout_range(&kmin, delta, HRTIMER_MODE_REL);
}

/**
 * usleep_range - Drop in replacement for udelay where wakeup is flexible
 * @min: Minimum time in usecs to sleep
 * @max: Maximum time in usecs to sleep
 */
void usleep_range(unsigned long min, unsigned long max)
{
	__set_current_state(TASK_UNINTERRUPTIBLE);
	do_usleep_range(min, max);
}
EXPORT_SYMBOL(usleep_range);
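
/*
 * Illustrative usage note (not part of this file): unlike udelay(),
 * usleep_range() sleeps on an hrtimer, and the min/max window lets the
 * timer subsystem coalesce nearby wakeups instead of programming an
 * exact one-shot. A driver giving hardware a short settle time might
 * write:
 *
 *	usleep_range(100, 200);		(sleep 100-200 us; must not be
 *					 called from atomic context)
 */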