/*
 *  linux/kernel/timer.c
 *
 *  Kernel internal timers, kernel timekeeping, basic process system calls
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-01-28  Modified by Finn Arne Gangstad to make timers scale better.
 *
 *  1997-09-10  Updated NTP code according to technical memorandum Jan '96
 *              "A Kernel Model for Precision Timekeeping" by Dave Mills
 *  1998-12-24  Fixed a xtime SMP race (we need the xtime_lock rw spinlock to
 *              serialize accesses to xtime/lost_ticks).
 *                              Copyright (C) 1998  Andrea Arcangeli
 *  1999-03-10  Improved NTP compatibility by Ulrich Windl
 *  2002-05-31  Move sys_sysinfo here and make its locking sane, Robert Love
 *  2000-10-05  Implemented scalable SMP per-CPU timer handling.
 *                              Copyright (C) 2000, 2001, 2002  Ingo Molnar
 *              Designed by David S. Miller, Alexey Kuznetsov and Ingo Molnar
 */

#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/notifier.h>
#include <linux/thread_info.h>
#include <linux/time.h>
#include <linux/jiffies.h>
#include <linux/posix-timers.h>
#include <linux/cpu.h>
#include <linux/syscalls.h>
#include <linux/delay.h>
#include <linux/clockchips.h>

#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/div64.h>
#include <asm/timex.h>
#include <asm/io.h>

u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES;

EXPORT_SYMBOL(jiffies_64);

/*
 * per-CPU timer vector definitions:
 */
#define TVN_BITS (CONFIG_BASE_SMALL ? 4 : 6)
#define TVR_BITS (CONFIG_BASE_SMALL ? 6 : 8)
#define TVN_SIZE (1 << TVN_BITS)
#define TVR_SIZE (1 << TVR_BITS)
#define TVN_MASK (TVN_SIZE - 1)
#define TVR_MASK (TVR_SIZE - 1)

typedef struct tvec_s {
	struct list_head vec[TVN_SIZE];
} tvec_t;

typedef struct tvec_root_s {
	struct list_head vec[TVR_SIZE];
} tvec_root_t;

struct tvec_t_base_s {
	spinlock_t lock;
	struct timer_list *running_timer;
	unsigned long timer_jiffies;
	tvec_root_t tv1;
	tvec_t tv2;
	tvec_t tv3;
	tvec_t tv4;
	tvec_t tv5;
} ____cacheline_aligned_in_smp;

typedef struct tvec_t_base_s tvec_base_t;

tvec_base_t boot_tvec_bases;
EXPORT_SYMBOL(boot_tvec_bases);
static DEFINE_PER_CPU(tvec_base_t *, tvec_bases) = &boot_tvec_bases;

/**
 * __round_jiffies - function to round jiffies to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * __round_jiffies() rounds an absolute time in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The exact rounding is skewed for each processor to avoid all
 * processors firing at the exact same time, which could lead
 * to lock contention or spurious cache line bouncing.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long __round_jiffies(unsigned long j, int cpu)
{
	int rem;
	unsigned long original = j;

	/*
	 * We don't want all cpus firing their timers at once hitting the
	 * same lock or cachelines, so we skew each extra cpu with an extra
	 * 3 jiffies. This 3 jiffies came originally from the mm/ code which
	 * already did this.
	 * The skew is done by adding 3*cpunr, then rounding, then
	 * subtracting this extra offset again.
	 */
	j += cpu * 3;

	rem = j % HZ;

	/*
	 * If the target jiffy is just after a whole second (which can happen
	 * due to delays of the timer irq, long irq off times etc etc) then
	 * we should round down to the whole second, not up. Use 1/4th second
	 * as cutoff for this rounding as an extreme upper bound for this.
	 */
	if (rem < HZ/4) /* round down */
		j = j - rem;
	else /* round up */
		j = j - rem + HZ;

	/* now that we have rounded, subtract the extra skew again */
	j -= cpu * 3;

	if (j <= jiffies) /* rounding ate our timeout entirely; */
		return original;
	return j;
}
EXPORT_SYMBOL_GPL(__round_jiffies);

/**
 * __round_jiffies_relative - function to round jiffies to a full second
 * @j: the time in (relative) jiffies that should be rounded
 * @cpu: the processor number on which the timeout will happen
 *
 * __round_jiffies_relative() rounds a time delta in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The exact rounding is skewed for each processor to avoid all
 * processors firing at the exact same time, which could lead
 * to lock contention or spurious cache line bouncing.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long __round_jiffies_relative(unsigned long j, int cpu)
{
	/*
	 * In theory the following code can skip a jiffy in case jiffies
	 * increments right between the addition and the later subtraction.
	 * However since the entire point of this function is to use
	 * approximate timeouts, it's entirely ok to not handle that.
	 */
	return __round_jiffies(j + jiffies, cpu) - jiffies;
}
EXPORT_SYMBOL_GPL(__round_jiffies_relative);

/**
 * round_jiffies - function to round jiffies to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 *
 * round_jiffies() rounds an absolute time in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long round_jiffies(unsigned long j)
{
	return __round_jiffies(j, raw_smp_processor_id());
}
EXPORT_SYMBOL_GPL(round_jiffies);

/**
 * round_jiffies_relative - function to round jiffies to a full second
 * @j: the time in (relative) jiffies that should be rounded
 *
 * round_jiffies_relative() rounds a time delta in the future (in jiffies)
 * up or down to (approximately) full seconds. This is useful for timers
 * for which the exact time they fire does not matter too much, as long as
 * they fire approximately every X seconds.
 *
 * By rounding these timers to whole seconds, all such timers will fire
 * at the same time, rather than at various times spread out. The goal
 * of this is to have the CPU wake up less, which saves power.
 *
 * The return value is the rounded version of the @j parameter.
 */
unsigned long round_jiffies_relative(unsigned long j)
{
	return __round_jiffies_relative(j, raw_smp_processor_id());
}
EXPORT_SYMBOL_GPL(round_jiffies_relative);
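/*
 * Usage sketch (illustrative only; "poll_timer" is a hypothetical timer,
 * not a kernel symbol): a driver that polls roughly every two seconds
 * can let its wakeup coalesce with other second-aligned timers:
 *
 *	mod_timer(&poll_timer, round_jiffies(jiffies + 2 * HZ));
 */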
static inline void set_running_timer(tvec_base_t *base,
					struct timer_list *timer)
{
#ifdef CONFIG_SMP
	base->running_timer = timer;
#endif
}

static void internal_add_timer(tvec_base_t *base, struct timer_list *timer)
{
	unsigned long expires = timer->expires;
	unsigned long idx = expires - base->timer_jiffies;
	struct list_head *vec;

	if (idx < TVR_SIZE) {
		int i = expires & TVR_MASK;
		vec = base->tv1.vec + i;
	} else if (idx < 1 << (TVR_BITS + TVN_BITS)) {
		int i = (expires >> TVR_BITS) & TVN_MASK;
		vec = base->tv2.vec + i;
	} else if (idx < 1 << (TVR_BITS + 2 * TVN_BITS)) {
		int i = (expires >> (TVR_BITS + TVN_BITS)) & TVN_MASK;
		vec = base->tv3.vec + i;
	} else if (idx < 1 << (TVR_BITS + 3 * TVN_BITS)) {
		int i = (expires >> (TVR_BITS + 2 * TVN_BITS)) & TVN_MASK;
		vec = base->tv4.vec + i;
	} else if ((signed long) idx < 0) {
		/*
		 * Can happen if you add a timer with expires == jiffies,
		 * or you set a timer to go off in the past
		 */
		vec = base->tv1.vec + (base->timer_jiffies & TVR_MASK);
	} else {
		int i;
		/* If the timeout is larger than 0xffffffff on 64-bit
		 * architectures then we use the maximum timeout:
		 */
		if (idx > 0xffffffffUL) {
			idx = 0xffffffffUL;
			expires = idx + base->timer_jiffies;
		}
		i = (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK;
		vec = base->tv5.vec + i;
	}
	/*
	 * Timers are FIFO:
	 */
	list_add_tail(&timer->entry, vec);
}
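/*
 * Worked example of the bucket arithmetic above (assuming
 * CONFIG_BASE_SMALL=0, i.e. TVR_BITS=8, TVN_BITS=6): a timer due in
 * idx = 300 jiffies misses tv1 (idx >= 256) but fits tv2
 * (idx < 1 << 14), so it lands in slot (expires >> 8) & 63 of tv2.
 * It is cascaded into a precise tv1 slot once timer_jiffies has
 * advanced far enough.
 */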
/**
 * init_timer - initialize a timer.
 * @timer: the timer to be initialized
 *
 * init_timer() must be done to a timer prior to calling *any* of the
 * other timer functions.
 */
void fastcall init_timer(struct timer_list *timer)
{
	timer->entry.next = NULL;
	timer->base = __raw_get_cpu_var(tvec_bases);
}
EXPORT_SYMBOL(init_timer);
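/*
 * Typical setup sketch (my_timer, my_func and my_cookie are
 * placeholders, not kernel symbols):
 *
 *	struct timer_list my_timer;
 *
 *	init_timer(&my_timer);
 *	my_timer.function = my_func;	(void my_func(unsigned long))
 *	my_timer.data = my_cookie;
 *	my_timer.expires = jiffies + HZ;
 *	add_timer(&my_timer);
 *
 * setup_timer() folds the init_timer() and function/data assignments
 * into a single call.
 */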
static inline void detach_timer(struct timer_list *timer,
				int clear_pending)
{
	struct list_head *entry = &timer->entry;

	__list_del(entry->prev, entry->next);
	if (clear_pending)
		entry->next = NULL;
	entry->prev = LIST_POISON2;
}

/*
 * We are using hashed locking: holding per_cpu(tvec_bases).lock
 * means that all timers which are tied to this base via timer->base are
 * locked, and the base itself is locked too.
 *
 * So __run_timers/migrate_timers can safely modify all timers which could
 * be found on ->tvX lists.
 *
 * When the timer's base is locked, and the timer removed from list, it is
 * possible to set timer->base = NULL and drop the lock: the timer remains
 * locked.
 */
static tvec_base_t *lock_timer_base(struct timer_list *timer,
					unsigned long *flags)
	__acquires(timer->base->lock)
{
	tvec_base_t *base;

	for (;;) {
		base = timer->base;
		if (likely(base != NULL)) {
			spin_lock_irqsave(&base->lock, *flags);
			if (likely(base == timer->base))
				return base;
			/* The timer has migrated to another CPU */
			spin_unlock_irqrestore(&base->lock, *flags);
		}
		cpu_relax();
	}
}

int __mod_timer(struct timer_list *timer, unsigned long expires)
{
	tvec_base_t *base, *new_base;
	unsigned long flags;
	int ret = 0;

	BUG_ON(!timer->function);

	base = lock_timer_base(timer, &flags);

	if (timer_pending(timer)) {
		detach_timer(timer, 0);
		ret = 1;
	}

	new_base = __get_cpu_var(tvec_bases);

	if (base != new_base) {
		/*
		 * We are trying to schedule the timer on the local CPU.
		 * However we can't change timer's base while it is running,
		 * otherwise del_timer_sync() can't detect that the timer's
		 * handler has not yet finished. This also guarantees that
		 * the timer is serialized wrt itself.
		 */
		if (likely(base->running_timer != timer)) {
			/* See the comment in lock_timer_base() */
			timer->base = NULL;
			spin_unlock(&base->lock);
			base = new_base;
			spin_lock(&base->lock);
			timer->base = base;
		}
	}

	timer->expires = expires;
	internal_add_timer(base, timer);
	spin_unlock_irqrestore(&base->lock, flags);

	return ret;
}
EXPORT_SYMBOL(__mod_timer);

/**
 * add_timer_on - start a timer on a particular CPU
 * @timer: the timer to be added
 * @cpu: the CPU to start it on
 *
 * This is not very scalable on SMP. Double adds are not possible.
 */
void add_timer_on(struct timer_list *timer, int cpu)
{
	tvec_base_t *base = per_cpu(tvec_bases, cpu);
	unsigned long flags;

	BUG_ON(timer_pending(timer) || !timer->function);
	spin_lock_irqsave(&base->lock, flags);
	timer->base = base;
	internal_add_timer(base, timer);
	spin_unlock_irqrestore(&base->lock, flags);
}
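/*
 * Sketch (hypothetical per-CPU watchdog; wd_timer/wd_func are
 * placeholders): to fire wd_func on a specific CPU rather than
 * wherever the caller happens to run:
 *
 *	init_timer(&wd_timer);
 *	wd_timer.function = wd_func;
 *	wd_timer.expires = jiffies + HZ;
 *	add_timer_on(&wd_timer, cpu);
 */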
/**
 * mod_timer - modify a timer's timeout
 * @timer: the timer to be modified
 * @expires: new timeout in jiffies
 *
 * mod_timer() is a more efficient way to update the expires field of an
 * active timer (if the timer is inactive it will be activated)
 *
 * mod_timer(timer, expires) is equivalent to:
 *
 *     del_timer(timer); timer->expires = expires; add_timer(timer);
 *
 * Note that if there are multiple unserialized concurrent users of the
 * same timer, then mod_timer() is the only safe way to modify the timeout,
 * since add_timer() cannot modify an already running timer.
 *
 * The function returns whether it has modified a pending timer or not.
 * (i.e. mod_timer() of an inactive timer returns 0, mod_timer() of an
 * active timer returns 1.)
 */
int mod_timer(struct timer_list *timer, unsigned long expires)
{
	BUG_ON(!timer->function);

	/*
	 * This is a common optimization triggered by the
	 * networking code - if the timer is re-modified
	 * to be the same thing then just return:
	 */
	if (timer->expires == expires && timer_pending(timer))
		return 1;

	return __mod_timer(timer, expires);
}
EXPORT_SYMBOL(mod_timer);
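/*
 * Usage sketch (wd_timer is a placeholder): push a watchdog timer
 * another 200ms into the future, whether or not it is currently
 * pending:
 *
 *	mod_timer(&wd_timer, jiffies + msecs_to_jiffies(200));
 */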
/**
 * del_timer - deactivate a timer.
 * @timer: the timer to be deactivated
 *
 * del_timer() deactivates a timer - this works on both active and inactive
 * timers.
 *
 * The function returns whether it has deactivated a pending timer or not.
 * (i.e. del_timer() of an inactive timer returns 0, del_timer() of an
 * active timer returns 1.)
 */
int del_timer(struct timer_list *timer)
{
	tvec_base_t *base;
	unsigned long flags;
	int ret = 0;

	if (timer_pending(timer)) {
		base = lock_timer_base(timer, &flags);
		if (timer_pending(timer)) {
			detach_timer(timer, 1);
			ret = 1;
		}
		spin_unlock_irqrestore(&base->lock, flags);
	}

	return ret;
}
EXPORT_SYMBOL(del_timer);

#ifdef CONFIG_SMP
/**
 * try_to_del_timer_sync - Try to deactivate a timer
 * @timer: the timer to delete
 *
 * This function tries to deactivate a timer. Upon successful (ret >= 0)
 * exit the timer is not queued and the handler is not running on any CPU.
 *
 * It must not be called from interrupt contexts.
 */
int try_to_del_timer_sync(struct timer_list *timer)
{
	tvec_base_t *base;
	unsigned long flags;
	int ret = -1;

	base = lock_timer_base(timer, &flags);

	if (base->running_timer == timer)
		goto out;

	ret = 0;
	if (timer_pending(timer)) {
		detach_timer(timer, 1);
		ret = 1;
	}
out:
	spin_unlock_irqrestore(&base->lock, flags);

	return ret;
}

/**
 * del_timer_sync - deactivate a timer and wait for the handler to finish.
 * @timer: the timer to be deactivated
 *
 * This function only differs from del_timer() on SMP: besides deactivating
 * the timer it also makes sure the handler has finished executing on other
 * CPUs.
 *
 * Synchronization rules: Callers must prevent restarting of the timer,
 * otherwise this function is meaningless. It must not be called from
 * interrupt contexts. The caller must not hold locks which would prevent
 * completion of the timer's handler. The timer's handler must not call
 * add_timer_on(). Upon exit the timer is not queued and the handler is
 * not running on any CPU.
 *
 * The function returns whether it has deactivated a pending timer or not.
 */
int del_timer_sync(struct timer_list *timer)
{
	for (;;) {
		int ret = try_to_del_timer_sync(timer);
		if (ret >= 0)
			return ret;
		cpu_relax();
	}
}
EXPORT_SYMBOL(del_timer_sync);
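/*
 * Teardown sketch (dev, shutting_down and poll_timer are placeholders):
 * the usual pattern on a driver's shutdown path is to stop re-arming
 * first, then synchronize:
 *
 *	dev->shutting_down = 1;		(handler checks this before mod_timer)
 *	del_timer_sync(&dev->poll_timer);
 *	(the handler can no longer be running; free its resources here)
 */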
#endif

static int cascade(tvec_base_t *base, tvec_t *tv, int index)
{
	/* cascade all the timers from tv up one level */
	struct timer_list *timer, *tmp;
	struct list_head tv_list;

	list_replace_init(tv->vec + index, &tv_list);

	/*
	 * We are removing _all_ timers from the list, so we
	 * don't have to detach them individually.
	 */
	list_for_each_entry_safe(timer, tmp, &tv_list, entry) {
		BUG_ON(timer->base != base);
		internal_add_timer(base, timer);
	}

	return index;
}

#define INDEX(N) ((base->timer_jiffies >> (TVR_BITS + (N) * TVN_BITS)) & TVN_MASK)

/**
 * __run_timers - run all expired timers (if any) on this CPU.
 * @base: the timer vector to be processed.
 *
 * This function cascades all vectors and executes all expired timer
 * vectors.
 */
static inline void __run_timers(tvec_base_t *base)
{
	struct timer_list *timer;

	spin_lock_irq(&base->lock);
	while (time_after_eq(jiffies, base->timer_jiffies)) {
		struct list_head work_list;
		struct list_head *head = &work_list;
		int index = base->timer_jiffies & TVR_MASK;

		/*
		 * Cascade timers:
		 */
		if (!index &&
			(!cascade(base, &base->tv2, INDEX(0))) &&
				(!cascade(base, &base->tv3, INDEX(1))) &&
					!cascade(base, &base->tv4, INDEX(2)))
			cascade(base, &base->tv5, INDEX(3));
		++base->timer_jiffies;
		list_replace_init(base->tv1.vec + index, &work_list);
		while (!list_empty(head)) {
			void (*fn)(unsigned long);
			unsigned long data;

			timer = list_entry(head->next,struct timer_list,entry);
			fn = timer->function;
			data = timer->data;

			set_running_timer(base, timer);
			detach_timer(timer, 1);
			spin_unlock_irq(&base->lock);
			{
				int preempt_count = preempt_count();
				fn(data);
				if (preempt_count != preempt_count()) {
					printk(KERN_WARNING "huh, entered %p "
					       "with preempt_count %08x, exited"
					       " with %08x?\n",
					       fn, preempt_count,
					       preempt_count());
					BUG();
				}
			}
			spin_lock_irq(&base->lock);
		}
	}
	set_running_timer(base, NULL);
	spin_unlock_irq(&base->lock);
}

#if defined(CONFIG_NO_IDLE_HZ) || defined(CONFIG_NO_HZ)
/*
 * Find out when the next timer event is due to happen. This
 * is used on S/390 to stop all activity when a cpu is idle.
 * This function needs to be called with interrupts disabled.
 */
static unsigned long __next_timer_interrupt(tvec_base_t *base)
{
	unsigned long timer_jiffies = base->timer_jiffies;
	unsigned long expires = timer_jiffies + (LONG_MAX >> 1);
	int index, slot, array, found = 0;
	struct timer_list *nte;
	tvec_t *varray[4];

	/* Look for timer events in tv1. */
	index = slot = timer_jiffies & TVR_MASK;
	do {
		list_for_each_entry(nte, base->tv1.vec + slot, entry) {
			found = 1;
			expires = nte->expires;
			/* Look at the cascade bucket(s)? */
			if (!index || slot < index)
				goto cascade;
			return expires;
		}
		slot = (slot + 1) & TVR_MASK;
	} while (slot != index);

cascade:
	/* Calculate the next cascade event */
	if (index)
		timer_jiffies += TVR_SIZE - index;
	timer_jiffies >>= TVR_BITS;

	/* Check tv2-tv5. */
	varray[0] = &base->tv2;
	varray[1] = &base->tv3;
	varray[2] = &base->tv4;
	varray[3] = &base->tv5;

	for (array = 0; array < 4; array++) {
		tvec_t *varp = varray[array];

		index = slot = timer_jiffies & TVN_MASK;
		do {
			list_for_each_entry(nte, varp->vec + slot, entry) {
				found = 1;
				if (time_before(nte->expires, expires))
					expires = nte->expires;
			}
			/*
			 * Do we still search for the first timer or are
			 * we looking up the cascade buckets ?
			 */
			if (found) {
				/* Look at the cascade bucket(s)? */
				if (!index || slot < index)
					break;
				return expires;
			}
			slot = (slot + 1) & TVN_MASK;
		} while (slot != index);

		if (index)
			timer_jiffies += TVN_SIZE - index;
		timer_jiffies >>= TVN_BITS;
	}
	return expires;
}

/*
 * Check, if the next hrtimer event is before the next timer wheel
 * event:
 */
static unsigned long cmp_next_hrtimer_event(unsigned long now,
					    unsigned long expires)
{
	ktime_t hr_delta = hrtimer_get_next_event();
	struct timespec tsdelta;

	if (hr_delta.tv64 == KTIME_MAX)
		return expires;

	if (hr_delta.tv64 <= TICK_NSEC)
		return now;

	tsdelta = ktime_to_timespec(hr_delta);
	now += timespec_to_jiffies(&tsdelta);
	if (time_before(now, expires))
		return now;
	return expires;
}

/**
 * get_next_timer_interrupt - return the jiffy of the next pending timer
 * @now: current time (in jiffies)
 */
unsigned long get_next_timer_interrupt(unsigned long now)
{
	tvec_base_t *base = __get_cpu_var(tvec_bases);
	unsigned long expires;

	spin_lock(&base->lock);
	expires = __next_timer_interrupt(base);
	spin_unlock(&base->lock);

	if (time_before_eq(expires, now))
		return now;

	return cmp_next_hrtimer_event(now, expires);
}

#ifdef CONFIG_NO_IDLE_HZ
unsigned long next_timer_interrupt(void)
{
	return get_next_timer_interrupt(jiffies);
}
#endif

#endif

/******************************************************************/

/*
 * The current time
 * wall_to_monotonic is what we need to add to xtime (or xtime corrected
 * for sub jiffy times) to get to monotonic time. Monotonic is pegged
 * at zero at system boot time, so wall_to_monotonic will be negative,
 * however, we will ALWAYS keep the tv_nsec part positive so we can use
 * the usual normalization.
 */
struct timespec xtime __attribute__ ((aligned (16)));
struct timespec wall_to_monotonic __attribute__ ((aligned (16)));

EXPORT_SYMBOL(xtime);

/* XXX - all of this timekeeping code should be later moved to time.c */
#include <linux/clocksource.h>
static struct clocksource *clock; /* pointer to current clocksource */

#ifdef CONFIG_GENERIC_TIME
/**
 * __get_nsec_offset - Returns nanoseconds since last call to update_wall_time
 *
 * private function, must hold xtime_lock lock when being
 * called. Returns the number of nanoseconds since the
 * last call to update_wall_time() (adjusted by NTP scaling)
 */
static inline s64 __get_nsec_offset(void)
{
	cycle_t cycle_now, cycle_delta;
	s64 ns_offset;

	/* read clocksource: */
	cycle_now = clocksource_read(clock);

	/* calculate the delta since the last update_wall_time: */
	cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;

	/* convert to nanoseconds: */
	ns_offset = cyc2ns(clock, cycle_delta);

	return ns_offset;
}
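/*
 * Worked example of the cyc2ns() conversion used above, which computes
 * ns = (cycle_delta * clock->mult) >> clock->shift (illustrative values):
 * for a 1 MHz clocksource with shift = 22, mult would be chosen as
 * (NSEC_PER_SEC << 22) / 1000000 = 1000 << 22, so a delta of 5 cycles
 * yields (5 * (1000 << 22)) >> 22 = 5000 ns.
 */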
/**
 * __get_realtime_clock_ts - Returns the time of day in a timespec
 * @ts: pointer to the timespec to be set
 *
 * Returns the time of day in a timespec. Used by
 * do_gettimeofday() and getnstimeofday().
 */
static inline void __get_realtime_clock_ts(struct timespec *ts)
{
	unsigned long seq;
	s64 nsecs;

	do {
		seq = read_seqbegin(&xtime_lock);

		*ts = xtime;
		nsecs = __get_nsec_offset();

	} while (read_seqretry(&xtime_lock, seq));

	timespec_add_ns(ts, nsecs);
}

/**
 * getnstimeofday - Returns the time of day in a timespec
 * @ts: pointer to the timespec to be set
 *
 * Returns the time of day in a timespec.
 */
void getnstimeofday(struct timespec *ts)
{
	__get_realtime_clock_ts(ts);
}
EXPORT_SYMBOL(getnstimeofday);

/**
 * do_gettimeofday - Returns the time of day in a timeval
 * @tv: pointer to the timeval to be set
 *
 * NOTE: Users should be converted to using getnstimeofday()
 */
void do_gettimeofday(struct timeval *tv)
{
	struct timespec now;

	__get_realtime_clock_ts(&now);
	tv->tv_sec = now.tv_sec;
	tv->tv_usec = now.tv_nsec/1000;
}
EXPORT_SYMBOL(do_gettimeofday);

/**
 * do_settimeofday - Sets the time of day
 * @tv: pointer to the timespec variable containing the new time
 *
 * Sets the time of day to the new time, updates NTP and notifies hrtimers
 */
int do_settimeofday(struct timespec *tv)
{
	unsigned long flags;
	time_t wtm_sec, sec = tv->tv_sec;
	long wtm_nsec, nsec = tv->tv_nsec;

	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
		return -EINVAL;

	write_seqlock_irqsave(&xtime_lock, flags);

	nsec -= __get_nsec_offset();

	wtm_sec  = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
	wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);

	set_normalized_timespec(&xtime, sec, nsec);
	set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);

	clock->error = 0;
	ntp_clear();

	write_sequnlock_irqrestore(&xtime_lock, flags);

	/* signal hrtimers about time change */
	clock_was_set();

	return 0;
}
EXPORT_SYMBOL(do_settimeofday);

/**
 * change_clocksource - Swaps clocksources if a new one is available
 *
 * Accumulates current time interval and initializes new clocksource
 */
static void change_clocksource(void)
{
	struct clocksource *new;
	cycle_t now;
	u64 nsec;

	new = clocksource_get_next();

	if (clock == new)
		return;

	now = clocksource_read(new);
	nsec = __get_nsec_offset();
	timespec_add_ns(&xtime, nsec);

	clock = new;
	clock->cycle_last = now;

	clock->error = 0;
	clock->xtime_nsec = 0;
	clocksource_calculate_interval(clock, NTP_INTERVAL_LENGTH);

	printk(KERN_INFO "Time: %s clocksource has been installed.\n",
	       clock->name);
}
#else
static inline void change_clocksource(void) { }
#endif

/**
 * timekeeping_is_continuous - check to see if timekeeping is free running
 */
int timekeeping_is_continuous(void)
{
	unsigned long seq;
	int ret;

	do {
		seq = read_seqbegin(&xtime_lock);

		ret = clock->flags & CLOCK_SOURCE_VALID_FOR_HRES;

	} while (read_seqretry(&xtime_lock, seq));

	return ret;
}

/**
 * read_persistent_clock - Return time in seconds from the persistent clock.
 *
 * Weak dummy function for arches that do not yet support it.
 * Returns seconds from epoch using the battery backed persistent clock.
 * Returns zero if unsupported.
 *
 * XXX - Be sure to remove it once all arches implement it.
 */
unsigned long __attribute__((weak)) read_persistent_clock(void)
{
	return 0;
}

/*
 * timekeeping_init - Initializes the clocksource and common timekeeping values
 */
void __init timekeeping_init(void)
{
	unsigned long flags;
	unsigned long sec = read_persistent_clock();

	write_seqlock_irqsave(&xtime_lock, flags);

	ntp_clear();

	clock = clocksource_get_next();
	clocksource_calculate_interval(clock, NTP_INTERVAL_LENGTH);
	clock->cycle_last = clocksource_read(clock);

	xtime.tv_sec = sec;
	xtime.tv_nsec = 0;
	set_normalized_timespec(&wall_to_monotonic,
		-xtime.tv_sec, -xtime.tv_nsec);

	write_sequnlock_irqrestore(&xtime_lock, flags);
}

/* flag for if timekeeping is suspended */
static int timekeeping_suspended;
/* time in seconds when suspend began */
static unsigned long timekeeping_suspend_time;

/**
 * timekeeping_resume - Resumes the generic timekeeping subsystem.
 * @dev: unused
 *
 * This is for the generic clocksource timekeeping.
 * xtime/wall_to_monotonic/jiffies/etc are
 * still managed by arch specific suspend/resume code.
 */
static int timekeeping_resume(struct sys_device *dev)
{
	unsigned long flags;
	unsigned long now = read_persistent_clock();

	write_seqlock_irqsave(&xtime_lock, flags);

	if (now && (now > timekeeping_suspend_time)) {
		unsigned long sleep_length = now - timekeeping_suspend_time;

		xtime.tv_sec += sleep_length;
		wall_to_monotonic.tv_sec -= sleep_length;
	}
	/* re-base the last cycle value */
	clock->cycle_last = clocksource_read(clock);
	clock->error = 0;
	timekeeping_suspended = 0;
	write_sequnlock_irqrestore(&xtime_lock, flags);

	touch_softlockup_watchdog();

	/* Resume hrtimers */
	clock_was_set();

	return 0;
}

static int timekeeping_suspend(struct sys_device *dev, pm_message_t state)
{
	unsigned long flags;

	write_seqlock_irqsave(&xtime_lock, flags);
	timekeeping_suspended = 1;
	timekeeping_suspend_time = read_persistent_clock();
	write_sequnlock_irqrestore(&xtime_lock, flags);
	return 0;
}

/* sysfs resume/suspend bits for timekeeping */
static struct sysdev_class timekeeping_sysclass = {
	.resume		= timekeeping_resume,
	.suspend	= timekeeping_suspend,
	set_kset_name("timekeeping"),
};

static struct sys_device device_timer = {
	.id		= 0,
	.cls		= &timekeeping_sysclass,
};

static int __init timekeeping_init_device(void)
{
	int error = sysdev_class_register(&timekeeping_sysclass);
	if (!error)
		error = sysdev_register(&device_timer);
	return error;
}

device_initcall(timekeeping_init_device);

/*
 * If the error is already larger, we look ahead even further
 * to compensate for late or lost adjustments.
 */
static __always_inline int clocksource_bigadjust(s64 error, s64 *interval,
						 s64 *offset)
{
	s64 tick_error, i;
	u32 look_ahead, adj;
	s32 error2, mult;

	/*
	 * Use the current error value to determine how much to look ahead.
	 * The larger the error the slower we adjust for it to avoid problems
	 * with losing too many ticks, otherwise we would overadjust and
	 * produce an even larger error. The smaller the adjustment the
	 * faster we try to adjust for it, as lost ticks can do less harm
	 * here. This is tuned so that an error of about 1 msec is adjusted
	 * within about 1 sec (or 2^20 nsec in 2^SHIFT_HZ ticks).
	 */
	error2 = clock->error >> (TICK_LENGTH_SHIFT + 22 - 2 * SHIFT_HZ);
	error2 = abs(error2);
	for (look_ahead = 0; error2 > 0; look_ahead++)
		error2 >>= 2;

	/*
	 * Now calculate the error in (1 << look_ahead) ticks, but first
	 * remove the single look ahead already included in the error.
	 */
	tick_error = current_tick_length() >>
		(TICK_LENGTH_SHIFT - clock->shift + 1);
	tick_error -= clock->xtime_interval >> 1;
	error = ((error - tick_error) >> look_ahead) + tick_error;

	/* Finally calculate the adjustment shift value. */
	i = *interval;
	mult = 1;
	if (error < 0) {
		error = -error;
		*interval = -*interval;
		*offset = -*offset;
		mult = -1;
	}
	for (adj = 0; error > i; adj++)
		error >>= 1;

	*interval <<= adj;
	*offset <<= adj;
	return mult << adj;
}

/*
 * Adjust the multiplier to reduce the error value,
 * this is optimized for the most common adjustments of -1,0,1,
 * for other values we can do a bit more work.
 */
static void clocksource_adjust(struct clocksource *clock, s64 offset)
{
	s64 error, interval = clock->cycle_interval;
	int adj;

	error = clock->error >> (TICK_LENGTH_SHIFT - clock->shift - 1);
	if (error > interval) {
		error >>= 2;
		if (likely(error <= interval))
			adj = 1;
		else
			adj = clocksource_bigadjust(error, &interval, &offset);
	} else if (error < -interval) {
		error >>= 2;
		if (likely(error >= -interval)) {
			adj = -1;
			interval = -interval;
			offset = -offset;
		} else
			adj = clocksource_bigadjust(error, &interval, &offset);
	} else
		return;

	clock->mult += adj;
	clock->xtime_interval += interval;
	clock->xtime_nsec -= offset;
	clock->error -= (interval - offset) <<
			(TICK_LENGTH_SHIFT - clock->shift);
}

/**
 * update_wall_time - Uses the current clocksource to increment the wall time
 *
 * Called from the timer interrupt, must hold a write on xtime_lock.
 */
static void update_wall_time(void)
{
	cycle_t offset;

	/* Make sure we're fully resumed: */
	if (unlikely(timekeeping_suspended))
		return;

#ifdef CONFIG_GENERIC_TIME
	offset = (clocksource_read(clock) - clock->cycle_last) & clock->mask;
#else
	offset = clock->cycle_interval;
#endif
	clock->xtime_nsec += (s64)xtime.tv_nsec << clock->shift;

	/* normally this loop will run just once, however in the
	 * case of lost or late ticks, it will accumulate correctly.
	 */
	while (offset >= clock->cycle_interval) {
		/* accumulate one interval */
		clock->xtime_nsec += clock->xtime_interval;
		clock->cycle_last += clock->cycle_interval;
		offset -= clock->cycle_interval;

		if (clock->xtime_nsec >= (u64)NSEC_PER_SEC << clock->shift) {
			clock->xtime_nsec -= (u64)NSEC_PER_SEC << clock->shift;
			xtime.tv_sec++;
			second_overflow();
		}

		/* interpolator bits */
		time_interpolator_update(clock->xtime_interval
						>> clock->shift);

		/* accumulate error between NTP and clock interval */
		clock->error += current_tick_length();
		clock->error -= clock->xtime_interval << (TICK_LENGTH_SHIFT - clock->shift);
	}

	/* correct the clock when NTP error is too big */
	clocksource_adjust(clock, offset);

	/* store full nanoseconds into xtime */
	xtime.tv_nsec = (s64)clock->xtime_nsec >> clock->shift;
	clock->xtime_nsec -= (s64)xtime.tv_nsec << clock->shift;

	/* check to see if there is a new clocksource to use */
	change_clocksource();
}
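/*
 * Worked example of the shifted arithmetic above (illustrative values):
 * with clock->shift = 10, nanoseconds are accumulated in units of
 * 2^-10 ns, so one full second is NSEC_PER_SEC << 10. A clocksource
 * interval of exactly 1 ms contributes xtime_interval = 1000000 << 10
 * per accumulated cycle_interval; after 1000 such intervals
 * xtime.tv_sec is incremented and second_overflow() runs.
 */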
/*
 * Called from the timer interrupt handler to charge one tick to the current
 * process.  user_tick is 1 if the tick is user time, 0 for system.
 */
void update_process_times(int user_tick)
{
	struct task_struct *p = current;
	int cpu = smp_processor_id();

	/* Note: this timer irq context must be accounted for as well. */
	if (user_tick)
		account_user_time(p, jiffies_to_cputime(1));
	else
		account_system_time(p, HARDIRQ_OFFSET, jiffies_to_cputime(1));
	run_local_timers();
	if (rcu_pending(cpu))
		rcu_check_callbacks(cpu, user_tick);
	scheduler_tick();
	run_posix_cpu_timers(p);
}

/*
 * Nr of active tasks - counted in fixed-point numbers
 */
static unsigned long count_active_tasks(void)
{
	return nr_active() * FIXED_1;
}

/*
 * Hmm.. Changed this, as the GNU make sources (load.c) seem to
 * imply that avenrun[] is the standard name for this kind of thing.
 * Nothing else seems to be standardized: the fractional size etc
 * all seem to differ on different machines.
 *
 * Requires xtime_lock to access.
 */
unsigned long avenrun[3];

EXPORT_SYMBOL(avenrun);

/*
 * calc_load - given tick count, update the avenrun load estimates.
 * This is called while holding a write_lock on xtime_lock.
 */
static inline void calc_load(unsigned long ticks)
{
	unsigned long active_tasks; /* fixed-point */
	static int count = LOAD_FREQ;

	count -= ticks;
	if (unlikely(count < 0)) {
		active_tasks = count_active_tasks();
		do {
			CALC_LOAD(avenrun[0], EXP_1, active_tasks);
			CALC_LOAD(avenrun[1], EXP_5, active_tasks);
			CALC_LOAD(avenrun[2], EXP_15, active_tasks);
			count += LOAD_FREQ;
		} while (count < 0);
	}
}
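/*
 * Worked example of one CALC_LOAD step (using the standard constants
 * FSHIFT = 11, FIXED_1 = 2048, EXP_1 = 1884): starting from
 * avenrun[0] = 0 with one runnable task (active_tasks = 2048),
 *
 *	load = (0 * 1884 + 2048 * (2048 - 1884)) >> 11 = 164
 *
 * i.e. 164/2048 ~= 0.08, which is why an idle box that suddenly runs
 * one busy loop shows a 1-minute load of about 0.08 after the first
 * 5-second LOAD_FREQ sample.
 */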
/*
 * This read-write spinlock protects us from races in SMP while
 * playing with xtime and avenrun.
 */
__attribute__((weak)) __cacheline_aligned_in_smp DEFINE_SEQLOCK(xtime_lock);

EXPORT_SYMBOL(xtime_lock);

/*
 * This function runs timers and the timer-tq in bottom half context.
 */
static void run_timer_softirq(struct softirq_action *h)
{
	tvec_base_t *base = __get_cpu_var(tvec_bases);

	hrtimer_run_queues();

	if (time_after_eq(jiffies, base->timer_jiffies))
		__run_timers(base);
}

/*
 * Called by the local, per-CPU timer interrupt on SMP.
 */
void run_local_timers(void)
{
	raise_softirq(TIMER_SOFTIRQ);
	softlockup_tick();
}

/*
 * Called by the timer interrupt. xtime_lock must already be taken
 * by the timer IRQ!
 */
static inline void update_times(unsigned long ticks)
{
	update_wall_time();
	calc_load(ticks);
}

/*
 * The 64-bit jiffies value is not atomic - you MUST NOT read it
 * without sampling the sequence number in xtime_lock.
 * jiffies is defined in the linker script...
 */
void do_timer(unsigned long ticks)
{
	jiffies_64 += ticks;
	update_times(ticks);
}

#ifdef __ARCH_WANT_SYS_ALARM

/*
 * For backwards compatibility?  This can be done in libc so Alpha
 * and all newer ports shouldn't need it.
 */
asmlinkage unsigned long sys_alarm(unsigned int seconds)
{
	return alarm_setitimer(seconds);
}

#endif

#ifndef __alpha__

/*
 * The Alpha uses getxpid, getxuid, and getxgid instead.  Maybe this
 * should be moved into arch/i386 instead?
 */

/**
 * sys_getpid - return the thread group id of the current process
 *
 * Note, despite the name, this returns the tgid not the pid.  The tgid and
 * the pid are identical unless CLONE_THREAD was specified on clone() in
 * which case the tgid is the same in all threads of the same group.
 *
 * This is SMP safe as current->tgid does not change.
 */
asmlinkage long sys_getpid(void)
{
	return current->tgid;
}

/*
 * Accessing ->real_parent is not SMP-safe, it could
 * change from under us. However, we can use a stale
 * value of ->real_parent under rcu_read_lock(), see
 * release_task()->call_rcu(delayed_put_task_struct).
 */
asmlinkage long sys_getppid(void)
{
	int pid;

	rcu_read_lock();
	pid = rcu_dereference(current->real_parent)->tgid;
	rcu_read_unlock();

	return pid;
}

asmlinkage long sys_getuid(void)
{
	/* Only we change this so SMP safe */
	return current->uid;
}

asmlinkage long sys_geteuid(void)
{
	/* Only we change this so SMP safe */
	return current->euid;
}

asmlinkage long sys_getgid(void)
{
	/* Only we change this so SMP safe */
	return current->gid;
}

asmlinkage long sys_getegid(void)
{
	/* Only we change this so SMP safe */
	return current->egid;
}

#endif

static void process_timeout(unsigned long __data)
{
	wake_up_process((struct task_struct *)__data);
}

/**
 * schedule_timeout - sleep until timeout
 * @timeout: timeout value in jiffies
 *
 * Make the current task sleep until @timeout jiffies have
 * elapsed. The routine will return immediately unless
 * the current task state has been set (see set_current_state()).
 *
 * You can set the task state as follows -
 *
 * %TASK_UNINTERRUPTIBLE - at least @timeout jiffies are guaranteed to
 * pass before the routine returns. The routine will return 0
 *
 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
 * delivered to the current task. In this case the remaining time
 * in jiffies will be returned, or 0 if the timer expired in time
 *
 * The current task state is guaranteed to be TASK_RUNNING when this
 * routine returns.
 *
 * Specifying a @timeout value of %MAX_SCHEDULE_TIMEOUT will schedule
 * the CPU away without a bound on the timeout. In this case the return
 * value will be %MAX_SCHEDULE_TIMEOUT.
 *
 * In all cases the return value is guaranteed to be non-negative.
 */
fastcall signed long __sched schedule_timeout(signed long timeout)
{
	struct timer_list timer;
	unsigned long expire;

	switch (timeout)
	{
	case MAX_SCHEDULE_TIMEOUT:
		/*
		 * These two special cases are useful to be comfortable
		 * in the caller. Nothing more. We could take
		 * MAX_SCHEDULE_TIMEOUT from one of the negative values
		 * but I'd like to return a valid offset (>=0) to allow
		 * the caller to do everything it wants with the retval.
		 */
		schedule();
		goto out;
	default:
		/*
		 * Another bit of PARANOID. Note that the retval will be
		 * 0 since no piece of kernel is supposed to do a check
		 * for a negative retval of schedule_timeout() (since it
		 * should never happen anyway). You just have the printk()
		 * that will tell you if something has gone wrong and where.
		 */
		if (timeout < 0) {
			printk(KERN_ERR "schedule_timeout: wrong timeout "
				"value %lx\n", timeout);
			dump_stack();
			current->state = TASK_RUNNING;
			goto out;
		}
	}

	expire = timeout + jiffies;

	setup_timer(&timer, process_timeout, (unsigned long)current);
	__mod_timer(&timer, expire);
	schedule();
	del_singleshot_timer_sync(&timer);

	timeout = expire - jiffies;

 out:
	return timeout < 0 ? 0 : timeout;
}
EXPORT_SYMBOL(schedule_timeout);
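/*
 * Usage sketch ("remaining" is a placeholder variable): an
 * interruptible sleep of roughly 100ms; remaining is nonzero only if
 * a signal woke us early:
 *
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	remaining = schedule_timeout(msecs_to_jiffies(100));
 */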
  1223. /*
  1224. * We can use __set_current_state() here because schedule_timeout() calls
  1225. * schedule() unconditionally.
  1226. */
  1227. signed long __sched schedule_timeout_interruptible(signed long timeout)
  1228. {
  1229. __set_current_state(TASK_INTERRUPTIBLE);
  1230. return schedule_timeout(timeout);
  1231. }
  1232. EXPORT_SYMBOL(schedule_timeout_interruptible);
  1233. signed long __sched schedule_timeout_uninterruptible(signed long timeout)
  1234. {
  1235. __set_current_state(TASK_UNINTERRUPTIBLE);
  1236. return schedule_timeout(timeout);
  1237. }
  1238. EXPORT_SYMBOL(schedule_timeout_uninterruptible);
  1239. /* Thread ID - the internal kernel "pid" */
  1240. asmlinkage long sys_gettid(void)
  1241. {
  1242. return current->pid;
  1243. }
  1244. /**
  1245. * do_sysinfo - fill in sysinfo struct
  1246. * @info: pointer to buffer to fill
  1247. */
  1248. int do_sysinfo(struct sysinfo *info)
  1249. {
  1250. unsigned long mem_total, sav_total;
  1251. unsigned int mem_unit, bitcount;
  1252. unsigned long seq;
  1253. memset(info, 0, sizeof(struct sysinfo));
  1254. do {
  1255. struct timespec tp;
  1256. seq = read_seqbegin(&xtime_lock);
  1257. /*
  1258. * This is annoying. The below is the same thing
  1259. * posix_get_clock_monotonic() does, but it wants to
  1260. * take the lock which we want to cover the loads stuff
  1261. * too.
  1262. */
  1263. getnstimeofday(&tp);
  1264. tp.tv_sec += wall_to_monotonic.tv_sec;
  1265. tp.tv_nsec += wall_to_monotonic.tv_nsec;
  1266. if (tp.tv_nsec - NSEC_PER_SEC >= 0) {
  1267. tp.tv_nsec = tp.tv_nsec - NSEC_PER_SEC;
  1268. tp.tv_sec++;
  1269. }
  1270. info->uptime = tp.tv_sec + (tp.tv_nsec ? 1 : 0);
  1271. info->loads[0] = avenrun[0] << (SI_LOAD_SHIFT - FSHIFT);
  1272. info->loads[1] = avenrun[1] << (SI_LOAD_SHIFT - FSHIFT);
  1273. info->loads[2] = avenrun[2] << (SI_LOAD_SHIFT - FSHIFT);
  1274. info->procs = nr_threads;
  1275. } while (read_seqretry(&xtime_lock, seq));
  1276. si_meminfo(info);
  1277. si_swapinfo(info);
  1278. /*
  1279. * If the sum of all the available memory (i.e. ram + swap)
  1280. * is less than can be stored in a 32 bit unsigned long then
  1281. * we can be binary compatible with 2.2.x kernels. If not,
  1282. * well, in that case 2.2.x was broken anyways...
  1283. *
  1284. * -Erik Andersen <andersee@debian.org>
  1285. */
  1286. mem_total = info->totalram + info->totalswap;
  1287. if (mem_total < info->totalram || mem_total < info->totalswap)
  1288. goto out;
  1289. bitcount = 0;
  1290. mem_unit = info->mem_unit;
  1291. while (mem_unit > 1) {
  1292. bitcount++;
  1293. mem_unit >>= 1;
  1294. sav_total = mem_total;
  1295. mem_total <<= 1;
  1296. if (mem_total < sav_total)
  1297. goto out;
  1298. }
  1299. /*
  1300. * If mem_total did not overflow, multiply all memory values by
  1301. * info->mem_unit and set it to 1. This leaves things compatible
  1302. * with 2.2.x, and also retains compatibility with earlier 2.4.x
  1303. * kernels...
  1304. */
  1305. info->mem_unit = 1;
  1306. info->totalram <<= bitcount;
  1307. info->freeram <<= bitcount;
  1308. info->sharedram <<= bitcount;
  1309. info->bufferram <<= bitcount;
  1310. info->totalswap <<= bitcount;
  1311. info->freeswap <<= bitcount;
  1312. info->totalhigh <<= bitcount;
  1313. info->freehigh <<= bitcount;
  1314. out:
  1315. return 0;
  1316. }
  1317. asmlinkage long sys_sysinfo(struct sysinfo __user *info)
  1318. {
  1319. struct sysinfo val;
  1320. do_sysinfo(&val);
  1321. if (copy_to_user(info, &val, sizeof(struct sysinfo)))
  1322. return -EFAULT;
  1323. return 0;
  1324. }
  1325. /*
  1326. * lockdep: we want to track each per-CPU base as a separate lock-class,
  1327. * but timer-bases are kmalloc()-ed, so we need to attach separate
  1328. * keys to them:
  1329. */
  1330. static struct lock_class_key base_lock_keys[NR_CPUS];
  1331. static int __devinit init_timers_cpu(int cpu)
  1332. {
  1333. int j;
  1334. tvec_base_t *base;
  1335. static char __devinitdata tvec_base_done[NR_CPUS];
  1336. if (!tvec_base_done[cpu]) {
  1337. static char boot_done;
  1338. if (boot_done) {
  1339. /*
  1340. * The APs use this path later in boot
  1341. */
  1342. base = kmalloc_node(sizeof(*base), GFP_KERNEL,
  1343. cpu_to_node(cpu));
  1344. if (!base)
  1345. return -ENOMEM;
  1346. memset(base, 0, sizeof(*base));
  1347. per_cpu(tvec_bases, cpu) = base;
  1348. } else {
  1349. /*
  1350. * This is for the boot CPU - we use compile-time
  1351. * static initialisation because per-cpu memory isn't
  1352. * ready yet and because the memory allocators are not
  1353. * initialised either.
  1354. */
  1355. boot_done = 1;
  1356. base = &boot_tvec_bases;
  1357. }
  1358. tvec_base_done[cpu] = 1;
  1359. } else {
  1360. base = per_cpu(tvec_bases, cpu);
  1361. }
  1362. spin_lock_init(&base->lock);
  1363. lockdep_set_class(&base->lock, base_lock_keys + cpu);
  1364. for (j = 0; j < TVN_SIZE; j++) {
  1365. INIT_LIST_HEAD(base->tv5.vec + j);
  1366. INIT_LIST_HEAD(base->tv4.vec + j);
  1367. INIT_LIST_HEAD(base->tv3.vec + j);
  1368. INIT_LIST_HEAD(base->tv2.vec + j);
  1369. }
  1370. for (j = 0; j < TVR_SIZE; j++)
  1371. INIT_LIST_HEAD(base->tv1.vec + j);
  1372. base->timer_jiffies = jiffies;
  1373. return 0;
  1374. }
#ifdef CONFIG_HOTPLUG_CPU
static void migrate_timer_list(tvec_base_t *new_base, struct list_head *head)
{
        struct timer_list *timer;

        while (!list_empty(head)) {
                timer = list_entry(head->next, struct timer_list, entry);
                detach_timer(timer, 0);
                timer->base = new_base;
                internal_add_timer(new_base, timer);
        }
}
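
/*
 * Note that migrate_timer_list() re-reads head->next on every
 * iteration rather than using a list iterator: internal_add_timer()
 * moves each timer onto one of new_base's vectors, so the old list
 * shrinks from the front until it is empty.
 */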
static void __devinit migrate_timers(int cpu)
{
        tvec_base_t *old_base;
        tvec_base_t *new_base;
        int i;

        BUG_ON(cpu_online(cpu));
        old_base = per_cpu(tvec_bases, cpu);
        new_base = get_cpu_var(tvec_bases);

        local_irq_disable();
        spin_lock(&new_base->lock);
        spin_lock(&old_base->lock);

        BUG_ON(old_base->running_timer);

        for (i = 0; i < TVR_SIZE; i++)
                migrate_timer_list(new_base, old_base->tv1.vec + i);
        for (i = 0; i < TVN_SIZE; i++) {
                migrate_timer_list(new_base, old_base->tv2.vec + i);
                migrate_timer_list(new_base, old_base->tv3.vec + i);
                migrate_timer_list(new_base, old_base->tv4.vec + i);
                migrate_timer_list(new_base, old_base->tv5.vec + i);
        }

        spin_unlock(&old_base->lock);
        spin_unlock(&new_base->lock);
        local_irq_enable();
        put_cpu_var(tvec_bases);
}
#endif /* CONFIG_HOTPLUG_CPU */
static int __cpuinit timer_cpu_notify(struct notifier_block *self,
                                unsigned long action, void *hcpu)
{
        long cpu = (long)hcpu;

        switch (action) {
        case CPU_UP_PREPARE:
                if (init_timers_cpu(cpu) < 0)
                        return NOTIFY_BAD;
                break;
#ifdef CONFIG_HOTPLUG_CPU
        case CPU_DEAD:
                migrate_timers(cpu);
                break;
#endif
        default:
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata timers_nb = {
        .notifier_call  = timer_cpu_notify,
};

void __init init_timers(void)
{
        int err = timer_cpu_notify(&timers_nb, (unsigned long)CPU_UP_PREPARE,
                                (void *)(long)smp_processor_id());

        BUG_ON(err == NOTIFY_BAD);
        register_cpu_notifier(&timers_nb);
        open_softirq(TIMER_SOFTIRQ, run_timer_softirq, NULL);
}
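
/*
 * init_timers() invokes the notifier by hand for the boot CPU because
 * it runs before the notifier chain is registered; secondary CPUs get
 * their bases via the registered callback when they are brought up.
 */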
#ifdef CONFIG_TIME_INTERPOLATION

struct time_interpolator *time_interpolator __read_mostly;
static struct time_interpolator *time_interpolator_list __read_mostly;
static DEFINE_SPINLOCK(time_interpolator_lock);

static inline cycles_t time_interpolator_get_cycles(unsigned int src)
{
        unsigned long (*x)(void);

        switch (src) {
        case TIME_SOURCE_FUNCTION:
                x = time_interpolator->addr;
                return x();

        case TIME_SOURCE_MMIO64:
                return readq_relaxed((void __iomem *)time_interpolator->addr);

        case TIME_SOURCE_MMIO32:
                return readl_relaxed((void __iomem *)time_interpolator->addr);

        default:
                return get_cycles();
        }
}
static inline u64 time_interpolator_get_counter(int writelock)
{
        unsigned int src = time_interpolator->source;

        if (time_interpolator->jitter) {
                cycles_t lcycle;
                cycles_t now;

                do {
                        lcycle = time_interpolator->last_cycle;
                        now = time_interpolator_get_cycles(src);
                        if (lcycle && time_after(lcycle, now))
                                return lcycle;

                        /*
                         * When holding the xtime write lock, there's no
                         * need to add the overhead of the cmpxchg.
                         * Readers are forced to retry until the write
                         * lock is released.
                         */
                        if (writelock) {
                                time_interpolator->last_cycle = now;
                                return now;
                        }
                        /*
                         * Keep track of the last counter value returned.
                         * The use of cmpxchg here will cause contention
                         * in an SMP environment.
                         */
                } while (unlikely(cmpxchg(&time_interpolator->last_cycle,
                                          lcycle, now) != lcycle));
                return now;
        } else
                return time_interpolator_get_cycles(src);
}
void time_interpolator_reset(void)
{
        time_interpolator->offset = 0;
        time_interpolator->last_counter = time_interpolator_get_counter(1);
}

#define GET_TI_NSECS(count, i) \
        (((((count) - (i)->last_counter) & (i)->mask) * (i)->nsec_per_cyc) >> (i)->shift)
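
/*
 * GET_TI_NSECS() converts elapsed cycles to nanoseconds in fixed
 * point: the subtraction masked with (i)->mask copes with counter
 * wrap-around, and the multiply/shift applies nsec_per_cyc, which
 * register_time_interpolator() computes as
 * (NSEC_PER_SEC << shift) / frequency.  With illustrative numbers, a
 * 1 GHz counter and shift == 16 give nsec_per_cyc == 65536, i.e.
 * exactly 1.0 ns per cycle in 16.16 fixed point.
 */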
unsigned long time_interpolator_get_offset(void)
{
        /* If we do not have a time interpolator set up then just return zero */
        if (!time_interpolator)
                return 0;

        return time_interpolator->offset +
                GET_TI_NSECS(time_interpolator_get_counter(0), time_interpolator);
}
#define INTERPOLATOR_ADJUST             65536
#define INTERPOLATOR_MAX_SKIP           (10 * INTERPOLATOR_ADJUST)
void time_interpolator_update(long delta_nsec)
{
        u64 counter;
        unsigned long offset;

        /* If there is no time interpolator set up then do nothing */
        if (!time_interpolator)
                return;

        /*
         * The interpolator compensates for late ticks by accumulating the
         * late time in time_interpolator->offset. A tick earlier than
         * expected will lead to a reset of the offset and a corresponding
         * jump of the clock forward. Again this only works if the
         * interpolator clock is running slightly slower than the regular
         * clock and the tuning logic ensures that.
         */
        counter = time_interpolator_get_counter(1);
        offset = time_interpolator->offset +
                        GET_TI_NSECS(counter, time_interpolator);

        if (delta_nsec < 0 || (unsigned long)delta_nsec < offset)
                time_interpolator->offset = offset - delta_nsec;
        else {
                time_interpolator->skips++;
                time_interpolator->ns_skipped += delta_nsec - offset;
                time_interpolator->offset = 0;
        }
        time_interpolator->last_counter = counter;

        /*
         * Tuning logic for the time interpolator, invoked every minute or
         * so.  Decrease the interpolator clock speed if no skips occurred
         * and an offset is carried.  Increase the interpolator clock speed
         * if we skip too much time.
         */
        if (jiffies % INTERPOLATOR_ADJUST == 0) {
                if (time_interpolator->skips == 0 &&
                    time_interpolator->offset > tick_nsec)
                        time_interpolator->nsec_per_cyc--;
                if (time_interpolator->ns_skipped > INTERPOLATOR_MAX_SKIP &&
                    time_interpolator->offset == 0)
                        time_interpolator->nsec_per_cyc++;
                time_interpolator->skips = 0;
                time_interpolator->ns_skipped = 0;
        }
}
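
/*
 * "Every minute or so" above follows from INTERPOLATOR_ADJUST: the
 * tuning block runs when jiffies % 65536 == 0, i.e. every 65536 ticks,
 * which is roughly 65 seconds at HZ=1000 and proportionally longer at
 * lower HZ values.
 */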
/*
 * Prefer the new interpolator if it is more than twice as fast as the
 * current one, or if it drifts less.
 */
static inline int
is_better_time_interpolator(struct time_interpolator *new)
{
        if (!time_interpolator)
                return 1;
        return new->frequency > 2 * time_interpolator->frequency ||
            (unsigned long)new->drift < (unsigned long)time_interpolator->drift;
}
void
register_time_interpolator(struct time_interpolator *ti)
{
        unsigned long flags;

        /* Sanity check */
        BUG_ON(ti->frequency == 0 || ti->mask == 0);

        ti->nsec_per_cyc = ((u64)NSEC_PER_SEC << ti->shift) / ti->frequency;

        spin_lock(&time_interpolator_lock);
        write_seqlock_irqsave(&xtime_lock, flags);
        if (is_better_time_interpolator(ti)) {
                time_interpolator = ti;
                time_interpolator_reset();
        }
        write_sequnlock_irqrestore(&xtime_lock, flags);

        ti->next = time_interpolator_list;
        time_interpolator_list = ti;
        spin_unlock(&time_interpolator_lock);
}
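
/*
 * Registration nests the xtime write seqlock inside
 * time_interpolator_lock: the spinlock serialises concurrent
 * register/unregister calls, while the seqlock makes sure readers in
 * the gettimeofday path never observe a half-switched interpolator.
 */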
void
unregister_time_interpolator(struct time_interpolator *ti)
{
        struct time_interpolator *curr, **prev;
        unsigned long flags;

        spin_lock(&time_interpolator_lock);
        prev = &time_interpolator_list;
        for (curr = *prev; curr; curr = curr->next) {
                if (curr == ti) {
                        *prev = curr->next;
                        break;
                }
                prev = &curr->next;
        }

        write_seqlock_irqsave(&xtime_lock, flags);
        if (ti == time_interpolator) {
                /* we lost the best time-interpolator: */
                time_interpolator = NULL;
                /* find the next-best interpolator */
                for (curr = time_interpolator_list; curr; curr = curr->next)
                        if (is_better_time_interpolator(curr))
                                time_interpolator = curr;
                time_interpolator_reset();
        }
        write_sequnlock_irqrestore(&xtime_lock, flags);
        spin_unlock(&time_interpolator_lock);
}
#endif /* CONFIG_TIME_INTERPOLATION */
/**
 * msleep - sleep safely even with waitqueue interruptions
 * @msecs: Time in milliseconds to sleep for
 */
void msleep(unsigned int msecs)
{
        unsigned long timeout = msecs_to_jiffies(msecs) + 1;

        while (timeout)
                timeout = schedule_timeout_uninterruptible(timeout);
}
EXPORT_SYMBOL(msleep);
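
/*
 * Typical, illustrative use is letting hardware settle after a reset:
 *
 *	msleep(20);
 *
 * sleeps for at least 20 ms.  The "+ 1" added to the jiffies timeout
 * above covers the partially elapsed current tick, so the sleep can
 * never be shorter than requested.
 */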
/**
 * msleep_interruptible - sleep waiting for signals
 * @msecs: Time in milliseconds to sleep for
 */
unsigned long msleep_interruptible(unsigned int msecs)
{
        unsigned long timeout = msecs_to_jiffies(msecs) + 1;

        while (timeout && !signal_pending(current))
                timeout = schedule_timeout_interruptible(timeout);
        return jiffies_to_msecs(timeout);
}
EXPORT_SYMBOL(msleep_interruptible);
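
/*
 * Unlike msleep(), this returns early when a signal arrives and
 * reports how many milliseconds of the request were left.  An
 * illustrative caller:
 *
 *	unsigned long left = msleep_interruptible(100);
 *	if (left)
 *		return -ERESTARTSYS;
 */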