/*
 *  linux/kernel/timer.c
 *
 *  Kernel internal timers, kernel timekeeping, basic process system calls
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-01-28  Modified by Finn Arne Gangstad to make timers scale better.
 *
 *  1997-09-10  Updated NTP code according to technical memorandum Jan '96
 *              "A Kernel Model for Precision Timekeeping" by Dave Mills
 *  1998-12-24  Fixed an xtime SMP race (we need the xtime_lock rw spinlock to
 *              serialize accesses to xtime/lost_ticks).
 *                              Copyright (C) 1998  Andrea Arcangeli
 *  1999-03-10  Improved NTP compatibility by Ulrich Windl
 *  2002-05-31  Move sys_sysinfo here and make its locking sane, Robert Love
 *  2000-10-05  Implemented scalable SMP per-CPU timer handling.
 *                              Copyright (C) 2000, 2001, 2002  Ingo Molnar
 *              Designed by David S. Miller, Alexey Kuznetsov and Ingo Molnar
 */

#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/notifier.h>
#include <linux/thread_info.h>
#include <linux/time.h>
#include <linux/jiffies.h>
#include <linux/posix-timers.h>
#include <linux/cpu.h>
#include <linux/syscalls.h>
#include <linux/delay.h>

#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/div64.h>
#include <asm/timex.h>
#include <asm/io.h>

#ifdef CONFIG_TIME_INTERPOLATION
static void time_interpolator_update(long delta_nsec);
#else
#define time_interpolator_update(x)
#endif
u64 jiffies_64 __cacheline_aligned_in_smp = INITIAL_JIFFIES;

EXPORT_SYMBOL(jiffies_64);

/*
 * per-CPU timer vector definitions:
 */
#define TVN_BITS (CONFIG_BASE_SMALL ? 4 : 6)
#define TVR_BITS (CONFIG_BASE_SMALL ? 6 : 8)
#define TVN_SIZE (1 << TVN_BITS)
#define TVR_SIZE (1 << TVR_BITS)
#define TVN_MASK (TVN_SIZE - 1)
#define TVR_MASK (TVR_SIZE - 1)
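
/*
 * Illustrative note (not in the original source): with the default
 * (!CONFIG_BASE_SMALL) values, TVR_BITS = 8 and TVN_BITS = 6, so the
 * wheel levels cover the following spans of jiffies relative to
 * base->timer_jiffies:
 *
 *	tv1: 256 slots, 1 jiffy each      -> expiry <  2^8
 *	tv2:  64 slots, 2^8 jiffies each  -> expiry <  2^14
 *	tv3:  64 slots, 2^14 jiffies each -> expiry <  2^20
 *	tv4:  64 slots, 2^20 jiffies each -> expiry <  2^26
 *	tv5:  64 slots, 2^26 jiffies each -> everything else, up to 2^32
 *
 * At HZ=1000, tv1 therefore resolves about a quarter of a second
 * directly, and tv5 reaches out to 2^32 jiffies (~49 days).
 */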
typedef struct tvec_s {
	struct list_head vec[TVN_SIZE];
} tvec_t;

typedef struct tvec_root_s {
	struct list_head vec[TVR_SIZE];
} tvec_root_t;

struct tvec_t_base_s {
	spinlock_t lock;
	struct timer_list *running_timer;
	unsigned long timer_jiffies;
	tvec_root_t tv1;
	tvec_t tv2;
	tvec_t tv3;
	tvec_t tv4;
	tvec_t tv5;
} ____cacheline_aligned_in_smp;

typedef struct tvec_t_base_s tvec_base_t;

tvec_base_t boot_tvec_bases;
EXPORT_SYMBOL(boot_tvec_bases);

static DEFINE_PER_CPU(tvec_base_t *, tvec_bases) = { &boot_tvec_bases };

static inline void set_running_timer(tvec_base_t *base,
					struct timer_list *timer)
{
#ifdef CONFIG_SMP
	base->running_timer = timer;
#endif
}
static void internal_add_timer(tvec_base_t *base, struct timer_list *timer)
{
	unsigned long expires = timer->expires;
	unsigned long idx = expires - base->timer_jiffies;
	struct list_head *vec;

	if (idx < TVR_SIZE) {
		int i = expires & TVR_MASK;
		vec = base->tv1.vec + i;
	} else if (idx < 1 << (TVR_BITS + TVN_BITS)) {
		int i = (expires >> TVR_BITS) & TVN_MASK;
		vec = base->tv2.vec + i;
	} else if (idx < 1 << (TVR_BITS + 2 * TVN_BITS)) {
		int i = (expires >> (TVR_BITS + TVN_BITS)) & TVN_MASK;
		vec = base->tv3.vec + i;
	} else if (idx < 1 << (TVR_BITS + 3 * TVN_BITS)) {
		int i = (expires >> (TVR_BITS + 2 * TVN_BITS)) & TVN_MASK;
		vec = base->tv4.vec + i;
	} else if ((signed long) idx < 0) {
		/*
		 * Can happen if you add a timer with expires == jiffies,
		 * or you set a timer to go off in the past
		 */
		vec = base->tv1.vec + (base->timer_jiffies & TVR_MASK);
	} else {
		int i;
		/* If the timeout is larger than 0xffffffff on 64-bit
		 * architectures then we use the maximum timeout:
		 */
		if (idx > 0xffffffffUL) {
			idx = 0xffffffffUL;
			expires = idx + base->timer_jiffies;
		}
		i = (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK;
		vec = base->tv5.vec + i;
	}
	/*
	 * Timers are FIFO:
	 */
	list_add_tail(&timer->entry, vec);
}
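
/*
 * Worked example (illustrative, not in the original source): assume the
 * default TVR_BITS = 8, base->timer_jiffies = 1000 and timer->expires =
 * 1300.  Then idx = 300, which is >= TVR_SIZE (256) but < 1 << 14, so the
 * timer lands in tv2 at slot (1300 >> 8) & 63 = 5.  Once timer_jiffies
 * reaches 1280, cascade() moves that slot's timers back into tv1, where
 * this timer finally expires from slot 1300 & 255 = 20.
 */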
/***
 * init_timer - initialize a timer.
 * @timer: the timer to be initialized
 *
 * init_timer() must be done to a timer prior to calling *any* of the
 * other timer functions.
 */
void fastcall init_timer(struct timer_list *timer)
{
	timer->entry.next = NULL;
	timer->base = __raw_get_cpu_var(tvec_bases);
}
EXPORT_SYMBOL(init_timer);
static inline void detach_timer(struct timer_list *timer,
				int clear_pending)
{
	struct list_head *entry = &timer->entry;

	__list_del(entry->prev, entry->next);
	if (clear_pending)
		entry->next = NULL;
	entry->prev = LIST_POISON2;
}

/*
 * We are using hashed locking: holding per_cpu(tvec_bases).lock
 * means that all timers which are tied to this base via timer->base are
 * locked, and the base itself is locked too.
 *
 * So __run_timers/migrate_timers can safely modify all timers which could
 * be found on ->tvX lists.
 *
 * When the timer's base is locked, and the timer removed from list, it is
 * possible to set timer->base = NULL and drop the lock: the timer remains
 * locked.
 */
static tvec_base_t *lock_timer_base(struct timer_list *timer,
					unsigned long *flags)
{
	tvec_base_t *base;

	for (;;) {
		base = timer->base;
		if (likely(base != NULL)) {
			spin_lock_irqsave(&base->lock, *flags);
			if (likely(base == timer->base))
				return base;
			/* The timer has migrated to another CPU */
			spin_unlock_irqrestore(&base->lock, *flags);
		}
		cpu_relax();
	}
}
int __mod_timer(struct timer_list *timer, unsigned long expires)
{
	tvec_base_t *base, *new_base;
	unsigned long flags;
	int ret = 0;

	BUG_ON(!timer->function);

	base = lock_timer_base(timer, &flags);

	if (timer_pending(timer)) {
		detach_timer(timer, 0);
		ret = 1;
	}

	new_base = __get_cpu_var(tvec_bases);

	if (base != new_base) {
		/*
		 * We are trying to schedule the timer on the local CPU.
		 * However we can't change the timer's base while it is
		 * running, otherwise del_timer_sync() can't detect that the
		 * timer's handler has not finished yet. This also guarantees
		 * that the timer is serialized wrt itself.
		 */
		if (likely(base->running_timer != timer)) {
			/* See the comment in lock_timer_base() */
			timer->base = NULL;
			spin_unlock(&base->lock);
			base = new_base;
			spin_lock(&base->lock);
			timer->base = base;
		}
	}

	timer->expires = expires;
	internal_add_timer(base, timer);
	spin_unlock_irqrestore(&base->lock, flags);

	return ret;
}
EXPORT_SYMBOL(__mod_timer);
/***
 * add_timer_on - start a timer on a particular CPU
 * @timer: the timer to be added
 * @cpu: the CPU to start it on
 *
 * This is not very scalable on SMP. Double adds are not possible.
 */
void add_timer_on(struct timer_list *timer, int cpu)
{
	tvec_base_t *base = per_cpu(tvec_bases, cpu);
	unsigned long flags;

	BUG_ON(timer_pending(timer) || !timer->function);
	spin_lock_irqsave(&base->lock, flags);
	timer->base = base;
	internal_add_timer(base, timer);
	spin_unlock_irqrestore(&base->lock, flags);
}
/***
 * mod_timer - modify a timer's timeout
 * @timer: the timer to be modified
 * @expires: the new timeout in jiffies
 *
 * mod_timer() is a more efficient way to update the expires field of an
 * active timer (if the timer is inactive it will be activated)
 *
 * mod_timer(timer, expires) is equivalent to:
 *
 *     del_timer(timer); timer->expires = expires; add_timer(timer);
 *
 * Note that if there are multiple unserialized concurrent users of the
 * same timer, then mod_timer() is the only safe way to modify the timeout,
 * since add_timer() cannot modify an already running timer.
 *
 * The function returns whether it has modified a pending timer or not.
 * (i.e. mod_timer() of an inactive timer returns 0, mod_timer() of an
 * active timer returns 1.)
 */
int mod_timer(struct timer_list *timer, unsigned long expires)
{
	BUG_ON(!timer->function);

	/*
	 * This is a common optimization triggered by the
	 * networking code - if the timer is re-modified
	 * to be the same thing then just return:
	 */
	if (timer->expires == expires && timer_pending(timer))
		return 1;

	return __mod_timer(timer, expires);
}
EXPORT_SYMBOL(mod_timer);
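
/*
 * Usage sketch (illustrative only, not in the original source; the
 * my_dev structure and my_watchdog() handler are hypothetical):
 *
 *	static void my_watchdog(unsigned long data)
 *	{
 *		struct my_dev *dev = (struct my_dev *)data;
 *		// handle the timeout, possibly re-arm with mod_timer()
 *	}
 *
 *	init_timer(&dev->timer);
 *	dev->timer.function = my_watchdog;
 *	dev->timer.data = (unsigned long)dev;
 *	mod_timer(&dev->timer, jiffies + HZ);	// fire in ~1 second
 *
 * mod_timer() activates the timer if it was inactive, so no separate
 * add_timer() call is needed here.
 */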
/***
 * del_timer - deactivate a timer.
 * @timer: the timer to be deactivated
 *
 * del_timer() deactivates a timer - this works on both active and inactive
 * timers.
 *
 * The function returns whether it has deactivated a pending timer or not.
 * (i.e. del_timer() of an inactive timer returns 0, del_timer() of an
 * active timer returns 1.)
 */
int del_timer(struct timer_list *timer)
{
	tvec_base_t *base;
	unsigned long flags;
	int ret = 0;

	if (timer_pending(timer)) {
		base = lock_timer_base(timer, &flags);
		if (timer_pending(timer)) {
			detach_timer(timer, 1);
			ret = 1;
		}
		spin_unlock_irqrestore(&base->lock, flags);
	}

	return ret;
}
EXPORT_SYMBOL(del_timer);
#ifdef CONFIG_SMP
/*
 * This function tries to deactivate a timer. Upon successful (ret >= 0)
 * exit the timer is not queued and the handler is not running on any CPU.
 *
 * It must not be called from interrupt contexts.
 */
int try_to_del_timer_sync(struct timer_list *timer)
{
	tvec_base_t *base;
	unsigned long flags;
	int ret = -1;

	base = lock_timer_base(timer, &flags);

	if (base->running_timer == timer)
		goto out;

	ret = 0;
	if (timer_pending(timer)) {
		detach_timer(timer, 1);
		ret = 1;
	}
out:
	spin_unlock_irqrestore(&base->lock, flags);

	return ret;
}

/***
 * del_timer_sync - deactivate a timer and wait for the handler to finish.
 * @timer: the timer to be deactivated
 *
 * This function only differs from del_timer() on SMP: besides deactivating
 * the timer it also makes sure the handler has finished executing on other
 * CPUs.
 *
 * Synchronization rules: callers must prevent restarting of the timer,
 * otherwise this function is meaningless. It must not be called from
 * interrupt contexts. The caller must not hold locks which would prevent
 * completion of the timer's handler. The timer's handler must not call
 * add_timer_on(). Upon exit the timer is not queued and the handler is
 * not running on any CPU.
 *
 * The function returns whether it has deactivated a pending timer or not.
 */
int del_timer_sync(struct timer_list *timer)
{
	for (;;) {
		int ret = try_to_del_timer_sync(timer);
		if (ret >= 0)
			return ret;
	}
}
EXPORT_SYMBOL(del_timer_sync);
#endif
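
/*
 * Teardown sketch (illustrative only, not in the original source; my_dev
 * and its members are hypothetical): on module unload a driver typically
 * stops re-arming and then synchronizes against a possibly running handler:
 *
 *	dev->shutting_down = 1;		// handler checks this before re-arming
 *	del_timer_sync(&dev->timer);	// waits out a running handler
 *
 * Per the rules above, this is only safe if nothing can restart the timer
 * after del_timer_sync() returns.
 */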
static int cascade(tvec_base_t *base, tvec_t *tv, int index)
{
	/* cascade all the timers from tv up one level */
	struct timer_list *timer, *tmp;
	struct list_head tv_list;

	list_replace_init(tv->vec + index, &tv_list);

	/*
	 * We are removing _all_ timers from the list, so we
	 * don't have to detach them individually.
	 */
	list_for_each_entry_safe(timer, tmp, &tv_list, entry) {
		BUG_ON(timer->base != base);
		internal_add_timer(base, timer);
	}

	return index;
}
/***
 * __run_timers - run all expired timers (if any) on this CPU.
 * @base: the timer vector to be processed.
 *
 * This function cascades all vectors and executes all expired timer
 * vectors.
 */
#define INDEX(N) (base->timer_jiffies >> (TVR_BITS + N * TVN_BITS)) & TVN_MASK

static inline void __run_timers(tvec_base_t *base)
{
	struct timer_list *timer;

	spin_lock_irq(&base->lock);
	while (time_after_eq(jiffies, base->timer_jiffies)) {
		struct list_head work_list;
		struct list_head *head = &work_list;
		int index = base->timer_jiffies & TVR_MASK;

		/*
		 * Cascade timers:
		 */
		if (!index &&
			(!cascade(base, &base->tv2, INDEX(0))) &&
				(!cascade(base, &base->tv3, INDEX(1))) &&
					!cascade(base, &base->tv4, INDEX(2)))
			cascade(base, &base->tv5, INDEX(3));
		++base->timer_jiffies;
		list_replace_init(base->tv1.vec + index, &work_list);
		while (!list_empty(head)) {
			void (*fn)(unsigned long);
			unsigned long data;

			timer = list_entry(head->next,struct timer_list,entry);
			fn = timer->function;
			data = timer->data;

			set_running_timer(base, timer);
			detach_timer(timer, 1);
			spin_unlock_irq(&base->lock);
			{
				int preempt_count = preempt_count();
				fn(data);
				if (preempt_count != preempt_count()) {
					printk(KERN_WARNING "huh, entered %p "
					       "with preempt_count %08x, exited"
					       " with %08x?\n",
					       fn, preempt_count,
					       preempt_count());
					BUG();
				}
			}
			spin_lock_irq(&base->lock);
		}
	}
	set_running_timer(base, NULL);
	spin_unlock_irq(&base->lock);
}
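
/*
 * Illustrative note (not in the original source): tv1's index wraps every
 * 256 jiffies (TVR_SIZE), so the cascade chain above runs at most once per
 * 256 ticks.  cascade() returns the slot index it just emptied, and a tvN
 * slot index only returns to zero when that whole level has wrapped, so
 * tv3 is cascaded only when tv2's index wrapped back to 0, tv4 only when
 * tv3's did, and so on - each level is touched 64 times less often than
 * the one below it.
 */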
#ifdef CONFIG_NO_IDLE_HZ
/*
 * Find out when the next timer event is due to happen. This
 * is used on S/390 to stop all activity when a CPU is idle.
 * This function needs to be called with interrupts disabled.
 */
unsigned long next_timer_interrupt(void)
{
	tvec_base_t *base;
	struct list_head *list;
	struct timer_list *nte;
	unsigned long expires;
	unsigned long hr_expires = MAX_JIFFY_OFFSET;
	ktime_t hr_delta;
	tvec_t *varray[4];
	int i, j;

	hr_delta = hrtimer_get_next_event();
	if (hr_delta.tv64 != KTIME_MAX) {
		struct timespec tsdelta;
		tsdelta = ktime_to_timespec(hr_delta);
		hr_expires = timespec_to_jiffies(&tsdelta);
		if (hr_expires < 3)
			return hr_expires + jiffies;
	}
	hr_expires += jiffies;

	base = __get_cpu_var(tvec_bases);
	spin_lock(&base->lock);
	expires = base->timer_jiffies + (LONG_MAX >> 1);
	list = NULL;

	/* Look for timer events in tv1. */
	j = base->timer_jiffies & TVR_MASK;
	do {
		list_for_each_entry(nte, base->tv1.vec + j, entry) {
			expires = nte->expires;
			if (j < (base->timer_jiffies & TVR_MASK))
				list = base->tv2.vec + (INDEX(0));
			goto found;
		}
		j = (j + 1) & TVR_MASK;
	} while (j != (base->timer_jiffies & TVR_MASK));

	/* Check tv2-tv5. */
	varray[0] = &base->tv2;
	varray[1] = &base->tv3;
	varray[2] = &base->tv4;
	varray[3] = &base->tv5;
	for (i = 0; i < 4; i++) {
		j = INDEX(i);
		do {
			if (list_empty(varray[i]->vec + j)) {
				j = (j + 1) & TVN_MASK;
				continue;
			}
			list_for_each_entry(nte, varray[i]->vec + j, entry)
				if (time_before(nte->expires, expires))
					expires = nte->expires;
			if (j < (INDEX(i)) && i < 3)
				list = varray[i + 1]->vec + (INDEX(i + 1));
			goto found;
		} while (j != (INDEX(i)));
	}
found:
	if (list) {
		/*
		 * The search wrapped. We need to look at the next list
		 * from next tv element that would cascade into tv element
		 * where we found the timer element.
		 */
		list_for_each_entry(nte, list, entry) {
			if (time_before(nte->expires, expires))
				expires = nte->expires;
		}
	}
	spin_unlock(&base->lock);

	/*
	 * It can happen that other CPUs service timer IRQs and increment
	 * jiffies, but we have not yet got a local timer tick to process
	 * the timer wheels.  In that case, the expiry time can be before
	 * jiffies, but since the high-resolution timer here is relative to
	 * jiffies, the default expression when high-resolution timers are
	 * not active,
	 *
	 *   time_before(MAX_JIFFY_OFFSET + jiffies, expires)
	 *
	 * would falsely evaluate to true.  If that is the case, just
	 * return jiffies so that we can immediately fire the local timer
	 */
	if (time_before(expires, jiffies))
		return jiffies;

	if (time_before(hr_expires, expires))
		return hr_expires;

	return expires;
}
#endif
/******************************************************************/

/*
 * Timekeeping variables
 */
unsigned long tick_usec = TICK_USEC;	/* USER_HZ period (usec) */
unsigned long tick_nsec = TICK_NSEC;	/* ACTHZ period (nsec) */

/*
 * The current time
 * wall_to_monotonic is what we need to add to xtime (or xtime corrected
 * for sub-jiffy times) to get to monotonic time.  Monotonic is pegged
 * at zero at system boot time, so wall_to_monotonic will be negative,
 * however, we will ALWAYS keep the tv_nsec part positive so we can use
 * the usual normalization.
 */
struct timespec xtime __attribute__ ((aligned (16)));
struct timespec wall_to_monotonic __attribute__ ((aligned (16)));

EXPORT_SYMBOL(xtime);

/* Don't completely fail for HZ > 500.  */
int tickadj = 500/HZ ? : 1;		/* microsecs */

/*
 * phase-lock loop variables
 */
/* TIME_ERROR prevents overwriting the CMOS clock */
int time_state = TIME_OK;		/* clock synchronization status	*/
int time_status = STA_UNSYNC;		/* clock status bits		*/
long time_offset;			/* time adjustment (us)		*/
long time_constant = 2;			/* pll time constant		*/
long time_tolerance = MAXFREQ;		/* frequency tolerance (ppm)	*/
long time_precision = 1;		/* clock precision (us)		*/
long time_maxerror = NTP_PHASE_LIMIT;	/* maximum error (us)		*/
long time_esterror = NTP_PHASE_LIMIT;	/* estimated error (us)		*/
long time_freq = (((NSEC_PER_SEC + HZ/2) % HZ - HZ/2) << SHIFT_USEC) / NSEC_PER_USEC;
					/* frequency offset (scaled ppm)*/
static long time_adj;			/* tick adjust (scaled 1 / HZ)	*/
long time_reftime;			/* time at last adjustment (s)	*/
long time_adjust;
long time_next_adjust;
/*
 * this routine handles the overflow of the microsecond field
 *
 * The tricky bits of code to handle the accurate clock support
 * were provided by Dave Mills (Mills@UDEL.EDU) of NTP fame.
 * They were originally developed for SUN and DEC kernels.
 * All the kudos should go to Dave for this stuff.
 *
 */
static void second_overflow(void)
{
	long ltemp;

	/* Bump the maxerror field */
	time_maxerror += time_tolerance >> SHIFT_USEC;
	if (time_maxerror > NTP_PHASE_LIMIT) {
		time_maxerror = NTP_PHASE_LIMIT;
		time_status |= STA_UNSYNC;
	}

	/*
	 * Leap second processing. If in leap-insert state at the end of the
	 * day, the system clock is set back one second; if in leap-delete
	 * state, the system clock is set ahead one second. The microtime()
	 * routine or external clock driver will ensure that reported time is
	 * always monotonic. The ugly divides should be replaced.
	 */
	switch (time_state) {
	case TIME_OK:
		if (time_status & STA_INS)
			time_state = TIME_INS;
		else if (time_status & STA_DEL)
			time_state = TIME_DEL;
		break;
	case TIME_INS:
		if (xtime.tv_sec % 86400 == 0) {
			xtime.tv_sec--;
			wall_to_monotonic.tv_sec++;
			/*
			 * The timer interpolator will make time change
			 * gradually instead of an immediate jump by one second
			 */
			time_interpolator_update(-NSEC_PER_SEC);
			time_state = TIME_OOP;
			clock_was_set();
			printk(KERN_NOTICE "Clock: inserting leap second "
					"23:59:60 UTC\n");
		}
		break;
	case TIME_DEL:
		if ((xtime.tv_sec + 1) % 86400 == 0) {
			xtime.tv_sec++;
			wall_to_monotonic.tv_sec--;
			/*
			 * Use of time interpolator for a gradual change of
			 * time
			 */
			time_interpolator_update(NSEC_PER_SEC);
			time_state = TIME_WAIT;
			clock_was_set();
			printk(KERN_NOTICE "Clock: deleting leap second "
					"23:59:59 UTC\n");
		}
		break;
	case TIME_OOP:
		time_state = TIME_WAIT;
		break;
	case TIME_WAIT:
		if (!(time_status & (STA_INS | STA_DEL)))
			time_state = TIME_OK;
	}

	/*
	 * Compute the phase adjustment for the next second. In PLL mode, the
	 * offset is reduced by a fixed factor times the time constant. In FLL
	 * mode the offset is used directly. In either mode, the maximum phase
	 * adjustment for each second is clamped so as to spread the adjustment
	 * over not more than the number of seconds between updates.
	 */
	ltemp = time_offset;
	if (!(time_status & STA_FLL))
		ltemp = shift_right(ltemp, SHIFT_KG + time_constant);
	ltemp = min(ltemp, (MAXPHASE / MINSEC) << SHIFT_UPDATE);
	ltemp = max(ltemp, -(MAXPHASE / MINSEC) << SHIFT_UPDATE);
	time_offset -= ltemp;
	time_adj = ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);

	/*
	 * Compute the frequency estimate and additional phase adjustment due
	 * to frequency error for the next second.
	 */
	ltemp = time_freq;
	time_adj += shift_right(ltemp,(SHIFT_USEC + SHIFT_HZ - SHIFT_SCALE));

#if HZ == 100
	/*
	 * Compensate for (HZ==100) != (1 << SHIFT_HZ). Add 25% and 3.125% to
	 * get 128.125; => only 0.125% error (p. 14)
	 */
	time_adj += shift_right(time_adj, 2) + shift_right(time_adj, 5);
#endif
#if HZ == 250
	/*
	 * Compensate for (HZ==250) != (1 << SHIFT_HZ). Add 1.5625% and
	 * 0.78125% to get 255.85938; => only 0.05% error (p. 14)
	 */
	time_adj += shift_right(time_adj, 6) + shift_right(time_adj, 7);
#endif
#if HZ == 1000
	/*
	 * Compensate for (HZ==1000) != (1 << SHIFT_HZ). Add 1.5625% and
	 * 0.78125% to get 1023.4375; => only 0.05% error (p. 14)
	 */
	time_adj += shift_right(time_adj, 6) + shift_right(time_adj, 7);
#endif
}
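
/*
 * Worked arithmetic for the HZ compensation above (illustrative, not in
 * the original source), for the HZ == 1000 case: the PLL math assumes
 * 1 << SHIFT_HZ = 1024 ticks per second, but there are only 1000.
 * Scaling time_adj as x + x/64 + x/128 multiplies it by
 *
 *	1 + 0.015625 + 0.0078125 = 1.0234375
 *
 * and 1000 * 1.0234375 = 1023.4375 ~= 1024, leaving only about 0.05%
 * residual error, exactly as the comment above states.
 */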
/*
 * Returns how many microseconds we need to add to xtime this tick
 * in doing an adjustment requested with adjtime.
 */
static long adjtime_adjustment(void)
{
	long time_adjust_step;

	time_adjust_step = time_adjust;
	if (time_adjust_step) {
		/*
		 * We are doing an adjtime thing.  Prepare time_adjust_step to
		 * be within bounds.  Note that a positive time_adjust means we
		 * want the clock to run faster.
		 *
		 * Limit the amount of the step to be in the range
		 * -tickadj .. +tickadj
		 */
		time_adjust_step = min(time_adjust_step, (long)tickadj);
		time_adjust_step = max(time_adjust_step, (long)-tickadj);
	}
	return time_adjust_step;
}

/* in the NTP reference this is called "hardclock()" */
static void update_ntp_one_tick(void)
{
	long time_adjust_step;

	time_adjust_step = adjtime_adjustment();
	if (time_adjust_step)
		/* Reduce by this step the amount of time left */
		time_adjust -= time_adjust_step;

	/* Changes by adjtime() do not take effect till next tick. */
	if (time_next_adjust != 0) {
		time_adjust = time_next_adjust;
		time_next_adjust = 0;
	}
}
/*
 * Return how long ticks are at the moment, that is, how much time
 * update_wall_time_one_tick will add to xtime next time we call it
 * (assuming no calls to do_adjtimex in the meantime).
 * The return value is in fixed-point nanoseconds shifted by the
 * specified number of bits to the right of the binary point.
 * This function has no side-effects.
 */
u64 current_tick_length(long shift)
{
	long delta_nsec;
	u64 ret;

	/* calculate the finest interval NTP will allow.
	 * ie: nanosecond value shifted by (SHIFT_SCALE - 10)
	 */
	delta_nsec = tick_nsec + adjtime_adjustment() * 1000;
	ret = ((u64) delta_nsec << (SHIFT_SCALE - 10)) + time_adj;

	/* convert from (SHIFT_SCALE - 10) to specified shift scale: */
	shift = shift - (SHIFT_SCALE - 10);
	if (shift < 0)
		ret >>= -shift;
	else
		ret <<= shift;

	return ret;
}
/* XXX - all of this timekeeping code should be later moved to time.c */
#include <linux/clocksource.h>
static struct clocksource *clock; /* pointer to current clocksource */
static cycle_t last_clock_cycle;  /* cycle value at last update_wall_time */

#ifdef CONFIG_GENERIC_TIME
/**
 * __get_nsec_offset - Returns nanoseconds since last call to update_wall_time
 *
 * private function, must hold xtime_lock lock when being
 * called. Returns the number of nanoseconds since the
 * last call to update_wall_time() (adjusted by NTP scaling)
 */
static inline s64 __get_nsec_offset(void)
{
	cycle_t cycle_now, cycle_delta;
	s64 ns_offset;

	/* read clocksource: */
	cycle_now = read_clocksource(clock);

	/* calculate the delta since the last update_wall_time: */
	cycle_delta = (cycle_now - last_clock_cycle) & clock->mask;

	/* convert to nanoseconds: */
	ns_offset = cyc2ns(clock, cycle_delta);

	return ns_offset;
}
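
/*
 * Illustrative arithmetic (not in the original source; the mult/shift
 * field names are an assumption about this era's clocksource layout):
 * cyc2ns() scales cycles to nanoseconds with a fixed-point multiply,
 *
 *	ns = (cycle_delta * clock->mult) >> clock->shift
 *
 * For example, a 4 MHz counter with shift = 0 could use mult = 250:
 * 4000 cycles * 250 = 1000000 ns, i.e. 1 ms.  The masked subtraction
 * above also keeps the delta correct across counter wraparound.
 */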
/**
 * __get_realtime_clock_ts - Returns the time of day in a timespec
 * @ts: pointer to the timespec to be set
 *
 * Returns the time of day in a timespec. Used by
 * do_gettimeofday() and getnstimeofday().
 */
static inline void __get_realtime_clock_ts(struct timespec *ts)
{
	unsigned long seq;
	s64 nsecs;

	do {
		seq = read_seqbegin(&xtime_lock);

		*ts = xtime;
		nsecs = __get_nsec_offset();

	} while (read_seqretry(&xtime_lock, seq));

	timespec_add_ns(ts, nsecs);
}
/**
 * getnstimeofday - Returns the time of day in a timespec
 * @ts: pointer to the timespec to be set
 *
 * Returns the time of day in a timespec.
 */
void getnstimeofday(struct timespec *ts)
{
	__get_realtime_clock_ts(ts);
}
EXPORT_SYMBOL(getnstimeofday);
/**
 * do_gettimeofday - Returns the time of day in a timeval
 * @tv: pointer to the timeval to be set
 *
 * NOTE: Users should be converted to using getnstimeofday()
 */
void do_gettimeofday(struct timeval *tv)
{
	struct timespec now;

	__get_realtime_clock_ts(&now);
	tv->tv_sec = now.tv_sec;
	tv->tv_usec = now.tv_nsec/1000;
}
EXPORT_SYMBOL(do_gettimeofday);
/**
 * do_settimeofday - Sets the time of day
 * @tv: pointer to the timespec variable containing the new time
 *
 * Sets the time of day to the new time, updates NTP and notifies hrtimers
 */
int do_settimeofday(struct timespec *tv)
{
	unsigned long flags;
	time_t wtm_sec, sec = tv->tv_sec;
	long wtm_nsec, nsec = tv->tv_nsec;

	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
		return -EINVAL;

	write_seqlock_irqsave(&xtime_lock, flags);

	nsec -= __get_nsec_offset();

	wtm_sec  = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
	wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);

	set_normalized_timespec(&xtime, sec, nsec);
	set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);

	ntp_clear();

	write_sequnlock_irqrestore(&xtime_lock, flags);

	/* signal hrtimers about time change */
	clock_was_set();

	return 0;
}
EXPORT_SYMBOL(do_settimeofday);
/**
 * change_clocksource - Swaps clocksources if a new one is available
 *
 * Accumulates current time interval and initializes new clocksource
 */
static int change_clocksource(void)
{
	struct clocksource *new;
	cycle_t now;
	u64 nsec;

	new = get_next_clocksource();
	if (clock != new) {
		now = read_clocksource(new);
		nsec = __get_nsec_offset();
		timespec_add_ns(&xtime, nsec);

		clock = new;
		last_clock_cycle = now;
		printk(KERN_INFO "Time: %s clocksource has been installed.\n",
					clock->name);
		return 1;
	} else if (clock->update_callback) {
		return clock->update_callback();
	}
	return 0;
}
#else
#define change_clocksource() (0)
#endif
/**
 * timekeeping_is_continuous - check to see if timekeeping is free running
 */
int timekeeping_is_continuous(void)
{
	unsigned long seq;
	int ret;

	do {
		seq = read_seqbegin(&xtime_lock);

		ret = clock->is_continuous;

	} while (read_seqretry(&xtime_lock, seq));

	return ret;
}
/*
 * timekeeping_init - Initializes the clocksource and common timekeeping values
 */
void __init timekeeping_init(void)
{
	unsigned long flags;

	write_seqlock_irqsave(&xtime_lock, flags);
	clock = get_next_clocksource();
	calculate_clocksource_interval(clock, tick_nsec);
	last_clock_cycle = read_clocksource(clock);
	ntp_clear();
	write_sequnlock_irqrestore(&xtime_lock, flags);
}

/*
 * timekeeping_resume - Resumes the generic timekeeping subsystem.
 * @dev:	unused
 *
 * This is for the generic clocksource timekeeping.
 * xtime/wall_to_monotonic/jiffies/wall_jiffies/etc are
 * still managed by arch specific suspend/resume code.
 */
static int timekeeping_resume(struct sys_device *dev)
{
	unsigned long flags;

	write_seqlock_irqsave(&xtime_lock, flags);
	/* restart the last cycle value */
	last_clock_cycle = read_clocksource(clock);
	write_sequnlock_irqrestore(&xtime_lock, flags);
	return 0;
}

/* sysfs resume/suspend bits for timekeeping */
static struct sysdev_class timekeeping_sysclass = {
	.resume		= timekeeping_resume,
	set_kset_name("timekeeping"),
};

static struct sys_device device_timer = {
	.id		= 0,
	.cls		= &timekeeping_sysclass,
};

static int __init timekeeping_init_device(void)
{
	int error = sysdev_class_register(&timekeeping_sysclass);
	if (!error)
		error = sysdev_register(&device_timer);
	return error;
}

device_initcall(timekeeping_init_device);
/*
 * update_wall_time - Uses the current clocksource to increment the wall time
 *
 * Called from the timer interrupt, must hold a write on xtime_lock.
 */
static void update_wall_time(void)
{
	static s64 remainder_snsecs, error;
	s64 snsecs_per_sec;
	cycle_t now, offset;

	snsecs_per_sec = (s64)NSEC_PER_SEC << clock->shift;
	remainder_snsecs += (s64)xtime.tv_nsec << clock->shift;

	now = read_clocksource(clock);
	offset = (now - last_clock_cycle)&clock->mask;

	/* normally this loop will run just once, however in the
	 * case of lost or late ticks, it will accumulate correctly.
	 */
	while (offset > clock->interval_cycles) {
		/* get the ntp interval in clock shifted nanoseconds */
		s64 ntp_snsecs	= current_tick_length(clock->shift);

		/* accumulate one interval */
		remainder_snsecs += clock->interval_snsecs;
		last_clock_cycle += clock->interval_cycles;
		offset -= clock->interval_cycles;

		/* interpolator bits */
		time_interpolator_update(clock->interval_snsecs
						>> clock->shift);

		/* increment the NTP state machine */
		update_ntp_one_tick();

		/* accumulate error between NTP and clock interval */
		error += (ntp_snsecs - (s64)clock->interval_snsecs);

		/* correct the clock when NTP error is too big */
		remainder_snsecs += make_ntp_adj(clock, offset, &error);

		if (remainder_snsecs >= snsecs_per_sec) {
			remainder_snsecs -= snsecs_per_sec;
			xtime.tv_sec++;
			second_overflow();
		}
	}

	/* store full nanoseconds into xtime */
	xtime.tv_nsec = remainder_snsecs >> clock->shift;
	remainder_snsecs -= (s64)xtime.tv_nsec << clock->shift;

	/* check to see if there is a new clocksource to use */
	if (change_clocksource()) {
		error = 0;
		remainder_snsecs = 0;
		calculate_clocksource_interval(clock, tick_nsec);
	}
}
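
/*
 * Illustrative note (not in the original source): the "snsecs" quantities
 * above are nanoseconds in fixed point, shifted left by clock->shift so
 * sub-nanosecond precision survives repeated accumulation.  With
 * shift = 10, one tick at HZ=1000 is
 *
 *	1000000 ns << 10 = 1024000000 shifted-ns
 *
 * per interval; the final ">> clock->shift" converts the accumulated
 * remainder back to whole nanoseconds for xtime.tv_nsec.
 */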
/*
 * Called from the timer interrupt handler to charge one tick to the current
 * process.  user_tick is 1 if the tick is user time, 0 for system.
 */
void update_process_times(int user_tick)
{
	struct task_struct *p = current;
	int cpu = smp_processor_id();

	/* Note: this timer irq context must be accounted for as well. */
	if (user_tick)
		account_user_time(p, jiffies_to_cputime(1));
	else
		account_system_time(p, HARDIRQ_OFFSET, jiffies_to_cputime(1));
	run_local_timers();
	if (rcu_pending(cpu))
		rcu_check_callbacks(cpu, user_tick);
	scheduler_tick();
	run_posix_cpu_timers(p);
}
/*
 * Nr of active tasks - counted in fixed-point numbers
 */
static unsigned long count_active_tasks(void)
{
	return nr_active() * FIXED_1;
}

/*
 * Hmm.. Changed this, as the GNU make sources (load.c) seem to
 * imply that avenrun[] is the standard name for this kind of thing.
 * Nothing else seems to be standardized: the fractional size etc
 * all seem to differ on different machines.
 *
 * Requires xtime_lock to access.
 */
unsigned long avenrun[3];

EXPORT_SYMBOL(avenrun);

/*
 * calc_load - given tick count, update the avenrun load estimates.
 * This is called while holding a write_lock on xtime_lock.
 */
static inline void calc_load(unsigned long ticks)
{
	unsigned long active_tasks; /* fixed-point */
	static int count = LOAD_FREQ;

	count -= ticks;
	if (count < 0) {
		count += LOAD_FREQ;
		active_tasks = count_active_tasks();
		CALC_LOAD(avenrun[0], EXP_1, active_tasks);
		CALC_LOAD(avenrun[1], EXP_5, active_tasks);
		CALC_LOAD(avenrun[2], EXP_15, active_tasks);
	}
}
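
/*
 * Worked example (illustrative, not in the original source; assumes the
 * usual sched.h constants FSHIFT = 11, FIXED_1 = 2048, EXP_1 = 1884):
 * CALC_LOAD is an exponentially-weighted moving average in fixed point,
 *
 *	load = (load * exp + n * (FIXED_1 - exp)) >> FSHIFT
 *
 * Starting from load = 0 with one active task (n = 2048), a single
 * LOAD_FREQ (5 s) sample gives the 1-minute average
 * (0 * 1884 + 2048 * 164) >> 11 = 164, which userspace reports as
 * 164 / 2048 ~= 0.08.
 */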
/* jiffies at the most recent update of wall time */
unsigned long wall_jiffies = INITIAL_JIFFIES;

/*
 * This seqlock protects us from races in SMP while
 * playing with xtime and avenrun.
 */
#ifndef ARCH_HAVE_XTIME_LOCK
seqlock_t xtime_lock __cacheline_aligned_in_smp = SEQLOCK_UNLOCKED;

EXPORT_SYMBOL(xtime_lock);
#endif
/*
 * This function runs timers and the timer-tq in bottom half context.
 */
static void run_timer_softirq(struct softirq_action *h)
{
	tvec_base_t *base = __get_cpu_var(tvec_bases);

	hrtimer_run_queues();
	if (time_after_eq(jiffies, base->timer_jiffies))
		__run_timers(base);
}

/*
 * Called by the local, per-CPU timer interrupt on SMP.
 */
void run_local_timers(void)
{
	raise_softirq(TIMER_SOFTIRQ);
	softlockup_tick();
}
/*
 * Called by the timer interrupt. xtime_lock must already be taken
 * by the timer IRQ!
 */
static inline void update_times(void)
{
	unsigned long ticks;

	ticks = jiffies - wall_jiffies;
	wall_jiffies += ticks;
	update_wall_time();
	calc_load(ticks);
}

/*
 * The 64-bit jiffies value is not atomic - you MUST NOT read it
 * without sampling the sequence number in xtime_lock.
 * jiffies is defined in the linker script...
 */
void do_timer(struct pt_regs *regs)
{
	jiffies_64++;
	/* prevent loading jiffies before storing new jiffies_64 value. */
	barrier();
	update_times();
}
#ifdef __ARCH_WANT_SYS_ALARM

/*
 * For backwards compatibility?  This can be done in libc so Alpha
 * and all newer ports shouldn't need it.
 */
asmlinkage unsigned long sys_alarm(unsigned int seconds)
{
	return alarm_setitimer(seconds);
}

#endif
#ifndef __alpha__

/*
 * The Alpha uses getxpid, getxuid, and getxgid instead.  Maybe this
 * should be moved into arch/i386 instead?
 */

/**
 * sys_getpid - return the thread group id of the current process
 *
 * Note, despite the name, this returns the tgid not the pid.  The tgid and
 * the pid are identical unless CLONE_THREAD was specified on clone() in
 * which case the tgid is the same in all threads of the same group.
 *
 * This is SMP safe as current->tgid does not change.
 */
asmlinkage long sys_getpid(void)
{
	return current->tgid;
}

/*
 * Accessing ->group_leader->real_parent is not SMP-safe, it could
 * change from under us. However, rather than getting any lock
 * we can use an optimistic algorithm: get the parent
 * pid, and go back and check that the parent is still
 * the same. If it has changed (which is extremely unlikely
 * indeed), we just try again..
 *
 * NOTE! This depends on the fact that even if we _do_
 * get an old value of "parent", we can happily dereference
 * the pointer (it was and remains a dereferenceable kernel pointer
 * no matter what): we just can't necessarily trust the result
 * until we know that the parent pointer is valid.
 *
 * NOTE2: ->group_leader never changes from under us.
 */
asmlinkage long sys_getppid(void)
{
	int pid;
	struct task_struct *me = current;
	struct task_struct *parent;

	parent = me->group_leader->real_parent;
	for (;;) {
		pid = parent->tgid;
#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
{
		struct task_struct *old = parent;

		/*
		 * Make sure we read the pid before re-reading the
		 * parent pointer:
		 */
		smp_rmb();
		parent = me->group_leader->real_parent;
		if (old != parent)
			continue;
}
#endif
		break;
	}
	return pid;
}

asmlinkage long sys_getuid(void)
{
	/* Only we change this so SMP safe */
	return current->uid;
}

asmlinkage long sys_geteuid(void)
{
	/* Only we change this so SMP safe */
	return current->euid;
}

asmlinkage long sys_getgid(void)
{
	/* Only we change this so SMP safe */
	return current->gid;
}

asmlinkage long sys_getegid(void)
{
	/* Only we change this so SMP safe */
	return current->egid;
}

#endif
static void process_timeout(unsigned long __data)
{
	wake_up_process((task_t *)__data);
}

/**
 * schedule_timeout - sleep until timeout
 * @timeout: timeout value in jiffies
 *
 * Make the current task sleep until @timeout jiffies have
 * elapsed. The routine will return immediately unless
 * the current task state has been set (see set_current_state()).
 *
 * You can set the task state as follows -
 *
 * %TASK_UNINTERRUPTIBLE - at least @timeout jiffies are guaranteed to
 * pass before the routine returns. The routine will return 0
 *
 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
 * delivered to the current task. In this case the remaining time
 * in jiffies will be returned, or 0 if the timer expired in time
 *
 * The current task state is guaranteed to be TASK_RUNNING when this
 * routine returns.
 *
 * Specifying a @timeout value of %MAX_SCHEDULE_TIMEOUT will schedule
 * the CPU away without a bound on the timeout. In this case the return
 * value will be %MAX_SCHEDULE_TIMEOUT.
 *
 * In all cases the return value is guaranteed to be non-negative.
 */
fastcall signed long __sched schedule_timeout(signed long timeout)
{
	struct timer_list timer;
	unsigned long expire;

	switch (timeout)
	{
	case MAX_SCHEDULE_TIMEOUT:
		/*
		 * These two special cases are useful to be comfortable
		 * in the caller. Nothing more. We could take
		 * MAX_SCHEDULE_TIMEOUT from one of the negative values
		 * but I'd like to return a valid offset (>=0) to allow
		 * the caller to do everything it wants with the retval.
		 */
		schedule();
		goto out;
	default:
		/*
		 * Another bit of PARANOID. Note that the retval will be
		 * 0 since no piece of kernel is supposed to do a check
		 * for a negative retval of schedule_timeout() (since it
		 * should never happen anyway). You just have the printk()
		 * that will tell you if something has gone wrong and where.
		 */
		if (timeout < 0)
		{
			printk(KERN_ERR "schedule_timeout: wrong timeout "
				"value %lx from %p\n", timeout,
				__builtin_return_address(0));
			current->state = TASK_RUNNING;
			goto out;
		}
	}

	expire = timeout + jiffies;

	setup_timer(&timer, process_timeout, (unsigned long)current);
	__mod_timer(&timer, expire);
	schedule();
	del_singleshot_timer_sync(&timer);

	timeout = expire - jiffies;

 out:
	return timeout < 0 ? 0 : timeout;
}
EXPORT_SYMBOL(schedule_timeout);
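
/*
 * Usage sketch (illustrative only, not in the original source): a caller
 * must set the task state first, or schedule_timeout() returns at once:
 *
 *	set_current_state(TASK_INTERRUPTIBLE);
 *	remaining = schedule_timeout(HZ / 2);	// sleep for up to ~500 ms
 *	if (remaining)
 *		;	// woken early, e.g. by a signal
 *
 * The two wrappers below fold the set_current_state() call in.
 */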
/*
 * We can use __set_current_state() here because schedule_timeout() calls
 * schedule() unconditionally.
 */
signed long __sched schedule_timeout_interruptible(signed long timeout)
{
	__set_current_state(TASK_INTERRUPTIBLE);
	return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_interruptible);

signed long __sched schedule_timeout_uninterruptible(signed long timeout)
{
	__set_current_state(TASK_UNINTERRUPTIBLE);
	return schedule_timeout(timeout);
}
EXPORT_SYMBOL(schedule_timeout_uninterruptible);

/* Thread ID - the internal kernel "pid" */
asmlinkage long sys_gettid(void)
{
	return current->pid;
}
/*
 * sys_sysinfo - fill in sysinfo struct
 */
asmlinkage long sys_sysinfo(struct sysinfo __user *info)
{
	struct sysinfo val;
	unsigned long mem_total, sav_total;
	unsigned int mem_unit, bitcount;
	unsigned long seq;

	memset((char *)&val, 0, sizeof(struct sysinfo));

	do {
		struct timespec tp;
		seq = read_seqbegin(&xtime_lock);

		/*
		 * This is annoying.  The below is the same thing
		 * posix_get_clock_monotonic() does, but it wants to
		 * take the lock which we want to cover the loads stuff
		 * too.
		 */

		getnstimeofday(&tp);
		tp.tv_sec += wall_to_monotonic.tv_sec;
		tp.tv_nsec += wall_to_monotonic.tv_nsec;
		if (tp.tv_nsec - NSEC_PER_SEC >= 0) {
			tp.tv_nsec = tp.tv_nsec - NSEC_PER_SEC;
			tp.tv_sec++;
		}
		val.uptime = tp.tv_sec + (tp.tv_nsec ? 1 : 0);

		val.loads[0] = avenrun[0] << (SI_LOAD_SHIFT - FSHIFT);
		val.loads[1] = avenrun[1] << (SI_LOAD_SHIFT - FSHIFT);
		val.loads[2] = avenrun[2] << (SI_LOAD_SHIFT - FSHIFT);

		val.procs = nr_threads;
	} while (read_seqretry(&xtime_lock, seq));

	si_meminfo(&val);
	si_swapinfo(&val);

	/*
	 * If the sum of all the available memory (i.e. ram + swap)
	 * is less than can be stored in a 32 bit unsigned long then
	 * we can be binary compatible with 2.2.x kernels.  If not,
	 * well, in that case 2.2.x was broken anyways...
	 *
	 *  -Erik Andersen <andersee@debian.org>
	 */

	mem_total = val.totalram + val.totalswap;
	if (mem_total < val.totalram || mem_total < val.totalswap)
		goto out;
	bitcount = 0;
	mem_unit = val.mem_unit;
	while (mem_unit > 1) {
		bitcount++;
		mem_unit >>= 1;
		sav_total = mem_total;
		mem_total <<= 1;
		if (mem_total < sav_total)
			goto out;
	}

	/*
	 * If mem_total did not overflow, multiply all memory values by
	 * val.mem_unit and set it to 1.  This leaves things compatible
	 * with 2.2.x, and also retains compatibility with earlier 2.4.x
	 * kernels...
	 */

	val.mem_unit = 1;
	val.totalram <<= bitcount;
	val.freeram <<= bitcount;
	val.sharedram <<= bitcount;
	val.bufferram <<= bitcount;
	val.totalswap <<= bitcount;
	val.freeswap <<= bitcount;
	val.totalhigh <<= bitcount;
	val.freehigh <<= bitcount;

 out:
	if (copy_to_user(info, &val, sizeof(struct sysinfo)))
		return -EFAULT;

	return 0;
}
static int __devinit init_timers_cpu(int cpu)
{
	int j;
	tvec_base_t *base;
	static char __devinitdata tvec_base_done[NR_CPUS];

	if (!tvec_base_done[cpu]) {
		static char boot_done;

		if (boot_done) {
			/*
			 * The APs use this path later in boot
			 */
			base = kmalloc_node(sizeof(*base), GFP_KERNEL,
						cpu_to_node(cpu));
			if (!base)
				return -ENOMEM;
			memset(base, 0, sizeof(*base));
			per_cpu(tvec_bases, cpu) = base;
		} else {
			/*
			 * This is for the boot CPU - we use compile-time
			 * static initialisation because per-cpu memory isn't
			 * ready yet and because the memory allocators are not
			 * initialised either.
			 */
			boot_done = 1;
			base = &boot_tvec_bases;
		}
		tvec_base_done[cpu] = 1;
	} else {
		base = per_cpu(tvec_bases, cpu);
	}

	spin_lock_init(&base->lock);
	for (j = 0; j < TVN_SIZE; j++) {
		INIT_LIST_HEAD(base->tv5.vec + j);
		INIT_LIST_HEAD(base->tv4.vec + j);
		INIT_LIST_HEAD(base->tv3.vec + j);
		INIT_LIST_HEAD(base->tv2.vec + j);
	}
	for (j = 0; j < TVR_SIZE; j++)
		INIT_LIST_HEAD(base->tv1.vec + j);

	base->timer_jiffies = jiffies;
	return 0;
}
#ifdef CONFIG_HOTPLUG_CPU
static void migrate_timer_list(tvec_base_t *new_base, struct list_head *head)
{
	struct timer_list *timer;

	while (!list_empty(head)) {
		timer = list_entry(head->next, struct timer_list, entry);
		detach_timer(timer, 0);
		timer->base = new_base;
		internal_add_timer(new_base, timer);
	}
}

static void __devinit migrate_timers(int cpu)
{
	tvec_base_t *old_base;
	tvec_base_t *new_base;
	int i;

	BUG_ON(cpu_online(cpu));
	old_base = per_cpu(tvec_bases, cpu);
	new_base = get_cpu_var(tvec_bases);

	local_irq_disable();
	spin_lock(&new_base->lock);
	spin_lock(&old_base->lock);

	BUG_ON(old_base->running_timer);

	for (i = 0; i < TVR_SIZE; i++)
		migrate_timer_list(new_base, old_base->tv1.vec + i);
	for (i = 0; i < TVN_SIZE; i++) {
		migrate_timer_list(new_base, old_base->tv2.vec + i);
		migrate_timer_list(new_base, old_base->tv3.vec + i);
		migrate_timer_list(new_base, old_base->tv4.vec + i);
		migrate_timer_list(new_base, old_base->tv5.vec + i);
	}

	spin_unlock(&old_base->lock);
	spin_unlock(&new_base->lock);
	local_irq_enable();
	put_cpu_var(tvec_bases);
}
#endif /* CONFIG_HOTPLUG_CPU */
static int timer_cpu_notify(struct notifier_block *self,
				unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;
	switch(action) {
	case CPU_UP_PREPARE:
		if (init_timers_cpu(cpu) < 0)
			return NOTIFY_BAD;
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DEAD:
		migrate_timers(cpu);
		break;
#endif
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block timers_nb = {
	.notifier_call	= timer_cpu_notify,
};


void __init init_timers(void)
{
	timer_cpu_notify(&timers_nb, (unsigned long)CPU_UP_PREPARE,
				(void *)(long)smp_processor_id());
	register_cpu_notifier(&timers_nb);
	open_softirq(TIMER_SOFTIRQ, run_timer_softirq, NULL);
}
#ifdef CONFIG_TIME_INTERPOLATION

struct time_interpolator *time_interpolator __read_mostly;
static struct time_interpolator *time_interpolator_list __read_mostly;
static DEFINE_SPINLOCK(time_interpolator_lock);

static inline u64 time_interpolator_get_cycles(unsigned int src)
{
	unsigned long (*x)(void);

	switch (src)
	{
		case TIME_SOURCE_FUNCTION:
			x = time_interpolator->addr;
			return x();

		case TIME_SOURCE_MMIO64:
			return readq_relaxed((void __iomem *)time_interpolator->addr);

		case TIME_SOURCE_MMIO32:
			return readl_relaxed((void __iomem *)time_interpolator->addr);

		default: return get_cycles();
	}
}

static inline u64 time_interpolator_get_counter(int writelock)
{
	unsigned int src = time_interpolator->source;

	if (time_interpolator->jitter)
	{
		u64 lcycle;
		u64 now;

		do {
			lcycle = time_interpolator->last_cycle;
			now = time_interpolator_get_cycles(src);
			if (lcycle && time_after(lcycle, now))
				return lcycle;

			/* When holding the xtime write lock, there's no need
			 * to add the overhead of the cmpxchg.  Readers are
			 * forced to retry until the write lock is released.
			 */
			if (writelock) {
				time_interpolator->last_cycle = now;
				return now;
			}
			/* Keep track of the last timer value returned.
			 * The use of cmpxchg here will cause contention
			 * in an SMP environment.
			 */
		} while (unlikely(cmpxchg(&time_interpolator->last_cycle, lcycle, now) != lcycle));
		return now;
	}
	else
		return time_interpolator_get_cycles(src);
}

void time_interpolator_reset(void)
{
	time_interpolator->offset = 0;
	time_interpolator->last_counter = time_interpolator_get_counter(1);
}

#define GET_TI_NSECS(count,i) (((((count) - i->last_counter) & (i)->mask) * (i)->nsec_per_cyc) >> (i)->shift)

unsigned long time_interpolator_get_offset(void)
{
	/* If we do not have a time interpolator set up then just return zero */
	if (!time_interpolator)
		return 0;

	return time_interpolator->offset +
		GET_TI_NSECS(time_interpolator_get_counter(0), time_interpolator);
}

#define INTERPOLATOR_ADJUST 65536
#define INTERPOLATOR_MAX_SKIP 10*INTERPOLATOR_ADJUST

static void time_interpolator_update(long delta_nsec)
{
	u64 counter;
	unsigned long offset;

	/* If there is no time interpolator set up then do nothing */
	if (!time_interpolator)
		return;

	/*
	 * The interpolator compensates for late ticks by accumulating the late
	 * time in time_interpolator->offset. A tick earlier than expected will
	 * lead to a reset of the offset and a corresponding jump of the clock
	 * forward. Again this only works if the interpolator clock is running
	 * slightly slower than the regular clock and the tuning logic ensures
	 * that.
	 */

	counter = time_interpolator_get_counter(1);
	offset = time_interpolator->offset +
			GET_TI_NSECS(counter, time_interpolator);

	if (delta_nsec < 0 || (unsigned long) delta_nsec < offset)
		time_interpolator->offset = offset - delta_nsec;
	else {
		time_interpolator->skips++;
		time_interpolator->ns_skipped += delta_nsec - offset;
		time_interpolator->offset = 0;
	}
	time_interpolator->last_counter = counter;

	/* Tuning logic for time interpolator invoked every minute or so.
	 * Decrease interpolator clock speed if no skips occurred and an
	 * offset is carried.
	 * Increase interpolator clock speed if we skip too much time.
	 */
	if (jiffies % INTERPOLATOR_ADJUST == 0)
	{
		if (time_interpolator->skips == 0 && time_interpolator->offset > tick_nsec)
			time_interpolator->nsec_per_cyc--;
		if (time_interpolator->ns_skipped > INTERPOLATOR_MAX_SKIP && time_interpolator->offset == 0)
			time_interpolator->nsec_per_cyc++;
		time_interpolator->skips = 0;
		time_interpolator->ns_skipped = 0;
	}
}

static inline int
is_better_time_interpolator(struct time_interpolator *new)
{
	if (!time_interpolator)
		return 1;
	return new->frequency > 2*time_interpolator->frequency ||
	    (unsigned long)new->drift < (unsigned long)time_interpolator->drift;
}

void
register_time_interpolator(struct time_interpolator *ti)
{
	unsigned long flags;

	/* Sanity check */
	BUG_ON(ti->frequency == 0 || ti->mask == 0);

	ti->nsec_per_cyc = ((u64)NSEC_PER_SEC << ti->shift) / ti->frequency;
	spin_lock(&time_interpolator_lock);
	write_seqlock_irqsave(&xtime_lock, flags);
	if (is_better_time_interpolator(ti)) {
		time_interpolator = ti;
		time_interpolator_reset();
	}
	write_sequnlock_irqrestore(&xtime_lock, flags);

	ti->next = time_interpolator_list;
	time_interpolator_list = ti;
	spin_unlock(&time_interpolator_lock);
}

void
unregister_time_interpolator(struct time_interpolator *ti)
{
	struct time_interpolator *curr, **prev;
	unsigned long flags;

	spin_lock(&time_interpolator_lock);
	prev = &time_interpolator_list;
	for (curr = *prev; curr; curr = curr->next) {
		if (curr == ti) {
			*prev = curr->next;
			break;
		}
		prev = &curr->next;
	}

	write_seqlock_irqsave(&xtime_lock, flags);
	if (ti == time_interpolator) {
		/* we lost the best time-interpolator: */
		time_interpolator = NULL;
		/* find the next-best interpolator */
		for (curr = time_interpolator_list; curr; curr = curr->next)
			if (is_better_time_interpolator(curr))
				time_interpolator = curr;
		time_interpolator_reset();
	}
	write_sequnlock_irqrestore(&xtime_lock, flags);
	spin_unlock(&time_interpolator_lock);
}
#endif /* CONFIG_TIME_INTERPOLATION */
/**
 * msleep - sleep safely even with waitqueue interruptions
 * @msecs: Time in milliseconds to sleep for
 */
void msleep(unsigned int msecs)
{
	unsigned long timeout = msecs_to_jiffies(msecs) + 1;

	while (timeout)
		timeout = schedule_timeout_uninterruptible(timeout);
}

EXPORT_SYMBOL(msleep);

/**
 * msleep_interruptible - sleep waiting for signals
 * @msecs: Time in milliseconds to sleep for
 */
unsigned long msleep_interruptible(unsigned int msecs)
{
	unsigned long timeout = msecs_to_jiffies(msecs) + 1;

	while (timeout && !signal_pending(current))
		timeout = schedule_timeout_interruptible(timeout);
	return jiffies_to_msecs(timeout);
}

EXPORT_SYMBOL(msleep_interruptible);
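
/*
 * Usage sketch (illustrative only, not in the original source): msleep()
 * always sleeps at least the requested time (the "+ 1" jiffy rounds up),
 * while msleep_interruptible() can return early:
 *
 *	msleep(20);			// at least ~20 ms, never early
 *
 *	if (msleep_interruptible(100))
 *		return -EINTR;		// a signal cut the sleep short
 *
 * Both loop on schedule_timeout_*(), so a spurious wakeup simply
 * re-sleeps for the remaining jiffies.
 */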