/*
 *  linux/kernel/timer.c
 *
 *  Kernel internal timers, kernel timekeeping, basic process system calls
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  1997-01-28  Modified by Finn Arne Gangstad to make timers scale better.
 *
 *  1997-09-10  Updated NTP code according to technical memorandum Jan '96
 *              "A Kernel Model for Precision Timekeeping" by Dave Mills
 *  1998-12-24  Fixed a xtime SMP race (we need the xtime_lock rw spinlock to
 *              serialize accesses to xtime/lost_ticks).
 *                              Copyright (C) 1998  Andrea Arcangeli
 *  1999-03-10  Improved NTP compatibility by Ulrich Windl
 *  2002-05-31  Move sys_sysinfo here and make its locking sane, Robert Love
 *  2000-10-05  Implemented scalable SMP per-CPU timer handling.
 *                              Copyright (C) 2000, 2001, 2002  Ingo Molnar
 *              Designed by David S. Miller, Alexey Kuznetsov and Ingo Molnar
 */
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/notifier.h>
#include <linux/thread_info.h>
#include <linux/time.h>
#include <linux/jiffies.h>
#include <linux/posix-timers.h>
#include <linux/cpu.h>
#include <linux/syscalls.h>

#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/div64.h>
#include <asm/timex.h>
#include <asm/io.h>

#ifdef CONFIG_TIME_INTERPOLATION
static void time_interpolator_update(long delta_nsec);
#else
#define time_interpolator_update(x)
#endif
/*
 * per-CPU timer vector definitions:
 */
#define TVN_BITS (CONFIG_BASE_SMALL ? 4 : 6)
#define TVR_BITS (CONFIG_BASE_SMALL ? 6 : 8)
#define TVN_SIZE (1 << TVN_BITS)
#define TVR_SIZE (1 << TVR_BITS)
#define TVN_MASK (TVN_SIZE - 1)
#define TVR_MASK (TVR_SIZE - 1)

struct timer_base_s {
        spinlock_t lock;
        struct timer_list *running_timer;
};

typedef struct tvec_s {
        struct list_head vec[TVN_SIZE];
} tvec_t;

typedef struct tvec_root_s {
        struct list_head vec[TVR_SIZE];
} tvec_root_t;

struct tvec_t_base_s {
        struct timer_base_s t_base;
        unsigned long timer_jiffies;
        tvec_root_t tv1;
        tvec_t tv2;
        tvec_t tv3;
        tvec_t tv4;
        tvec_t tv5;
} ____cacheline_aligned_in_smp;

typedef struct tvec_t_base_s tvec_base_t;

static DEFINE_PER_CPU(tvec_base_t, tvec_bases);
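
/*
 * Illustrative note (not in the original source): with the default
 * !CONFIG_BASE_SMALL values TVR_BITS = 8 and TVN_BITS = 6, the five
 * vectors cover expiries of
 *
 *      tv1:          0 ... 2^8  - 1  jiffies ahead  (256 slots)
 *      tv2:        2^8 ... 2^14 - 1                 (64 slots)
 *      tv3:       2^14 ... 2^20 - 1                 (64 slots)
 *      tv4:       2^20 ... 2^26 - 1                 (64 slots)
 *      tv5:       2^26 ... 2^32 - 1                 (64 slots)
 *
 * so at HZ=1000 the wheel can represent timeouts of up to ~49.7 days
 * before internal_add_timer() below clamps idx to 0xffffffff.
 */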
static inline void set_running_timer(tvec_base_t *base,
                                        struct timer_list *timer)
{
#ifdef CONFIG_SMP
        base->t_base.running_timer = timer;
#endif
}

static void check_timer_failed(struct timer_list *timer)
{
        static int whine_count;
        if (whine_count < 16) {
                whine_count++;
                printk("Uninitialised timer!\n");
                printk("This is just a warning.  Your computer is OK\n");
                printk("function=0x%p, data=0x%lx\n",
                        timer->function, timer->data);
                dump_stack();
        }
        /*
         * Now fix it up
         */
        timer->magic = TIMER_MAGIC;
}

static inline void check_timer(struct timer_list *timer)
{
        if (timer->magic != TIMER_MAGIC)
                check_timer_failed(timer);
}
static void internal_add_timer(tvec_base_t *base, struct timer_list *timer)
{
        unsigned long expires = timer->expires;
        unsigned long idx = expires - base->timer_jiffies;
        struct list_head *vec;

        if (idx < TVR_SIZE) {
                int i = expires & TVR_MASK;
                vec = base->tv1.vec + i;
        } else if (idx < 1 << (TVR_BITS + TVN_BITS)) {
                int i = (expires >> TVR_BITS) & TVN_MASK;
                vec = base->tv2.vec + i;
        } else if (idx < 1 << (TVR_BITS + 2 * TVN_BITS)) {
                int i = (expires >> (TVR_BITS + TVN_BITS)) & TVN_MASK;
                vec = base->tv3.vec + i;
        } else if (idx < 1 << (TVR_BITS + 3 * TVN_BITS)) {
                int i = (expires >> (TVR_BITS + 2 * TVN_BITS)) & TVN_MASK;
                vec = base->tv4.vec + i;
        } else if ((signed long) idx < 0) {
                /*
                 * Can happen if you add a timer with expires == jiffies,
                 * or you set a timer to go off in the past
                 */
                vec = base->tv1.vec + (base->timer_jiffies & TVR_MASK);
        } else {
                int i;
                /* If the timeout is larger than 0xffffffff on 64-bit
                 * architectures then we use the maximum timeout:
                 */
                if (idx > 0xffffffffUL) {
                        idx = 0xffffffffUL;
                        expires = idx + base->timer_jiffies;
                }
                i = (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK;
                vec = base->tv5.vec + i;
        }
        /*
         * Timers are FIFO:
         */
        list_add_tail(&timer->entry, vec);
}
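
/*
 * Worked example (illustrative, not from the original source): assume
 * TVR_BITS = 8, base->timer_jiffies = 1000 and timer->expires = 1300.
 * Then idx = 300, which is >= TVR_SIZE (256) but < 1 << 14, so the
 * timer goes into tv2 at slot (1300 >> 8) & 63 = 5.  It will later be
 * cascade()d into the matching tv1 slot once timer_jiffies has advanced
 * far enough that the remaining delta fits in tv1.
 */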
typedef struct timer_base_s timer_base_t;
/*
 * Used by TIMER_INITIALIZER, we can't use per_cpu(tvec_bases)
 * at compile time, and we need timer->base to lock the timer.
 */
timer_base_t __init_timer_base
        ____cacheline_aligned_in_smp = { .lock = SPIN_LOCK_UNLOCKED };
EXPORT_SYMBOL(__init_timer_base);

/***
 * init_timer - initialize a timer.
 * @timer: the timer to be initialized
 *
 * init_timer() must be done to a timer prior to calling *any* of the
 * other timer functions.
 */
void fastcall init_timer(struct timer_list *timer)
{
        timer->entry.next = NULL;
        timer->base = &per_cpu(tvec_bases, raw_smp_processor_id()).t_base;
        timer->magic = TIMER_MAGIC;
}
EXPORT_SYMBOL(init_timer);
static inline void detach_timer(struct timer_list *timer,
                                        int clear_pending)
{
        struct list_head *entry = &timer->entry;

        __list_del(entry->prev, entry->next);
        if (clear_pending)
                entry->next = NULL;
        entry->prev = LIST_POISON2;
}

/*
 * We are using hashed locking: holding per_cpu(tvec_bases).t_base.lock
 * means that all timers which are tied to this base via timer->base are
 * locked, and the base itself is locked too.
 *
 * So __run_timers/migrate_timers can safely modify all timers which could
 * be found on ->tvX lists.
 *
 * When the timer's base is locked, and the timer has been removed from the
 * list, it is possible to set timer->base = NULL and drop the lock: the
 * timer remains locked.
 */
static timer_base_t *lock_timer_base(struct timer_list *timer,
                                        unsigned long *flags)
{
        timer_base_t *base;

        for (;;) {
                base = timer->base;
                if (likely(base != NULL)) {
                        spin_lock_irqsave(&base->lock, *flags);
                        if (likely(base == timer->base))
                                return base;
                        /* The timer has migrated to another CPU */
                        spin_unlock_irqrestore(&base->lock, *flags);
                }
                cpu_relax();
        }
}
int __mod_timer(struct timer_list *timer, unsigned long expires)
{
        timer_base_t *base;
        tvec_base_t *new_base;
        unsigned long flags;
        int ret = 0;

        BUG_ON(!timer->function);
        check_timer(timer);

        base = lock_timer_base(timer, &flags);

        if (timer_pending(timer)) {
                detach_timer(timer, 0);
                ret = 1;
        }

        new_base = &__get_cpu_var(tvec_bases);

        if (base != &new_base->t_base) {
                /*
                 * We are trying to schedule the timer on the local CPU.
                 * However we can't change timer's base while it is running,
                 * otherwise del_timer_sync() can't detect that the timer's
                 * handler has not finished yet.  This also guarantees that
                 * the timer is serialized wrt itself.
                 */
                if (unlikely(base->running_timer == timer)) {
                        /* The timer remains on a former base */
                        new_base = container_of(base, tvec_base_t, t_base);
                } else {
                        /* See the comment in lock_timer_base() */
                        timer->base = NULL;
                        spin_unlock(&base->lock);
                        spin_lock(&new_base->t_base.lock);
                        timer->base = &new_base->t_base;
                }
        }

        timer->expires = expires;
        internal_add_timer(new_base, timer);
        spin_unlock_irqrestore(&new_base->t_base.lock, flags);

        return ret;
}
EXPORT_SYMBOL(__mod_timer);
/***
 * add_timer_on - start a timer on a particular CPU
 * @timer: the timer to be added
 * @cpu: the CPU to start it on
 *
 * This is not very scalable on SMP. Double adds are not possible.
 */
void add_timer_on(struct timer_list *timer, int cpu)
{
        tvec_base_t *base = &per_cpu(tvec_bases, cpu);
        unsigned long flags;

        BUG_ON(timer_pending(timer) || !timer->function);
        check_timer(timer);
        spin_lock_irqsave(&base->t_base.lock, flags);
        timer->base = &base->t_base;
        internal_add_timer(base, timer);
        spin_unlock_irqrestore(&base->t_base.lock, flags);
}
/***
 * mod_timer - modify a timer's timeout
 * @timer: the timer to be modified
 * @expires: the new timeout, in jiffies
 *
 * mod_timer() is a more efficient way to update the expires field of an
 * active timer (if the timer is inactive it will be activated)
 *
 * mod_timer(timer, expires) is equivalent to:
 *
 *     del_timer(timer); timer->expires = expires; add_timer(timer);
 *
 * Note that if there are multiple unserialized concurrent users of the
 * same timer, then mod_timer() is the only safe way to modify the timeout,
 * since add_timer() cannot modify an already running timer.
 *
 * The function returns whether it has modified a pending timer or not.
 * (ie. mod_timer() of an inactive timer returns 0, mod_timer() of an
 * active timer returns 1.)
 */
int mod_timer(struct timer_list *timer, unsigned long expires)
{
        BUG_ON(!timer->function);

        check_timer(timer);

        /*
         * This is a common optimization triggered by the
         * networking code - if the timer is re-modified
         * to be the same thing then just return:
         */
        if (timer->expires == expires && timer_pending(timer))
                return 1;

        return __mod_timer(timer, expires);
}
EXPORT_SYMBOL(mod_timer);
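
/*
 * Usage sketch (illustrative, not part of this file): a typical caller
 * initializes the timer once and then (re)arms it with mod_timer():
 *
 *      static void my_timeout(unsigned long data)
 *      {
 *              ... runs in softirq context, must not sleep ...
 *      }
 *
 *      struct timer_list my_timer;
 *
 *      init_timer(&my_timer);
 *      my_timer.function = my_timeout;
 *      my_timer.data = 0;
 *      mod_timer(&my_timer, jiffies + HZ);     - fire in ~1 second
 *
 * "my_timeout" and "my_timer" are made-up names for the example.
 */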
/***
 * del_timer - deactivate a timer.
 * @timer: the timer to be deactivated
 *
 * del_timer() deactivates a timer - this works on both active and inactive
 * timers.
 *
 * The function returns whether it has deactivated a pending timer or not.
 * (ie. del_timer() of an inactive timer returns 0, del_timer() of an
 * active timer returns 1.)
 */
int del_timer(struct timer_list *timer)
{
        timer_base_t *base;
        unsigned long flags;
        int ret = 0;

        check_timer(timer);

        if (timer_pending(timer)) {
                base = lock_timer_base(timer, &flags);
                if (timer_pending(timer)) {
                        detach_timer(timer, 1);
                        ret = 1;
                }
                spin_unlock_irqrestore(&base->lock, flags);
        }

        return ret;
}
EXPORT_SYMBOL(del_timer);
#ifdef CONFIG_SMP
/*
 * This function tries to deactivate a timer. Upon successful (ret >= 0)
 * exit the timer is not queued and the handler is not running on any CPU.
 *
 * It must not be called from interrupt contexts.
 */
int try_to_del_timer_sync(struct timer_list *timer)
{
        timer_base_t *base;
        unsigned long flags;
        int ret = -1;

        base = lock_timer_base(timer, &flags);

        if (base->running_timer == timer)
                goto out;

        ret = 0;
        if (timer_pending(timer)) {
                detach_timer(timer, 1);
                ret = 1;
        }
out:
        spin_unlock_irqrestore(&base->lock, flags);

        return ret;
}

/***
 * del_timer_sync - deactivate a timer and wait for the handler to finish.
 * @timer: the timer to be deactivated
 *
 * This function only differs from del_timer() on SMP: besides deactivating
 * the timer it also makes sure the handler has finished executing on other
 * CPUs.
 *
 * Synchronization rules: callers must prevent restarting of the timer,
 * otherwise this function is meaningless. It must not be called from
 * interrupt contexts. The caller must not hold locks which would prevent
 * completion of the timer's handler. The timer's handler must not call
 * add_timer_on(). Upon exit the timer is not queued and the handler is
 * not running on any CPU.
 *
 * The function returns whether it has deactivated a pending timer or not.
 */
int del_timer_sync(struct timer_list *timer)
{
        check_timer(timer);

        for (;;) {
                int ret = try_to_del_timer_sync(timer);
                if (ret >= 0)
                        return ret;
        }
}
EXPORT_SYMBOL(del_timer_sync);
#endif
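
/*
 * Teardown sketch (illustrative, not part of this file): del_timer_sync()
 * only helps if the handler cannot re-arm the timer afterwards, so exit
 * paths typically clear a "shutting down" flag that the handler checks
 * before calling mod_timer() again, e.g.:
 *
 *      my_dev->shutting_down = 1;      - handler must test this
 *      smp_wmb();                      - before it re-arms itself
 *      del_timer_sync(&my_dev->poll_timer);
 *
 * "my_dev", "shutting_down" and "poll_timer" are made-up names.
 */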
static int cascade(tvec_base_t *base, tvec_t *tv, int index)
{
        /* cascade all the timers from tv up one level */
        struct list_head *head, *curr;

        head = tv->vec + index;
        curr = head->next;
        /*
         * We are removing _all_ timers from the list, so we don't have to
         * detach them individually, just clear the list afterwards.
         */
        while (curr != head) {
                struct timer_list *tmp;

                tmp = list_entry(curr, struct timer_list, entry);
                BUG_ON(tmp->base != &base->t_base);
                curr = curr->next;
                internal_add_timer(base, tmp);
        }
        INIT_LIST_HEAD(head);

        return index;
}
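
/*
 * Illustrative note (not in the original source): with TVR_BITS = 8 and
 * TVN_BITS = 6, the tv1 index wraps every 256 jiffies; at that point one
 * tv2 slot is cascaded down.  The tv2 index in turn wraps every
 * 256 * 64 = 16384 jiffies, triggering a tv3 cascade, and so on -- each
 * level is visited 64 times less often than the one below it, which is
 * what keeps the per-tick work O(1) on average.
 */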
/***
 * __run_timers - run all expired timers (if any) on this CPU.
 * @base: the timer vector to be processed.
 *
 * This function cascades all vectors and executes all expired timer
 * vectors.
 */
#define INDEX(N) (base->timer_jiffies >> (TVR_BITS + N * TVN_BITS)) & TVN_MASK

static inline void __run_timers(tvec_base_t *base)
{
        struct timer_list *timer;

        spin_lock_irq(&base->t_base.lock);
        while (time_after_eq(jiffies, base->timer_jiffies)) {
                struct list_head work_list = LIST_HEAD_INIT(work_list);
                struct list_head *head = &work_list;
                int index = base->timer_jiffies & TVR_MASK;

                /*
                 * Cascade timers:
                 */
                if (!index &&
                        (!cascade(base, &base->tv2, INDEX(0))) &&
                                (!cascade(base, &base->tv3, INDEX(1))) &&
                                        !cascade(base, &base->tv4, INDEX(2)))
                        cascade(base, &base->tv5, INDEX(3));
                ++base->timer_jiffies;
                list_splice_init(base->tv1.vec + index, &work_list);
                while (!list_empty(head)) {
                        void (*fn)(unsigned long);
                        unsigned long data;

                        timer = list_entry(head->next, struct timer_list, entry);
                        fn = timer->function;
                        data = timer->data;

                        set_running_timer(base, timer);
                        detach_timer(timer, 1);
                        spin_unlock_irq(&base->t_base.lock);
                        {
                                u32 preempt_count = preempt_count();
                                fn(data);
                                if (preempt_count != preempt_count()) {
                                        printk("huh, entered %p with %08x, exited with %08x?\n",
                                                fn, preempt_count, preempt_count());
                                        BUG();
                                }
                        }
                        spin_lock_irq(&base->t_base.lock);
                }
        }
        set_running_timer(base, NULL);
        spin_unlock_irq(&base->t_base.lock);
}
#ifdef CONFIG_NO_IDLE_HZ
/*
 * Find out when the next timer event is due to happen. This
 * is used on S/390 to stop all activity when a CPU is idle.
 * This function needs to be called with interrupts disabled.
 */
unsigned long next_timer_interrupt(void)
{
        tvec_base_t *base;
        struct list_head *list;
        struct timer_list *nte;
        unsigned long expires;
        tvec_t *varray[4];
        int i, j;

        base = &__get_cpu_var(tvec_bases);
        spin_lock(&base->t_base.lock);
        expires = base->timer_jiffies + (LONG_MAX >> 1);
        list = NULL;

        /* Look for timer events in tv1. */
        j = base->timer_jiffies & TVR_MASK;
        do {
                list_for_each_entry(nte, base->tv1.vec + j, entry) {
                        expires = nte->expires;
                        if (j < (base->timer_jiffies & TVR_MASK))
                                list = base->tv2.vec + (INDEX(0));
                        goto found;
                }
                j = (j + 1) & TVR_MASK;
        } while (j != (base->timer_jiffies & TVR_MASK));

        /* Check tv2-tv5. */
        varray[0] = &base->tv2;
        varray[1] = &base->tv3;
        varray[2] = &base->tv4;
        varray[3] = &base->tv5;
        for (i = 0; i < 4; i++) {
                j = INDEX(i);
                do {
                        if (list_empty(varray[i]->vec + j)) {
                                j = (j + 1) & TVN_MASK;
                                continue;
                        }
                        list_for_each_entry(nte, varray[i]->vec + j, entry)
                                if (time_before(nte->expires, expires))
                                        expires = nte->expires;
                        if (j < (INDEX(i)) && i < 3)
                                list = varray[i + 1]->vec + (INDEX(i + 1));
                        goto found;
                } while (j != (INDEX(i)));
        }
found:
        if (list) {
                /*
                 * The search wrapped. We need to look at the next list
                 * from the next tv element that would cascade into the tv
                 * element where we found the timer element.
                 */
                list_for_each_entry(nte, list, entry) {
                        if (time_before(nte->expires, expires))
                                expires = nte->expires;
                }
        }
        spin_unlock(&base->t_base.lock);
        return expires;
}
#endif
/******************************************************************/

/*
 * Timekeeping variables
 */
unsigned long tick_usec = TICK_USEC;            /* USER_HZ period (usec) */
unsigned long tick_nsec = TICK_NSEC;            /* ACTHZ period (nsec) */

/*
 * The current time
 * wall_to_monotonic is what we need to add to xtime (or xtime corrected
 * for sub jiffie times) to get to monotonic time.  Monotonic is pegged
 * at zero at system boot time, so wall_to_monotonic will be negative,
 * however, we will ALWAYS keep the tv_nsec part positive so we can use
 * the usual normalization.
 */
struct timespec xtime __attribute__ ((aligned (16)));
struct timespec wall_to_monotonic __attribute__ ((aligned (16)));

EXPORT_SYMBOL(xtime);

/* Don't completely fail for HZ > 500.  */
int tickadj = 500/HZ ? : 1;             /* microsecs */

/*
 * phase-lock loop variables
 */
/* TIME_ERROR prevents overwriting the CMOS clock */
int time_state = TIME_OK;               /* clock synchronization status */
int time_status = STA_UNSYNC;           /* clock status bits            */
long time_offset;                       /* time adjustment (us)         */
long time_constant = 2;                 /* pll time constant            */
long time_tolerance = MAXFREQ;          /* frequency tolerance (ppm)    */
long time_precision = 1;                /* clock precision (us)         */
long time_maxerror = NTP_PHASE_LIMIT;   /* maximum error (us)           */
long time_esterror = NTP_PHASE_LIMIT;   /* estimated error (us)         */
static long time_phase;                 /* phase offset (scaled us)     */
long time_freq = (((NSEC_PER_SEC + HZ/2) % HZ - HZ/2) << SHIFT_USEC) / NSEC_PER_USEC;
                                        /* frequency offset (scaled ppm)*/
static long time_adj;                   /* tick adjust (scaled 1 / HZ)  */
long time_reftime;                      /* time at last adjustment (s)  */
long time_adjust;
long time_next_adjust;
/*
 * this routine handles the overflow of the microsecond field
 *
 * The tricky bits of code to handle the accurate clock support
 * were provided by Dave Mills (Mills@UDEL.EDU) of NTP fame.
 * They were originally developed for SUN and DEC kernels.
 * All the kudos should go to Dave for this stuff.
 *
 */
static void second_overflow(void)
{
        long ltemp;

        /* Bump the maxerror field */
        time_maxerror += time_tolerance >> SHIFT_USEC;
        if (time_maxerror > NTP_PHASE_LIMIT) {
                time_maxerror = NTP_PHASE_LIMIT;
                time_status |= STA_UNSYNC;
        }

        /*
         * Leap second processing. If in leap-insert state at
         * the end of the day, the system clock is set back one
         * second; if in leap-delete state, the system clock is
         * set ahead one second. The microtime() routine or
         * external clock driver will ensure that reported time
         * is always monotonic. The ugly divides should be
         * replaced.
         */
        switch (time_state) {
        case TIME_OK:
                if (time_status & STA_INS)
                        time_state = TIME_INS;
                else if (time_status & STA_DEL)
                        time_state = TIME_DEL;
                break;
        case TIME_INS:
                if (xtime.tv_sec % 86400 == 0) {
                        xtime.tv_sec--;
                        wall_to_monotonic.tv_sec++;
                        /*
                         * The timer interpolator will make time change
                         * gradually instead of an immediate jump by one
                         * second.
                         */
                        time_interpolator_update(-NSEC_PER_SEC);
                        time_state = TIME_OOP;
                        clock_was_set();
                        printk(KERN_NOTICE "Clock: inserting leap second 23:59:60 UTC\n");
                }
                break;
        case TIME_DEL:
                if ((xtime.tv_sec + 1) % 86400 == 0) {
                        xtime.tv_sec++;
                        wall_to_monotonic.tv_sec--;
                        /*
                         * Use of time interpolator for a gradual change of
                         * time
                         */
                        time_interpolator_update(NSEC_PER_SEC);
                        time_state = TIME_WAIT;
                        clock_was_set();
                        printk(KERN_NOTICE "Clock: deleting leap second 23:59:59 UTC\n");
                }
                break;
        case TIME_OOP:
                time_state = TIME_WAIT;
                break;
        case TIME_WAIT:
                if (!(time_status & (STA_INS | STA_DEL)))
                        time_state = TIME_OK;
        }

        /*
         * Compute the phase adjustment for the next second. In
         * PLL mode, the offset is reduced by a fixed factor
         * times the time constant. In FLL mode the offset is
         * used directly. In either mode, the maximum phase
         * adjustment for each second is clamped so as to spread
         * the adjustment over not more than the number of
         * seconds between updates.
         */
        if (time_offset < 0) {
                ltemp = -time_offset;
                if (!(time_status & STA_FLL))
                        ltemp >>= SHIFT_KG + time_constant;
                if (ltemp > (MAXPHASE / MINSEC) << SHIFT_UPDATE)
                        ltemp = (MAXPHASE / MINSEC) << SHIFT_UPDATE;
                time_offset += ltemp;
                time_adj = -ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);
        } else {
                ltemp = time_offset;
                if (!(time_status & STA_FLL))
                        ltemp >>= SHIFT_KG + time_constant;
                if (ltemp > (MAXPHASE / MINSEC) << SHIFT_UPDATE)
                        ltemp = (MAXPHASE / MINSEC) << SHIFT_UPDATE;
                time_offset -= ltemp;
                time_adj = ltemp << (SHIFT_SCALE - SHIFT_HZ - SHIFT_UPDATE);
        }

        /*
         * Compute the frequency estimate and additional phase
         * adjustment due to frequency error for the next
         * second. When the PPS signal is engaged, gnaw on the
         * watchdog counter and update the frequency computed by
         * the pll and the PPS signal.
         */
        pps_valid++;
        if (pps_valid == PPS_VALID) {   /* PPS signal lost */
                pps_jitter = MAXTIME;
                pps_stabil = MAXFREQ;
                time_status &= ~(STA_PPSSIGNAL | STA_PPSJITTER |
                                 STA_PPSWANDER | STA_PPSERROR);
        }
        ltemp = time_freq + pps_freq;
        if (ltemp < 0)
                time_adj -= -ltemp >> (SHIFT_USEC + SHIFT_HZ - SHIFT_SCALE);
        else
                time_adj += ltemp >> (SHIFT_USEC + SHIFT_HZ - SHIFT_SCALE);

#if HZ == 100
        /*
         * Compensate for (HZ==100) != (1 << SHIFT_HZ).  Add 25% and 3.125%
         * to get 128.125; => only 0.125% error (p. 14)
         */
        if (time_adj < 0)
                time_adj -= (-time_adj >> 2) + (-time_adj >> 5);
        else
                time_adj += (time_adj >> 2) + (time_adj >> 5);
#endif
#if HZ == 1000
        /*
         * Compensate for (HZ==1000) != (1 << SHIFT_HZ).  Add 1.5625% and
         * 0.78125% to get 1023.4375; => only 0.05% error (p. 14)
         */
        if (time_adj < 0)
                time_adj -= (-time_adj >> 6) + (-time_adj >> 7);
        else
                time_adj += (time_adj >> 6) + (time_adj >> 7);
#endif
}
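
/*
 * Worked check of the HZ compensation above (illustrative, not in the
 * original source): the PLL math is scaled for 1 << SHIFT_HZ ticks per
 * second.  For HZ == 100, SHIFT_HZ is 7, so the code must scale as if
 * there were 128 ticks per second when there are only 100; multiplying
 * by 1 + 1/4 + 1/32 = 1.28125 turns 100 into 128.125, the "128.125" in
 * the comment, within ~0.1% of the desired 128.  Likewise for
 * HZ == 1000, SHIFT_HZ is 10 and 1 + 1/64 + 1/128 = 1.0234375 turns
 * 1000 into 1023.4375, within ~0.05% of 1024.
 */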
/* in the NTP reference this is called "hardclock()" */
static void update_wall_time_one_tick(void)
{
        long time_adjust_step, delta_nsec;

        if ((time_adjust_step = time_adjust) != 0) {
                /*
                 * We are doing an adjtime thing.
                 *
                 * Prepare time_adjust_step to be within bounds.
                 * Note that a positive time_adjust means we want the clock
                 * to run faster.
                 *
                 * Limit the amount of the step to be in the range
                 * -tickadj .. +tickadj
                 */
                if (time_adjust > tickadj)
                        time_adjust_step = tickadj;
                else if (time_adjust < -tickadj)
                        time_adjust_step = -tickadj;

                /* Reduce by this step the amount of time left */
                time_adjust -= time_adjust_step;
        }
        delta_nsec = tick_nsec + time_adjust_step * 1000;
        /*
         * Advance the phase, once it gets to one microsecond, then
         * advance the tick more.
         */
        time_phase += time_adj;
        if (time_phase <= -FINENSEC) {
                long ltemp = -time_phase >> (SHIFT_SCALE - 10);
                time_phase += ltemp << (SHIFT_SCALE - 10);
                delta_nsec -= ltemp;
        } else if (time_phase >= FINENSEC) {
                long ltemp = time_phase >> (SHIFT_SCALE - 10);
                time_phase -= ltemp << (SHIFT_SCALE - 10);
                delta_nsec += ltemp;
        }
        xtime.tv_nsec += delta_nsec;
        time_interpolator_update(delta_nsec);

        /* Changes by adjtime() do not take effect till next tick. */
        if (time_next_adjust != 0) {
                time_adjust = time_next_adjust;
                time_next_adjust = 0;
        }
}
/*
 * Using a loop looks inefficient, but "ticks" is
 * usually just one (we shouldn't be losing ticks;
 * we're doing it this way mainly for interrupt
 * latency reasons, not because we think we'll
 * have lots of lost timer ticks).
 */
static void update_wall_time(unsigned long ticks)
{
        do {
                ticks--;
                update_wall_time_one_tick();
                if (xtime.tv_nsec >= 1000000000) {
                        xtime.tv_nsec -= 1000000000;
                        xtime.tv_sec++;
                        second_overflow();
                }
        } while (ticks);
}
/*
 * Called from the timer interrupt handler to charge one tick to the current
 * process.  user_tick is 1 if the tick is user time, 0 for system.
 */
void update_process_times(int user_tick)
{
        struct task_struct *p = current;
        int cpu = smp_processor_id();

        /* Note: this timer irq context must be accounted for as well. */
        if (user_tick)
                account_user_time(p, jiffies_to_cputime(1));
        else
                account_system_time(p, HARDIRQ_OFFSET, jiffies_to_cputime(1));
        run_local_timers();
        if (rcu_pending(cpu))
                rcu_check_callbacks(cpu, user_tick);
        scheduler_tick();
        run_posix_cpu_timers(p);
}
/*
 * Nr of active tasks - counted in fixed-point numbers
 */
static unsigned long count_active_tasks(void)
{
        return (nr_running() + nr_uninterruptible()) * FIXED_1;
}

/*
 * Hmm.. Changed this, as the GNU make sources (load.c) seem to
 * imply that avenrun[] is the standard name for this kind of thing.
 * Nothing else seems to be standardized: the fractional size etc
 * all seem to differ on different machines.
 *
 * Requires xtime_lock to access.
 */
unsigned long avenrun[3];

EXPORT_SYMBOL(avenrun);

/*
 * calc_load - given tick count, update the avenrun load estimates.
 * This is called while holding a write_lock on xtime_lock.
 */
static inline void calc_load(unsigned long ticks)
{
        unsigned long active_tasks; /* fixed-point */
        static int count = LOAD_FREQ;

        count -= ticks;
        if (count < 0) {
                count += LOAD_FREQ;
                active_tasks = count_active_tasks();
                CALC_LOAD(avenrun[0], EXP_1, active_tasks);
                CALC_LOAD(avenrun[1], EXP_5, active_tasks);
                CALC_LOAD(avenrun[2], EXP_15, active_tasks);
        }
}
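
/*
 * Illustrative note (not in the original source): CALC_LOAD (from
 * <linux/sched.h>) computes an exponentially-decaying average in 11-bit
 * fixed point (FSHIFT = 11, FIXED_1 = 2048), roughly:
 *
 *      load = (load * exp + active * (FIXED_1 - exp)) >> FSHIFT
 *
 * With LOAD_FREQ = 5*HZ the update runs every ~5 seconds, and EXP_1 =
 * 1884 corresponds to e^(-5/60): e.g. an idle system at load 1.0
 * (avenrun[0] == 2048) decays to 2048 * 1884 >> 11 = 1884 (~0.92)
 * after one update.
 */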
/* jiffies at the most recent update of wall time */
unsigned long wall_jiffies = INITIAL_JIFFIES;

/*
 * This read-write spinlock protects us from races in SMP while
 * playing with xtime and avenrun.
 */
#ifndef ARCH_HAVE_XTIME_LOCK
seqlock_t xtime_lock __cacheline_aligned_in_smp = SEQLOCK_UNLOCKED;

EXPORT_SYMBOL(xtime_lock);
#endif

/*
 * This function runs timers and the timer-tq in bottom half context.
 */
static void run_timer_softirq(struct softirq_action *h)
{
        tvec_base_t *base = &__get_cpu_var(tvec_bases);

        if (time_after_eq(jiffies, base->timer_jiffies))
                __run_timers(base);
}
/*
 * Called by the local, per-CPU timer interrupt on SMP.
 */
void run_local_timers(void)
{
        raise_softirq(TIMER_SOFTIRQ);
}

/*
 * Called by the timer interrupt. xtime_lock must already be taken
 * by the timer IRQ!
 */
static inline void update_times(void)
{
        unsigned long ticks;

        ticks = jiffies - wall_jiffies;
        if (ticks) {
                wall_jiffies += ticks;
                update_wall_time(ticks);
        }
        calc_load(ticks);
}

/*
 * The 64-bit jiffies value is not atomic - you MUST NOT read it
 * without sampling the sequence number in xtime_lock.
 * jiffies is defined in the linker script...
 */
void do_timer(struct pt_regs *regs)
{
        jiffies_64++;
        update_times();
}
#ifdef __ARCH_WANT_SYS_ALARM

/*
 * For backwards compatibility?  This can be done in libc so Alpha
 * and all newer ports shouldn't need it.
 */
asmlinkage unsigned long sys_alarm(unsigned int seconds)
{
        struct itimerval it_new, it_old;
        unsigned int oldalarm;

        it_new.it_interval.tv_sec = it_new.it_interval.tv_usec = 0;
        it_new.it_value.tv_sec = seconds;
        it_new.it_value.tv_usec = 0;
        do_setitimer(ITIMER_REAL, &it_new, &it_old);
        oldalarm = it_old.it_value.tv_sec;
        /* ehhh.. We can't return 0 if we have an alarm pending.. */
        /* And we'd better return too much than too little anyway */
        if ((!oldalarm && it_old.it_value.tv_usec) || it_old.it_value.tv_usec >= 500000)
                oldalarm++;
        return oldalarm;
}

#endif
#ifndef __alpha__

/*
 * The Alpha uses getxpid, getxuid, and getxgid instead.  Maybe this
 * should be moved into arch/i386 instead?
 */

/**
 * sys_getpid - return the thread group id of the current process
 *
 * Note, despite the name, this returns the tgid not the pid.  The tgid and
 * the pid are identical unless CLONE_THREAD was specified on clone() in
 * which case the tgid is the same in all threads of the same group.
 *
 * This is SMP safe as current->tgid does not change.
 */
asmlinkage long sys_getpid(void)
{
        return current->tgid;
}

/*
 * Accessing ->group_leader->real_parent is not SMP-safe, it could
 * change from under us. However, rather than getting any lock
 * we can use an optimistic algorithm: get the parent
 * pid, and go back and check that the parent is still
 * the same. If it has changed (which is extremely unlikely
 * indeed), we just try again..
 *
 * NOTE! This depends on the fact that even if we _do_
 * get an old value of "parent", we can happily dereference
 * the pointer (it was and remains a dereferenceable kernel pointer
 * no matter what): we just can't necessarily trust the result
 * until we know that the parent pointer is valid.
 *
 * NOTE2: ->group_leader never changes from under us.
 */
asmlinkage long sys_getppid(void)
{
        int pid;
        struct task_struct *me = current;
        struct task_struct *parent;

        parent = me->group_leader->real_parent;
        for (;;) {
                pid = parent->tgid;
#ifdef CONFIG_SMP
                {
                        struct task_struct *old = parent;

                        /*
                         * Make sure we read the pid before re-reading the
                         * parent pointer:
                         */
                        smp_rmb();
                        parent = me->group_leader->real_parent;
                        if (old != parent)
                                continue;
                }
#endif
                break;
        }
        return pid;
}

asmlinkage long sys_getuid(void)
{
        /* Only we change this so SMP safe */
        return current->uid;
}

asmlinkage long sys_geteuid(void)
{
        /* Only we change this so SMP safe */
        return current->euid;
}

asmlinkage long sys_getgid(void)
{
        /* Only we change this so SMP safe */
        return current->gid;
}

asmlinkage long sys_getegid(void)
{
        /* Only we change this so SMP safe */
        return current->egid;
}

#endif
static void process_timeout(unsigned long __data)
{
        wake_up_process((task_t *)__data);
}

/**
 * schedule_timeout - sleep until timeout
 * @timeout: timeout value in jiffies
 *
 * Make the current task sleep until @timeout jiffies have
 * elapsed. The routine will return immediately unless
 * the current task state has been set (see set_current_state()).
 *
 * You can set the task state as follows -
 *
 * %TASK_UNINTERRUPTIBLE - at least @timeout jiffies are guaranteed to
 * pass before the routine returns. The routine will return 0.
 *
 * %TASK_INTERRUPTIBLE - the routine may return early if a signal is
 * delivered to the current task. In this case the remaining time
 * in jiffies will be returned, or 0 if the timer expired in time.
 *
 * The current task state is guaranteed to be TASK_RUNNING when this
 * routine returns.
 *
 * Specifying a @timeout value of %MAX_SCHEDULE_TIMEOUT will schedule
 * the CPU away without a bound on the timeout. In this case the return
 * value will be %MAX_SCHEDULE_TIMEOUT.
 *
 * In all cases the return value is guaranteed to be non-negative.
 */
fastcall signed long __sched schedule_timeout(signed long timeout)
{
        struct timer_list timer;
        unsigned long expire;

        switch (timeout)
        {
        case MAX_SCHEDULE_TIMEOUT:
                /*
                 * These two special cases are useful to be comfortable
                 * in the caller. Nothing more. We could take
                 * MAX_SCHEDULE_TIMEOUT from one of the negative values
                 * but I'd like to return a valid offset (>=0) to allow
                 * the caller to do everything it wants with the retval.
                 */
                schedule();
                goto out;
        default:
                /*
                 * Another bit of PARANOID. Note that the retval will be
                 * 0 since no piece of kernel is supposed to do a check
                 * for a negative retval of schedule_timeout() (since it
                 * should never happen anyway). You just have the printk()
                 * that will tell you if something has gone wrong and where.
                 */
                if (timeout < 0)
                {
                        printk(KERN_ERR "schedule_timeout: wrong timeout "
                                "value %lx from %p\n", timeout,
                                __builtin_return_address(0));
                        current->state = TASK_RUNNING;
                        goto out;
                }
        }

        expire = timeout + jiffies;

        init_timer(&timer);
        timer.expires = expire;
        timer.data = (unsigned long) current;
        timer.function = process_timeout;

        add_timer(&timer);
        schedule();
        del_singleshot_timer_sync(&timer);

        timeout = expire - jiffies;

out:
        return timeout < 0 ? 0 : timeout;
}

EXPORT_SYMBOL(schedule_timeout);
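
/*
 * Usage sketch (illustrative, not part of this file): the task state must
 * be set *before* calling schedule_timeout(), otherwise it returns
 * immediately.  A typical interruptible wait of up to one second:
 *
 *      set_current_state(TASK_INTERRUPTIBLE);
 *      remaining = schedule_timeout(HZ);
 *      if (remaining)
 *              ... woken early, e.g. by a signal ...
 *
 * "remaining" is a made-up local name for the example.
 */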
/* Thread ID - the internal kernel "pid" */
asmlinkage long sys_gettid(void)
{
        return current->pid;
}

static long __sched nanosleep_restart(struct restart_block *restart)
{
        unsigned long expire = restart->arg0, now = jiffies;
        struct timespec __user *rmtp = (struct timespec __user *) restart->arg1;
        long ret;

        /* Did it expire while we handled signals? */
        if (!time_after(expire, now))
                return 0;

        current->state = TASK_INTERRUPTIBLE;
        expire = schedule_timeout(expire - now);

        ret = 0;
        if (expire) {
                struct timespec t;
                jiffies_to_timespec(expire, &t);

                ret = -ERESTART_RESTARTBLOCK;
                if (rmtp && copy_to_user(rmtp, &t, sizeof(t)))
                        ret = -EFAULT;
                /* The 'restart' block is already filled in */
        }
        return ret;
}
asmlinkage long sys_nanosleep(struct timespec __user *rqtp, struct timespec __user *rmtp)
{
        struct timespec t;
        unsigned long expire;
        long ret;

        if (copy_from_user(&t, rqtp, sizeof(t)))
                return -EFAULT;

        if ((t.tv_nsec >= 1000000000L) || (t.tv_nsec < 0) || (t.tv_sec < 0))
                return -EINVAL;

        expire = timespec_to_jiffies(&t) + (t.tv_sec || t.tv_nsec);
        current->state = TASK_INTERRUPTIBLE;
        expire = schedule_timeout(expire);

        ret = 0;
        if (expire) {
                struct restart_block *restart;
                jiffies_to_timespec(expire, &t);
                if (rmtp && copy_to_user(rmtp, &t, sizeof(t)))
                        return -EFAULT;

                restart = &current_thread_info()->restart_block;
                restart->fn = nanosleep_restart;
                restart->arg0 = jiffies + expire;
                restart->arg1 = (unsigned long) rmtp;
                ret = -ERESTART_RESTARTBLOCK;
        }
        return ret;
}
/*
 * sys_sysinfo - fill in sysinfo struct
 */
asmlinkage long sys_sysinfo(struct sysinfo __user *info)
{
        struct sysinfo val;
        unsigned long mem_total, sav_total;
        unsigned int mem_unit, bitcount;
        unsigned long seq;

        memset((char *)&val, 0, sizeof(struct sysinfo));

        do {
                struct timespec tp;
                seq = read_seqbegin(&xtime_lock);

                /*
                 * This is annoying. The below is the same thing
                 * posix_get_clock_monotonic() does, but it wants to
                 * take the lock which we want to cover the loads stuff
                 * too.
                 */

                getnstimeofday(&tp);
                tp.tv_sec += wall_to_monotonic.tv_sec;
                tp.tv_nsec += wall_to_monotonic.tv_nsec;
                if (tp.tv_nsec - NSEC_PER_SEC >= 0) {
                        tp.tv_nsec = tp.tv_nsec - NSEC_PER_SEC;
                        tp.tv_sec++;
                }
                val.uptime = tp.tv_sec + (tp.tv_nsec ? 1 : 0);

                val.loads[0] = avenrun[0] << (SI_LOAD_SHIFT - FSHIFT);
                val.loads[1] = avenrun[1] << (SI_LOAD_SHIFT - FSHIFT);
                val.loads[2] = avenrun[2] << (SI_LOAD_SHIFT - FSHIFT);

                val.procs = nr_threads;
        } while (read_seqretry(&xtime_lock, seq));

        si_meminfo(&val);
        si_swapinfo(&val);

        /*
         * If the sum of all the available memory (i.e. ram + swap)
         * is less than can be stored in a 32 bit unsigned long then
         * we can be binary compatible with 2.2.x kernels.  If not,
         * well, in that case 2.2.x was broken anyways...
         *
         *  -Erik Andersen <andersee@debian.org>
         */
        mem_total = val.totalram + val.totalswap;
        if (mem_total < val.totalram || mem_total < val.totalswap)
                goto out;
        bitcount = 0;
        mem_unit = val.mem_unit;
        while (mem_unit > 1) {
                bitcount++;
                mem_unit >>= 1;
                sav_total = mem_total;
                mem_total <<= 1;
                if (mem_total < sav_total)
                        goto out;
        }

        /*
         * If mem_total did not overflow, multiply all memory values by
         * val.mem_unit and set it to 1.  This leaves things compatible
         * with 2.2.x, and also retains compatibility with earlier 2.4.x
         * kernels...
         */

        val.mem_unit = 1;
        val.totalram <<= bitcount;
        val.freeram <<= bitcount;
        val.sharedram <<= bitcount;
        val.bufferram <<= bitcount;
        val.totalswap <<= bitcount;
        val.freeswap <<= bitcount;
        val.totalhigh <<= bitcount;
        val.freehigh <<= bitcount;

out:
        if (copy_to_user(info, &val, sizeof(struct sysinfo)))
                return -EFAULT;

        return 0;
}
static void __devinit init_timers_cpu(int cpu)
{
        int j;
        tvec_base_t *base;

        base = &per_cpu(tvec_bases, cpu);
        spin_lock_init(&base->t_base.lock);
        for (j = 0; j < TVN_SIZE; j++) {
                INIT_LIST_HEAD(base->tv5.vec + j);
                INIT_LIST_HEAD(base->tv4.vec + j);
                INIT_LIST_HEAD(base->tv3.vec + j);
                INIT_LIST_HEAD(base->tv2.vec + j);
        }
        for (j = 0; j < TVR_SIZE; j++)
                INIT_LIST_HEAD(base->tv1.vec + j);

        base->timer_jiffies = jiffies;
}
#ifdef CONFIG_HOTPLUG_CPU
static void migrate_timer_list(tvec_base_t *new_base, struct list_head *head)
{
        struct timer_list *timer;

        while (!list_empty(head)) {
                timer = list_entry(head->next, struct timer_list, entry);
                detach_timer(timer, 0);
                timer->base = &new_base->t_base;
                internal_add_timer(new_base, timer);
        }
}

static void __devinit migrate_timers(int cpu)
{
        tvec_base_t *old_base;
        tvec_base_t *new_base;
        int i;

        BUG_ON(cpu_online(cpu));
        old_base = &per_cpu(tvec_bases, cpu);
        new_base = &get_cpu_var(tvec_bases);

        local_irq_disable();
        spin_lock(&new_base->t_base.lock);
        spin_lock(&old_base->t_base.lock);

        if (old_base->t_base.running_timer)
                BUG();
        for (i = 0; i < TVR_SIZE; i++)
                migrate_timer_list(new_base, old_base->tv1.vec + i);
        for (i = 0; i < TVN_SIZE; i++) {
                migrate_timer_list(new_base, old_base->tv2.vec + i);
                migrate_timer_list(new_base, old_base->tv3.vec + i);
                migrate_timer_list(new_base, old_base->tv4.vec + i);
                migrate_timer_list(new_base, old_base->tv5.vec + i);
        }

        spin_unlock(&old_base->t_base.lock);
        spin_unlock(&new_base->t_base.lock);
        local_irq_enable();
        put_cpu_var(tvec_bases);
}
#endif /* CONFIG_HOTPLUG_CPU */
static int __devinit timer_cpu_notify(struct notifier_block *self,
                                unsigned long action, void *hcpu)
{
        long cpu = (long)hcpu;
        switch(action) {
        case CPU_UP_PREPARE:
                init_timers_cpu(cpu);
                break;
#ifdef CONFIG_HOTPLUG_CPU
        case CPU_DEAD:
                migrate_timers(cpu);
                break;
#endif
        default:
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block __devinitdata timers_nb = {
        .notifier_call  = timer_cpu_notify,
};


void __init init_timers(void)
{
        timer_cpu_notify(&timers_nb, (unsigned long)CPU_UP_PREPARE,
                                (void *)(long)smp_processor_id());
        register_cpu_notifier(&timers_nb);
        open_softirq(TIMER_SOFTIRQ, run_timer_softirq, NULL);
}
#ifdef CONFIG_TIME_INTERPOLATION

struct time_interpolator *time_interpolator;
static struct time_interpolator *time_interpolator_list;
static DEFINE_SPINLOCK(time_interpolator_lock);

static inline u64 time_interpolator_get_cycles(unsigned int src)
{
        unsigned long (*x)(void);

        switch (src)
        {
        case TIME_SOURCE_FUNCTION:
                x = time_interpolator->addr;
                return x();

        case TIME_SOURCE_MMIO64:
                return readq((void __iomem *)time_interpolator->addr);

        case TIME_SOURCE_MMIO32:
                return readl((void __iomem *)time_interpolator->addr);

        default:
                return get_cycles();
        }
}

static inline u64 time_interpolator_get_counter(void)
{
        unsigned int src = time_interpolator->source;

        if (time_interpolator->jitter)
        {
                u64 lcycle;
                u64 now;

                do {
                        lcycle = time_interpolator->last_cycle;
                        now = time_interpolator_get_cycles(src);
                        if (lcycle && time_after(lcycle, now))
                                return lcycle;
                        /*
                         * Keep track of the last timer value returned. The
                         * use of cmpxchg here will cause contention in an
                         * SMP environment.
                         */
                } while (unlikely(cmpxchg(&time_interpolator->last_cycle, lcycle, now) != lcycle));
                return now;
        }
        else
                return time_interpolator_get_cycles(src);
}

void time_interpolator_reset(void)
{
        time_interpolator->offset = 0;
        time_interpolator->last_counter = time_interpolator_get_counter();
}

#define GET_TI_NSECS(count,i) (((((count) - i->last_counter) & (i)->mask) * (i)->nsec_per_cyc) >> (i)->shift)

unsigned long time_interpolator_get_offset(void)
{
        /* If we do not have a time interpolator set up then just return zero */
        if (!time_interpolator)
                return 0;

        return time_interpolator->offset +
                GET_TI_NSECS(time_interpolator_get_counter(), time_interpolator);
}
#define INTERPOLATOR_ADJUST 65536
#define INTERPOLATOR_MAX_SKIP 10*INTERPOLATOR_ADJUST

static void time_interpolator_update(long delta_nsec)
{
        u64 counter;
        unsigned long offset;

        /* If there is no time interpolator set up then do nothing */
        if (!time_interpolator)
                return;

        /*
         * The interpolator compensates for late ticks by accumulating the
         * late time in time_interpolator->offset. A tick earlier than
         * expected will lead to a reset of the offset and a corresponding
         * jump of the clock forward. Again this only works if the
         * interpolator clock is running slightly slower than the regular
         * clock and the tuning logic ensures that.
         */

        counter = time_interpolator_get_counter();
        offset = time_interpolator->offset + GET_TI_NSECS(counter, time_interpolator);

        if (delta_nsec < 0 || (unsigned long) delta_nsec < offset)
                time_interpolator->offset = offset - delta_nsec;
        else {
                time_interpolator->skips++;
                time_interpolator->ns_skipped += delta_nsec - offset;
                time_interpolator->offset = 0;
        }
        time_interpolator->last_counter = counter;

        /*
         * Tuning logic for time interpolator invoked every minute or so.
         * Decrease interpolator clock speed if no skips occurred and an
         * offset is carried. Increase interpolator clock speed if we skip
         * too much time.
         */
        if (jiffies % INTERPOLATOR_ADJUST == 0)
        {
                if (time_interpolator->skips == 0 && time_interpolator->offset > TICK_NSEC)
                        time_interpolator->nsec_per_cyc--;
                if (time_interpolator->ns_skipped > INTERPOLATOR_MAX_SKIP && time_interpolator->offset == 0)
                        time_interpolator->nsec_per_cyc++;
                time_interpolator->skips = 0;
                time_interpolator->ns_skipped = 0;
        }
}

static inline int
is_better_time_interpolator(struct time_interpolator *new)
{
        if (!time_interpolator)
                return 1;
        return new->frequency > 2*time_interpolator->frequency ||
            (unsigned long)new->drift < (unsigned long)time_interpolator->drift;
}
void
register_time_interpolator(struct time_interpolator *ti)
{
        unsigned long flags;

        /* Sanity check */
        if (ti->frequency == 0 || ti->mask == 0)
                BUG();

        ti->nsec_per_cyc = ((u64)NSEC_PER_SEC << ti->shift) / ti->frequency;
        spin_lock(&time_interpolator_lock);
        write_seqlock_irqsave(&xtime_lock, flags);
        if (is_better_time_interpolator(ti)) {
                time_interpolator = ti;
                time_interpolator_reset();
        }
        write_sequnlock_irqrestore(&xtime_lock, flags);

        ti->next = time_interpolator_list;
        time_interpolator_list = ti;
        spin_unlock(&time_interpolator_lock);
}

void
unregister_time_interpolator(struct time_interpolator *ti)
{
        struct time_interpolator *curr, **prev;
        unsigned long flags;

        spin_lock(&time_interpolator_lock);
        prev = &time_interpolator_list;
        for (curr = *prev; curr; curr = curr->next) {
                if (curr == ti) {
                        *prev = curr->next;
                        break;
                }
                prev = &curr->next;
        }

        write_seqlock_irqsave(&xtime_lock, flags);
        if (ti == time_interpolator) {
                /* we lost the best time-interpolator: */
                time_interpolator = NULL;
                /* find the next-best interpolator */
                for (curr = time_interpolator_list; curr; curr = curr->next)
                        if (is_better_time_interpolator(curr))
                                time_interpolator = curr;
                time_interpolator_reset();
        }
        write_sequnlock_irqrestore(&xtime_lock, flags);
        spin_unlock(&time_interpolator_lock);
}
#endif /* CONFIG_TIME_INTERPOLATION */
/**
 * msleep - sleep safely even with waitqueue interruptions
 * @msecs: Time in milliseconds to sleep for
 */
void msleep(unsigned int msecs)
{
        unsigned long timeout = msecs_to_jiffies(msecs) + 1;

        while (timeout) {
                set_current_state(TASK_UNINTERRUPTIBLE);
                timeout = schedule_timeout(timeout);
        }
}

EXPORT_SYMBOL(msleep);

/**
 * msleep_interruptible - sleep waiting for waitqueue interruptions
 * @msecs: Time in milliseconds to sleep for
 */
unsigned long msleep_interruptible(unsigned int msecs)
{
        unsigned long timeout = msecs_to_jiffies(msecs) + 1;

        while (timeout && !signal_pending(current)) {
                set_current_state(TASK_INTERRUPTIBLE);
                timeout = schedule_timeout(timeout);
        }
        return jiffies_to_msecs(timeout);
}

EXPORT_SYMBOL(msleep_interruptible);