/*
 * linux/kernel/time/timekeeping.c
 *
 * Kernel timekeeping code and accessor functions
 *
 * This code was moved from linux/kernel/timer.c.
 * Please see that file for copyright and history logs.
 *
 */
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/sysdev.h>
#include <linux/clocksource.h>
#include <linux/jiffies.h>
#include <linux/time.h>
#include <linux/tick.h>
/*
 * This read-write spinlock protects us from races in SMP while
 * playing with xtime.
 */
__cacheline_aligned_in_smp DEFINE_SEQLOCK(xtime_lock);

/*
 * The current time
 * wall_to_monotonic is what we need to add to xtime (or xtime corrected
 * for sub jiffie times) to get to monotonic time. Monotonic is pegged
 * at zero at system boot time, so wall_to_monotonic will be negative,
 * however, we will ALWAYS keep the tv_nsec part positive so we can use
 * the usual normalization.
 *
 * wall_to_monotonic is moved after resume from suspend for the monotonic
 * time not to jump. We need to add total_sleep_time to wall_to_monotonic
 * to get the real boot based time offset.
 *
 * - wall_to_monotonic is no longer the boot time, getboottime must be
 * used instead.
 */
struct timespec xtime __attribute__ ((aligned (16)));
struct timespec wall_to_monotonic __attribute__ ((aligned (16)));
static unsigned long total_sleep_time;		/* seconds */

/* flag for if timekeeping is suspended */
int __read_mostly timekeeping_suspended;

/* xtime plus a sub-tick nanosecond offset; see update_xtime_cache() */
static struct timespec xtime_cache __attribute__ ((aligned (16)));
  46. void update_xtime_cache(u64 nsec)
  47. {
  48. xtime_cache = xtime;
  49. timespec_add_ns(&xtime_cache, nsec);
  50. }
/* the clocksource currently driving the timekeeping core */
struct clocksource *clock;

/* must hold xtime_lock */
void timekeeping_leap_insert(int leapsecond)
{
	/* shift wall time; offset wall_to_monotonic the opposite way so
	 * monotonic time stays continuous across the leap second */
	xtime.tv_sec += leapsecond;
	wall_to_monotonic.tv_sec -= leapsecond;
	update_vsyscall(&xtime, clock);
}
  59. #ifdef CONFIG_GENERIC_TIME
  60. /**
  61. * clocksource_forward_now - update clock to the current time
  62. *
  63. * Forward the current clock to update its state since the last call to
  64. * update_wall_time(). This is useful before significant clock changes,
  65. * as it avoids having to deal with this time offset explicitly.
  66. */
  67. static void clocksource_forward_now(void)
  68. {
  69. cycle_t cycle_now, cycle_delta;
  70. s64 nsec;
  71. cycle_now = clock->read(clock);
  72. cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
  73. clock->cycle_last = cycle_now;
  74. nsec = cyc2ns(clock, cycle_delta);
  75. /* If arch requires, add in gettimeoffset() */
  76. nsec += arch_gettimeoffset();
  77. timespec_add_ns(&xtime, nsec);
  78. nsec = ((s64)cycle_delta * clock->mult_orig) >> clock->shift;
  79. clock->raw_time.tv_nsec += nsec;
  80. }
  81. /**
  82. * getnstimeofday - Returns the time of day in a timespec
  83. * @ts: pointer to the timespec to be set
  84. *
  85. * Returns the time of day in a timespec.
  86. */
  87. void getnstimeofday(struct timespec *ts)
  88. {
  89. cycle_t cycle_now, cycle_delta;
  90. unsigned long seq;
  91. s64 nsecs;
  92. WARN_ON(timekeeping_suspended);
  93. do {
  94. seq = read_seqbegin(&xtime_lock);
  95. *ts = xtime;
  96. /* read clocksource: */
  97. cycle_now = clock->read(clock);
  98. /* calculate the delta since the last update_wall_time: */
  99. cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
  100. /* convert to nanoseconds: */
  101. nsecs = cyc2ns(clock, cycle_delta);
  102. /* If arch requires, add in gettimeoffset() */
  103. nsecs += arch_gettimeoffset();
  104. } while (read_seqretry(&xtime_lock, seq));
  105. timespec_add_ns(ts, nsecs);
  106. }
  107. EXPORT_SYMBOL(getnstimeofday);
  108. ktime_t ktime_get(void)
  109. {
  110. cycle_t cycle_now, cycle_delta;
  111. unsigned int seq;
  112. s64 secs, nsecs;
  113. WARN_ON(timekeeping_suspended);
  114. do {
  115. seq = read_seqbegin(&xtime_lock);
  116. secs = xtime.tv_sec + wall_to_monotonic.tv_sec;
  117. nsecs = xtime.tv_nsec + wall_to_monotonic.tv_nsec;
  118. /* read clocksource: */
  119. cycle_now = clock->read(clock);
  120. /* calculate the delta since the last update_wall_time: */
  121. cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
  122. /* convert to nanoseconds: */
  123. nsecs += cyc2ns(clock, cycle_delta);
  124. } while (read_seqretry(&xtime_lock, seq));
  125. /*
  126. * Use ktime_set/ktime_add_ns to create a proper ktime on
  127. * 32-bit architectures without CONFIG_KTIME_SCALAR.
  128. */
  129. return ktime_add_ns(ktime_set(secs, 0), nsecs);
  130. }
  131. EXPORT_SYMBOL_GPL(ktime_get);
  132. /**
  133. * ktime_get_ts - get the monotonic clock in timespec format
  134. * @ts: pointer to timespec variable
  135. *
  136. * The function calculates the monotonic clock from the realtime
  137. * clock and the wall_to_monotonic offset and stores the result
  138. * in normalized timespec format in the variable pointed to by @ts.
  139. */
  140. void ktime_get_ts(struct timespec *ts)
  141. {
  142. cycle_t cycle_now, cycle_delta;
  143. struct timespec tomono;
  144. unsigned int seq;
  145. s64 nsecs;
  146. WARN_ON(timekeeping_suspended);
  147. do {
  148. seq = read_seqbegin(&xtime_lock);
  149. *ts = xtime;
  150. tomono = wall_to_monotonic;
  151. /* read clocksource: */
  152. cycle_now = clock->read(clock);
  153. /* calculate the delta since the last update_wall_time: */
  154. cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
  155. /* convert to nanoseconds: */
  156. nsecs = cyc2ns(clock, cycle_delta);
  157. } while (read_seqretry(&xtime_lock, seq));
  158. set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec,
  159. ts->tv_nsec + tomono.tv_nsec + nsecs);
  160. }
  161. EXPORT_SYMBOL_GPL(ktime_get_ts);
  162. /**
  163. * do_gettimeofday - Returns the time of day in a timeval
  164. * @tv: pointer to the timeval to be set
  165. *
  166. * NOTE: Users should be converted to using getnstimeofday()
  167. */
  168. void do_gettimeofday(struct timeval *tv)
  169. {
  170. struct timespec now;
  171. getnstimeofday(&now);
  172. tv->tv_sec = now.tv_sec;
  173. tv->tv_usec = now.tv_nsec/1000;
  174. }
  175. EXPORT_SYMBOL(do_gettimeofday);
/**
 * do_settimeofday - Sets the time of day
 * @tv: pointer to the timespec variable containing the new time
 *
 * Sets the time of day to the new time and update NTP and notify hrtimers
 *
 * Returns 0 on success, -EINVAL if @tv->tv_nsec is out of range.
 */
int do_settimeofday(struct timespec *tv)
{
	struct timespec ts_delta;
	unsigned long flags;

	/* unsigned comparison also rejects negative tv_nsec */
	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
		return -EINVAL;

	write_seqlock_irqsave(&xtime_lock, flags);

	/* fold pending cycles into xtime so the jump applies to "now" */
	clocksource_forward_now();

	/* absorb the wall-clock jump so monotonic time stays continuous */
	ts_delta.tv_sec = tv->tv_sec - xtime.tv_sec;
	ts_delta.tv_nsec = tv->tv_nsec - xtime.tv_nsec;
	wall_to_monotonic = timespec_sub(wall_to_monotonic, ts_delta);

	xtime = *tv;
	update_xtime_cache(0);

	/* accumulated NTP state refers to the old time base; discard it */
	clock->error = 0;
	ntp_clear();

	update_vsyscall(&xtime, clock);

	write_sequnlock_irqrestore(&xtime_lock, flags);

	/* signal hrtimers about time change */
	clock_was_set();

	return 0;
}
EXPORT_SYMBOL(do_settimeofday);
/**
 * change_clocksource - Swaps clocksources if a new one is available
 *
 * Accumulates current time interval and initializes new clocksource
 *
 * Called with xtime_lock held for writing (from update_wall_time()).
 */
static void change_clocksource(void)
{
	struct clocksource *new, *old;

	new = clocksource_get_next();

	if (!new || clock == new)
		return;

	/* fold outstanding cycles into xtime before switching hardware */
	clocksource_forward_now();

	if (new->enable && !new->enable(new))
		return;

	/*
	 * The frequency may have changed while the clocksource
	 * was disabled. If so the code in ->enable() must update
	 * the mult value to reflect the new frequency. Make sure
	 * mult_orig follows this change.
	 */
	new->mult_orig = new->mult;

	/* carry the raw monotonic clock over to the new source */
	new->raw_time = clock->raw_time;

	old = clock;
	clock = new;

	/*
	 * Save mult_orig in mult so that the value can be restored
	 * regardless if ->enable() updates the value of mult or not.
	 */
	old->mult = old->mult_orig;
	if (old->disable)
		old->disable(old);

	/* re-base cycle accounting and NTP state on the new source */
	clock->cycle_last = clock->read(clock);
	clock->error = 0;
	clock->xtime_nsec = 0;
	clocksource_calculate_interval(clock, NTP_INTERVAL_LENGTH);

	tick_clock_notify();

	/*
	 * We're holding xtime lock and waking up klogd would deadlock
	 * us on enqueue. So no printing!
	printk(KERN_INFO "Time: %s clocksource has been installed.\n",
	       clock->name);
	 */
}
#else /* GENERIC_TIME */

/* Without GENERIC_TIME the arch manages the clock; these are no-ops. */
static inline void clocksource_forward_now(void) { }
static inline void change_clocksource(void) { }
  250. /**
  251. * ktime_get - get the monotonic time in ktime_t format
  252. *
  253. * returns the time in ktime_t format
  254. */
  255. ktime_t ktime_get(void)
  256. {
  257. struct timespec now;
  258. ktime_get_ts(&now);
  259. return timespec_to_ktime(now);
  260. }
  261. EXPORT_SYMBOL_GPL(ktime_get);
  262. /**
  263. * ktime_get_ts - get the monotonic clock in timespec format
  264. * @ts: pointer to timespec variable
  265. *
  266. * The function calculates the monotonic clock from the realtime
  267. * clock and the wall_to_monotonic offset and stores the result
  268. * in normalized timespec format in the variable pointed to by @ts.
  269. */
  270. void ktime_get_ts(struct timespec *ts)
  271. {
  272. struct timespec tomono;
  273. unsigned long seq;
  274. do {
  275. seq = read_seqbegin(&xtime_lock);
  276. getnstimeofday(ts);
  277. tomono = wall_to_monotonic;
  278. } while (read_seqretry(&xtime_lock, seq));
  279. set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec,
  280. ts->tv_nsec + tomono.tv_nsec);
  281. }
  282. EXPORT_SYMBOL_GPL(ktime_get_ts);
  283. #endif /* !GENERIC_TIME */
  284. /**
  285. * ktime_get_real - get the real (wall-) time in ktime_t format
  286. *
  287. * returns the time in ktime_t format
  288. */
  289. ktime_t ktime_get_real(void)
  290. {
  291. struct timespec now;
  292. getnstimeofday(&now);
  293. return timespec_to_ktime(now);
  294. }
  295. EXPORT_SYMBOL_GPL(ktime_get_real);
  296. /**
  297. * getrawmonotonic - Returns the raw monotonic time in a timespec
  298. * @ts: pointer to the timespec to be set
  299. *
  300. * Returns the raw monotonic time (completely un-modified by ntp)
  301. */
  302. void getrawmonotonic(struct timespec *ts)
  303. {
  304. unsigned long seq;
  305. s64 nsecs;
  306. cycle_t cycle_now, cycle_delta;
  307. do {
  308. seq = read_seqbegin(&xtime_lock);
  309. /* read clocksource: */
  310. cycle_now = clock->read(clock);
  311. /* calculate the delta since the last update_wall_time: */
  312. cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
  313. /* convert to nanoseconds: */
  314. nsecs = ((s64)cycle_delta * clock->mult_orig) >> clock->shift;
  315. *ts = clock->raw_time;
  316. } while (read_seqretry(&xtime_lock, seq));
  317. timespec_add_ns(ts, nsecs);
  318. }
  319. EXPORT_SYMBOL(getrawmonotonic);
  320. /**
  321. * timekeeping_valid_for_hres - Check if timekeeping is suitable for hres
  322. */
  323. int timekeeping_valid_for_hres(void)
  324. {
  325. unsigned long seq;
  326. int ret;
  327. do {
  328. seq = read_seqbegin(&xtime_lock);
  329. ret = clock->flags & CLOCK_SOURCE_VALID_FOR_HRES;
  330. } while (read_seqretry(&xtime_lock, seq));
  331. return ret;
  332. }
  333. /**
  334. * read_persistent_clock - Return time in seconds from the persistent clock.
  335. *
  336. * Weak dummy function for arches that do not yet support it.
  337. * Returns seconds from epoch using the battery backed persistent clock.
  338. * Returns zero if unsupported.
  339. *
  340. * XXX - Do be sure to remove it once all arches implement it.
  341. */
  342. unsigned long __attribute__((weak)) read_persistent_clock(void)
  343. {
  344. return 0;
  345. }
/*
 * timekeeping_init - Initializes the clocksource and common timekeeping values
 */
void __init timekeeping_init(void)
{
	unsigned long flags;
	unsigned long sec = read_persistent_clock();

	write_seqlock_irqsave(&xtime_lock, flags);

	ntp_init();

	clock = clocksource_default_clock();
	if (clock->enable)
		clock->enable(clock);
	/* set mult_orig on enable */
	clock->mult_orig = clock->mult;

	clocksource_calculate_interval(clock, NTP_INTERVAL_LENGTH);
	clock->cycle_last = clock->read(clock);

	/* wall time starts from the persistent clock (0 if unsupported) */
	xtime.tv_sec = sec;
	xtime.tv_nsec = 0;
	/* monotonic time is pegged at zero at boot: wall + offset == 0 */
	set_normalized_timespec(&wall_to_monotonic,
		-xtime.tv_sec, -xtime.tv_nsec);
	update_xtime_cache(0);
	total_sleep_time = 0;
	write_sequnlock_irqrestore(&xtime_lock, flags);
}
/* time in seconds when suspend began */
static unsigned long timekeeping_suspend_time;

/**
 * timekeeping_resume - Resumes the generic timekeeping subsystem.
 * @dev: unused
 *
 * This is for the generic clocksource timekeeping.
 * xtime/wall_to_monotonic/jiffies/etc are
 * still managed by arch specific suspend/resume code.
 */
static int timekeeping_resume(struct sys_device *dev)
{
	unsigned long flags;
	unsigned long now = read_persistent_clock();

	clocksource_resume();

	write_seqlock_irqsave(&xtime_lock, flags);

	/* zero 'now' means no persistent clock: skip sleep accounting */
	if (now && (now > timekeeping_suspend_time)) {
		unsigned long sleep_length = now - timekeeping_suspend_time;

		/* advance wall time but keep monotonic time continuous */
		xtime.tv_sec += sleep_length;
		wall_to_monotonic.tv_sec -= sleep_length;
		total_sleep_time += sleep_length;
	}
	update_xtime_cache(0);
	/* re-base the last cycle value */
	clock->cycle_last = clock->read(clock);
	clock->error = 0;
	timekeeping_suspended = 0;
	write_sequnlock_irqrestore(&xtime_lock, flags);

	touch_softlockup_watchdog();

	clockevents_notify(CLOCK_EVT_NOTIFY_RESUME, NULL);

	/* Resume hrtimers */
	hres_timers_resume();

	return 0;
}
/**
 * timekeeping_suspend - halt timekeeping across a system suspend
 * @dev: unused
 * @state: unused
 *
 * Records the persistent-clock time so resume can account for the
 * sleep period, folds pending cycles into xtime, and marks the core
 * suspended so readers can WARN on use.
 */
static int timekeeping_suspend(struct sys_device *dev, pm_message_t state)
{
	unsigned long flags;

	timekeeping_suspend_time = read_persistent_clock();

	write_seqlock_irqsave(&xtime_lock, flags);
	clocksource_forward_now();
	timekeeping_suspended = 1;
	write_sequnlock_irqrestore(&xtime_lock, flags);

	clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL);

	return 0;
}
/* sysfs resume/suspend bits for timekeeping */
static struct sysdev_class timekeeping_sysclass = {
	.name		= "timekeeping",
	.resume		= timekeeping_resume,
	.suspend	= timekeeping_suspend,
};

static struct sys_device device_timer = {
	.id	= 0,
	.cls	= &timekeeping_sysclass,
};

/* register the pseudo-device so the PM core calls suspend/resume */
static int __init timekeeping_init_device(void)
{
	int error = sysdev_class_register(&timekeeping_sysclass);

	if (!error)
		error = sysdev_register(&device_timer);

	return error;
}

device_initcall(timekeeping_init_device);
/*
 * If the error is already larger, we look ahead even further
 * to compensate for late or lost adjustments.
 */
static __always_inline int clocksource_bigadjust(s64 error, s64 *interval,
						 s64 *offset)
{
	s64 tick_error, i;
	u32 look_ahead, adj;
	s32 error2, mult;

	/*
	 * Use the current error value to determine how much to look ahead.
	 * The larger the error the slower we adjust for it to avoid problems
	 * with losing too many ticks, otherwise we would overadjust and
	 * produce an even larger error. The smaller the adjustment the
	 * faster we try to adjust for it, as lost ticks can do less harm
	 * here. This is tuned so that an error of about 1 msec is adjusted
	 * within about 1 sec (or 2^20 nsec in 2^SHIFT_HZ ticks).
	 */
	error2 = clock->error >> (NTP_SCALE_SHIFT + 22 - 2 * SHIFT_HZ);
	error2 = abs(error2);
	for (look_ahead = 0; error2 > 0; look_ahead++)
		error2 >>= 2;

	/*
	 * Now calculate the error in (1 << look_ahead) ticks, but first
	 * remove the single look ahead already included in the error.
	 */
	tick_error = tick_length >> (NTP_SCALE_SHIFT - clock->shift + 1);
	tick_error -= clock->xtime_interval >> 1;
	error = ((error - tick_error) >> look_ahead) + tick_error;

	/* Finally calculate the adjustment shift value. */
	i = *interval;
	mult = 1;
	if (error < 0) {
		/* work on magnitudes; remember the sign in 'mult' and
		 * negate interval/offset so the caller applies it signed */
		error = -error;
		*interval = -*interval;
		*offset = -*offset;
		mult = -1;
	}
	for (adj = 0; error > i; adj++)
		error >>= 1;

	*interval <<= adj;
	*offset <<= adj;
	return mult << adj;
}
/*
 * Adjust the multiplier to reduce the error value,
 * this is optimized for the most common adjustments of -1,0,1,
 * for other values we can do a bit more work.
 */
static void clocksource_adjust(s64 offset)
{
	s64 error, interval = clock->cycle_interval;
	int adj;

	/* scale the NTP error down into the clock's shift domain */
	error = clock->error >> (NTP_SCALE_SHIFT - clock->shift - 1);
	if (error > interval) {
		error >>= 2;
		if (likely(error <= interval))
			adj = 1;
		else
			adj = clocksource_bigadjust(error, &interval, &offset);
	} else if (error < -interval) {
		error >>= 2;
		if (likely(error >= -interval)) {
			adj = -1;
			interval = -interval;
			offset = -offset;
		} else
			adj = clocksource_bigadjust(error, &interval, &offset);
	} else
		return;		/* error within deadband: no adjustment */

	clock->mult += adj;
	clock->xtime_interval += interval;
	clock->xtime_nsec -= offset;
	/* fold the effect of the mult change back into the error term */
	clock->error -= (interval - offset) <<
			(NTP_SCALE_SHIFT - clock->shift);
}
/**
 * update_wall_time - Uses the current clocksource to increment the wall time
 *
 * Called from the timer interrupt, must hold a write on xtime_lock.
 */
void update_wall_time(void)
{
	cycle_t offset;

	/* Make sure we're fully resumed: */
	if (unlikely(timekeeping_suspended))
		return;

#ifdef CONFIG_GENERIC_TIME
	offset = (clock->read(clock) - clock->cycle_last) & clock->mask;
#else
	offset = clock->cycle_interval;
#endif
	/* work in the shifted-nanosecond domain for sub-ns precision */
	clock->xtime_nsec = (s64)xtime.tv_nsec << clock->shift;

	/* normally this loop will run just once, however in the
	 * case of lost or late ticks, it will accumulate correctly.
	 */
	while (offset >= clock->cycle_interval) {
		/* accumulate one interval */
		offset -= clock->cycle_interval;
		clock->cycle_last += clock->cycle_interval;

		clock->xtime_nsec += clock->xtime_interval;
		if (clock->xtime_nsec >= (u64)NSEC_PER_SEC << clock->shift) {
			clock->xtime_nsec -= (u64)NSEC_PER_SEC << clock->shift;
			xtime.tv_sec++;
			second_overflow();
		}

		/* advance the raw (NTP-free) monotonic clock too */
		clock->raw_time.tv_nsec += clock->raw_interval;
		if (clock->raw_time.tv_nsec >= NSEC_PER_SEC) {
			clock->raw_time.tv_nsec -= NSEC_PER_SEC;
			clock->raw_time.tv_sec++;
		}

		/* accumulate error between NTP and clock interval */
		clock->error += tick_length;
		clock->error -= clock->xtime_interval << (NTP_SCALE_SHIFT - clock->shift);
	}

	/* correct the clock when NTP error is too big */
	clocksource_adjust(offset);

	/*
	 * Since in the loop above, we accumulate any amount of time
	 * in xtime_nsec over a second into xtime.tv_sec, its possible for
	 * xtime_nsec to be fairly small after the loop. Further, if we're
	 * slightly speeding the clocksource up in clocksource_adjust(),
	 * its possible the required corrective factor to xtime_nsec could
	 * cause it to underflow.
	 *
	 * Now, we cannot simply roll the accumulated second back, since
	 * the NTP subsystem has been notified via second_overflow. So
	 * instead we push xtime_nsec forward by the amount we underflowed,
	 * and add that amount into the error.
	 *
	 * We'll correct this error next time through this function, when
	 * xtime_nsec is not as small.
	 */
	if (unlikely((s64)clock->xtime_nsec < 0)) {
		s64 neg = -(s64)clock->xtime_nsec;
		clock->xtime_nsec = 0;
		clock->error += neg << (NTP_SCALE_SHIFT - clock->shift);
	}

	/* store full nanoseconds into xtime after rounding it up and
	 * add the remainder to the error difference.
	 */
	xtime.tv_nsec = ((s64)clock->xtime_nsec >> clock->shift) + 1;
	clock->xtime_nsec -= (s64)xtime.tv_nsec << clock->shift;
	clock->error += clock->xtime_nsec << (NTP_SCALE_SHIFT - clock->shift);

	/* refresh the cache with xtime plus the unaccumulated remainder */
	update_xtime_cache(cyc2ns(clock, offset));

	/* check to see if there is a new clocksource to use */
	change_clocksource();
	update_vsyscall(&xtime, clock);
}
/**
 * getboottime - Return the real time of system boot.
 * @ts: pointer to the timespec to be set
 *
 * Returns the time of day in a timespec.
 *
 * This is based on the wall_to_monotonic offset and the total suspend
 * time. Calls to settimeofday will affect the value returned (which
 * basically means that however wrong your real time clock is at boot time,
 * you get the right time here).
 */
void getboottime(struct timespec *ts)
{
	/* boot time = -(wall_to_monotonic + total time spent suspended) */
	set_normalized_timespec(ts,
		- (wall_to_monotonic.tv_sec + total_sleep_time),
		- wall_to_monotonic.tv_nsec);
}
/**
 * monotonic_to_bootbased - Convert the monotonic time to boot based.
 * @ts: pointer to the timespec to be converted
 */
void monotonic_to_bootbased(struct timespec *ts)
{
	/* boot-based time keeps counting across suspend */
	ts->tv_sec += total_sleep_time;
}
/*
 * get_seconds - lockless read of the cached wall-clock seconds.
 * Consumers accept tick-granularity staleness, so no seqlock is taken.
 */
unsigned long get_seconds(void)
{
	return xtime_cache.tv_sec;
}
EXPORT_SYMBOL(get_seconds);
  613. struct timespec current_kernel_time(void)
  614. {
  615. struct timespec now;
  616. unsigned long seq;
  617. do {
  618. seq = read_seqbegin(&xtime_lock);
  619. now = xtime_cache;
  620. } while (read_seqretry(&xtime_lock, seq));
  621. return now;
  622. }
  623. EXPORT_SYMBOL(current_kernel_time);