/*
 * linux/kernel/time/timekeeping.c
 *
 * Kernel timekeeping code and accessor functions
 *
 * This code was moved from linux/kernel/timer.c.
 * Please see that file for copyright and history logs.
 *
 */

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/sysdev.h>
#include <linux/clocksource.h>
#include <linux/jiffies.h>
#include <linux/time.h>
#include <linux/tick.h>
/*
 * This seqlock protects us from races in SMP while
 * playing with xtime and avenrun.
 */
__cacheline_aligned_in_smp DEFINE_SEQLOCK(xtime_lock);
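/*
 * Illustrative sketch (not part of the original file): readers of
 * xtime use the lockless seqlock read protocol and retry if a writer
 * raced with them; writers take the lock with interrupts disabled:
 *
 *	unsigned long seq, flags;
 *
 *	do {
 *		seq = read_seqbegin(&xtime_lock);
 *		... read xtime and friends ...
 *	} while (read_seqretry(&xtime_lock, seq));
 *
 *	write_seqlock_irqsave(&xtime_lock, flags);
 *	... update xtime and friends ...
 *	write_sequnlock_irqrestore(&xtime_lock, flags);
 */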
/*
 * The current time
 * wall_to_monotonic is what we need to add to xtime (or xtime corrected
 * for sub-jiffie times) to get to monotonic time.  Monotonic is pegged
 * at zero at system boot time, so wall_to_monotonic will be negative;
 * however, we will ALWAYS keep the tv_nsec part positive so we can use
 * the usual normalization.
 *
 * wall_to_monotonic is moved after resume from suspend so that the
 * monotonic time does not jump.  We need to add total_sleep_time to
 * wall_to_monotonic to get the real boot-based time offset.
 *
 * - wall_to_monotonic is no longer the boot time; getboottime must be
 *   used instead.
 */
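/*
 * Worked example (illustrative numbers): if the box boots at a wall
 * time of 1000 s, then right after boot xtime.tv_sec == 1000 and
 * wall_to_monotonic.tv_sec == -1000, so
 *
 *	monotonic = xtime + wall_to_monotonic == 0
 *
 * as required.  After 60 s of running plus 10 s spent suspended,
 * monotonic == 60, total_sleep_time == 10, and the boot-based clock is
 * monotonic + total_sleep_time == 70 s since boot.
 */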
struct timespec xtime __attribute__ ((aligned (16)));
struct timespec wall_to_monotonic __attribute__ ((aligned (16)));
static unsigned long total_sleep_time;		/* seconds */

static struct timespec xtime_cache __attribute__ ((aligned (16)));

static inline void update_xtime_cache(u64 nsec)
{
	xtime_cache = xtime;
	timespec_add_ns(&xtime_cache, nsec);
}
static struct clocksource *clock; /* pointer to current clocksource */

#ifdef CONFIG_GENERIC_TIME
/**
 * __get_nsec_offset - Returns nanoseconds since last call to update_wall_time
 *
 * private function, must hold xtime_lock lock when being
 * called. Returns the number of nanoseconds since the
 * last call to update_wall_time() (adjusted by NTP scaling)
 */
static inline s64 __get_nsec_offset(void)
{
	cycle_t cycle_now, cycle_delta;
	s64 ns_offset;

	/* read clocksource: */
	cycle_now = clocksource_read(clock);

	/* calculate the delta since the last update_wall_time: */
	cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;

	/* convert to nanoseconds: */
	ns_offset = cyc2ns(clock, cycle_delta);

	return ns_offset;
}
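/*
 * Illustrative numbers (assumed, not from this file): cyc2ns() scales
 * the cycle delta by the clocksource's mult/shift pair, roughly
 *
 *	ns = (cycle_delta * clock->mult) >> clock->shift;
 *
 * so for a 1 MHz counter calibrated with mult == 1000 << shift, a
 * delta of 500 cycles converts to 500000 ns, i.e. 0.5 ms.
 */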
/**
 * getnstimeofday - Returns the time of day in a timespec
 * @ts:		pointer to the timespec to be set
 *
 * Returns the time of day in a timespec.
 */
void getnstimeofday(struct timespec *ts)
{
	unsigned long seq;
	s64 nsecs;

	do {
		seq = read_seqbegin(&xtime_lock);

		*ts = xtime;
		nsecs = __get_nsec_offset();

	} while (read_seqretry(&xtime_lock, seq));

	timespec_add_ns(ts, nsecs);
}
EXPORT_SYMBOL(getnstimeofday);
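/*
 * Usage sketch (hypothetical caller, not part of this file):
 *
 *	struct timespec ts;
 *
 *	getnstimeofday(&ts);
 *	printk(KERN_INFO "event at %ld.%09ld\n", ts.tv_sec, ts.tv_nsec);
 *
 * No locking is needed in the caller; the seqlock retry loop above
 * guarantees a consistent snapshot.
 */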
/**
 * do_gettimeofday - Returns the time of day in a timeval
 * @tv:		pointer to the timeval to be set
 *
 * NOTE: Users should be converted to using getnstimeofday()
 */
void do_gettimeofday(struct timeval *tv)
{
	struct timespec now;

	getnstimeofday(&now);
	tv->tv_sec = now.tv_sec;
	tv->tv_usec = now.tv_nsec/1000;
}
EXPORT_SYMBOL(do_gettimeofday);
/**
 * do_settimeofday - Sets the time of day
 * @tv:		pointer to the timespec variable containing the new time
 *
 * Sets the time of day to the new time, updates NTP and notifies hrtimers
 */
int do_settimeofday(struct timespec *tv)
{
	unsigned long flags;
	time_t wtm_sec, sec = tv->tv_sec;
	long wtm_nsec, nsec = tv->tv_nsec;

	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
		return -EINVAL;

	write_seqlock_irqsave(&xtime_lock, flags);

	nsec -= __get_nsec_offset();

	wtm_sec  = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
	wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);

	set_normalized_timespec(&xtime, sec, nsec);
	set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);

	clock->error = 0;
	ntp_clear();

	update_vsyscall(&xtime, clock);

	write_sequnlock_irqrestore(&xtime_lock, flags);

	/* signal hrtimers about time change */
	clock_was_set();

	return 0;
}
EXPORT_SYMBOL(do_settimeofday);
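/*
 * Why wall_to_monotonic moves the opposite way (worked example,
 * illustrative): stepping the clock forward by 5 s changes xtime by +5
 * and wall_to_monotonic by -5, so
 *
 *	monotonic = xtime + wall_to_monotonic
 *
 * is unchanged, exactly as CLOCK_MONOTONIC requires.
 */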
/**
 * change_clocksource - Swaps clocksources if a new one is available
 *
 * Accumulates the current time interval and initializes the new clocksource
 */
static void change_clocksource(void)
{
	struct clocksource *new;
	cycle_t now;
	u64 nsec;

	new = clocksource_get_next();

	if (clock == new)
		return;

	now = clocksource_read(new);
	nsec = __get_nsec_offset();
	timespec_add_ns(&xtime, nsec);

	clock = new;
	clock->cycle_last = now;

	clock->error = 0;
	clock->xtime_nsec = 0;
	clocksource_calculate_interval(clock,
		(unsigned long)(current_tick_length()>>TICK_LENGTH_SHIFT));

	tick_clock_notify();

	printk(KERN_INFO "Time: %s clocksource has been installed.\n",
	       clock->name);
}
#else
static inline void change_clocksource(void) { }
static inline s64 __get_nsec_offset(void) { return 0; }
#endif
/**
 * timekeeping_is_continuous - check to see if timekeeping is free running
 */
int timekeeping_is_continuous(void)
{
	unsigned long seq;
	int ret;

	do {
		seq = read_seqbegin(&xtime_lock);

		ret = clock->flags & CLOCK_SOURCE_VALID_FOR_HRES;

	} while (read_seqretry(&xtime_lock, seq));

	return ret;
}
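/*
 * Usage sketch (hypothetical caller): code deciding whether a
 * free-running clocksource is available for high resolution timers
 * can simply test
 *
 *	if (timekeeping_is_continuous())
 *		... switch to high resolution mode ...
 *
 * which amounts to a seqlock-protected read of the
 * CLOCK_SOURCE_VALID_FOR_HRES flag.
 */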
/**
 * read_persistent_clock - Return time in seconds from the persistent clock.
 *
 * Weak dummy function for arches that do not yet support it.
 * Returns seconds from epoch using the battery backed persistent clock.
 * Returns zero if unsupported.
 *
 * XXX - Do be sure to remove it once all arches implement it.
 */
unsigned long __attribute__((weak)) read_persistent_clock(void)
{
	return 0;
}
/*
 * timekeeping_init - Initializes the clocksource and common timekeeping values
 */
void __init timekeeping_init(void)
{
	unsigned long flags;
	unsigned long sec = read_persistent_clock();

	write_seqlock_irqsave(&xtime_lock, flags);

	ntp_clear();

	clock = clocksource_get_next();
	clocksource_calculate_interval(clock,
		(unsigned long)(current_tick_length()>>TICK_LENGTH_SHIFT));
	clock->cycle_last = clocksource_read(clock);

	xtime.tv_sec = sec;
	xtime.tv_nsec = 0;
	set_normalized_timespec(&wall_to_monotonic,
		-xtime.tv_sec, -xtime.tv_nsec);
	total_sleep_time = 0;

	write_sequnlock_irqrestore(&xtime_lock, flags);
}
/* flag for if timekeeping is suspended */
static int timekeeping_suspended;
/* time in seconds when suspend began */
static unsigned long timekeeping_suspend_time;
/* xtime offset when we went into suspend */
static s64 timekeeping_suspend_nsecs;
/**
 * timekeeping_resume - Resumes the generic timekeeping subsystem.
 * @dev:	unused
 *
 * This is for the generic clocksource timekeeping.
 * xtime/wall_to_monotonic/jiffies/etc are
 * still managed by arch specific suspend/resume code.
 */
static int timekeeping_resume(struct sys_device *dev)
{
	unsigned long flags;
	unsigned long now = read_persistent_clock();

	clocksource_resume();

	write_seqlock_irqsave(&xtime_lock, flags);

	if (now && (now > timekeeping_suspend_time)) {
		unsigned long sleep_length = now - timekeeping_suspend_time;

		xtime.tv_sec += sleep_length;
		wall_to_monotonic.tv_sec -= sleep_length;
		total_sleep_time += sleep_length;
	}
	/* Make sure that we have the correct xtime reference */
	timespec_add_ns(&xtime, timekeeping_suspend_nsecs);
	/* re-base the last cycle value */
	clock->cycle_last = clocksource_read(clock);
	clock->error = 0;
	timekeeping_suspended = 0;
	write_sequnlock_irqrestore(&xtime_lock, flags);

	touch_softlockup_watchdog();

	clockevents_notify(CLOCK_EVT_NOTIFY_RESUME, NULL);

	/* Resume hrtimers */
	hres_timers_resume();

	return 0;
}
/**
 * timekeeping_suspend - Suspends the generic timekeeping subsystem.
 * @dev:	unused
 * @state:	unused
 *
 * Snapshots the persistent clock and the current xtime offset so that
 * timekeeping_resume() can account for the time spent asleep.
 */
static int timekeeping_suspend(struct sys_device *dev, pm_message_t state)
{
	unsigned long flags;

	timekeeping_suspend_time = read_persistent_clock();

	write_seqlock_irqsave(&xtime_lock, flags);
	/* Get the current xtime offset */
	timekeeping_suspend_nsecs = __get_nsec_offset();
	timekeeping_suspended = 1;
	write_sequnlock_irqrestore(&xtime_lock, flags);

	clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL);

	return 0;
}
/* sysfs resume/suspend bits for timekeeping */
static struct sysdev_class timekeeping_sysclass = {
	.name		= "timekeeping",
	.resume		= timekeeping_resume,
	.suspend	= timekeeping_suspend,
};

static struct sys_device device_timer = {
	.id		= 0,
	.cls		= &timekeeping_sysclass,
};

static int __init timekeeping_init_device(void)
{
	int error = sysdev_class_register(&timekeeping_sysclass);

	if (!error)
		error = sysdev_register(&device_timer);

	return error;
}

device_initcall(timekeeping_init_device);
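/*
 * Assuming the usual sysdev naming conventions, this registers
 * /sys/devices/system/timekeeping/timekeeping0 so that the driver
 * model's suspend/resume core invokes the callbacks above across a
 * sleep transition.
 */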
/*
 * If the error is already larger, we look ahead even further
 * to compensate for late or lost adjustments.
 */
static __always_inline int clocksource_bigadjust(s64 error, s64 *interval,
						 s64 *offset)
{
	s64 tick_error, i;
	u32 look_ahead, adj;
	s32 error2, mult;

	/*
	 * Use the current error value to determine how much to look ahead.
	 * The larger the error the slower we adjust for it to avoid problems
	 * with losing too many ticks, otherwise we would overadjust and
	 * produce an even larger error.  The smaller the adjustment the
	 * faster we try to adjust for it, as lost ticks can do less harm
	 * here.  This is tuned so that an error of about 1 msec is adjusted
	 * within about 1 sec (or 2^20 nsec in 2^SHIFT_HZ ticks).
	 */
	error2 = clock->error >> (TICK_LENGTH_SHIFT + 22 - 2 * SHIFT_HZ);
	error2 = abs(error2);
	for (look_ahead = 0; error2 > 0; look_ahead++)
		error2 >>= 2;

	/*
	 * Now calculate the error in (1 << look_ahead) ticks, but first
	 * remove the single look ahead already included in the error.
	 */
	tick_error = current_tick_length() >>
		(TICK_LENGTH_SHIFT - clock->shift + 1);
	tick_error -= clock->xtime_interval >> 1;
	error = ((error - tick_error) >> look_ahead) + tick_error;

	/* Finally calculate the adjustment shift value. */
	i = *interval;
	mult = 1;
	if (error < 0) {
		error = -error;
		*interval = -*interval;
		*offset = -*offset;
		mult = -1;
	}
	for (adj = 0; error > i; adj++)
		error >>= 1;

	*interval <<= adj;
	*offset <<= adj;
	return mult << adj;
}
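/*
 * Illustrative reading of the return value (assumed interpretation):
 * mult is +/-1 and adj is a shift count, so the function returns
 * +/- 2^adj and scales *interval and *offset by the same 2^adj
 * factor.  This keeps the caller's bookkeeping
 *
 *	clock->mult += adj;
 *	clock->xtime_interval += interval;
 *	clock->xtime_nsec -= offset;
 *
 * consistent for large corrections as well as for the common +/-1
 * case.
 */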
/*
 * Adjust the multiplier to reduce the error value.  This is optimized
 * for the most common adjustments of -1, 0 and 1; for other values we
 * can do a bit more work.
 */
static void clocksource_adjust(s64 offset)
{
	s64 error, interval = clock->cycle_interval;
	int adj;

	error = clock->error >> (TICK_LENGTH_SHIFT - clock->shift - 1);
	if (error > interval) {
		error >>= 2;
		if (likely(error <= interval))
			adj = 1;
		else
			adj = clocksource_bigadjust(error, &interval, &offset);
	} else if (error < -interval) {
		error >>= 2;
		if (likely(error >= -interval)) {
			adj = -1;
			interval = -interval;
			offset = -offset;
		} else
			adj = clocksource_bigadjust(error, &interval, &offset);
	} else
		return;

	clock->mult += adj;
	clock->xtime_interval += interval;
	clock->xtime_nsec -= offset;
	clock->error -= (interval - offset) <<
			(TICK_LENGTH_SHIFT - clock->shift);
}
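/*
 * Rough intuition (assumed interpretation, not from the original
 * comments): changing clock->mult by 1 changes the accumulated time
 * per tick by cycle_interval units in the shifted-nanosecond domain,
 * so the error is compared against cycle_interval to decide whether a
 * +/-1 nudge suffices or clocksource_bigadjust() is needed.
 */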
/**
 * update_wall_time - Uses the current clocksource to increment the wall time
 *
 * Called from the timer interrupt, must hold a write on xtime_lock.
 */
void update_wall_time(void)
{
	cycle_t offset;

	/* Make sure we're fully resumed: */
	if (unlikely(timekeeping_suspended))
		return;

#ifdef CONFIG_GENERIC_TIME
	offset = (clocksource_read(clock) - clock->cycle_last) & clock->mask;
#else
	offset = clock->cycle_interval;
#endif
	clock->xtime_nsec += (s64)xtime.tv_nsec << clock->shift;

	/* normally this loop will run just once, however in the
	 * case of lost or late ticks, it will accumulate correctly.
	 */
	while (offset >= clock->cycle_interval) {
		/* accumulate one interval */
		clock->xtime_nsec += clock->xtime_interval;
		clock->cycle_last += clock->cycle_interval;
		offset -= clock->cycle_interval;

		if (clock->xtime_nsec >= (u64)NSEC_PER_SEC << clock->shift) {
			clock->xtime_nsec -= (u64)NSEC_PER_SEC << clock->shift;
			xtime.tv_sec++;
			second_overflow();
		}

		/* accumulate error between NTP and clock interval */
		clock->error += current_tick_length();
		clock->error -= clock->xtime_interval <<
				(TICK_LENGTH_SHIFT - clock->shift);
	}

	/* correct the clock when NTP error is too big */
	clocksource_adjust(offset);

	/* store full nanoseconds into xtime */
	xtime.tv_nsec = (s64)clock->xtime_nsec >> clock->shift;
	clock->xtime_nsec -= (s64)xtime.tv_nsec << clock->shift;

	update_xtime_cache(cyc2ns(clock, offset));

	/* check to see if there is a new clocksource to use */
	change_clocksource();
	update_vsyscall(&xtime, clock);
}
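/*
 * Note on units (illustrative): clock->xtime_nsec holds nanoseconds
 * left-shifted by clock->shift so that sub-nanosecond remainders
 * survive from tick to tick.  E.g. with shift == 10, one full second
 * is NSEC_PER_SEC << 10, which is exactly the overflow threshold used
 * in the accumulation loop above.
 */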
/**
 * getboottime - Return the real time of system boot.
 * @ts:		pointer to the timespec to be set
 *
 * Returns the wall time of system boot in a timespec.
 *
 * This is based on the wall_to_monotonic offset and the total suspend
 * time. Calls to settimeofday will affect the value returned (which
 * basically means that however wrong your real time clock is at boot time,
 * you get the right time here).
 */
void getboottime(struct timespec *ts)
{
	set_normalized_timespec(ts,
		- (wall_to_monotonic.tv_sec + total_sleep_time),
		- wall_to_monotonic.tv_nsec);
}
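/*
 * Worked example (illustrative numbers): booting at wall time 1000 s
 * gives wall_to_monotonic.tv_sec == -1000.  A later 10 s suspend moves
 * wall_to_monotonic.tv_sec to -1010 and total_sleep_time to 10, so
 * getboottime() still reports -(-1010 + 10) == 1000 s, the original
 * boot time.
 */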
/**
 * monotonic_to_bootbased - Convert the monotonic time to boot based.
 * @ts:		pointer to the timespec to be converted
 */
void monotonic_to_bootbased(struct timespec *ts)
{
	ts->tv_sec += total_sleep_time;
}

/* Returns the (cached) seconds part of the current wall time. */
unsigned long get_seconds(void)
{
	return xtime_cache.tv_sec;
}
EXPORT_SYMBOL(get_seconds);
struct timespec current_kernel_time(void)
{
	struct timespec now;
	unsigned long seq;

	do {
		seq = read_seqbegin(&xtime_lock);

		now = xtime_cache;
	} while (read_seqretry(&xtime_lock, seq));

	return now;
}
EXPORT_SYMBOL(current_kernel_time);