/*
 *  linux/kernel/time.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  This file contains the interface functions for the various
 *  time related system calls: time, stime, gettimeofday, settimeofday,
 *  adjtime
 */
/*
 * Modification history kernel/time.c
 *
 * 1993-09-02	Philip Gladstone
 *	Created file with time related functions from sched.c and adjtimex()
 * 1993-10-08	Torsten Duwe
 *	adjtime interface update and CMOS clock write code
 * 1995-08-13	Torsten Duwe
 *	kernel PLL updated to 1994-12-13 specs (rfc-1589)
 * 1999-01-16	Ulrich Windl
 *	Introduced error checking for many cases in adjtimex().
 *	Updated NTP code according to technical memorandum Jan '96
 *	"A Kernel Model for Precision Timekeeping" by Dave Mills
 *	Allow time_constant larger than MAXTC(6) for NTP v4 (MAXTC == 10)
 *	(Even though the technical memorandum forbids it)
 * 2004-07-14	Christoph Lameter
 *	Added getnstimeofday to allow the posix timer functions to return
 *	with nanosecond accuracy
 */

#include <linux/module.h>
#include <linux/timex.h>
#include <linux/capability.h>
#include <linux/clocksource.h>
#include <linux/errno.h>
#include <linux/syscalls.h>
#include <linux/security.h>
#include <linux/fs.h>
#include <linux/math64.h>
#include <linux/ptrace.h>

#include <asm/uaccess.h>
#include <asm/unistd.h>

#include "timeconst.h"

/*
 * The timezone where the local system is located.  Used as a default by some
 * programs that obtain this value by using gettimeofday.
 */
struct timezone sys_tz;

EXPORT_SYMBOL(sys_tz);

#ifdef __ARCH_WANT_SYS_TIME

/*
 * sys_time() can be implemented in user-level using
 * sys_gettimeofday().  Is this for backwards compatibility?  If so,
 * why not move it into the appropriate arch directory (for those
 * architectures that need it).
 */
SYSCALL_DEFINE1(time, time_t __user *, tloc)
{
	time_t i = get_seconds();

	if (tloc) {
		if (put_user(i, tloc))
			return -EFAULT;
	}
	force_successful_syscall_return();
	return i;
}
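/*
 * Editor's sketch (not part of the kernel source): the user-level
 * equivalent hinted at above would be roughly
 *
 *	time_t my_time(time_t *tloc)	// "my_time" is a made-up name
 *	{
 *		struct timeval tv;
 *
 *		gettimeofday(&tv, NULL);
 *		if (tloc)
 *			*tloc = tv.tv_sec;
 *		return tv.tv_sec;
 *	}
 *
 * i.e. only the seconds part of gettimeofday() is needed.
 */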
/*
 * sys_stime() can be implemented in user-level using
 * sys_settimeofday().  Is this for backwards compatibility?  If so,
 * why not move it into the appropriate arch directory (for those
 * architectures that need it).
 */
SYSCALL_DEFINE1(stime, time_t __user *, tptr)
{
	struct timespec tv;
	int err;

	if (get_user(tv.tv_sec, tptr))
		return -EFAULT;

	tv.tv_nsec = 0;

	err = security_settime(&tv, NULL);
	if (err)
		return err;

	do_settimeofday(&tv);
	return 0;
}

#endif /* __ARCH_WANT_SYS_TIME */

SYSCALL_DEFINE2(gettimeofday, struct timeval __user *, tv,
		struct timezone __user *, tz)
{
	if (likely(tv != NULL)) {
		struct timeval ktv;
		do_gettimeofday(&ktv);
		if (copy_to_user(tv, &ktv, sizeof(ktv)))
			return -EFAULT;
	}
	if (unlikely(tz != NULL)) {
		if (copy_to_user(tz, &sys_tz, sizeof(sys_tz)))
			return -EFAULT;
	}
	return 0;
}

/*
 * Adjust the time obtained from the CMOS to be UTC time instead of
 * local time.
 *
 * This is ugly, but preferable to the alternatives.  Otherwise we
 * would either need to write a program to do it in /etc/rc (and risk
 * confusion if the program gets run more than once; it would also be
 * hard to make the program warp the clock precisely n hours) or
 * compile the timezone information into the kernel.  Bad, bad....
 *
 *						- TYT, 1992-01-01
 *
 * The best thing to do is to keep the CMOS clock in universal time (UTC)
 * as real UNIX machines always do it.  This avoids all headaches about
 * daylight saving times and warping kernel clocks.
 */
static inline void warp_clock(void)
{
	struct timespec delta, adjust;

	delta.tv_sec = sys_tz.tz_minuteswest * 60;
	delta.tv_nsec = 0;
	adjust = timespec_add_safe(current_kernel_time(), delta);
	do_settimeofday(&adjust);
}
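/*
 * Worked example (editor's note, not in the original source): if the CMOS
 * clock was running on local time at UTC-5, then tz_minuteswest == 300 and
 * warp_clock() advances the kernel clock by 300 * 60 = 18000 seconds,
 * i.e. exactly the 5 hours needed to turn local time into UTC.
 */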
/*
 * In case for some reason the CMOS clock has not already been running
 * in UTC, but in some local time: The first time we set the timezone,
 * we will warp the clock so that it is ticking UTC time instead of
 * local time.  Presumably, if someone is setting the timezone then we
 * are running in an environment where the programs understand about
 * timezones.  This should be done at boot time in the /etc/rc script,
 * as soon as possible, so that the clock can be set right.  Otherwise,
 * various programs will get confused when the clock gets warped.
 */
int do_sys_settimeofday(struct timespec *tv, struct timezone *tz)
{
	static int firsttime = 1;
	int error = 0;

	if (tv && !timespec_valid(tv))
		return -EINVAL;

	error = security_settime(tv, tz);
	if (error)
		return error;

	if (tz) {
		/* SMP safe, global irq locking makes it work. */
		sys_tz = *tz;
		update_vsyscall_tz();
		if (firsttime) {
			firsttime = 0;
			if (!tv)
				warp_clock();
		}
	}
	if (tv)
	{
		/* SMP safe, again the code in arch/foo/time.c should
		 * globally block out interrupts when it runs.
		 */
		return do_settimeofday(tv);
	}
	return 0;
}
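/*
 * Editor's sketch of the boot-time behaviour described above (assumed
 * userspace usage, not part of this file): an init script that keeps the
 * CMOS clock in local time calls something like
 *
 *	struct timezone tz = { .tz_minuteswest = 300, .tz_dsttime = 0 };
 *	settimeofday(NULL, &tz);
 *
 * exactly once.  Because tv is NULL and this is the first timezone set,
 * do_sys_settimeofday() calls warp_clock() and shifts the clock from
 * local time to UTC; later calls only update sys_tz.
 */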
SYSCALL_DEFINE2(settimeofday, struct timeval __user *, tv,
		struct timezone __user *, tz)
{
	struct timeval user_tv;
	struct timespec	new_ts;
	struct timezone new_tz;

	if (tv) {
		if (copy_from_user(&user_tv, tv, sizeof(*tv)))
			return -EFAULT;
		new_ts.tv_sec = user_tv.tv_sec;
		new_ts.tv_nsec = user_tv.tv_usec * NSEC_PER_USEC;
	}
	if (tz) {
		if (copy_from_user(&new_tz, tz, sizeof(*tz)))
			return -EFAULT;
	}

	return do_sys_settimeofday(tv ? &new_ts : NULL, tz ? &new_tz : NULL);
}

SYSCALL_DEFINE1(adjtimex, struct timex __user *, txc_p)
{
	struct timex txc;		/* Local copy of parameter */
	int ret;

	/* Copy the user data space into the kernel copy
	 * structure. But bear in mind that the structures
	 * may change
	 */
	if (copy_from_user(&txc, txc_p, sizeof(struct timex)))
		return -EFAULT;
	ret = do_adjtimex(&txc);
	return copy_to_user(txc_p, &txc, sizeof(struct timex)) ? -EFAULT : ret;
}

/**
 * current_fs_time - Return FS time
 * @sb: Superblock.
 *
 * Return the current time truncated to the time granularity supported by
 * the fs.
 */
struct timespec current_fs_time(struct super_block *sb)
{
	struct timespec now = current_kernel_time();
	return timespec_trunc(now, sb->s_time_gran);
}
EXPORT_SYMBOL(current_fs_time);

/*
 * Convert jiffies to milliseconds and back.
 *
 * Avoid unnecessary multiplications/divisions in the
 * two most common HZ cases:
 */
unsigned int inline jiffies_to_msecs(const unsigned long j)
{
#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
	return (MSEC_PER_SEC / HZ) * j;
#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC)
	return (j + (HZ / MSEC_PER_SEC) - 1)/(HZ / MSEC_PER_SEC);
#else
# if BITS_PER_LONG == 32
	return (HZ_TO_MSEC_MUL32 * j) >> HZ_TO_MSEC_SHR32;
# else
	return (j * HZ_TO_MSEC_NUM) / HZ_TO_MSEC_DEN;
# endif
#endif
}
EXPORT_SYMBOL(jiffies_to_msecs);
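/*
 * Worked examples (editor's note): with HZ == 1000 the first branch is a
 * plain identity, jiffies_to_msecs(j) == j.  With HZ == 250 it becomes
 * (1000 / 250) * j == 4 * j, so 5 jiffies report as 20 ms.  Only when HZ
 * divides neither way (e.g. HZ == 1024) does the scaled multiply/shift
 * from timeconst.h get used.
 */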
unsigned int inline jiffies_to_usecs(const unsigned long j)
{
#if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ)
	return (USEC_PER_SEC / HZ) * j;
#elif HZ > USEC_PER_SEC && !(HZ % USEC_PER_SEC)
	return (j + (HZ / USEC_PER_SEC) - 1)/(HZ / USEC_PER_SEC);
#else
# if BITS_PER_LONG == 32
	return (HZ_TO_USEC_MUL32 * j) >> HZ_TO_USEC_SHR32;
# else
	return (j * HZ_TO_USEC_NUM) / HZ_TO_USEC_DEN;
# endif
#endif
}
EXPORT_SYMBOL(jiffies_to_usecs);

/**
 * timespec_trunc - Truncate timespec to a granularity
 * @t: Timespec
 * @gran: Granularity in ns.
 *
 * Truncate a timespec to a granularity. gran must be smaller than a second.
 * Always rounds down.
 *
 * This function should be only used for timestamps returned by
 * current_kernel_time() or CURRENT_TIME, not with do_gettimeofday() because
 * it doesn't handle the better resolution of the latter.
 */
struct timespec timespec_trunc(struct timespec t, unsigned gran)
{
	/*
	 * Division is pretty slow so avoid it for common cases.
	 * Currently current_kernel_time() never returns better than
	 * jiffies resolution. Exploit that.
	 */
	if (gran <= jiffies_to_usecs(1) * 1000) {
		/* nothing */
	} else if (gran == 1000000000) {
		t.tv_nsec = 0;
	} else {
		t.tv_nsec -= t.tv_nsec % gran;
	}
	return t;
}
EXPORT_SYMBOL(timespec_trunc);
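/*
 * Worked examples (editor's note): with HZ == 1000, jiffies_to_usecs(1) *
 * 1000 == 1000000, so any granularity up to one jiffy (e.g. s_time_gran
 * of 1 or 1000) takes the first branch and the value is returned
 * unchanged; that is safe because current_kernel_time() never carries
 * sub-jiffy detail.  A one-second granularity zeroes tv_nsec, so
 * { 5, 123456789 } becomes { 5, 0 }.  Any other granularity coarser than
 * a jiffy rounds tv_nsec down to a multiple of gran.
 */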
#ifndef CONFIG_GENERIC_TIME
/*
 * Simulate gettimeofday using do_gettimeofday which only allows a timeval
 * and therefore only yields usec accuracy
 */
void getnstimeofday(struct timespec *tv)
{
	struct timeval x;

	do_gettimeofday(&x);
	tv->tv_sec = x.tv_sec;
	tv->tv_nsec = x.tv_usec * NSEC_PER_USEC;
}
EXPORT_SYMBOL_GPL(getnstimeofday);
#endif

/* Converts Gregorian date to seconds since 1970-01-01 00:00:00.
 * Assumes input in normal date format, i.e. 1980-12-31 23:59:59
 * => year=1980, mon=12, day=31, hour=23, min=59, sec=59.
 *
 * [For the Julian calendar (which was used in Russia before 1917,
 * Britain & colonies before 1752, anywhere else before 1582,
 * and is still in use by some communities) leave out the
 * -year/100+year/400 terms, and add 10.]
 *
 * This algorithm was first published by Gauss (I think).
 *
 * WARNING: this function will overflow on 2106-02-07 06:28:16 on
 * machines where long is 32-bit! (However, as time_t is signed, we
 * will already get problems at other places on 2038-01-19 03:14:08)
 */
unsigned long
mktime(const unsigned int year0, const unsigned int mon0,
       const unsigned int day, const unsigned int hour,
       const unsigned int min, const unsigned int sec)
{
	unsigned int mon = mon0, year = year0;

	/* 1..12 -> 11,12,1..10 */
	if (0 >= (int) (mon -= 2)) {
		mon += 12;	/* Puts Feb last since it has leap day */
		year -= 1;
	}

	return ((((unsigned long)
		  (year/4 - year/100 + year/400 + 367*mon/12 + day) +
		  year*365 - 719499
	    )*24 + hour /* now have hours */
	  )*60 + min /* now have minutes */
	)*60 + sec; /* finally seconds */
}
EXPORT_SYMBOL(mktime);
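/*
 * Worked example (editor's note): mktime(1970, 1, 1, 0, 0, 0) folds
 * January back into "month 11 of 1969", giving a day count of
 * 1969/4 - 1969/100 + 1969/400 + 367*11/12 + 1 + 1969*365 - 719499 == 0,
 * i.e. the epoch itself.  Likewise mktime(2000, 1, 1, 0, 0, 0) yields
 * 10957 days, or 946684800 seconds.
 */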
/**
 * set_normalized_timespec - set timespec sec and nsec parts and normalize
 *
 * @ts:		pointer to timespec variable to be set
 * @sec:	seconds to set
 * @nsec:	nanoseconds to set
 *
 * Set seconds and nanoseconds field of a timespec variable and
 * normalize to the timespec storage format
 *
 * Note: The tv_nsec part is always in the range of
 *	0 <= tv_nsec < NSEC_PER_SEC
 * For negative values only the tv_sec field is negative !
 */
void set_normalized_timespec(struct timespec *ts, time_t sec, s64 nsec)
{
	while (nsec >= NSEC_PER_SEC) {
		/*
		 * The following asm() prevents the compiler from
		 * optimising this loop into a modulo operation. See
		 * also __iter_div_u64_rem() in include/linux/time.h
		 */
		asm("" : "+rm"(nsec));
		nsec -= NSEC_PER_SEC;
		++sec;
	}
	while (nsec < 0) {
		asm("" : "+rm"(nsec));
		nsec += NSEC_PER_SEC;
		--sec;
	}
	ts->tv_sec = sec;
	ts->tv_nsec = nsec;
}
EXPORT_SYMBOL(set_normalized_timespec);
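/*
 * Worked examples (editor's note): set_normalized_timespec(&ts, 1, 1500000000)
 * carries the excess second and stores { 2, 500000000 }, while
 * set_normalized_timespec(&ts, 5, -100) borrows one and stores
 * { 4, 999999900 } -- the nanosecond field always ends up in
 * [0, NSEC_PER_SEC), as the comment above promises.
 */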
/**
 * ns_to_timespec - Convert nanoseconds to timespec
 * @nsec:	the nanoseconds value to be converted
 *
 * Returns the timespec representation of the nsec parameter.
 */
struct timespec ns_to_timespec(const s64 nsec)
{
	struct timespec ts;
	s32 rem;

	if (!nsec)
		return (struct timespec) {0, 0};

	ts.tv_sec = div_s64_rem(nsec, NSEC_PER_SEC, &rem);
	if (unlikely(rem < 0)) {
		ts.tv_sec--;
		rem += NSEC_PER_SEC;
	}
	ts.tv_nsec = rem;

	return ts;
}
EXPORT_SYMBOL(ns_to_timespec);
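/*
 * Worked example (editor's note): ns_to_timespec(1500000000) returns
 * { 1, 500000000 }.  For negative input the remainder fixup above keeps
 * tv_nsec non-negative: ns_to_timespec(-1) is { -1, 999999999 }, matching
 * the normalization rule used by set_normalized_timespec().
 */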
/**
 * ns_to_timeval - Convert nanoseconds to timeval
 * @nsec:	the nanoseconds value to be converted
 *
 * Returns the timeval representation of the nsec parameter.
 */
struct timeval ns_to_timeval(const s64 nsec)
{
	struct timespec ts = ns_to_timespec(nsec);
	struct timeval tv;

	tv.tv_sec = ts.tv_sec;
	tv.tv_usec = (suseconds_t) ts.tv_nsec / 1000;

	return tv;
}
EXPORT_SYMBOL(ns_to_timeval);

/*
 * When we convert to jiffies then we interpret incoming values
 * the following way:
 *
 * - negative values mean 'infinite timeout' (MAX_JIFFY_OFFSET)
 *
 * - 'too large' values [that would result in larger than
 *   MAX_JIFFY_OFFSET values] mean 'infinite timeout' too.
 *
 * - all other values are converted to jiffies by either multiplying
 *   the input value by a factor or dividing it with a factor
 *
 * We must also be careful about 32-bit overflows.
 */
unsigned long msecs_to_jiffies(const unsigned int m)
{
	/*
	 * Negative value, means infinite timeout:
	 */
	if ((int)m < 0)
		return MAX_JIFFY_OFFSET;

#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
	/*
	 * HZ is equal to or smaller than 1000, and 1000 is a nice
	 * round multiple of HZ, divide with the factor between them,
	 * but round upwards:
	 */
	return (m + (MSEC_PER_SEC / HZ) - 1) / (MSEC_PER_SEC / HZ);
#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC)
	/*
	 * HZ is larger than 1000, and HZ is a nice round multiple of
	 * 1000 - simply multiply with the factor between them.
	 *
	 * But first make sure the multiplication result cannot
	 * overflow:
	 */
	if (m > jiffies_to_msecs(MAX_JIFFY_OFFSET))
		return MAX_JIFFY_OFFSET;

	return m * (HZ / MSEC_PER_SEC);
#else
	/*
	 * Generic case - multiply, round and divide. But first
	 * check that if we are doing a net multiplication, that
	 * we wouldn't overflow:
	 */
	if (HZ > MSEC_PER_SEC && m > jiffies_to_msecs(MAX_JIFFY_OFFSET))
		return MAX_JIFFY_OFFSET;

	return (MSEC_TO_HZ_MUL32 * m + MSEC_TO_HZ_ADJ32)
		>> MSEC_TO_HZ_SHR32;
#endif
}
EXPORT_SYMBOL(msecs_to_jiffies);
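/*
 * Worked example (editor's note): with HZ == 250 a jiffy is 4 ms, so
 * msecs_to_jiffies(1) == (1 + 3) / 4 == 1 and msecs_to_jiffies(5) == 2 --
 * the rounding is always upwards, so a requested timeout never becomes
 * shorter than asked for.  msecs_to_jiffies(-1), the usual "wait forever"
 * value, hits the negative check and returns MAX_JIFFY_OFFSET.
 */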
unsigned long usecs_to_jiffies(const unsigned int u)
{
	if (u > jiffies_to_usecs(MAX_JIFFY_OFFSET))
		return MAX_JIFFY_OFFSET;
#if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ)
	return (u + (USEC_PER_SEC / HZ) - 1) / (USEC_PER_SEC / HZ);
#elif HZ > USEC_PER_SEC && !(HZ % USEC_PER_SEC)
	return u * (HZ / USEC_PER_SEC);
#else
	return (USEC_TO_HZ_MUL32 * u + USEC_TO_HZ_ADJ32)
		>> USEC_TO_HZ_SHR32;
#endif
}
EXPORT_SYMBOL(usecs_to_jiffies);

/*
 * The TICK_NSEC - 1 rounds up the value to the next resolution.  Note
 * that a remainder subtract here would not do the right thing as the
 * resolution values don't fall on second boundaries.  I.e. the line:
 * nsec -= nsec % TICK_NSEC; is NOT a correct resolution rounding.
 *
 * Rather, we just shift the bits off the right.
 *
 * The >> (NSEC_JIFFIE_SC - SEC_JIFFIE_SC) converts the scaled nsec
 * value to a scaled second value.
 */
unsigned long
timespec_to_jiffies(const struct timespec *value)
{
	unsigned long sec = value->tv_sec;
	long nsec = value->tv_nsec + TICK_NSEC - 1;

	if (sec >= MAX_SEC_IN_JIFFIES){
		sec = MAX_SEC_IN_JIFFIES;
		nsec = 0;
	}
	return (((u64)sec * SEC_CONVERSION) +
		(((u64)nsec * NSEC_CONVERSION) >>
		 (NSEC_JIFFIE_SC - SEC_JIFFIE_SC))) >> SEC_JIFFIE_SC;
}
EXPORT_SYMBOL(timespec_to_jiffies);
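/*
 * Editor's note on the rounding above: because TICK_NSEC - 1 is added to
 * tv_nsec before the scaled conversion, any non-zero timespec maps to at
 * least one jiffy.  For example, with HZ == 100 (TICK_NSEC == 10000000) a
 * timespec of { 0, 1 } still yields 1 jiffy, so a sub-tick sleep request
 * is rounded up rather than silently dropped to zero.
 */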
void
jiffies_to_timespec(const unsigned long jiffies, struct timespec *value)
{
	/*
	 * Convert jiffies to nanoseconds and separate with
	 * one divide.
	 */
	u32 rem;
	value->tv_sec = div_u64_rem((u64)jiffies * TICK_NSEC,
				    NSEC_PER_SEC, &rem);
	value->tv_nsec = rem;
}
EXPORT_SYMBOL(jiffies_to_timespec);

/* Same for "timeval"
 *
 * Well, almost.  The problem here is that the real system resolution is
 * in nanoseconds and the value being converted is in microseconds.
 * Also for some machines (those that use HZ = 1024, in particular),
 * there is a LARGE error in the tick size in microseconds.
 * The solution we use is to do the rounding AFTER we convert the
 * microsecond part.  Thus the USEC_ROUND, the bits to be shifted off.
 * Instruction wise, this should cost only an additional add with carry
 * instruction above the way it was done above.
 */
unsigned long
timeval_to_jiffies(const struct timeval *value)
{
	unsigned long sec = value->tv_sec;
	long usec = value->tv_usec;

	if (sec >= MAX_SEC_IN_JIFFIES){
		sec = MAX_SEC_IN_JIFFIES;
		usec = 0;
	}
	return (((u64)sec * SEC_CONVERSION) +
		(((u64)usec * USEC_CONVERSION + USEC_ROUND) >>
		 (USEC_JIFFIE_SC - SEC_JIFFIE_SC))) >> SEC_JIFFIE_SC;
}
EXPORT_SYMBOL(timeval_to_jiffies);

void jiffies_to_timeval(const unsigned long jiffies, struct timeval *value)
{
	/*
	 * Convert jiffies to nanoseconds and separate with
	 * one divide.
	 */
	u32 rem;

	value->tv_sec = div_u64_rem((u64)jiffies * TICK_NSEC,
				    NSEC_PER_SEC, &rem);
	value->tv_usec = rem / NSEC_PER_USEC;
}
EXPORT_SYMBOL(jiffies_to_timeval);

/*
 * Convert jiffies/jiffies_64 to clock_t and back.
 */
clock_t jiffies_to_clock_t(long x)
{
#if (TICK_NSEC % (NSEC_PER_SEC / USER_HZ)) == 0
# if HZ < USER_HZ
	return x * (USER_HZ / HZ);
# else
	return x / (HZ / USER_HZ);
# endif
#else
	return div_u64((u64)x * TICK_NSEC, NSEC_PER_SEC / USER_HZ);
#endif
}
EXPORT_SYMBOL(jiffies_to_clock_t);
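/*
 * Worked example (editor's note): with HZ == 1000 and USER_HZ == 100,
 * TICK_NSEC (1000000) is not a multiple of NSEC_PER_SEC / USER_HZ
 * (10000000), so the div_u64() fallback is used and computes
 * x * 1000000 / 10000000; 250 jiffies are thus reported to userspace as
 * 25 clock ticks.  With HZ == USER_HZ == 100 the first branch makes this
 * an identity.
 */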
unsigned long clock_t_to_jiffies(unsigned long x)
{
#if (HZ % USER_HZ)==0
	if (x >= ~0UL / (HZ / USER_HZ))
		return ~0UL;
	return x * (HZ / USER_HZ);
#else
	/* Don't worry about loss of precision here .. */
	if (x >= ~0UL / HZ * USER_HZ)
		return ~0UL;

	/* .. but do try to contain it here */
	return div_u64((u64)x * HZ, USER_HZ);
#endif
}
EXPORT_SYMBOL(clock_t_to_jiffies);

u64 jiffies_64_to_clock_t(u64 x)
{
#if (TICK_NSEC % (NSEC_PER_SEC / USER_HZ)) == 0
# if HZ < USER_HZ
	x = div_u64(x * USER_HZ, HZ);
# elif HZ > USER_HZ
	x = div_u64(x, HZ / USER_HZ);
# else
	/* Nothing to do */
# endif
#else
	/*
	 * There are better ways that don't overflow early,
	 * but even this doesn't overflow in hundreds of years
	 * in 64 bits, so..
	 */
	x = div_u64(x * TICK_NSEC, (NSEC_PER_SEC / USER_HZ));
#endif
	return x;
}
EXPORT_SYMBOL(jiffies_64_to_clock_t);

u64 nsec_to_clock_t(u64 x)
{
#if (NSEC_PER_SEC % USER_HZ) == 0
	return div_u64(x, NSEC_PER_SEC / USER_HZ);
#elif (USER_HZ % 512) == 0
	return div_u64(x * USER_HZ / 512, NSEC_PER_SEC / 512);
#else
	/*
	 * max relative error 5.7e-8 (1.8s per year) for USER_HZ <= 1024,
	 * overflow after 64.99 years.
	 * exact for HZ=60, 72, 90, 120, 144, 180, 300, 600, 900, ...
	 */
	return div_u64(x * 9, (9ull * NSEC_PER_SEC + (USER_HZ / 2)) / USER_HZ);
#endif
}

/**
 * nsecs_to_jiffies - Convert nsecs in u64 to jiffies
 *
 * @n:	nsecs in u64
 *
 * Unlike {m,u}secs_to_jiffies, type of input is not unsigned int but u64.
 * And this doesn't return MAX_JIFFY_OFFSET since this function is designed
 * for scheduler, not for use in device drivers to calculate timeout value.
 *
 * note:
 *   NSEC_PER_SEC = 10^9 = (5^9 * 2^9) = (1953125 * 512)
 *   ULLONG_MAX ns = 18446744073.709551615 secs = about 584 years
 */
unsigned long nsecs_to_jiffies(u64 n)
{
#if (NSEC_PER_SEC % HZ) == 0
	/* Common case, HZ = 100, 128, 200, 250, 256, 500, 512, 1000 etc. */
	return div_u64(n, NSEC_PER_SEC / HZ);
#elif (HZ % 512) == 0
	/* overflow after 292 years if HZ = 1024 */
	return div_u64(n * HZ / 512, NSEC_PER_SEC / 512);
#else
	/*
	 * Generic case - optimized for cases where HZ is a multiple of 3.
	 * overflow after 64.99 years, exact for HZ = 60, 72, 90, 120 etc.
	 */
	return div_u64(n * 9, (9ull * NSEC_PER_SEC + HZ / 2) / HZ);
#endif
}
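/*
 * Worked example (editor's note): with HZ == 1000, NSEC_PER_SEC % HZ == 0,
 * so nsecs_to_jiffies(n) is simply n / 1000000; e.g. 2500000 ns (2.5 ms)
 * truncates to 2 jiffies.  Unlike msecs_to_jiffies() there is no rounding
 * up and no MAX_JIFFY_OFFSET clamp, as the comment above explains.
 */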
#if (BITS_PER_LONG < 64)
u64 get_jiffies_64(void)
{
	unsigned long seq;
	u64 ret;

	do {
		seq = read_seqbegin(&xtime_lock);
		ret = jiffies_64;
	} while (read_seqretry(&xtime_lock, seq));
	return ret;
}
EXPORT_SYMBOL(get_jiffies_64);
#endif

EXPORT_SYMBOL(jiffies);

/*
 * Add two timespec values and do a safety check for overflow.
 * It's assumed that both values are valid (>= 0)
 */
struct timespec timespec_add_safe(const struct timespec lhs,
				  const struct timespec rhs)
{
	struct timespec res;

	set_normalized_timespec(&res, lhs.tv_sec + rhs.tv_sec,
				lhs.tv_nsec + rhs.tv_nsec);

	if (res.tv_sec < lhs.tv_sec || res.tv_sec < rhs.tv_sec)
		res.tv_sec = TIME_T_MAX;

	return res;
}
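/*
 * Worked example (editor's note): adding { 3, 700000000 } and
 * { 2, 600000000 } normalizes to { 6, 300000000 }.  If tv_sec wraps
 * around instead (e.g. both operands near TIME_T_MAX), the result comes
 * out smaller than an operand and the check above clamps it to
 * TIME_T_MAX, which is why warp_clock() can use this helper safely.
 */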