/* linux/include/linux/clocksource.h
 *
 * This file contains the structure definitions for clocksources.
 *
 * If you are not a clocksource, or timekeeping code, you should
 * not be including this file!
 */
#ifndef _LINUX_CLOCKSOURCE_H
#define _LINUX_CLOCKSOURCE_H

#include <linux/types.h>
#include <linux/timex.h>
#include <linux/time.h>
#include <linux/list.h>
#include <linux/cache.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <asm/div64.h>
#include <asm/io.h>

/* clocksource cycle base type */
typedef u64 cycle_t;

struct clocksource;
/**
 * struct cyclecounter - hardware abstraction for a free running counter
 *      Provides completely state-free accessors to the underlying hardware.
 *      Depending on which hardware it reads, the cycle counter may wrap
 *      around quickly. Locking rules (if necessary) have to be defined
 *      by the implementor and user of specific instances of this API.
 *
 * @read:       returns the current cycle value
 * @mask:       bitmask for two's complement
 *              subtraction of non 64 bit counters,
 *              see CLOCKSOURCE_MASK() helper macro
 * @mult:       cycle to nanosecond multiplier
 * @shift:      cycle to nanosecond divisor (power of two)
 */
struct cyclecounter {
        cycle_t (*read)(const struct cyclecounter *cc);
        cycle_t mask;
        u32 mult;
        u32 shift;
};
/**
 * struct timecounter - layer above a %struct cyclecounter which counts nanoseconds
 *      Contains the state needed by timecounter_read() to detect
 *      cycle counter wrap around. Initialize with
 *      timecounter_init(). Also used to convert cycle counts into the
 *      corresponding nanosecond counts with timecounter_cyc2time(). Users
 *      of this code are responsible for initializing the underlying
 *      cycle counter hardware, locking issues and reading the time
 *      more often than the cycle counter wraps around. The nanosecond
 *      counter will only wrap around after ~585 years.
 *
 * @cc:         the cycle counter used by this instance
 * @cycle_last: most recent cycle counter value seen by
 *              timecounter_read()
 * @nsec:       continuously increasing count
 */
struct timecounter {
        const struct cyclecounter *cc;
        cycle_t cycle_last;
        u64 nsec;
};
/**
 * cyclecounter_cyc2ns - converts cycle counter cycles to nanoseconds
 * @cc:         Pointer to cycle counter.
 * @cycles:     Cycles
 *
 * XXX - This could use some mult_lxl_ll() asm optimization. Same code
 * as in cyc2ns, but with unsigned result.
 */
static inline u64 cyclecounter_cyc2ns(const struct cyclecounter *cc,
                                      cycle_t cycles)
{
        u64 ret = (u64)cycles;
        ret = (ret * cc->mult) >> cc->shift;
        return ret;
}
/**
 * timecounter_init - initialize a time counter
 * @tc:           Pointer to time counter which is to be initialized/reset
 * @cc:           A cycle counter, ready to be used.
 * @start_tstamp: Arbitrary initial time stamp.
 *
 * After this call the current cycle register (roughly) corresponds to
 * the initial time stamp. Every call to timecounter_read() increments
 * the time stamp counter by the number of elapsed nanoseconds.
 */
extern void timecounter_init(struct timecounter *tc,
                             const struct cyclecounter *cc,
                             u64 start_tstamp);

/**
 * timecounter_read - return nanoseconds elapsed since timecounter_init()
 *                    plus the initial time stamp
 * @tc:         Pointer to time counter.
 *
 * In other words, keeps track of time since the same epoch as
 * the function which generated the initial time stamp.
 */
extern u64 timecounter_read(struct timecounter *tc);

/**
 * timecounter_cyc2time - convert a cycle counter to same
 *                        time base as values returned by
 *                        timecounter_read()
 * @tc:           Pointer to time counter.
 * @cycle_tstamp: a value returned by tc->cc->read()
 *
 * Cycle counts are converted correctly as long as they
 * fall into the interval [-1/2 max cycle count, +1/2 max cycle count],
 * with "max cycle count" == tc->cc->mask+1.
 *
 * This allows conversion of cycle counter values which were generated
 * in the past.
 */
extern u64 timecounter_cyc2time(struct timecounter *tc,
                                cycle_t cycle_tstamp);
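
/*
 * Minimal usage sketch (illustration only, not part of this header): how a
 * driver with a free-running 32 bit, 1 MHz hardware counter could feed a
 * timecounter. The foo_* names and the foo_read_hw_counter() accessor are
 * hypothetical; only the structures and helpers declared in this file are real.
 *
 *      static cycle_t foo_cc_read(const struct cyclecounter *cc)
 *      {
 *              return (cycle_t)foo_read_hw_counter();
 *      }
 *
 *      static struct cyclecounter foo_cc = {
 *              .read  = foo_cc_read,
 *              .mask  = CLOCKSOURCE_MASK(32),
 *              .shift = 22,
 *      };
 *      static struct timecounter foo_tc;
 *
 * At probe time the driver would then do:
 *
 *      foo_cc.mult = clocksource_khz2mult(1000, foo_cc.shift);
 *      timecounter_init(&foo_tc, &foo_cc, 0);
 *
 * Afterwards timecounter_read(&foo_tc) returns nanoseconds since the
 * (arbitrary) start stamp, provided it is called more often than the hardware
 * counter wraps, and timecounter_cyc2time(&foo_tc, raw_cycles) converts a
 * previously captured raw counter value to the same time base.
 */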
/**
 * struct clocksource - hardware abstraction for a free running counter
 *      Provides mostly state-free accessors to the underlying hardware.
 *      This is the structure used for system time.
 *
 * @name:               ptr to clocksource name
 * @list:               list head for registration
 * @rating:             rating value for selection (higher is better)
 *                      To avoid rating inflation the following
 *                      list should give you a guide as to how
 *                      to assign your clocksource a rating
 *                      1-99: Unfit for real use
 *                              Only available for bootup and testing purposes.
 *                      100-199: Base level usability.
 *                              Functional for real use, but not desired.
 *                      200-299: Good.
 *                              A correct and usable clocksource.
 *                      300-399: Desired.
 *                              A reasonably fast and accurate clocksource.
 *                      400-499: Perfect
 *                              The ideal clocksource. A must-use where
 *                              available.
 * @read:               returns a cycle value, passes clocksource as argument
 * @enable:             optional function to enable the clocksource
 * @disable:            optional function to disable the clocksource
 * @mask:               bitmask for two's complement
 *                      subtraction of non 64 bit counters
 * @mult:               cycle to nanosecond multiplier (adjusted by NTP)
 * @mult_orig:          cycle to nanosecond multiplier (unadjusted by NTP)
 * @shift:              cycle to nanosecond divisor (power of two)
 * @flags:              flags describing special properties
 * @vread:              vsyscall based read
 * @resume:             resume function for the clocksource, if necessary
 * @cycle_interval:     Used internally by timekeeping core, please ignore.
 * @xtime_interval:     Used internally by timekeeping core, please ignore.
 */
struct clocksource {
        /*
         * First part of structure is read mostly
         */
        char *name;
        struct list_head list;
        int rating;
        cycle_t (*read)(struct clocksource *cs);
        int (*enable)(struct clocksource *cs);
        void (*disable)(struct clocksource *cs);
        cycle_t mask;
        u32 mult;
        u32 mult_orig;
        u32 shift;
        unsigned long flags;
        cycle_t (*vread)(void);
        void (*resume)(void);
#ifdef CONFIG_IA64
        void *fsys_mmio;        /* used by fsyscall asm code */
#define CLKSRC_FSYS_MMIO_SET(mmio, addr)        ((mmio) = (addr))
#else
#define CLKSRC_FSYS_MMIO_SET(mmio, addr)        do { } while (0)
#endif

        /* timekeeping specific data, ignore */
        cycle_t cycle_interval;
        u64 xtime_interval;
        u32 raw_interval;
        /*
         * Second part is written at each timer interrupt
         * Keep it in a different cache line to dirty no
         * more than one cache line.
         */
        cycle_t cycle_last ____cacheline_aligned_in_smp;
        u64 xtime_nsec;
        s64 error;
        struct timespec raw_time;

#ifdef CONFIG_CLOCKSOURCE_WATCHDOG
        /* Watchdog related data, used by the framework */
        struct list_head wd_list;
        cycle_t wd_last;
#endif
};
extern struct clocksource *clock;       /* current clocksource */

/*
 * Clock source flags bits:
 */
#define CLOCK_SOURCE_IS_CONTINUOUS              0x01
#define CLOCK_SOURCE_MUST_VERIFY                0x02

#define CLOCK_SOURCE_WATCHDOG                   0x10
#define CLOCK_SOURCE_VALID_FOR_HRES             0x20

/* simplify initialization of mask field */
#define CLOCKSOURCE_MASK(bits) (cycle_t)((bits) < 64 ? ((1ULL<<(bits))-1) : -1)
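
/*
 * For illustration: CLOCKSOURCE_MASK(32) evaluates to 0xffffffff and
 * CLOCKSOURCE_MASK(64) to an all-ones cycle_t. The mask is what makes a
 * two's complement delta such as
 *
 *      delta = (now - last) & mask;
 *
 * come out right for counters narrower than 64 bit, as long as no more
 * than one wrap occurred between the two readouts.
 */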
/**
 * clocksource_khz2mult - calculates mult from khz and shift
 * @khz:                Clocksource frequency in KHz
 * @shift_constant:     Clocksource shift factor
 *
 * Helper function that converts a khz counter frequency to a clocksource
 * multiplier, given the clocksource shift value
 */
static inline u32 clocksource_khz2mult(u32 khz, u32 shift_constant)
{
        /*  khz = cyc/(Million ns)
         *  mult/2^shift  = ns/cyc
         *  mult = ns/cyc * 2^shift
         *  mult = 1Million/khz * 2^shift
         *  mult = 1000000 * 2^shift / khz
         *  mult = (1000000<<shift) / khz
         */
        u64 tmp = ((u64)1000000) << shift_constant;

        tmp += khz/2; /* round for do_div */
        do_div(tmp, khz);

        return (u32)tmp;
}
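
/*
 * Worked example (for illustration only): a 1 MHz counter used with
 * shift = 22 gets mult = clocksource_khz2mult(1000, 22)
 * = (1000000 << 22) / 1000 = 4194304000, which still fits in a u32.
 * Converting 5 cycles with that pair gives (5 * 4194304000) >> 22
 * = 5000 ns, i.e. the expected 1000 ns per cycle.
 */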
/**
 * clocksource_hz2mult - calculates mult from hz and shift
 * @hz:                 Clocksource frequency in Hz
 * @shift_constant:     Clocksource shift factor
 *
 * Helper function that converts a hz counter
 * frequency to a clocksource multiplier, given the
 * clocksource shift value
 */
static inline u32 clocksource_hz2mult(u32 hz, u32 shift_constant)
{
        /*  hz = cyc/(Billion ns)
         *  mult/2^shift  = ns/cyc
         *  mult = ns/cyc * 2^shift
         *  mult = 1Billion/hz * 2^shift
         *  mult = 1000000000 * 2^shift / hz
         *  mult = (1000000000<<shift) / hz
         */
        u64 tmp = ((u64)1000000000) << shift_constant;

        tmp += hz/2; /* round for do_div */
        do_div(tmp, hz);

        return (u32)tmp;
}
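
/*
 * Worked example (for illustration only): a 32768 Hz counter with
 * shift = 16 gets mult = clocksource_hz2mult(32768, 16)
 * = (1000000000 << 16) / 32768 = 2000000000. A single cycle then
 * converts to 2000000000 >> 16 = 30517 ns, matching the expected
 * period of 1/32768 s ~= 30517.6 ns.
 */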
/**
 * cyc2ns - converts clocksource cycles to nanoseconds
 * @cs:         Pointer to clocksource
 * @cycles:     Cycles
 *
 * Uses the clocksource and ntp adjustment to convert cycles to nanoseconds.
 *
 * XXX - This could use some mult_lxl_ll() asm optimization
 */
static inline s64 cyc2ns(struct clocksource *cs, cycle_t cycles)
{
        u64 ret = (u64)cycles;

        ret = (ret * cs->mult) >> cs->shift;
        return ret;
}
/**
 * clocksource_calculate_interval - Calculates a clocksource interval struct
 *
 * @c:           Pointer to clocksource.
 * @length_nsec: Desired interval length in nanoseconds.
 *
 * Calculates a fixed cycle/nsec interval for a given clocksource/adjustment
 * pair and interval request.
 *
 * Unless you're the timekeeping code, you should not be using this!
 */
static inline void clocksource_calculate_interval(struct clocksource *c,
                                                  unsigned long length_nsec)
{
        u64 tmp;

        /* Do the ns -> cycle conversion first, using original mult */
        tmp = length_nsec;
        tmp <<= c->shift;
        tmp += c->mult_orig/2;
        do_div(tmp, c->mult_orig);

        c->cycle_interval = (cycle_t)tmp;
        if (c->cycle_interval == 0)
                c->cycle_interval = 1;

        /* Go back from cycles -> shifted ns, this time use ntp adjusted mult */
        c->xtime_interval = (u64)c->cycle_interval * c->mult;
        c->raw_interval = ((u64)c->cycle_interval * c->mult_orig) >> c->shift;
}
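
/*
 * Worked example (for illustration only): with the 1 MHz / shift = 22
 * values from above (mult == mult_orig == 4194304000) and
 * length_nsec = 10000000 (10 ms), cycle_interval comes out as
 * ((10000000 << 22) + mult_orig/2) / mult_orig = 10000 cycles, and
 * xtime_interval = 10000 * mult, i.e. the same 10 ms expressed in
 * shifted nanoseconds.
 */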
/* used to install a new clocksource */
extern int clocksource_register(struct clocksource*);
extern void clocksource_unregister(struct clocksource*);
extern void clocksource_touch_watchdog(void);
extern struct clocksource* clocksource_get_next(void);
extern void clocksource_change_rating(struct clocksource *cs, int rating);
extern void clocksource_resume(void);
extern struct clocksource * __init __weak clocksource_default_clock(void);
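
/*
 * Registration sketch (illustrative only, the foo_* names and
 * foo_read_hw_counter() are hypothetical):
 *
 *      static cycle_t foo_cs_read(struct clocksource *cs)
 *      {
 *              return (cycle_t)foo_read_hw_counter();
 *      }
 *
 *      static struct clocksource foo_clocksource = {
 *              .name   = "foo",
 *              .rating = 200,
 *              .read   = foo_cs_read,
 *              .mask   = CLOCKSOURCE_MASK(32),
 *              .shift  = 22,
 *              .flags  = CLOCK_SOURCE_IS_CONTINUOUS,
 *      };
 *
 * At init time the driver would fill in .mult, e.g. with
 * clocksource_khz2mult(1000, foo_clocksource.shift) for a 1 MHz counter,
 * and then call clocksource_register(&foo_clocksource); the rating of 200
 * places it in the "Good" range described above.
 */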
#ifdef CONFIG_GENERIC_TIME_VSYSCALL
extern void update_vsyscall(struct timespec *ts, struct clocksource *c);
extern void update_vsyscall_tz(void);
#else
static inline void update_vsyscall(struct timespec *ts, struct clocksource *c)
{
}

static inline void update_vsyscall_tz(void)
{
}
#endif

#endif /* _LINUX_CLOCKSOURCE_H */