/*
 *  include/asm-s390/timex.h
 *
 *  S390 version
 *    Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *
 *  Derived from "include/asm-i386/timex.h"
 *    Copyright (C) 1992, Linus Torvalds
 */

#ifndef _ASM_S390_TIMEX_H
#define _ASM_S390_TIMEX_H

#include <asm/lowcore.h>

/* The value of the TOD clock for 1.1.1970. */
#define TOD_UNIX_EPOCH 0x7d91048bca000000ULL
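
/*
 * The TOD clock's epoch is 1900-01-01 00:00 UTC and bit 51 ticks once
 * per microsecond, so the value above is the 70 years from 1900 to 1970
 * (2208988800 seconds) expressed in TOD units (microseconds << 12).
 */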

/* Inline functions for clock register access. */
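
/*
 * set_clock() executes SCK to set the TOD clock; the usual IPM/SRL
 * sequence returns the instruction's condition code (0 means the
 * clock was set).
 */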
static inline int set_clock(__u64 time)
{
	int cc;

	asm volatile(
		" sck %1\n"
		" ipm %0\n"
		" srl %0,28\n"
		: "=d" (cc) : "Q" (time) : "cc");
	return cc;
}
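
/*
 * store_clock() executes STCK; the condition code reflects the clock
 * state (0 = set, 1 = not set, 2 = error, 3 = stopped or not
 * operational).
 */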
static inline int store_clock(__u64 *time)
{
	int cc;

	asm volatile(
		" stck %1\n"
		" ipm %0\n"
		" srl %0,28\n"
		: "=d" (cc), "=Q" (*time) : : "cc");
	return cc;
}
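
/*
 * The clock comparator raises an external interrupt once the TOD clock
 * passes the programmed value; SCKC and STCKC set and store it.
 */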
static inline void set_clock_comparator(__u64 time)
{
	asm volatile("sckc %0" : : "Q" (time));
}

static inline void store_clock_comparator(__u64 *time)
{
	asm volatile("stckc %0" : "=Q" (*time));
}

void clock_comparator_work(void);
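
/*
 * Ticks are suppressed by pushing the clock comparator to the far
 * future (-1ULL); the previous comparator value is returned so that
 * local_tick_enable() can restore it later.
 */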
static inline unsigned long long local_tick_disable(void)
{
	unsigned long long old;

	old = S390_lowcore.clock_comparator;
	S390_lowcore.clock_comparator = -1ULL;
	set_clock_comparator(S390_lowcore.clock_comparator);
	return old;
}

static inline void local_tick_enable(unsigned long long comp)
{
	S390_lowcore.clock_comparator = comp;
	set_clock_comparator(S390_lowcore.clock_comparator);
}
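
/*
 * 1193180 Hz is the historical PC i8253 timer frequency; s390 has no
 * such device, the constant only exists so that common code using
 * CLOCK_TICK_RATE compiles.
 */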
#define CLOCK_TICK_RATE	1193180 /* Underlying HZ */

typedef unsigned long long cycles_t;
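
/* get_clock() returns the 64-bit TOD clock value stored by STCK. */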
static inline unsigned long long get_clock(void)
{
	unsigned long long clk;

	asm volatile("stck %0" : "=Q" (clk) : : "cc");
	return clk;
}
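
/*
 * STCKE stores a 16-byte extended TOD value: byte 0 holds the epoch
 * index and bytes 1-8 match the 8 bytes stored by STCK, which is why
 * get_clock_xt() reads 8 bytes starting at offset 1.  The buffer
 * passed to get_clock_ext() must be 16 bytes long.
 */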
static inline void get_clock_ext(char *clk)
{
	asm volatile("stcke %0" : "=Q" (*clk) : : "cc");
}

static inline unsigned long long get_clock_xt(void)
{
	unsigned char clk[16];

	get_clock_ext(clk);
	return *((unsigned long long *)&clk[1]);
}
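
/*
 * get_cycles() feeds the generic cycle counter interface; the two
 * low-order TOD bits are dropped.
 */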
static inline cycles_t get_cycles(void)
{
	return (cycles_t) get_clock() >> 2;
}

int get_sync_clock(unsigned long long *clock);
void init_cpu_timer(void);
unsigned long long monotonic_clock(void);

void tod_to_timeval(__u64, struct timespec *);
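
/*
 * Convert a raw STCK value to a struct timespec by rebasing it from the
 * TOD epoch to the Unix epoch.  Note that tod_to_timeval(), despite its
 * name, fills in a struct timespec.
 */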
static inline
void stck_to_timespec(unsigned long long stck, struct timespec *ts)
{
	tod_to_timeval(stck - TOD_UNIX_EPOCH, ts);
}
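
/*
 * TOD clock base for sched_clock(); it is adjusted together with the
 * TOD clock via stop_machine (see get_clock_monotonic() below).
 */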
extern u64 sched_clock_base_cc;

/**
 * get_clock_monotonic - returns current time in clock rate units
 *
 * The TOD clock and sched_clock_base_cc get changed via stop_machine.
 * Therefore preemption must be disabled when calling this function,
 * otherwise the returned value is not guaranteed to be monotonic.
 */
static inline unsigned long long get_clock_monotonic(void)
{
	return get_clock_xt() - sched_clock_base_cc;
}
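
/*
 * Minimal usage sketch (in kernel context):
 *
 *	unsigned long long t;
 *
 *	preempt_disable();
 *	t = get_clock_monotonic();
 *	preempt_enable();
 */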

#endif /* _ASM_S390_TIMEX_H */