/* timex.h */
  1. /*
  2. * S390 version
  3. * Copyright IBM Corp. 1999
  4. *
  5. * Derived from "include/asm-i386/timex.h"
  6. * Copyright (C) 1992, Linus Torvalds
  7. */
  8. #ifndef _ASM_S390_TIMEX_H
  9. #define _ASM_S390_TIMEX_H
  10. #include <asm/lowcore.h>
  11. /* The value of the TOD clock for 1.1.1970. */
  12. #define TOD_UNIX_EPOCH 0x7d91048bca000000ULL
  13. /* Inline functions for clock register access. */
  14. static inline int set_clock(__u64 time)
  15. {
  16. int cc;
  17. asm volatile(
  18. " sck %1\n"
  19. " ipm %0\n"
  20. " srl %0,28\n"
  21. : "=d" (cc) : "Q" (time) : "cc");
  22. return cc;
  23. }
  24. static inline int store_clock(__u64 *time)
  25. {
  26. int cc;
  27. asm volatile(
  28. " stck %1\n"
  29. " ipm %0\n"
  30. " srl %0,28\n"
  31. : "=d" (cc), "=Q" (*time) : : "cc");
  32. return cc;
  33. }
  34. static inline void set_clock_comparator(__u64 time)
  35. {
  36. asm volatile("sckc %0" : : "Q" (time));
  37. }
  38. static inline void store_clock_comparator(__u64 *time)
  39. {
  40. asm volatile("stckc %0" : "=Q" (*time));
  41. }
  42. void clock_comparator_work(void);
  43. static inline unsigned long long local_tick_disable(void)
  44. {
  45. unsigned long long old;
  46. old = S390_lowcore.clock_comparator;
  47. S390_lowcore.clock_comparator = -1ULL;
  48. set_clock_comparator(S390_lowcore.clock_comparator);
  49. return old;
  50. }
  51. static inline void local_tick_enable(unsigned long long comp)
  52. {
  53. S390_lowcore.clock_comparator = comp;
  54. set_clock_comparator(S390_lowcore.clock_comparator);
  55. }
  56. #define CLOCK_TICK_RATE 1193180 /* Underlying HZ */
  57. typedef unsigned long long cycles_t;
  58. static inline unsigned long long get_clock(void)
  59. {
  60. unsigned long long clk;
  61. #ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES
  62. asm volatile(".insn s,0xb27c0000,%0" : "=Q" (clk) : : "cc");
  63. #else
  64. asm volatile("stck %0" : "=Q" (clk) : : "cc");
  65. #endif
  66. return clk;
  67. }
  68. static inline void get_clock_ext(char *clk)
  69. {
  70. asm volatile("stcke %0" : "=Q" (*clk) : : "cc");
  71. }
  72. static inline unsigned long long get_clock_xt(void)
  73. {
  74. unsigned char clk[16];
  75. get_clock_ext(clk);
  76. return *((unsigned long long *)&clk[1]);
  77. }
  78. static inline cycles_t get_cycles(void)
  79. {
  80. return (cycles_t) get_clock() >> 2;
  81. }
  82. int get_sync_clock(unsigned long long *clock);
  83. void init_cpu_timer(void);
  84. unsigned long long monotonic_clock(void);
  85. void tod_to_timeval(__u64, struct timespec *);
  86. static inline
  87. void stck_to_timespec(unsigned long long stck, struct timespec *ts)
  88. {
  89. tod_to_timeval(stck - TOD_UNIX_EPOCH, ts);
  90. }
  91. extern u64 sched_clock_base_cc;
  92. /**
  93. * get_clock_monotonic - returns current time in clock rate units
  94. *
  95. * The caller must ensure that preemption is disabled.
  96. * The clock and sched_clock_base get changed via stop_machine.
  97. * Therefore preemption must be disabled when calling this
  98. * function, otherwise the returned value is not guaranteed to
  99. * be monotonic.
  100. */
  101. static inline unsigned long long get_clock_monotonic(void)
  102. {
  103. return get_clock_xt() - sched_clock_base_cc;
  104. }
  105. #endif