sched_clock.h
  1. /*
  2. * sched_clock.h: support for extending counters to full 64-bit ns counter
  3. *
  4. * This program is free software; you can redistribute it and/or modify
  5. * it under the terms of the GNU General Public License version 2 as
  6. * published by the Free Software Foundation.
  7. */
  8. #ifndef ASM_SCHED_CLOCK
  9. #define ASM_SCHED_CLOCK
  10. #include <linux/kernel.h>
  11. #include <linux/types.h>
/*
 * Per-source state for an extended sched_clock: the (cycle, ns) pair
 * sampled at the last epoch update, plus the fixed-point conversion
 * factors.  Readers detect a concurrent update by comparing epoch_cyc
 * with epoch_cyc_copy (see cyc_to_fixed_sched_clock()).
 */
struct clock_data {
	u64 epoch_ns;		/* ns value at the last epoch update */
	u32 epoch_cyc;		/* counter value at the last epoch update */
	u32 epoch_cyc_copy;	/* written last on update; a mismatch with
				 * epoch_cyc flags an update in progress */
	u32 mult;		/* cycle-to-ns fixed-point multiplier */
	u32 shift;		/* cycle-to-ns right shift */
};

/* Define a clock_data instance with the given name. */
#define DEFINE_CLOCK_DATA(name)	struct clock_data name
  20. static inline u64 cyc_to_ns(u64 cyc, u32 mult, u32 shift)
  21. {
  22. return (cyc * mult) >> shift;
  23. }
/*
 * Atomically update the sched_clock epoch.  Your update callback will
 * be called from a timer before the counter wraps - read the current
 * counter value, and call this function to safely move the epochs
 * forward.  Only use this from the update callback.
 */
static inline void update_sched_clock(struct clock_data *cd, u32 cyc, u32 mask)
{
	unsigned long flags;
	/* ns elapsed since the previous epoch; the mask handles wrap of
	 * the (possibly narrower-than-32-bit) hardware counter. */
	u64 ns = cd->epoch_ns +
		cyc_to_ns((cyc - cd->epoch_cyc) & mask, cd->mult, cd->shift);
	/*
	 * Write epoch_cyc and epoch_ns in a way that the update is
	 * detectable in cyc_to_fixed_sched_clock(): epoch_cyc first,
	 * epoch_cyc_copy last, with write barriers between each store.
	 * A reader that observes epoch_cyc == epoch_cyc_copy is thus
	 * guaranteed a consistent (epoch_cyc, epoch_ns) pair.
	 * Interrupts are disabled so a reader on this CPU cannot
	 * observe a half-finished update.
	 */
	raw_local_irq_save(flags);
	cd->epoch_cyc = cyc;
	smp_wmb();
	cd->epoch_ns = ns;
	smp_wmb();
	cd->epoch_cyc_copy = cyc;
	raw_local_irq_restore(flags);
}
/*
 * If your clock rate is known at compile time, using this will allow
 * you to optimize the mult/shift loads away.  This is paired with
 * init_fixed_sched_clock() to ensure that your mult/shift are correct.
 */
static inline unsigned long long cyc_to_fixed_sched_clock(struct clock_data *cd,
	u32 cyc, u32 mask, u32 mult, u32 shift)
{
	u64 epoch_ns;
	u32 epoch_cyc;

	/*
	 * Load the epoch_cyc and epoch_ns atomically.  We do this by
	 * ensuring that we always write epoch_cyc, epoch_ns and
	 * epoch_cyc_copy in strict order, and read them in strict order.
	 * If epoch_cyc and epoch_cyc_copy are not equal, then we're in
	 * the middle of an update, and we should repeat the load.
	 * (Read-side counterpart of update_sched_clock().)
	 */
	do {
		epoch_cyc = cd->epoch_cyc;
		smp_rmb();		/* pairs with first smp_wmb() in updater */
		epoch_ns = cd->epoch_ns;
		smp_rmb();		/* pairs with second smp_wmb() in updater */
	} while (epoch_cyc != cd->epoch_cyc_copy);

	/* Epoch ns plus the (wrap-masked) cycles elapsed since the epoch. */
	return epoch_ns + cyc_to_ns((cyc - epoch_cyc) & mask, mult, shift);
}
  72. /*
  73. * Otherwise, you need to use this, which will obtain the mult/shift
  74. * from the clock_data structure. Use init_sched_clock() with this.
  75. */
  76. static inline unsigned long long cyc_to_sched_clock(struct clock_data *cd,
  77. u32 cyc, u32 mask)
  78. {
  79. return cyc_to_fixed_sched_clock(cd, cyc, mask, cd->mult, cd->shift);
  80. }
  81. /*
  82. * Initialize the clock data - calculate the appropriate multiplier
  83. * and shift. Also setup a timer to ensure that the epoch is refreshed
  84. * at the appropriate time interval, which will call your update
  85. * handler.
  86. */
  87. void init_sched_clock(struct clock_data *, void (*)(void),
  88. unsigned int, unsigned long);
  89. /*
  90. * Use this initialization function rather than init_sched_clock() if
  91. * you're using cyc_to_fixed_sched_clock, which will warn if your
  92. * constants are incorrect.
  93. */
  94. static inline void init_fixed_sched_clock(struct clock_data *cd,
  95. void (*update)(void), unsigned int bits, unsigned long rate,
  96. u32 mult, u32 shift)
  97. {
  98. init_sched_clock(cd, update, bits, rate);
  99. if (cd->mult != mult || cd->shift != shift) {
  100. pr_crit("sched_clock: wrong multiply/shift: %u>>%u vs calculated %u>>%u\n"
  101. "sched_clock: fix multiply/shift to avoid scheduler hiccups\n",
  102. mult, shift, cd->mult, cd->shift);
  103. }
  104. }
  105. extern void sched_clock_postinit(void);
  106. #endif