/*
 * Floating proportions
 *
 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 * This file contains the public data structure and API definitions.
 */
  8. #ifndef _LINUX_PROPORTIONS_H
  9. #define _LINUX_PROPORTIONS_H
  10. #include <linux/percpu_counter.h>
  11. #include <linux/spinlock.h>
  12. #include <linux/mutex.h>
/*
 * Global state of one floating-proportion domain: the event period
 * length and the shared event counter all locals are measured against.
 */
struct prop_global {
	/*
	 * The period over which we differentiate
	 *
	 * period = 2^shift
	 */
	int shift;
	/*
	 * The total event counter aka 'time'.
	 *
	 * Treated as an unsigned long; the lower 'shift - 1' bits are the
	 * counter bits, the remaining upper bits the period counter.
	 */
	struct percpu_counter events;
};
/*
 * global proportion descriptor
 *
 * this is needed to consistently flip prop_global structures.
 */
struct prop_descriptor {
	/* which of pg[] is current; presumably flipped on a shift change
	 * under 'mutex' — see prop_change_shift() */
	int index;
	struct prop_global pg[2];
	struct mutex mutex;		/* serialize the prop_global switch */
};
/*
 * Initialize @pd with a differentiation period of 2^@shift events.
 * Returns 0 on success, negative errno otherwise (percpu allocation
 * may fail — confirm against the implementation).
 */
int prop_descriptor_init(struct prop_descriptor *pd, int shift);
/*
 * Switch @pd to a new period length of 2^@new_shift events.
 */
void prop_change_shift(struct prop_descriptor *pd, int new_shift);
/*
 * ----- PERCPU ------
 *
 * Per-user local state backed by a percpu_counter; suited for
 * frequently-updated counters.
 */
struct prop_local_percpu {
	/*
	 * the local events counter
	 */
	struct percpu_counter events;
	/*
	 * snapshot of the last seen global state
	 */
	int shift;
	unsigned long period;
	spinlock_t lock;		/* protect the snapshot state */
};
/* Initialize @pl; returns 0 on success, negative errno otherwise. */
int prop_local_init_percpu(struct prop_local_percpu *pl);
/* Release resources held by @pl. */
void prop_local_destroy_percpu(struct prop_local_percpu *pl);
/* Record one event for @pl within @pd; caller must disable IRQs
 * (see prop_inc_percpu() below, which does so). */
void __prop_inc_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl);
/*
 * Compute @pl's share of @pd's events as the fraction
 * *@numerator / *@denominator.
 */
void prop_fraction_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl,
		long *numerator, long *denominator);
  60. static inline
  61. void prop_inc_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl)
  62. {
  63. unsigned long flags;
  64. local_irq_save(flags);
  65. __prop_inc_percpu(pd, pl);
  66. local_irq_restore(flags);
  67. }
/*
 * ----- SINGLE ------
 *
 * Per-user local state kept in a plain counter; the lighter-weight
 * alternative to the percpu variant above.
 */
struct prop_local_single {
	/*
	 * the local events counter
	 */
	unsigned long events;
	/*
	 * snapshot of the last seen global state
	 * and a lock protecting this state
	 */
	int shift;
	unsigned long period;
	spinlock_t lock;		/* protect the snapshot state */
};
/*
 * Static initializer for struct prop_local_single: only the spinlock
 * needs explicit setup; the remaining members are zero-initialized by
 * the designated-initializer semantics.
 */
#define INIT_PROP_LOCAL_SINGLE(name)	\
{	.lock = __SPIN_LOCK_UNLOCKED(name.lock),	\
}
/* Initialize @pl; returns 0 on success, negative errno otherwise. */
int prop_local_init_single(struct prop_local_single *pl);
/* Release resources held by @pl. */
void prop_local_destroy_single(struct prop_local_single *pl);
/* Record one event for @pl within @pd; caller must disable IRQs
 * (see prop_inc_single() below, which does so). */
void __prop_inc_single(struct prop_descriptor *pd, struct prop_local_single *pl);
/*
 * Compute @pl's share of @pd's events as the fraction
 * *@numerator / *@denominator.
 */
void prop_fraction_single(struct prop_descriptor *pd, struct prop_local_single *pl,
		long *numerator, long *denominator);
  92. static inline
  93. void prop_inc_single(struct prop_descriptor *pd, struct prop_local_single *pl)
  94. {
  95. unsigned long flags;
  96. local_irq_save(flags);
  97. __prop_inc_single(pd, pl);
  98. local_irq_restore(flags);
  99. }
  100. #endif /* _LINUX_PROPORTIONS_H */