  1. /*
  2. * bcache stats code
  3. *
  4. * Copyright 2012 Google, Inc.
  5. */
  6. #include "bcache.h"
  7. #include "stats.h"
  8. #include "btree.h"
  9. #include "request.h"
  10. #include "sysfs.h"
  11. /*
  12. * We keep absolute totals of various statistics, and addionally a set of three
  13. * rolling averages.
  14. *
  15. * Every so often, a timer goes off and rescales the rolling averages.
  16. * accounting_rescale[] is how many times the timer has to go off before we
  17. * rescale each set of numbers; that gets us half lives of 5 minutes, one hour,
  18. * and one day.
  19. *
  20. * accounting_delay is how often the timer goes off - 22 times in 5 minutes,
  21. * and accounting_weight is what we use to rescale:
  22. *
  23. * pow(31 / 32, 22) ~= 1/2
  24. *
  25. * So that we don't have to increment each set of numbers every time we (say)
  26. * get a cache hit, we increment a single atomic_t in acc->collector, and when
  27. * the rescale function runs it resets the atomic counter to 0 and adds its
  28. * old value to each of the exported numbers.
  29. *
  30. * To reduce rounding error, the numbers in struct cache_stats are all
  31. * stored left shifted by 16, and scaled back in the sysfs show() function.
  32. */
/*
 * Rescale periods, in units of timer firings: the day and hour averages
 * halve after 288 and 12 firings of the rescale timer respectively; the
 * five minute average rescales on every firing.
 */
static const unsigned DAY_RESCALE		= 288;
static const unsigned HOUR_RESCALE		= 12;
static const unsigned FIVE_MINUTE_RESCALE	= 1;

/* Timer period: the rescale timer fires 22 times every 5 minutes */
static const unsigned accounting_delay		= (HZ * 300) / 22;
/* EWMA weight: pow(31/32, 22) ~= 1/2, i.e. a five minute half life */
static const unsigned accounting_weight		= 32;
/* sysfs reading/writing */

/* One read-only sysfs attribute per exported statistic */
read_attribute(cache_hits);
read_attribute(cache_misses);
read_attribute(cache_bypass_hits);
read_attribute(cache_bypass_misses);
read_attribute(cache_hit_ratio);
read_attribute(cache_readaheads);
read_attribute(cache_miss_collisions);
read_attribute(bypassed);
/*
 * sysfs show: format one attribute of a struct cache_stats.
 *
 * The counters are stored left shifted by 16 to reduce rounding error
 * (see the comment at the top of the file); var() scales them back
 * before they're printed.
 */
SHOW(bch_stats)
{
	struct cache_stats *s =
		container_of(kobj, struct cache_stats, kobj);
#define var(stat) (s->stat >> 16)
	var_print(cache_hits);
	var_print(cache_misses);
	var_print(cache_bypass_hits);
	var_print(cache_bypass_misses);

	/* DIV_SAFE guards against a zero denominator when no IO has happened */
	sysfs_print(cache_hit_ratio,
		    DIV_SAFE(var(cache_hits) * 100,
			     var(cache_hits) + var(cache_misses)));

	var_print(cache_readaheads);
	var_print(cache_miss_collisions);
	/* sectors_bypassed is in 512 byte sectors; << 9 converts to bytes */
	sysfs_hprint(bypassed, var(sectors_bypassed) << 9);
#undef var
	return 0;
}
/* sysfs store: the stats attributes are read only, so writes are no-ops */
STORE(bch_stats)
{
	return size;
}
/*
 * kobject release: nothing to free here — struct cache_stats is embedded
 * in struct cache_accounting, whose lifetime is managed by its closure.
 */
static void bch_stats_release(struct kobject *k)
{
}
/* Attributes exported under each stats_* sysfs directory */
static struct attribute *bch_stats_files[] = {
	&sysfs_cache_hits,
	&sysfs_cache_misses,
	&sysfs_cache_bypass_hits,
	&sysfs_cache_bypass_misses,
	&sysfs_cache_hit_ratio,
	&sysfs_cache_readaheads,
	&sysfs_cache_miss_collisions,
	&sysfs_bypassed,
	NULL
};
static KTYPE(bch_stats);
static void scale_accounting(unsigned long data);

/*
 * Initialize the four stats kobjects (total/day/hour/five minute), hook
 * our closure into @parent's refcount hierarchy, and start the periodic
 * rescale timer.
 *
 * The kobjects are only initialized here, not added to sysfs; that
 * happens later in bch_cache_accounting_add_kobjs().
 */
void bch_cache_accounting_init(struct cache_accounting *acc,
			       struct closure *parent)
{
	kobject_init(&acc->total.kobj,		&bch_stats_ktype);
	kobject_init(&acc->five_minute.kobj,	&bch_stats_ktype);
	kobject_init(&acc->hour.kobj,		&bch_stats_ktype);
	kobject_init(&acc->day.kobj,		&bch_stats_ktype);

	closure_init(&acc->cl, parent);
	init_timer(&acc->timer);
	acc->timer.expires	= jiffies + accounting_delay;
	acc->timer.data		= (unsigned long) acc;
	acc->timer.function	= scale_accounting;
	add_timer(&acc->timer);
}
  99. int bch_cache_accounting_add_kobjs(struct cache_accounting *acc,
  100. struct kobject *parent)
  101. {
  102. int ret = kobject_add(&acc->total.kobj, parent,
  103. "stats_total");
  104. ret = ret ?: kobject_add(&acc->five_minute.kobj, parent,
  105. "stats_five_minute");
  106. ret = ret ?: kobject_add(&acc->hour.kobj, parent,
  107. "stats_hour");
  108. ret = ret ?: kobject_add(&acc->day.kobj, parent,
  109. "stats_day");
  110. return ret;
  111. }
  112. void bch_cache_accounting_clear(struct cache_accounting *acc)
  113. {
  114. memset(&acc->total.cache_hits,
  115. 0,
  116. sizeof(unsigned long) * 7);
  117. }
/*
 * Tear down the stats machinery: drop the sysfs kobjects and stop the
 * rescale timer.
 *
 * closing is set before del_timer_sync() so that a concurrently running
 * scale_accounting() won't rearm the timer. If del_timer_sync() returns
 * nonzero the timer was still pending — it will never run again, so we
 * must drop the closure ref here; otherwise the timer callback saw
 * closing and already called closure_return() itself.
 */
void bch_cache_accounting_destroy(struct cache_accounting *acc)
{
	kobject_put(&acc->total.kobj);
	kobject_put(&acc->five_minute.kobj);
	kobject_put(&acc->hour.kobj);
	kobject_put(&acc->day.kobj);

	atomic_set(&acc->closing, 1);
	if (del_timer_sync(&acc->timer))
		closure_return(&acc->cl);
}
  128. /* EWMA scaling */
/*
 * Decay one counter by one EWMA step: adds a new sample of 0 with weight
 * 1/accounting_weight, i.e. *stat -= *stat / 32 (no extra fractional
 * bits beyond the <<16 already in the stored value).
 */
static void scale_stat(unsigned long *stat)
{
	*stat = ewma_add(*stat, 0, accounting_weight, 0);
}
  133. static void scale_stats(struct cache_stats *stats, unsigned long rescale_at)
  134. {
  135. if (++stats->rescale == rescale_at) {
  136. stats->rescale = 0;
  137. scale_stat(&stats->cache_hits);
  138. scale_stat(&stats->cache_misses);
  139. scale_stat(&stats->cache_bypass_hits);
  140. scale_stat(&stats->cache_bypass_misses);
  141. scale_stat(&stats->cache_readaheads);
  142. scale_stat(&stats->cache_miss_collisions);
  143. scale_stat(&stats->sectors_bypassed);
  144. }
  145. }
/*
 * Timer callback: drain the shared atomic collector counters into each
 * of the four exported stat sets, then give each set its periodic EWMA
 * decay, and rearm the timer (unless we're shutting down).
 *
 * atomic_xchg() makes the drain lossless: increments that race with the
 * reset land in the next period instead of being dropped. The <<16
 * converts to the fixed-point representation used by struct cache_stats.
 */
static void scale_accounting(unsigned long data)
{
	struct cache_accounting *acc = (struct cache_accounting *) data;

#define move_stat(name) do {						\
	unsigned t = atomic_xchg(&acc->collector.name, 0);		\
	t <<= 16;							\
	acc->five_minute.name += t;					\
	acc->hour.name += t;						\
	acc->day.name += t;						\
	acc->total.name += t;						\
} while (0)

	move_stat(cache_hits);
	move_stat(cache_misses);
	move_stat(cache_bypass_hits);
	move_stat(cache_bypass_misses);
	move_stat(cache_readaheads);
	move_stat(cache_miss_collisions);
	move_stat(sectors_bypassed);

	/* total never decays: rescale_at 0 is never matched by ++rescale */
	scale_stats(&acc->total, 0);
	scale_stats(&acc->day, DAY_RESCALE);
	scale_stats(&acc->hour, HOUR_RESCALE);
	scale_stats(&acc->five_minute, FIVE_MINUTE_RESCALE);

	/* advance from the scheduled expiry, not jiffies, to avoid drift */
	acc->timer.expires += accounting_delay;

	/* see bch_cache_accounting_destroy() for the shutdown handshake */
	if (!atomic_read(&acc->closing))
		add_timer(&acc->timer);
	else
		closure_return(&acc->cl);
}
  174. static void mark_cache_stats(struct cache_stat_collector *stats,
  175. bool hit, bool bypass)
  176. {
  177. if (!bypass)
  178. if (hit)
  179. atomic_inc(&stats->cache_hits);
  180. else
  181. atomic_inc(&stats->cache_misses);
  182. else
  183. if (hit)
  184. atomic_inc(&stats->cache_bypass_hits);
  185. else
  186. atomic_inc(&stats->cache_bypass_misses);
  187. }
/*
 * Account one cache lookup in every collector that covers this search:
 * the backing device's, the cache set's, and (if cgroup accounting is
 * built in) the issuing cgroup's.
 */
void bch_mark_cache_accounting(struct search *s, bool hit, bool bypass)
{
	struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
	mark_cache_stats(&dc->accounting.collector, hit, bypass);
	mark_cache_stats(&s->op.c->accounting.collector, hit, bypass);
#ifdef CONFIG_CGROUP_BCACHE
	mark_cache_stats(&(bch_bio_to_cgroup(s->orig_bio)->stats), hit, bypass);
#endif
}
  197. void bch_mark_cache_readahead(struct search *s)
  198. {
  199. struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
  200. atomic_inc(&dc->accounting.collector.cache_readaheads);
  201. atomic_inc(&s->op.c->accounting.collector.cache_readaheads);
  202. }
  203. void bch_mark_cache_miss_collision(struct search *s)
  204. {
  205. struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
  206. atomic_inc(&dc->accounting.collector.cache_miss_collisions);
  207. atomic_inc(&s->op.c->accounting.collector.cache_miss_collisions);
  208. }
  209. void bch_mark_sectors_bypassed(struct search *s, int sectors)
  210. {
  211. struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
  212. atomic_add(sectors, &dc->accounting.collector.sectors_bypassed);
  213. atomic_add(sectors, &s->op.c->accounting.collector.sectors_bypassed);
  214. }