writeback.h

#ifndef _BCACHE_WRITEBACK_H
#define _BCACHE_WRITEBACK_H

/*
 * Cutoffs on gc_stats.in_use (percentage of the cache in use): above
 * CUTOFF_WRITEBACK_SYNC no writes go through writeback at all; above
 * CUTOFF_WRITEBACK only REQ_SYNC writes do. See should_writeback() below.
 */
#define CUTOFF_WRITEBACK	40
#define CUTOFF_WRITEBACK_SYNC	70
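/*
 * Total dirty sectors on the backing device: the sum of the per-stripe
 * dirty counters.
 */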
static inline uint64_t bcache_dev_sectors_dirty(struct bcache_device *d)
{
	uint64_t i, ret = 0;

	for (i = 0; i < d->nr_stripes; i++)
		ret += atomic_read(d->stripe_sectors_dirty + i);

	return ret;
}
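/*
 * Returns true if any stripe touched by [offset, offset + nr_sectors)
 * has dirty sectors. A stripe covers 1 << stripe_size_bits sectors; the
 * loop walks the range one stripe at a time.
 */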
static inline bool bcache_dev_stripe_dirty(struct bcache_device *d,
					   uint64_t offset,
					   unsigned nr_sectors)
{
	uint64_t stripe = offset >> d->stripe_size_bits;

	while (1) {
		if (atomic_read(d->stripe_sectors_dirty + stripe))
			return true;

		if (nr_sectors <= 1 << d->stripe_size_bits)
			return false;

		nr_sectors -= 1 << d->stripe_size_bits;
		stripe++;
	}
}
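/*
 * Decide whether a write bio should go through writeback: never when not
 * in writeback mode, while detaching, or when the cache is too full;
 * always for writes touching dirty stripes when partial stripe writes
 * are expensive; otherwise only for writes the cache wouldn't skip, when
 * the bio is REQ_SYNC or the cache has room to spare.
 */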
static inline bool should_writeback(struct cached_dev *dc, struct bio *bio,
				    unsigned cache_mode, bool would_skip)
{
	unsigned in_use = dc->disk.c->gc_stats.in_use;

	if (cache_mode != CACHE_MODE_WRITEBACK ||
	    atomic_read(&dc->disk.detaching) ||
	    in_use > CUTOFF_WRITEBACK_SYNC)
		return false;

	if (dc->partial_stripes_expensive &&
	    bcache_dev_stripe_dirty(&dc->disk, bio->bi_sector,
				    bio_sectors(bio)))
		return true;

	if (would_skip)
		return false;

	return bio->bi_rw & REQ_SYNC ||
		in_use <= CUTOFF_WRITEBACK;
}
void bcache_dev_sectors_dirty_add(struct cache_set *, unsigned, uint64_t, int);
void bch_writeback_queue(struct cached_dev *);
void bch_writeback_add(struct cached_dev *);
void bch_sectors_dirty_init(struct cached_dev *dc);
void bch_cached_dev_writeback_init(struct cached_dev *);

#endif