/* request.h - bcache request-path declarations */
  1. #ifndef _BCACHE_REQUEST_H_
  2. #define _BCACHE_REQUEST_H_
  3. #include <linux/cgroup.h>
/*
 * struct search - per-request state carried through the bcache I/O path.
 *
 * NOTE(review): presumably allocated from bch_search_cache (declared below)
 * by a search_alloc() in request.c — confirm against the .c file.
 * Field order matters: search_alloc() zeroes only the fields up to (and not
 * including) insert_keys; see the comment before that member.
 */
struct search {
	/* Stack frame for bio_complete */
	struct closure cl;

	struct bcache_device *d;
	struct task_struct *task;

	struct bbio bio;
	struct bio *orig_bio;
	struct bio *cache_miss;
	unsigned cache_bio_sectors;

	/* Single-bit flags packed into one word. */
	unsigned recoverable:1;
	unsigned unaligned_bvec:1;
	unsigned write:1;
	unsigned writeback:1;

	/* IO error returned to s->bio */
	short error;
	unsigned long start_time;

	struct btree_op op;

	/* Anything past this point won't get zeroed in search_alloc() */
	struct keylist insert_keys;
};
  24. void bch_cache_read_endio(struct bio *, int);
  25. unsigned bch_get_congested(struct cache_set *);
  26. void bch_data_insert(struct closure *cl);
  27. void bch_cache_read_endio(struct bio *, int);
  28. void bch_open_buckets_free(struct cache_set *);
  29. int bch_open_buckets_alloc(struct cache_set *);
  30. void bch_cached_dev_request_init(struct cached_dev *dc);
  31. void bch_flash_dev_request_init(struct bcache_device *d);
  32. extern struct kmem_cache *bch_search_cache, *bch_passthrough_cache;
/*
 * Per-cgroup bcache settings and statistics.
 * The cgroup subsystem state is only embedded when cgroup support for
 * bcache is compiled in.
 */
struct bch_cgroup {
#ifdef CONFIG_CGROUP_BCACHE
	struct cgroup_subsys_state css;
#endif
	/*
	 * We subtract one from the index into bch_cache_modes[], so that
	 * default == -1; this makes it so the rest match up with d->cache_mode,
	 * and we use d->cache_mode if cgrp->cache_mode < 0
	 */
	short cache_mode;
	bool verify;
	struct cache_stat_collector stats;
};
/*
 * NOTE(review): presumably maps a bio to the bch_cgroup of its submitting
 * cgroup — confirm against the definition in request.c.
 */
struct bch_cgroup *bch_bio_to_cgroup(struct bio *bio);
  47. #endif /* _BCACHE_REQUEST_H_ */