request.h

#ifndef _BCACHE_REQUEST_H_
#define _BCACHE_REQUEST_H_

#include <linux/cgroup.h>

struct search {
	/* Stack frame for bio_complete */
	struct closure		cl;

	struct bcache_device	*d;
	struct task_struct	*task;

	struct bbio		bio;
	struct bio		*orig_bio;
	struct bio		*cache_miss;

	unsigned		cache_bio_sectors;

	unsigned		recoverable:1;
	unsigned		unaligned_bvec:1;
	unsigned		write:1;
	unsigned		writeback:1;

	/* IO error returned to s->bio */
	short			error;

	unsigned long		start_time;

	struct btree_op		op;

	/* Anything past this point won't get zeroed in search_alloc() */
	struct keylist		insert_keys;
};
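
/*
 * Illustrative sketch (not part of the original header): the comment on
 * insert_keys suggests search_alloc() zeroes only the leading portion of
 * the struct, e.g. with an offsetof()-based memset before initializing
 * the keylist by hand:
 *
 *	memset(s, 0, offsetof(struct search, insert_keys));
 *	bch_keylist_init(&s->insert_keys);
 */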

unsigned bch_get_congested(struct cache_set *);
void bch_data_insert(struct closure *cl);

void bch_open_buckets_free(struct cache_set *);
int bch_open_buckets_alloc(struct cache_set *);

void bch_cached_dev_request_init(struct cached_dev *dc);
void bch_flash_dev_request_init(struct bcache_device *d);

extern struct kmem_cache *bch_search_cache, *bch_passthrough_cache;

struct bch_cgroup {
#ifdef CONFIG_CGROUP_BCACHE
	struct cgroup_subsys_state	css;
#endif
	/*
	 * We subtract one from the index into bch_cache_modes[], so that
	 * default == -1; the rest then match up with d->cache_mode, and we
	 * use d->cache_mode if cgrp->cache_mode < 0.
	 */
	short				cache_mode;
	bool				verify;
	struct cache_stat_collector	stats;
};
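
/*
 * Illustrative sketch (not part of the original header): given the
 * encoding described above, a caller might resolve the effective cache
 * mode roughly like this, with BDEV_CACHE_MODE() standing in for however
 * the per-device default is read:
 *
 *	static unsigned cache_mode(struct cached_dev *dc, struct bio *bio)
 *	{
 *		int r = bch_bio_to_cgroup(bio)->cache_mode;
 *
 *		return r >= 0 ? r : BDEV_CACHE_MODE(&dc->sb);
 *	}
 */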

struct bch_cgroup *bch_bio_to_cgroup(struct bio *bio);

#endif /* _BCACHE_REQUEST_H_ */