request.h

#ifndef _BCACHE_REQUEST_H_
#define _BCACHE_REQUEST_H_

#include <linux/cgroup.h>

struct search {
	/* Stack frame for bio_complete */
	struct closure		cl;

	struct bcache_device	*d;
	struct task_struct	*task;

	struct bbio		bio;
	struct bio		*orig_bio;
	struct bio		*cache_miss;
	unsigned		cache_bio_sectors;

	unsigned		recoverable:1;
	unsigned		unaligned_bvec:1;
	unsigned		write:1;
	unsigned		writeback:1;

	/* IO error returned to s->bio */
	short			error;
	unsigned long		start_time;

	/* Anything past op->keys won't get zeroed in do_bio_hook */
	struct btree_op		op;
};

void bch_cache_read_endio(struct bio *, int);
int bch_get_congested(struct cache_set *);
void bch_insert_data(struct closure *cl);
void bch_btree_insert_async(struct closure *);

void bch_open_buckets_free(struct cache_set *);
int bch_open_buckets_alloc(struct cache_set *);

void bch_cached_dev_request_init(struct cached_dev *dc);
void bch_flash_dev_request_init(struct bcache_device *d);

extern struct kmem_cache *bch_search_cache, *bch_passthrough_cache;

struct bch_cgroup {
#ifdef CONFIG_CGROUP_BCACHE
	struct cgroup_subsys_state	css;
#endif
	/*
	 * We subtract one from the index into bch_cache_modes[], so that
	 * default == -1; this makes it so the rest match up with
	 * d->cache_mode, and we use d->cache_mode if cgrp->cache_mode < 0.
	 */
	short				cache_mode;
	bool				verify;
	struct cache_stat_collector	stats;
};

struct bch_cgroup *bch_bio_to_cgroup(struct bio *bio);
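
/*
 * Illustrative sketch only (not part of this header): given the cache_mode
 * convention documented above, a caller is expected to prefer the per-cgroup
 * mode and fall back to the backing device's mode when the cgroup value is
 * negative. The dc and bio variables and the BDEV_CACHE_MODE() superblock
 * helper are assumed here for the example:
 *
 *	int mode = bch_bio_to_cgroup(bio)->cache_mode;
 *	if (mode < 0)
 *		mode = BDEV_CACHE_MODE(&dc->sb);
 */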

#endif /* _BCACHE_REQUEST_H_ */