#ifndef BLK_INTERNAL_H
#define BLK_INTERNAL_H

#include <linux/idr.h>

/* Amount of time in which a process may batch requests */
#define BLK_BATCH_TIME	(HZ/50UL)

/* Number of requests a "batching" process may submit */
#define BLK_BATCH_REQ	32

extern struct kmem_cache *blk_requestq_cachep;
extern struct kobj_type blk_queue_ktype;
extern struct ida blk_queue_ida;

static inline void __blk_get_queue(struct request_queue *q)
{
	kobject_get(&q->kobj);
}

int blk_init_rl(struct request_list *rl, struct request_queue *q,
		gfp_t gfp_mask);
void blk_exit_rl(struct request_list *rl);
void init_request_from_bio(struct request *req, struct bio *bio);
void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
			struct bio *bio);
int blk_rq_append_bio(struct request_queue *q, struct request *rq,
		      struct bio *bio);
void blk_queue_bypass_start(struct request_queue *q);
void blk_queue_bypass_end(struct request_queue *q);
void blk_dequeue_request(struct request *rq);
void __blk_queue_free_tags(struct request_queue *q);
bool __blk_end_bidi_request(struct request *rq, int error,
			    unsigned int nr_bytes, unsigned int bidi_bytes);

void blk_rq_timed_out_timer(unsigned long data);
void blk_delete_timer(struct request *);
void blk_add_timer(struct request *);
void __generic_unplug_device(struct request_queue *);

/*
 * Internal atomic flags for request handling
 */
enum rq_atomic_flags {
	REQ_ATOM_COMPLETE = 0,
};

/*
 * The EH timer and IO completion will both attempt to 'grab' the request;
 * make sure that only one of them succeeds.
 */
static inline int blk_mark_rq_complete(struct request *rq)
{
	return test_and_set_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
}

static inline void blk_clear_rq_complete(struct request *rq)
{
	clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags);
}
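
/*
 * Illustrative use of the 'grab' protocol above (a sketch loosely modelled
 * on the completion and timeout paths; handle_timeout() is a hypothetical
 * stand-in, not a real kernel function):
 *
 *	// normal IO completion path
 *	if (!blk_mark_rq_complete(rq))
 *		__blk_complete_request(rq);	// we won the race
 *
 *	// EH timeout path
 *	if (blk_mark_rq_complete(rq))
 *		return;				// completion got there first
 *	handle_timeout(rq);
 *
 * Whichever side sets REQ_ATOM_COMPLETE first owns the request; the other
 * backs off, so a request is never both completed and timed out.
 */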

/*
 * Internal elevator interface
 */
#define ELV_ON_HASH(rq)		(!hlist_unhashed(&(rq)->hash))

void blk_insert_flush(struct request *rq);
void blk_abort_flushes(struct request_queue *q);

static inline struct request *__elv_next_request(struct request_queue *q)
{
	struct request *rq;

	while (1) {
		if (!list_empty(&q->queue_head)) {
			rq = list_entry_rq(q->queue_head.next);
			return rq;
		}

		/*
		 * If a flush request is running and further flushes aren't
		 * queueable in the drive, we can hold the queue until the
		 * flush finishes. Even if we didn't, the driver couldn't
		 * dispatch the next requests and would have to requeue them.
		 * Holding can improve throughput too: given requests flush1,
		 * write1, flush2, flush1 is dispatched, the queue is held,
		 * and write1 isn't inserted. After flush1 finishes, flush2
		 * is dispatched; since the disk cache is already clean,
		 * flush2 finishes very quickly, so flush2 is effectively
		 * folded into flush1.
		 * While the queue is held, a flag is set to indicate that
		 * the queue should be restarted later. See flush_end_io()
		 * for details.
		 */
		if (q->flush_pending_idx != q->flush_running_idx &&
				!queue_flush_queueable(q)) {
			q->flush_queue_delayed = 1;
			return NULL;
		}
		if (unlikely(blk_queue_dead(q)) ||
		    !q->elevator->type->ops.elevator_dispatch_fn(q, 0))
			return NULL;
	}
}
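
/*
 * Sketch of a dispatch loop driving __elv_next_request() (simplified and
 * hypothetical; the real caller is blk_peek_request() in blk-core.c):
 *
 *	struct request *rq;
 *
 *	while ((rq = __elv_next_request(q)) != NULL) {
 *		if (!(rq->cmd_flags & REQ_STARTED)) {
 *			elv_activate_rq(q, rq);
 *			rq->cmd_flags |= REQ_STARTED;
 *		}
 *		// ... hand rq to the driver ...
 *	}
 *
 * A NULL return means the queue is dead, the elevator had nothing to
 * dispatch, or dispatch was deliberately delayed behind a flush.
 */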

static inline void elv_activate_rq(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.elevator_activate_req_fn)
		e->type->ops.elevator_activate_req_fn(q, rq);
}

static inline void elv_deactivate_rq(struct request_queue *q, struct request *rq)
{
	struct elevator_queue *e = q->elevator;

	if (e->type->ops.elevator_deactivate_req_fn)
		e->type->ops.elevator_deactivate_req_fn(q, rq);
}

#ifdef CONFIG_FAIL_IO_TIMEOUT
int blk_should_fake_timeout(struct request_queue *);
ssize_t part_timeout_show(struct device *, struct device_attribute *, char *);
ssize_t part_timeout_store(struct device *, struct device_attribute *,
				const char *, size_t);
#else
static inline int blk_should_fake_timeout(struct request_queue *q)
{
	return 0;
}
#endif

int ll_back_merge_fn(struct request_queue *q, struct request *req,
		     struct bio *bio);
int ll_front_merge_fn(struct request_queue *q, struct request *req,
		      struct bio *bio);
int attempt_back_merge(struct request_queue *q, struct request *rq);
int attempt_front_merge(struct request_queue *q, struct request *rq);
int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
				struct request *next);
void blk_recalc_rq_segments(struct request *rq);
void blk_rq_set_mixed_merge(struct request *rq);
bool blk_rq_merge_ok(struct request *rq, struct bio *bio);
int blk_try_merge(struct request *rq, struct bio *bio);

void blk_queue_congestion_threshold(struct request_queue *q);
int blk_dev_init(void);

/*
 * Return the threshold (number of used requests) at which the queue is
 * considered to be congested. It includes a little hysteresis to keep the
 * context switch rate down.
 */
static inline int queue_congestion_on_threshold(struct request_queue *q)
{
	return q->nr_congestion_on;
}

/*
 * The threshold at which a queue is considered to be uncongested
 */
static inline int queue_congestion_off_threshold(struct request_queue *q)
{
	return q->nr_congestion_off;
}
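
/*
 * The "on" threshold sits above the "off" threshold, so a queue hovering
 * around a single value doesn't flap between congested and uncongested on
 * every allocation/free. A hypothetical sketch of how the request
 * allocator might use the pair (not verbatim from blk-core.c):
 *
 *	if (rl->count[sync] + 1 >= queue_congestion_on_threshold(q))
 *		blk_set_queue_congested(q, sync);
 *	// ... later, when a request is freed ...
 *	if (rl->count[sync] < queue_congestion_off_threshold(q))
 *		blk_clear_queue_congested(q, sync);
 */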

/*
 * Contribute to IO statistics IFF:
 *
 * a) it's attached to a gendisk, and
 * b) the queue had IO stats enabled when this request was started, and
 * c) it's a file system request or a discard request
 */
static inline int blk_do_io_stat(struct request *rq)
{
	return rq->rq_disk &&
	       (rq->cmd_flags & REQ_IO_STAT) &&
	       (rq->cmd_type == REQ_TYPE_FS ||
	        (rq->cmd_flags & REQ_DISCARD));
}
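
/*
 * Typical gate in the accounting code (a sketch; the real helpers live in
 * blk-core.c):
 *
 *	static void blk_account_io_completion(struct request *req,
 *					      unsigned int bytes)
 *	{
 *		if (!blk_do_io_stat(req))
 *			return;
 *		// ... update the per-partition statistics ...
 *	}
 */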

/*
 * Internal io_context interface
 */
void get_io_context(struct io_context *ioc);
struct io_cq *ioc_lookup_icq(struct io_context *ioc, struct request_queue *q);
struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
			     gfp_t gfp_mask);
void ioc_clear_queue(struct request_queue *q);

int create_task_io_context(struct task_struct *task, gfp_t gfp_mask, int node);

/**
 * create_io_context - try to create task->io_context
 * @gfp_mask: allocation mask
 * @node: allocation node
 *
 * If %current->io_context is %NULL, allocate a new io_context and install
 * it. Returns %current->io_context, which may be %NULL if allocation
 * failed.
 *
 * Note that this function can't be called with IRQs disabled because
 * task_lock, which protects %current->io_context, is IRQ-unsafe.
 */
static inline struct io_context *create_io_context(gfp_t gfp_mask, int node)
{
	WARN_ON_ONCE(irqs_disabled());
	if (unlikely(!current->io_context))
		create_task_io_context(current, gfp_mask, node);
	return current->io_context;
}
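
/*
 * Example caller (a hypothetical sketch, loosely modelled on the request
 * allocation path): ensure an io_context exists before it's needed, but
 * tolerate failure since the ioc is best-effort.
 *
 *	struct io_context *ioc;
 *
 *	ioc = create_io_context(GFP_NOIO, q->node);
 *	if (ioc)
 *		get_io_context(ioc);	// hold a ref while we use it
 */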

/*
 * Internal throttling interface
 */
#ifdef CONFIG_BLK_DEV_THROTTLING
extern bool blk_throtl_bio(struct request_queue *q, struct bio *bio);
extern void blk_throtl_drain(struct request_queue *q);
extern int blk_throtl_init(struct request_queue *q);
extern void blk_throtl_exit(struct request_queue *q);
#else /* CONFIG_BLK_DEV_THROTTLING */
static inline bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
{
	return false;
}
static inline void blk_throtl_drain(struct request_queue *q) { }
static inline int blk_throtl_init(struct request_queue *q) { return 0; }
static inline void blk_throtl_exit(struct request_queue *q) { }
#endif /* CONFIG_BLK_DEV_THROTTLING */
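
/*
 * The !CONFIG_BLK_DEV_THROTTLING stubs above let callers stay #ifdef-free.
 * A sketch of a submission path (hypothetical; the real hook sits in the
 * generic bio submission code):
 *
 *	if (blk_throtl_bio(q, bio))
 *		return;		// throttled: bio was queued for later
 *	// ... dispatch bio normally ...
 *
 * With throttling compiled out, blk_throtl_bio() returns false and the
 * branch optimizes away.
 */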

#endif /* BLK_INTERNAL_H */