blk-tag.c

/*
 * Functions related to tagged command queuing
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>

#include "blk.h"

/**
 * blk_queue_find_tag - find a request by its tag and queue
 * @q:   The request queue for the device
 * @tag: The tag of the request
 *
 * Notes:
 *    Should be used when a device returns a tag and you want to match
 *    it with a request.
 *
 *    no locks need be held.
 **/
struct request *blk_queue_find_tag(struct request_queue *q, int tag)
{
        return blk_map_queue_find_tag(q->queue_tags, tag);
}
EXPORT_SYMBOL(blk_queue_find_tag);
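
/*
 * Illustrative sketch, not part of the original file: a driver's
 * completion path might use blk_queue_find_tag() to map a tag reported
 * by the hardware back to the request it was assigned to.  Wrapped in
 * #if 0 because example_hw_read_tag() and example_finish() are
 * hypothetical driver-specific helpers.
 */
#if 0
static void example_complete_one(struct request_queue *q)
{
        int tag = example_hw_read_tag();  /* hypothetical hw register read */
        struct request *rq;

        rq = blk_queue_find_tag(q, tag);
        if (unlikely(!rq)) {
                /* Hardware reported a tag the block layer never issued. */
                printk(KERN_ERR "example: spurious completion, tag %d\n", tag);
                return;
        }
        example_finish(rq);               /* hypothetical per-request completion */
}
#endif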
/**
 * __blk_free_tags - release a given set of tag maintenance info
 * @bqt:	the tag map to free
 *
 * Tries to free the specified @bqt.  Returns true if it was
 * actually freed and false if there are still references using it.
 */
static int __blk_free_tags(struct blk_queue_tag *bqt)
{
        int retval;

        retval = atomic_dec_and_test(&bqt->refcnt);
        if (retval) {
                BUG_ON(find_first_bit(bqt->tag_map, bqt->max_depth) <
                                                        bqt->max_depth);

                kfree(bqt->tag_index);
                bqt->tag_index = NULL;

                kfree(bqt->tag_map);
                bqt->tag_map = NULL;

                kfree(bqt);
        }

        return retval;
}
/**
 * __blk_queue_free_tags - release tag maintenance info
 * @q:  the request queue for the device
 *
 *  Notes:
 *    blk_cleanup_queue() will take care of calling this function, if tagging
 *    has been used. So there's no need to call this directly.
 **/
void __blk_queue_free_tags(struct request_queue *q)
{
        struct blk_queue_tag *bqt = q->queue_tags;

        if (!bqt)
                return;

        __blk_free_tags(bqt);

        q->queue_tags = NULL;
        queue_flag_clear_unlocked(QUEUE_FLAG_QUEUED, q);
}
/**
 * blk_free_tags - release a given set of tag maintenance info
 * @bqt:	the tag map to free
 *
 * For an externally managed @bqt, frees the map.  Callers of this
 * function must guarantee that all queues that might have been using
 * this tag map have been released.
 */
void blk_free_tags(struct blk_queue_tag *bqt)
{
        if (unlikely(!__blk_free_tags(bqt)))
                BUG();
}
EXPORT_SYMBOL(blk_free_tags);
/**
 * blk_queue_free_tags - release tag maintenance info
 * @q:  the request queue for the device
 *
 *  Notes:
 *    This is used to disable tagged queuing for a device, while
 *    leaving the queue itself in operation.
 **/
void blk_queue_free_tags(struct request_queue *q)
{
        queue_flag_clear_unlocked(QUEUE_FLAG_QUEUED, q);
}
EXPORT_SYMBOL(blk_queue_free_tags);
static int
init_tag_map(struct request_queue *q, struct blk_queue_tag *tags, int depth)
{
        struct request **tag_index;
        unsigned long *tag_map;
        int nr_ulongs;

        if (q && depth > q->nr_requests * 2) {
                depth = q->nr_requests * 2;
                printk(KERN_ERR "%s: adjusted depth to %d\n",
                       __func__, depth);
        }

        tag_index = kzalloc(depth * sizeof(struct request *), GFP_ATOMIC);
        if (!tag_index)
                goto fail;

        nr_ulongs = ALIGN(depth, BITS_PER_LONG) / BITS_PER_LONG;
        tag_map = kzalloc(nr_ulongs * sizeof(unsigned long), GFP_ATOMIC);
        if (!tag_map)
                goto fail;

        tags->real_max_depth = depth;
        tags->max_depth = depth;
        tags->tag_index = tag_index;
        tags->tag_map = tag_map;

        return 0;
fail:
        kfree(tag_index);
        return -ENOMEM;
}

static struct blk_queue_tag *__blk_queue_init_tags(struct request_queue *q,
                                                   int depth)
{
        struct blk_queue_tag *tags;

        tags = kmalloc(sizeof(struct blk_queue_tag), GFP_ATOMIC);
        if (!tags)
                goto fail;

        if (init_tag_map(q, tags, depth))
                goto fail;

        atomic_set(&tags->refcnt, 1);
        return tags;
fail:
        kfree(tags);
        return NULL;
}

/**
 * blk_init_tags - initialize the tag info for an external tag map
 * @depth:	the maximum queue depth supported
 **/
struct blk_queue_tag *blk_init_tags(int depth)
{
        return __blk_queue_init_tags(NULL, depth);
}
EXPORT_SYMBOL(blk_init_tags);
/**
 * blk_queue_init_tags - initialize the queue tag info
 * @q:  the request queue for the device
 * @depth:  the maximum queue depth supported
 * @tags: the tag map to use
 *
 * Queue lock must be held here if the function is called to resize an
 * existing map.
 **/
int blk_queue_init_tags(struct request_queue *q, int depth,
                        struct blk_queue_tag *tags)
{
        int rc;

        BUG_ON(tags && q->queue_tags && tags != q->queue_tags);

        if (!tags && !q->queue_tags) {
                tags = __blk_queue_init_tags(q, depth);

                if (!tags)
                        goto fail;

        } else if (q->queue_tags) {
                rc = blk_queue_resize_tags(q, depth);
                if (rc)
                        return rc;
                queue_flag_set(QUEUE_FLAG_QUEUED, q);
                return 0;
        } else
                atomic_inc(&tags->refcnt);

        /*
         * assign it, all done
         */
        q->queue_tags = tags;
        queue_flag_set_unlocked(QUEUE_FLAG_QUEUED, q);
        INIT_LIST_HEAD(&q->tag_busy_list);
        return 0;
fail:
        kfree(tags);
        return -ENOMEM;
}
EXPORT_SYMBOL(blk_queue_init_tags);
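
/*
 * Illustrative sketch, not part of the original file: a host driver
 * with several logical units can allocate one map with blk_init_tags()
 * and hand it to each queue via blk_queue_init_tags(), so all units
 * draw from the same tag space.  EXAMPLE_DEPTH and the example_*
 * functions are hypothetical, hence the #if 0.
 */
#if 0
#define EXAMPLE_DEPTH	64

static struct blk_queue_tag *example_shared_tags;

static int example_share_tags(struct request_queue *qa,
                              struct request_queue *qb)
{
        example_shared_tags = blk_init_tags(EXAMPLE_DEPTH);
        if (!example_shared_tags)
                return -ENOMEM;

        /* Each queue takes its own reference on the shared map. */
        if (blk_queue_init_tags(qa, EXAMPLE_DEPTH, example_shared_tags))
                return -ENOMEM;
        return blk_queue_init_tags(qb, EXAMPLE_DEPTH, example_shared_tags);
}

static void example_release_tags(void)
{
        /*
         * Legal only once every queue that used the map has been torn
         * down (blk_cleanup_queue() drops each queue's reference);
         * otherwise blk_free_tags() hits the BUG() above.
         */
        blk_free_tags(example_shared_tags);
}
#endif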
/**
 * blk_queue_resize_tags - change the queueing depth
 * @q:  the request queue for the device
 * @new_depth: the new max command queueing depth
 *
 *  Notes:
 *    Must be called with the queue lock held.
 **/
int blk_queue_resize_tags(struct request_queue *q, int new_depth)
{
        struct blk_queue_tag *bqt = q->queue_tags;
        struct request **tag_index;
        unsigned long *tag_map;
        int max_depth, nr_ulongs;

        if (!bqt)
                return -ENXIO;
        /*
         * If we already have a large enough real_max_depth, just adjust
         * max_depth.  *NOTE* as requests with tag value between
         * new_depth and real_max_depth can be in-flight, the tag map
         * cannot be shrunk blindly here.
         */
        if (new_depth <= bqt->real_max_depth) {
                bqt->max_depth = new_depth;
                return 0;
        }
        /*
         * Currently cannot replace a shared tag map with a new
         * one, so error out if this is the case.
         */
        if (atomic_read(&bqt->refcnt) != 1)
                return -EBUSY;

        /*
         * save the old state info, so we can copy it back
         */
        tag_index = bqt->tag_index;
        tag_map = bqt->tag_map;
        max_depth = bqt->real_max_depth;

        if (init_tag_map(q, bqt, new_depth))
                return -ENOMEM;

        memcpy(bqt->tag_index, tag_index, max_depth * sizeof(struct request *));
        nr_ulongs = ALIGN(max_depth, BITS_PER_LONG) / BITS_PER_LONG;
        memcpy(bqt->tag_map, tag_map, nr_ulongs * sizeof(unsigned long));

        kfree(tag_index);
        kfree(tag_map);
        return 0;
}
EXPORT_SYMBOL(blk_queue_resize_tags);
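
/*
 * Illustrative sketch, not part of the original file: a driver that
 * sees the device report a queue-full condition might lower the usable
 * depth with blk_queue_resize_tags().  Shrinking only adjusts
 * max_depth, so tags already in flight above the new depth stay valid.
 * example_throttle() is hypothetical; the locking follows the
 * queue-lock requirement documented above.
 */
#if 0
static int example_throttle(struct request_queue *q, int new_depth)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(q->queue_lock, flags);
        ret = blk_queue_resize_tags(q, new_depth);
        spin_unlock_irqrestore(q->queue_lock, flags);
        return ret;
}
#endif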
/**
 * blk_queue_end_tag - end tag operations for a request
 * @q:  the request queue for the device
 * @rq: the request that has completed
 *
 *  Description:
 *    Typically called when end_that_request_first() returns %0, meaning
 *    all transfers have been done for a request. It's important to call
 *    this function before end_that_request_last(), as that will put the
 *    request back on the free list thus corrupting the internal tag list.
 *
 *  Notes:
 *   queue lock must be held.
 **/
void blk_queue_end_tag(struct request_queue *q, struct request *rq)
{
        struct blk_queue_tag *bqt = q->queue_tags;
        int tag = rq->tag;

        BUG_ON(tag == -1);

        if (unlikely(tag >= bqt->real_max_depth))
                /*
                 * This can happen after tag depth has been reduced.
                 * FIXME: how about a warning or info message here?
                 */
                return;

        list_del_init(&rq->queuelist);
        rq->cmd_flags &= ~REQ_QUEUED;
        rq->tag = -1;

        if (unlikely(bqt->tag_index[tag] == NULL))
                printk(KERN_ERR "%s: tag %d is missing\n",
                       __func__, tag);

        bqt->tag_index[tag] = NULL;

        if (unlikely(!test_bit(tag, bqt->tag_map))) {
                printk(KERN_ERR "%s: attempt to clear non-busy tag (%d)\n",
                       __func__, tag);
                return;
        }

        /*
         * The tag_map bit acts as a lock for tag_index[bit], so we need
         * unlock memory barrier semantics.
         */
        clear_bit_unlock(tag, bqt->tag_map);
}
EXPORT_SYMBOL(blk_queue_end_tag);
/**
 * blk_queue_start_tag - find a free tag and assign it
 * @q:  the request queue for the device
 * @rq:  the block request that needs tagging
 *
 *  Description:
 *    This can either be used as a stand-alone helper, or possibly be
 *    assigned as the queue &prep_rq_fn (in which case &struct request
 *    automagically gets a tag assigned). Note that this function
 *    assumes that any type of request can be queued! If this is not
 *    true for your device, you must check the request type before
 *    calling this function. The request will also be removed from
 *    the request queue, so it's the driver's responsibility to re-add
 *    it if it should need to be restarted for some reason.
 *
 *  Notes:
 *   queue lock must be held.
 **/
int blk_queue_start_tag(struct request_queue *q, struct request *rq)
{
        struct blk_queue_tag *bqt = q->queue_tags;
        unsigned max_depth;
        int tag;

        if (unlikely((rq->cmd_flags & REQ_QUEUED))) {
                printk(KERN_ERR
                       "%s: request %p for device [%s] already tagged %d\n",
                       __func__, rq,
                       rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->tag);
                BUG();
        }

        /*
         * Protect against shared tag maps, as we may not have exclusive
         * access to the tag map.
         *
         * We reserve a few tags just for sync IO, since we don't want
         * to starve sync IO on behalf of flooding async IO.
         */
        max_depth = bqt->max_depth;
        if (!rq_is_sync(rq) && max_depth > 1) {
                max_depth -= 2;
                if (!max_depth)
                        max_depth = 1;
                if (q->in_flight[BLK_RW_ASYNC] > max_depth)
                        return 1;
        }

        do {
                tag = find_first_zero_bit(bqt->tag_map, max_depth);
                if (tag >= max_depth)
                        return 1;

        } while (test_and_set_bit_lock(tag, bqt->tag_map));
        /*
         * We need lock ordering semantics given by test_and_set_bit_lock.
         * See blk_queue_end_tag for details.
         */

        rq->cmd_flags |= REQ_QUEUED;
        rq->tag = tag;
        bqt->tag_index[tag] = rq;
        blk_start_request(rq);
        list_add(&rq->queuelist, &q->tag_busy_list);
        return 0;
}
EXPORT_SYMBOL(blk_queue_start_tag);
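
/*
 * Illustrative sketch, not part of the original file: the usual
 * pairing of blk_queue_start_tag() in a driver's request_fn with
 * blk_queue_end_tag() on completion, both under the queue lock.
 * blk_queue_start_tag() already dequeues the request (it calls
 * blk_start_request() above), so the request_fn only peeks.
 * example_issue() and example_complete_rq() are hypothetical.
 */
#if 0
static void example_request_fn(struct request_queue *q)
{
        struct request *rq;

        while ((rq = blk_peek_request(q)) != NULL) {
                /* Non-zero return means no tag is free: retry later. */
                if (blk_queue_start_tag(q, rq))
                        break;
                example_issue(rq);      /* hand rq->tag to the hardware */
        }
}

/* Runs with the queue lock held once the transfer has finished. */
static void example_complete_rq(struct request_queue *q, struct request *rq)
{
        blk_queue_end_tag(q, rq);       /* must precede final completion */
        __blk_end_request_all(rq, 0);
}
#endif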
/**
 * blk_queue_invalidate_tags - invalidate all pending tags
 * @q:  the request queue for the device
 *
 *  Description:
 *   Hardware conditions may dictate a need to stop all pending requests.
 *   In this case, we will safely clear the block side of the tag queue and
 *   re-add all requests to the request queue in the right order.
 *
 *  Notes:
 *   queue lock must be held.
 **/
void blk_queue_invalidate_tags(struct request_queue *q)
{
        struct list_head *tmp, *n;

        list_for_each_safe(tmp, n, &q->tag_busy_list)
                blk_requeue_request(q, list_entry_rq(tmp));
}
EXPORT_SYMBOL(blk_queue_invalidate_tags);
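
/*
 * Illustrative sketch, not part of the original file: after a
 * controller reset, a driver can push every outstanding tagged request
 * back onto the queue for re-issue and restart dispatching.
 * example_reset_hw() is hypothetical; the queue lock is assumed held,
 * as blk_queue_invalidate_tags() and blk_start_queue() both require.
 */
#if 0
static void example_recover(struct request_queue *q)
{
        example_reset_hw();             /* hypothetical controller reset */
        blk_queue_invalidate_tags(q);   /* requeue all outstanding requests */
        blk_start_queue(q);             /* let the request_fn run again */
}
#endif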