blk-tag.c

/*
 * Functions related to tagged command queuing
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>

/**
 * blk_queue_find_tag - find a request by its tag and queue
 * @q: The request queue for the device
 * @tag: The tag of the request
 *
 * Notes:
 *    Should be used when a device returns a tag and you want to match
 *    it with a request.
 *
 *    No locks need be held.
 **/
struct request *blk_queue_find_tag(struct request_queue *q, int tag)
{
	return blk_map_queue_find_tag(q->queue_tags, tag);
}
EXPORT_SYMBOL(blk_queue_find_tag);
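
/*
 * Usage sketch (illustrative, not part of the original file): an
 * interrupt handler mapping a tag echoed by the hardware back to the
 * request that owns it.  my_hw_read_tag() is a hypothetical
 * device-specific helper.
 */
#if 0
static void my_complete_one(struct request_queue *q)
{
	int tag = my_hw_read_tag();	/* hypothetical: tag from device */
	struct request *rq = blk_queue_find_tag(q, tag);

	if (!rq)
		return;		/* stale or unknown tag */
	/* ... complete the transfer described by rq ... */
}
#endif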

/**
 * __blk_free_tags - release a given set of tag maintenance info
 * @bqt: the tag map to free
 *
 * Tries to free the specified @bqt.  Returns true if it was
 * actually freed and false if there are still references using it.
 */
static int __blk_free_tags(struct blk_queue_tag *bqt)
{
	int retval;

	retval = atomic_dec_and_test(&bqt->refcnt);
	if (retval) {
		BUG_ON(bqt->busy);

		kfree(bqt->tag_index);
		bqt->tag_index = NULL;

		kfree(bqt->tag_map);
		bqt->tag_map = NULL;

		kfree(bqt);
	}
	return retval;
}

/**
 * __blk_queue_free_tags - release tag maintenance info
 * @q: the request queue for the device
 *
 * Notes:
 *    blk_cleanup_queue() will take care of calling this function, if tagging
 *    has been used. So there's no need to call this directly.
 **/
void __blk_queue_free_tags(struct request_queue *q)
{
	struct blk_queue_tag *bqt = q->queue_tags;

	if (!bqt)
		return;

	__blk_free_tags(bqt);

	q->queue_tags = NULL;
	q->queue_flags &= ~(1 << QUEUE_FLAG_QUEUED);
}

/**
 * blk_free_tags - release a given set of tag maintenance info
 * @bqt: the tag map to free
 *
 * For an externally managed @bqt, frees the map.  Callers of this
 * function must guarantee to have released all the queues that
 * might have been using this tag map.
 */
void blk_free_tags(struct blk_queue_tag *bqt)
{
	if (unlikely(!__blk_free_tags(bqt)))
		BUG();
}
EXPORT_SYMBOL(blk_free_tags);

/**
 * blk_queue_free_tags - release tag maintenance info
 * @q: the request queue for the device
 *
 * Notes:
 *    This is used to disable tagged queuing to a device, while leaving
 *    the queue in function.
 **/
void blk_queue_free_tags(struct request_queue *q)
{
	clear_bit(QUEUE_FLAG_QUEUED, &q->queue_flags);
}
EXPORT_SYMBOL(blk_queue_free_tags);
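
/*
 * Lifecycle sketch (illustrative): blk_queue_free_tags() merely turns
 * tagged queuing off while the queue keeps working; the map itself is
 * only released once the last user drops its reference.  q and
 * shared_tags are hypothetical.
 */
#if 0
	blk_queue_free_tags(q);		/* stop tagging, queue stays live */
	/* ... */
	blk_cleanup_queue(q);		/* drops the queue's map reference */
	blk_free_tags(shared_tags);	/* final reference of an external map */
#endif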

static int
init_tag_map(struct request_queue *q, struct blk_queue_tag *tags, int depth)
{
	struct request **tag_index;
	unsigned long *tag_map;
	int nr_ulongs;

	if (q && depth > q->nr_requests * 2) {
		depth = q->nr_requests * 2;
		printk(KERN_ERR "%s: adjusted depth to %d\n",
		       __func__, depth);
	}

	tag_index = kzalloc(depth * sizeof(struct request *), GFP_ATOMIC);
	if (!tag_index)
		goto fail;

	nr_ulongs = ALIGN(depth, BITS_PER_LONG) / BITS_PER_LONG;
	tag_map = kzalloc(nr_ulongs * sizeof(unsigned long), GFP_ATOMIC);
	if (!tag_map)
		goto fail;

	tags->real_max_depth = depth;
	tags->max_depth = depth;
	tags->tag_index = tag_index;
	tags->tag_map = tag_map;

	return 0;
fail:
	kfree(tag_index);
	return -ENOMEM;
}
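
/*
 * Sizing example (illustrative): with depth = 66 and BITS_PER_LONG = 64,
 * ALIGN(66, 64) = 128, so nr_ulongs = 2 and the bitmap covers tag bits
 * 0..127; only bits below max_depth are ever searched or set.
 */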

static struct blk_queue_tag *__blk_queue_init_tags(struct request_queue *q,
						   int depth)
{
	struct blk_queue_tag *tags;

	tags = kmalloc(sizeof(struct blk_queue_tag), GFP_ATOMIC);
	if (!tags)
		goto fail;

	if (init_tag_map(q, tags, depth))
		goto fail;

	tags->busy = 0;
	atomic_set(&tags->refcnt, 1);
	return tags;
fail:
	kfree(tags);
	return NULL;
}

/**
 * blk_init_tags - initialize the tag info for an external tag map
 * @depth: the maximum queue depth supported
 **/
struct blk_queue_tag *blk_init_tags(int depth)
{
	return __blk_queue_init_tags(NULL, depth);
}
EXPORT_SYMBOL(blk_init_tags);
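
/*
 * Sharing sketch (illustrative): one externally managed tag map feeding
 * two queues, as a host with multiple logical units might use.  q1, q2
 * and the depth of 64 are hypothetical; error unwinding is elided.
 */
#if 0
	struct blk_queue_tag *shared_tags = blk_init_tags(64);

	if (!shared_tags)
		return -ENOMEM;
	if (blk_queue_init_tags(q1, 64, shared_tags) ||
	    blk_queue_init_tags(q2, 64, shared_tags))
		goto undo;
#endif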

/**
 * blk_queue_init_tags - initialize the queue tag info
 * @q: the request queue for the device
 * @depth: the maximum queue depth supported
 * @tags: the tag map to use, or NULL to have one allocated
 **/
int blk_queue_init_tags(struct request_queue *q, int depth,
			struct blk_queue_tag *tags)
{
	int rc;

	BUG_ON(tags && q->queue_tags && tags != q->queue_tags);

	if (!tags && !q->queue_tags) {
		tags = __blk_queue_init_tags(q, depth);

		if (!tags)
			goto fail;
	} else if (q->queue_tags) {
		rc = blk_queue_resize_tags(q, depth);
		if (rc)
			return rc;
		set_bit(QUEUE_FLAG_QUEUED, &q->queue_flags);
		return 0;
	} else
		atomic_inc(&tags->refcnt);

	/*
	 * assign it, all done
	 */
	q->queue_tags = tags;
	q->queue_flags |= (1 << QUEUE_FLAG_QUEUED);
	INIT_LIST_HEAD(&q->tag_busy_list);
	return 0;
fail:
	kfree(tags);
	return -ENOMEM;
}
EXPORT_SYMBOL(blk_queue_init_tags);
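
/*
 * Probe-time sketch (illustrative): a driver enabling tagged queuing on
 * its own queue with a private map.  MY_QUEUE_DEPTH is a hypothetical
 * constant; passing NULL asks for a fresh map to be allocated.
 */
#if 0
static int my_setup_tcq(struct request_queue *q)
{
	return blk_queue_init_tags(q, MY_QUEUE_DEPTH, NULL);
}
#endif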

/**
 * blk_queue_resize_tags - change the queueing depth
 * @q: the request queue for the device
 * @new_depth: the new max command queueing depth
 *
 * Notes:
 *    Must be called with the queue lock held.
 **/
int blk_queue_resize_tags(struct request_queue *q, int new_depth)
{
	struct blk_queue_tag *bqt = q->queue_tags;
	struct request **tag_index;
	unsigned long *tag_map;
	int max_depth, nr_ulongs;

	if (!bqt)
		return -ENXIO;

	/*
	 * If we already have a large enough real_max_depth, just
	 * adjust max_depth.  *NOTE* as requests with tag value
	 * between new_depth and real_max_depth can be in-flight, the
	 * tag map cannot be shrunk blindly here.
	 */
	if (new_depth <= bqt->real_max_depth) {
		bqt->max_depth = new_depth;
		return 0;
	}

	/*
	 * Currently cannot replace a shared tag map with a new
	 * one, so error out if this is the case
	 */
	if (atomic_read(&bqt->refcnt) != 1)
		return -EBUSY;

	/*
	 * save the old state info, so we can copy it back
	 */
	tag_index = bqt->tag_index;
	tag_map = bqt->tag_map;
	max_depth = bqt->real_max_depth;

	if (init_tag_map(q, bqt, new_depth))
		return -ENOMEM;

	memcpy(bqt->tag_index, tag_index, max_depth * sizeof(struct request *));
	nr_ulongs = ALIGN(max_depth, BITS_PER_LONG) / BITS_PER_LONG;
	memcpy(bqt->tag_map, tag_map, nr_ulongs * sizeof(unsigned long));

	kfree(tag_index);
	kfree(tag_map);
	return 0;
}
EXPORT_SYMBOL(blk_queue_resize_tags);
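
/*
 * Resize sketch (illustrative): growing the depth after the device
 * negotiates more outstanding commands.  The queue lock must be held,
 * as noted above; new_depth is hypothetical, and -EBUSY signals a
 * shared map that cannot be grown in place.
 */
#if 0
	unsigned long flags;
	int ret;

	spin_lock_irqsave(q->queue_lock, flags);
	ret = blk_queue_resize_tags(q, new_depth);
	spin_unlock_irqrestore(q->queue_lock, flags);
#endif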

/**
 * blk_queue_end_tag - end tag operations for a request
 * @q: the request queue for the device
 * @rq: the request that has completed
 *
 * Description:
 *    Typically called when end_that_request_first() returns 0, meaning
 *    all transfers have been done for a request.  It's important to call
 *    this function before end_that_request_last(), as that will put the
 *    request back on the free list thus corrupting the internal tag list.
 *
 * Notes:
 *    queue lock must be held.
 **/
void blk_queue_end_tag(struct request_queue *q, struct request *rq)
{
	struct blk_queue_tag *bqt = q->queue_tags;
	int tag = rq->tag;

	BUG_ON(tag == -1);

	if (unlikely(tag >= bqt->real_max_depth))
		/*
		 * This can happen after tag depth has been reduced.
		 * FIXME: how about a warning or info message here?
		 */
		return;

	list_del_init(&rq->queuelist);
	rq->cmd_flags &= ~REQ_QUEUED;
	rq->tag = -1;

	if (unlikely(bqt->tag_index[tag] == NULL))
		printk(KERN_ERR "%s: tag %d is missing\n",
		       __func__, tag);

	bqt->tag_index[tag] = NULL;

	if (unlikely(!test_bit(tag, bqt->tag_map))) {
		printk(KERN_ERR "%s: attempt to clear non-busy tag (%d)\n",
		       __func__, tag);
		return;
	}

	/*
	 * The tag_map bit acts as a lock for tag_index[bit], so we need
	 * unlock memory barrier semantics.
	 */
	clear_bit_unlock(tag, bqt->tag_map);
	bqt->busy--;
}
EXPORT_SYMBOL(blk_queue_end_tag);
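
/*
 * Completion sketch (illustrative), assuming the end_that_request_*
 * helpers named in the comment above; uptodate is hard-coded to 1 and
 * error handling is elided.
 */
#if 0
	/* queue lock held */
	if (!end_that_request_first(rq, 1, rq->hard_nr_sectors)) {
		blk_queue_end_tag(q, rq);	/* must precede the final put */
		end_that_request_last(rq, 1);
	}
#endif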

/**
 * blk_queue_start_tag - find a free tag and assign it
 * @q: the request queue for the device
 * @rq: the block request that needs tagging
 *
 * Description:
 *    This can either be used as a stand-alone helper, or possibly be
 *    assigned as the queue &prep_rq_fn (in which case &struct request
 *    automagically gets a tag assigned).  Note that this function
 *    assumes that any type of request can be queued!  If this is not
 *    true for your device, you must check the request type before
 *    calling this function.  The request will also be removed from
 *    the request queue, so it is the driver's responsibility to re-add
 *    it if it should need to be restarted for some reason.
 *
 * Notes:
 *    queue lock must be held.
 **/
int blk_queue_start_tag(struct request_queue *q, struct request *rq)
{
	struct blk_queue_tag *bqt = q->queue_tags;
	int tag;

	if (unlikely((rq->cmd_flags & REQ_QUEUED))) {
		printk(KERN_ERR
		       "%s: request %p for device [%s] already tagged %d\n",
		       __func__, rq,
		       rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->tag);
		BUG();
	}

	/*
	 * Protect against shared tag maps, as we may not have exclusive
	 * access to the tag map.
	 */
	do {
		tag = find_first_zero_bit(bqt->tag_map, bqt->max_depth);
		if (tag >= bqt->max_depth)
			return 1;

	} while (test_and_set_bit_lock(tag, bqt->tag_map));
	/*
	 * We need lock ordering semantics given by test_and_set_bit_lock.
	 * See blk_queue_end_tag for details.
	 */

	rq->cmd_flags |= REQ_QUEUED;
	rq->tag = tag;
	bqt->tag_index[tag] = rq;
	blkdev_dequeue_request(rq);
	list_add(&rq->queuelist, &q->tag_busy_list);
	bqt->busy++;
	return 0;
}
EXPORT_SYMBOL(blk_queue_start_tag);
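
/*
 * request_fn sketch (illustrative): tagging requests as they come off
 * the queue.  A return of 1 means the tag map is exhausted, so the
 * request is left on the queue for a later pass.  my_issue() is a
 * hypothetical hardware submit routine.
 */
#if 0
static void my_request_fn(struct request_queue *q)
{
	struct request *rq;

	while ((rq = elv_next_request(q)) != NULL) {
		if (blk_queue_start_tag(q, rq))
			break;	/* no free tag; retry on next completion */
		my_issue(rq);	/* rq was already dequeued by start_tag */
	}
}
#endif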

/**
 * blk_queue_invalidate_tags - invalidate all pending tags
 * @q: the request queue for the device
 *
 * Description:
 *    Hardware conditions may dictate a need to stop all pending requests.
 *    In this case, we will safely clear the block side of the tag queue and
 *    re-add all requests to the request queue in the right order.
 *
 * Notes:
 *    queue lock must be held.
 **/
void blk_queue_invalidate_tags(struct request_queue *q)
{
	struct list_head *tmp, *n;

	list_for_each_safe(tmp, n, &q->tag_busy_list)
		blk_requeue_request(q, list_entry_rq(tmp));
}
EXPORT_SYMBOL(blk_queue_invalidate_tags);
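
/*
 * Recovery sketch (illustrative): after something like a bus reset,
 * requeue every tagged command so the elevator can reissue them in
 * order.  The queue lock must be held, as noted above.
 */
#if 0
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	blk_queue_invalidate_tags(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
#endif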