bsg-lib.c

/*
 * BSG helper library
 *
 * Copyright (C) 2008 James Smart, Emulex Corporation
 * Copyright (C) 2011 Red Hat, Inc. All rights reserved.
 * Copyright (C) 2011 Mike Christie
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/scatterlist.h>
#include <linux/bsg-lib.h>
#include <scsi/scsi_cmnd.h>

/**
 * bsg_destroy_job - routine to teardown/delete a bsg job
 * @job: bsg_job that is to be torn down
 */
static void bsg_destroy_job(struct bsg_job *job)
{
        put_device(job->dev);   /* release reference for the request */

        kfree(job->request_payload.sg_list);
        kfree(job->reply_payload.sg_list);
        kfree(job);
}

/**
 * bsg_job_done - completion routine for bsg requests
 * @job: bsg_job that is complete
 * @result: job reply result
 * @reply_payload_rcv_len: length of payload recvd
 *
 * The LLD should call this when the bsg job has completed.
 */
void bsg_job_done(struct bsg_job *job, int result,
                  unsigned int reply_payload_rcv_len)
{
        struct request *req = job->req;
        struct request *rsp = req->next_rq;
        int err;

        err = job->req->errors = result;
        if (err < 0)
                /* we're only returning the result field in the reply */
                job->req->sense_len = sizeof(u32);
        else
                job->req->sense_len = job->reply_len;
        /* we assume all request payload was transferred, residual == 0 */
        req->resid_len = 0;

        if (rsp) {
                WARN_ON(reply_payload_rcv_len > rsp->resid_len);

                /* set reply (bidi) residual */
                rsp->resid_len -= min(reply_payload_rcv_len, rsp->resid_len);
        }

        blk_complete_request(req);
}
EXPORT_SYMBOL_GPL(bsg_job_done);
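
/*
 * Example (an illustrative sketch only, compiled out; not part of the
 * original file): a typical LLD job handler hands the command described by
 * job->request and job->request_payload to its hardware and then calls
 * bsg_job_done() once that command finishes -- shown inline here for
 * brevity, though real drivers usually call it from their completion or
 * interrupt path.  my_bsg_job_fn() and my_hw_send() are hypothetical driver
 * helpers, not part of this library.
 */
#if 0
static int my_bsg_job_fn(struct bsg_job *job)
{
        int err;

        /* hand job->request and job->request_payload.sg_list to the HW */
        err = my_hw_send(job);
        if (err < 0) {
                /* negative result: only the result field is returned */
                bsg_job_done(job, err, 0);
                return 0;
        }

        /* report success and the length of the reply payload received */
        bsg_job_done(job, 0, job->reply_payload.payload_len);
        return 0;
}
#endif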

/**
 * bsg_softirq_done - softirq done routine for destroying the bsg requests
 * @rq: BSG request that holds the job to be destroyed
 */
static void bsg_softirq_done(struct request *rq)
{
        struct bsg_job *job = rq->special;

        blk_end_request_all(rq, rq->errors);
        bsg_destroy_job(job);
}

static int bsg_map_buffer(struct bsg_buffer *buf, struct request *req)
{
        size_t sz = (sizeof(struct scatterlist) * req->nr_phys_segments);

        BUG_ON(!req->nr_phys_segments);

        buf->sg_list = kzalloc(sz, GFP_KERNEL);
        if (!buf->sg_list)
                return -ENOMEM;
        sg_init_table(buf->sg_list, req->nr_phys_segments);
        buf->sg_cnt = blk_rq_map_sg(req->q, req, buf->sg_list);
        buf->payload_len = blk_rq_bytes(req);
        return 0;
}

/**
 * bsg_create_job - create the bsg_job structure for the bsg request
 * @dev: device that is being sent the bsg request
 * @req: BSG request that needs a job structure
 */
static int bsg_create_job(struct device *dev, struct request *req)
{
        struct request *rsp = req->next_rq;
        struct request_queue *q = req->q;
        struct bsg_job *job;
        int ret;

        BUG_ON(req->special);

        job = kzalloc(sizeof(struct bsg_job) + q->bsg_job_size, GFP_KERNEL);
        if (!job)
                return -ENOMEM;

        req->special = job;
        job->req = req;
        if (q->bsg_job_size)
                job->dd_data = (void *)&job[1];
        job->request = req->cmd;
        job->request_len = req->cmd_len;
        job->reply = req->sense;
        job->reply_len = SCSI_SENSE_BUFFERSIZE; /* Size of sense buffer
                                                 * allocated */
        if (req->bio) {
                ret = bsg_map_buffer(&job->request_payload, req);
                if (ret)
                        goto failjob_rls_job;
        }
        if (rsp && rsp->bio) {
                ret = bsg_map_buffer(&job->reply_payload, rsp);
                if (ret)
                        goto failjob_rls_rqst_payload;
        }
        job->dev = dev;
        /* take a reference for the request */
        get_device(job->dev);
        return 0;

failjob_rls_rqst_payload:
        kfree(job->request_payload.sg_list);
failjob_rls_job:
        kfree(job);
        return -ENOMEM;
}

/*
 * bsg_goose_queue - restart queue in case it was stopped
 * @q: request q to be restarted
 */
void bsg_goose_queue(struct request_queue *q)
{
        if (!q)
                return;

        blk_run_queue_async(q);
}
EXPORT_SYMBOL_GPL(bsg_goose_queue);

/**
 * bsg_request_fn - generic handler for bsg requests
 * @q: request queue to manage
 *
 * On error, bsg_create_job() returns a -Exyz error value, which is then
 * stored in req->errors.
 *
 * Drivers/subsys should pass this to the queue init function.
 */
void bsg_request_fn(struct request_queue *q)
{
        struct device *dev = q->queuedata;
        struct request *req;
        struct bsg_job *job;
        int ret;

        if (!get_device(dev))
                return;

        while (1) {
                req = blk_fetch_request(q);
                if (!req)
                        break;
                spin_unlock_irq(q->queue_lock);

                ret = bsg_create_job(dev, req);
                if (ret) {
                        req->errors = ret;
                        blk_end_request_all(req, ret);
                        spin_lock_irq(q->queue_lock);
                        continue;
                }

                job = req->special;
                ret = q->bsg_job_fn(job);
                spin_lock_irq(q->queue_lock);
                if (ret)
                        break;
        }

        spin_unlock_irq(q->queue_lock);
        put_device(dev);
        spin_lock_irq(q->queue_lock);
}
EXPORT_SYMBOL_GPL(bsg_request_fn);

/**
 * bsg_setup_queue - Create and add the bsg hooks so we can receive requests
 * @dev: device to attach bsg device to
 * @q: request queue setup by caller
 * @name: name to give the bsg device
 * @job_fn: bsg job handler
 * @dd_job_size: size of LLD data needed for each job
 *
 * The caller should have set up the request queue with bsg_request_fn
 * as the request_fn.
 */
int bsg_setup_queue(struct device *dev, struct request_queue *q,
                    char *name, bsg_job_fn *job_fn, int dd_job_size)
{
        int ret;

        q->queuedata = dev;
        q->bsg_job_size = dd_job_size;
        q->bsg_job_fn = job_fn;
        queue_flag_set_unlocked(QUEUE_FLAG_BIDI, q);
        blk_queue_softirq_done(q, bsg_softirq_done);
        blk_queue_rq_timeout(q, BLK_DEFAULT_SG_TIMEOUT);

        ret = bsg_register_queue(q, dev, name, NULL);
        if (ret) {
                printk(KERN_ERR "%s: bsg interface failed to "
                       "initialize - register queue\n", dev->kobj.name);
                return ret;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(bsg_setup_queue);
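
/*
 * Example setup (an illustrative sketch only, compiled out): as the
 * kerneldoc above says, the caller first creates the queue with
 * bsg_request_fn as its request_fn and then attaches the bsg hooks.
 * my_bsg_init(), my_bsg_job_fn, struct my_job_data and the "my_bsg0" name
 * are hypothetical driver objects used only to show the call sequence.
 */
#if 0
static int my_bsg_init(struct device *dev, spinlock_t *lock)
{
        struct request_queue *q;
        int ret;

        /* queue whose request_fn is the generic bsg handler */
        q = blk_init_queue(bsg_request_fn, lock);
        if (!q)
                return -ENOMEM;

        /* attach the bsg device and per-job LLD data size */
        ret = bsg_setup_queue(dev, q, "my_bsg0", my_bsg_job_fn,
                              sizeof(struct my_job_data));
        if (ret)
                blk_cleanup_queue(q);
        return ret;
}
#endif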

/**
 * bsg_remove_queue - Deletes the bsg dev from the q
 * @q: the request_queue that is to be torn down.
 *
 * Notes:
 *   Before unregistering the queue, drain any requests that are blocked on it.
 */
void bsg_remove_queue(struct request_queue *q)
{
        struct request *req; /* block request */
        int counts; /* totals for request_list count and starved */

        if (!q)
                return;

        /* Stop taking in new requests */
        spin_lock_irq(q->queue_lock);
        blk_stop_queue(q);

        /* drain all requests in the queue */
        while (1) {
                /* need the lock to fetch a request
                 * this may fetch the same request as the previous pass
                 */
                req = blk_fetch_request(q);
                /* save requests in use and starved */
                counts = q->rq.count[0] + q->rq.count[1] +
                         q->rq.starved[0] + q->rq.starved[1];
                spin_unlock_irq(q->queue_lock);
                /* any requests still outstanding? */
                if (counts == 0)
                        break;

                /* This may be the same req as the previous iteration;
                 * blk_end_request_all() must always follow a fetch, because
                 * the fetch started the request and it must not be left
                 * unfinished.
                 */
                if (req) {
                        /* return -ENXIO to indicate that this queue is
                         * going away
                         */
                        req->errors = -ENXIO;
                        blk_end_request_all(req, -ENXIO);
                }

                msleep(200); /* allow bsg to possibly finish */
                spin_lock_irq(q->queue_lock);
        }
        bsg_unregister_queue(q);
}
EXPORT_SYMBOL_GPL(bsg_remove_queue);
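
/*
 * Example teardown (an illustrative sketch only, compiled out), mirroring
 * the hypothetical my_bsg_init() above: drain and unregister the bsg device
 * before destroying the request queue.
 */
#if 0
static void my_bsg_exit(struct request_queue *q)
{
        bsg_remove_queue(q);    /* drain outstanding requests, unregister */
        blk_cleanup_queue(q);   /* then release the queue itself */
}
#endif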