/*
 * bsg.c - block layer implementation of the sg v4 interface
 *
 * Copyright (C) 2004 Jens Axboe <axboe@suse.de> SUSE Labs
 * Copyright (C) 2004 Peter M. Jones <pjones@redhat.com>
 *
 *  This file is subject to the terms and conditions of the GNU General Public
 *  License version 2.  See the file "COPYING" in the main directory of this
 *  archive for more details.
 *
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/blkdev.h>
#include <linux/poll.h>
#include <linux/cdev.h>
#include <linux/percpu.h>
#include <linux/uio.h>
#include <linux/idr.h>
#include <linux/bsg.h>
#include <linux/smp_lock.h>

#include <scsi/scsi.h>
#include <scsi/scsi_ioctl.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/sg.h>

#define BSG_DESCRIPTION	"Block layer SCSI generic (bsg) driver"
#define BSG_VERSION	"0.4"

struct bsg_device {
	struct request_queue *queue;
	spinlock_t lock;
	struct list_head busy_list;
	struct list_head done_list;
	struct hlist_node dev_list;
	atomic_t ref_count;
	int queued_cmds;
	int done_cmds;
	wait_queue_head_t wq_done;
	wait_queue_head_t wq_free;
	char name[BUS_ID_SIZE];
	int max_queue;
	unsigned long flags;
};

enum {
	BSG_F_BLOCK	= 1,
};

#define BSG_DEFAULT_CMDS	64
#define BSG_MAX_DEVS		32768

#undef BSG_DEBUG

#ifdef BSG_DEBUG
#define dprintk(fmt, args...) printk(KERN_ERR "%s: " fmt, __func__, ##args)
#else
#define dprintk(fmt, args...)
#endif

static DEFINE_MUTEX(bsg_mutex);
static DEFINE_IDR(bsg_minor_idr);

#define BSG_LIST_ARRAY_SIZE	8
static struct hlist_head bsg_device_list[BSG_LIST_ARRAY_SIZE];

static struct class *bsg_class;
static int bsg_major;

static struct kmem_cache *bsg_cmd_cachep;

/*
 * our internal command type
 */
struct bsg_command {
	struct bsg_device *bd;
	struct list_head list;
	struct request *rq;
	struct bio *bio;
	struct bio *bidi_bio;
	int err;
	struct sg_io_v4 hdr;
	char sense[SCSI_SENSE_BUFFERSIZE];
};
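
/*
 * return a command to the cache, drop the device's queued count and wake
 * up anyone sleeping in bsg_alloc_command() waiting for a free queue slot
 */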
static void bsg_free_command(struct bsg_command *bc)
{
	struct bsg_device *bd = bc->bd;
	unsigned long flags;

	kmem_cache_free(bsg_cmd_cachep, bc);

	spin_lock_irqsave(&bd->lock, flags);
	bd->queued_cmds--;
	spin_unlock_irqrestore(&bd->lock, flags);

	wake_up(&bd->wq_free);
}
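
/*
 * allocate a new command; fails with -EINVAL once the per-device queue
 * limit (bd->max_queue) has been reached, -ENOMEM on allocation failure
 */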
static struct bsg_command *bsg_alloc_command(struct bsg_device *bd)
{
	struct bsg_command *bc = ERR_PTR(-EINVAL);

	spin_lock_irq(&bd->lock);

	if (bd->queued_cmds >= bd->max_queue)
		goto out;

	bd->queued_cmds++;
	spin_unlock_irq(&bd->lock);

	bc = kmem_cache_zalloc(bsg_cmd_cachep, GFP_KERNEL);
	if (unlikely(!bc)) {
		spin_lock_irq(&bd->lock);
		bd->queued_cmds--;
		bc = ERR_PTR(-ENOMEM);
		goto out;
	}

	bc->bd = bd;
	INIT_LIST_HEAD(&bc->list);
	dprintk("%s: returning free cmd %p\n", bd->name, bc);
	return bc;
out:
	spin_unlock_irq(&bd->lock);
	return bc;
}

static inline struct hlist_head *bsg_dev_idx_hash(int index)
{
	return &bsg_device_list[index & (BSG_LIST_ARRAY_SIZE - 1)];
}
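
/*
 * wait for outstanding commands to complete: returns -ENODATA when nothing
 * is in flight, -EAGAIN for non-blocking opens, and 0 after sleeping once
 * on the done waitqueue
 */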
static int bsg_io_schedule(struct bsg_device *bd)
{
	DEFINE_WAIT(wait);
	int ret = 0;

	spin_lock_irq(&bd->lock);

	BUG_ON(bd->done_cmds > bd->queued_cmds);

	/*
	 * -ENOSPC or -ENODATA?  I'm going for -ENODATA, meaning "I have no
	 * work to do", even though we return -ENOSPC after this same test
	 * during bsg_write() -- there, it means our buffer can't have more
	 * bsg_commands added to it, thus has no space left.
	 */
	if (bd->done_cmds == bd->queued_cmds) {
		ret = -ENODATA;
		goto unlock;
	}

	if (!test_bit(BSG_F_BLOCK, &bd->flags)) {
		ret = -EAGAIN;
		goto unlock;
	}

	prepare_to_wait(&bd->wq_done, &wait, TASK_UNINTERRUPTIBLE);
	spin_unlock_irq(&bd->lock);
	io_schedule();
	finish_wait(&bd->wq_done, &wait);

	return ret;
unlock:
	spin_unlock_irq(&bd->lock);
	return ret;
}
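
/*
 * copy the CDB from userspace, verify the command is permitted, and fill
 * in the block-pc request fields (type, command length, timeout)
 */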
static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
				struct sg_io_v4 *hdr, struct bsg_device *bd,
				int has_write_perm)
{
	if (hdr->request_len > BLK_MAX_CDB) {
		rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
		if (!rq->cmd)
			return -ENOMEM;
	}

	if (copy_from_user(rq->cmd, (void *)(unsigned long)hdr->request,
			   hdr->request_len))
		return -EFAULT;

	if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
		if (blk_verify_command(&q->cmd_filter, rq->cmd, has_write_perm))
			return -EPERM;
	} else if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	/*
	 * fill in request structure
	 */
	rq->cmd_len = hdr->request_len;
	rq->cmd_type = REQ_TYPE_BLOCK_PC;

	rq->timeout = (hdr->timeout * HZ) / 1000;
	if (!rq->timeout)
		rq->timeout = q->sg_timeout;
	if (!rq->timeout)
		rq->timeout = BLK_DEFAULT_SG_TIMEOUT;

	return 0;
}

/*
 * Check if sg_io_v4 from user is allowed and valid
 */
static int
bsg_validate_sgv4_hdr(struct request_queue *q, struct sg_io_v4 *hdr, int *rw)
{
	int ret = 0;

	if (hdr->guard != 'Q')
		return -EINVAL;
	if (hdr->dout_xfer_len > (q->max_sectors << 9) ||
	    hdr->din_xfer_len > (q->max_sectors << 9))
		return -EIO;

	switch (hdr->protocol) {
	case BSG_PROTOCOL_SCSI:
		switch (hdr->subprotocol) {
		case BSG_SUB_PROTOCOL_SCSI_CMD:
		case BSG_SUB_PROTOCOL_SCSI_TRANSPORT:
			break;
		default:
			ret = -EINVAL;
		}
		break;
	default:
		ret = -EINVAL;
	}

	*rw = hdr->dout_xfer_len ? WRITE : READ;
	return ret;
}

/*
 * map sg_io_v4 to a request.
 */
static struct request *
bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, int has_write_perm)
{
	struct request_queue *q = bd->queue;
	struct request *rq, *next_rq = NULL;
	int ret, rw;
	unsigned int dxfer_len;
	void *dxferp = NULL;

	dprintk("map hdr %llx/%u %llx/%u\n", (unsigned long long) hdr->dout_xferp,
		hdr->dout_xfer_len, (unsigned long long) hdr->din_xferp,
		hdr->din_xfer_len);

	ret = bsg_validate_sgv4_hdr(q, hdr, &rw);
	if (ret)
		return ERR_PTR(ret);

	/*
	 * map scatter-gather elements separately and string them to request
	 */
	rq = blk_get_request(q, rw, GFP_KERNEL);
	if (!rq)
		return ERR_PTR(-ENOMEM);
	ret = blk_fill_sgv4_hdr_rq(q, rq, hdr, bd, has_write_perm);
	if (ret)
		goto out;

	if (rw == WRITE && hdr->din_xfer_len) {
		if (!test_bit(QUEUE_FLAG_BIDI, &q->queue_flags)) {
			ret = -EOPNOTSUPP;
			goto out;
		}

		next_rq = blk_get_request(q, READ, GFP_KERNEL);
		if (!next_rq) {
			ret = -ENOMEM;
			goto out;
		}
		rq->next_rq = next_rq;
		next_rq->cmd_type = rq->cmd_type;

		dxferp = (void*)(unsigned long)hdr->din_xferp;
		ret = blk_rq_map_user(q, next_rq, dxferp, hdr->din_xfer_len);
		if (ret)
			goto out;
	}

	if (hdr->dout_xfer_len) {
		dxfer_len = hdr->dout_xfer_len;
		dxferp = (void*)(unsigned long)hdr->dout_xferp;
	} else if (hdr->din_xfer_len) {
		dxfer_len = hdr->din_xfer_len;
		dxferp = (void*)(unsigned long)hdr->din_xferp;
	} else
		dxfer_len = 0;

	if (dxfer_len) {
		ret = blk_rq_map_user(q, rq, dxferp, dxfer_len);
		if (ret)
			goto out;
	}
	return rq;
out:
	if (rq->cmd != rq->__cmd)
		kfree(rq->cmd);
	blk_put_request(rq);
	if (next_rq) {
		blk_rq_unmap_user(next_rq->bio);
		blk_put_request(next_rq);
	}
	return ERR_PTR(ret);
}

/*
 * async completion call-back from the block layer, when scsi/ide/whatever
 * calls end_that_request_last() on a request
 */
static void bsg_rq_end_io(struct request *rq, int uptodate)
{
	struct bsg_command *bc = rq->end_io_data;
	struct bsg_device *bd = bc->bd;
	unsigned long flags;

	dprintk("%s: finished rq %p bc %p, bio %p stat %d\n",
		bd->name, rq, bc, bc->bio, uptodate);

	bc->hdr.duration = jiffies_to_msecs(jiffies - bc->hdr.duration);

	spin_lock_irqsave(&bd->lock, flags);
	list_move_tail(&bc->list, &bd->done_list);
	bd->done_cmds++;
	spin_unlock_irqrestore(&bd->lock, flags);

	wake_up(&bd->wq_done);
}

/*
 * do final setup of a 'bc' and submit the matching 'rq' to the block
 * layer for io
 */
static void bsg_add_command(struct bsg_device *bd, struct request_queue *q,
			    struct bsg_command *bc, struct request *rq)
{
	rq->sense = bc->sense;
	rq->sense_len = 0;

	/*
	 * add bc command to busy queue and submit rq for io
	 */
	bc->rq = rq;
	bc->bio = rq->bio;
	if (rq->next_rq)
		bc->bidi_bio = rq->next_rq->bio;
	bc->hdr.duration = jiffies;
	spin_lock_irq(&bd->lock);
	list_add_tail(&bc->list, &bd->busy_list);
	spin_unlock_irq(&bd->lock);

	dprintk("%s: queueing rq %p, bc %p\n", bd->name, rq, bc);

	rq->end_io_data = bc;
	blk_execute_rq_nowait(q, NULL, rq, 1, bsg_rq_end_io);
}
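
/*
 * pop the first command off the done list, if any
 */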
static struct bsg_command *bsg_next_done_cmd(struct bsg_device *bd)
{
	struct bsg_command *bc = NULL;

	spin_lock_irq(&bd->lock);
	if (bd->done_cmds) {
		bc = list_first_entry(&bd->done_list, struct bsg_command, list);
		list_del(&bc->list);
		bd->done_cmds--;
	}
	spin_unlock_irq(&bd->lock);

	return bc;
}

/*
 * Get a finished command from the done list
 */
static struct bsg_command *bsg_get_done_cmd(struct bsg_device *bd)
{
	struct bsg_command *bc;
	int ret;

	do {
		bc = bsg_next_done_cmd(bd);
		if (bc)
			break;

		if (!test_bit(BSG_F_BLOCK, &bd->flags)) {
			bc = ERR_PTR(-EAGAIN);
			break;
		}

		ret = wait_event_interruptible(bd->wq_done, bd->done_cmds);
		if (ret) {
			bc = ERR_PTR(-ERESTARTSYS);
			break;
		}
	} while (1);

	dprintk("%s: returning done %p\n", bd->name, bc);

	return bc;
}
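
/*
 * fill in the sg_io_v4 output members from the completed request, copy
 * sense data back to userspace and tear the request(s) down
 */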
static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
				    struct bio *bio, struct bio *bidi_bio)
{
	int ret = 0;

	dprintk("rq %p bio %p %u\n", rq, bio, rq->errors);
	/*
	 * fill in all the output members
	 */
	hdr->device_status = status_byte(rq->errors);
	hdr->transport_status = host_byte(rq->errors);
	hdr->driver_status = driver_byte(rq->errors);
	hdr->info = 0;
	if (hdr->device_status || hdr->transport_status || hdr->driver_status)
		hdr->info |= SG_INFO_CHECK;
	hdr->response_len = 0;

	if (rq->sense_len && hdr->response) {
		int len = min_t(unsigned int, hdr->max_response_len,
					rq->sense_len);

		ret = copy_to_user((void*)(unsigned long)hdr->response,
				   rq->sense, len);
		if (!ret)
			hdr->response_len = len;
		else
			ret = -EFAULT;
	}

	if (rq->next_rq) {
		hdr->dout_resid = rq->data_len;
		hdr->din_resid = rq->next_rq->data_len;
		blk_rq_unmap_user(bidi_bio);
		blk_put_request(rq->next_rq);
	} else if (rq_data_dir(rq) == READ)
		hdr->din_resid = rq->data_len;
	else
		hdr->dout_resid = rq->data_len;

	/*
	 * If the request generated a negative error number, return it
	 * (providing we aren't already returning an error); if it's
	 * just a protocol response (i.e. non negative), that gets
	 * processed above.
	 */
	if (!ret && rq->errors < 0)
		ret = rq->errors;

	blk_rq_unmap_user(bio);
	if (rq->cmd != rq->__cmd)
		kfree(rq->cmd);
	blk_put_request(rq);

	return ret;
}
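
/*
 * drain the device: wait for all queued commands to finish, then reap and
 * discard everything left on the done list. called from the release path
 */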
static int bsg_complete_all_commands(struct bsg_device *bd)
{
	struct bsg_command *bc;
	int ret, tret;

	dprintk("%s: entered\n", bd->name);

	/*
	 * wait for all commands to complete
	 */
	ret = 0;
	do {
		ret = bsg_io_schedule(bd);
		/*
		 * look for -ENODATA specifically -- we'll sometimes get
		 * -ERESTARTSYS when we've taken a signal, but we can't
		 * return until we're done freeing the queue, so ignore
		 * it.  The signal will get handled when we're done freeing
		 * the bsg_device.
		 */
	} while (ret != -ENODATA);

	/*
	 * discard done commands
	 */
	ret = 0;
	do {
		spin_lock_irq(&bd->lock);
		if (!bd->queued_cmds) {
			spin_unlock_irq(&bd->lock);
			break;
		}
		spin_unlock_irq(&bd->lock);

		bc = bsg_get_done_cmd(bd);
		if (IS_ERR(bc))
			break;

		tret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio,
						bc->bidi_bio);
		if (!ret)
			ret = tret;

		bsg_free_command(bc);
	} while (1);

	return ret;
}
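
/*
 * reap completed commands and copy their sg_io_v4 headers back to
 * userspace; 'count' must be a multiple of sizeof(struct sg_io_v4)
 */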
static int
__bsg_read(char __user *buf, size_t count, struct bsg_device *bd,
	   const struct iovec *iov, ssize_t *bytes_read)
{
	struct bsg_command *bc;
	int nr_commands, ret;

	if (count % sizeof(struct sg_io_v4))
		return -EINVAL;

	ret = 0;
	nr_commands = count / sizeof(struct sg_io_v4);
	while (nr_commands) {
		bc = bsg_get_done_cmd(bd);
		if (IS_ERR(bc)) {
			ret = PTR_ERR(bc);
			break;
		}

		/*
		 * this is the only case where we need to copy data back
		 * after completing the request. so do that here,
		 * bsg_complete_work() cannot do that for us
		 */
		ret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio,
					       bc->bidi_bio);

		if (copy_to_user(buf, &bc->hdr, sizeof(bc->hdr)))
			ret = -EFAULT;

		bsg_free_command(bc);

		if (ret)
			break;

		buf += sizeof(struct sg_io_v4);
		*bytes_read += sizeof(struct sg_io_v4);
		nr_commands--;
	}

	return ret;
}

static inline void bsg_set_block(struct bsg_device *bd, struct file *file)
{
	if (file->f_flags & O_NONBLOCK)
		clear_bit(BSG_F_BLOCK, &bd->flags);
	else
		set_bit(BSG_F_BLOCK, &bd->flags);
}

/*
 * Check if the error is a "real" error that we should return.
 */
static inline int err_block_err(int ret)
{
	if (ret && ret != -ENOSPC && ret != -ENODATA && ret != -EAGAIN)
		return 1;

	return 0;
}

static ssize_t
bsg_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	struct bsg_device *bd = file->private_data;
	int ret;
	ssize_t bytes_read;

	dprintk("%s: read %Zd bytes\n", bd->name, count);

	bsg_set_block(bd, file);

	bytes_read = 0;
	ret = __bsg_read(buf, count, bd, NULL, &bytes_read);
	*ppos = bytes_read;

	if (!bytes_read || (bytes_read && err_block_err(ret)))
		bytes_read = ret;

	return bytes_read;
}
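
/*
 * queue sg_io_v4 headers from userspace: each one is mapped to a request
 * and submitted asynchronously, to be reaped later via read()
 */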
static int __bsg_write(struct bsg_device *bd, const char __user *buf,
		       size_t count, ssize_t *bytes_written, int has_write_perm)
{
	struct bsg_command *bc;
	struct request *rq;
	int ret, nr_commands;

	if (count % sizeof(struct sg_io_v4))
		return -EINVAL;

	nr_commands = count / sizeof(struct sg_io_v4);
	rq = NULL;
	bc = NULL;
	ret = 0;
	while (nr_commands) {
		struct request_queue *q = bd->queue;

		bc = bsg_alloc_command(bd);
		if (IS_ERR(bc)) {
			ret = PTR_ERR(bc);
			bc = NULL;
			break;
		}

		if (copy_from_user(&bc->hdr, buf, sizeof(bc->hdr))) {
			ret = -EFAULT;
			break;
		}

		/*
		 * get a request, fill in the blanks, and add to request queue
		 */
		rq = bsg_map_hdr(bd, &bc->hdr, has_write_perm);
		if (IS_ERR(rq)) {
			ret = PTR_ERR(rq);
			rq = NULL;
			break;
		}

		bsg_add_command(bd, q, bc, rq);
		bc = NULL;
		rq = NULL;
		nr_commands--;
		buf += sizeof(struct sg_io_v4);
		*bytes_written += sizeof(struct sg_io_v4);
	}

	if (bc)
		bsg_free_command(bc);

	return ret;
}

static ssize_t
bsg_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
	struct bsg_device *bd = file->private_data;
	ssize_t bytes_written;
	int ret;

	dprintk("%s: write %Zd bytes\n", bd->name, count);

	bsg_set_block(bd, file);

	bytes_written = 0;
	ret = __bsg_write(bd, buf, count, &bytes_written,
			  file->f_mode & FMODE_WRITE);

	*ppos = bytes_written;

	/*
	 * return bytes written on non-fatal errors
	 */
	if (!bytes_written || (bytes_written && err_block_err(ret)))
		bytes_written = ret;

	dprintk("%s: returning %Zd\n", bd->name, bytes_written);
	return bytes_written;
}
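
/*
 * allocate and initialize a new bsg_device
 */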
static struct bsg_device *bsg_alloc_device(void)
{
	struct bsg_device *bd;

	bd = kzalloc(sizeof(struct bsg_device), GFP_KERNEL);
	if (unlikely(!bd))
		return NULL;

	spin_lock_init(&bd->lock);

	bd->max_queue = BSG_DEFAULT_CMDS;

	INIT_LIST_HEAD(&bd->busy_list);
	INIT_LIST_HEAD(&bd->done_list);
	INIT_HLIST_NODE(&bd->dev_list);

	init_waitqueue_head(&bd->wq_free);
	init_waitqueue_head(&bd->wq_done);
	return bd;
}

static void bsg_kref_release_function(struct kref *kref)
{
	struct bsg_class_device *bcd =
		container_of(kref, struct bsg_class_device, ref);
	struct device *parent = bcd->parent;

	if (bcd->release)
		bcd->release(bcd->parent);

	put_device(parent);
}

static int bsg_put_device(struct bsg_device *bd)
{
	int ret = 0, do_free;
	struct request_queue *q = bd->queue;

	mutex_lock(&bsg_mutex);

	do_free = atomic_dec_and_test(&bd->ref_count);
	if (!do_free) {
		mutex_unlock(&bsg_mutex);
		goto out;
	}

	hlist_del(&bd->dev_list);
	mutex_unlock(&bsg_mutex);

	dprintk("%s: tearing down\n", bd->name);

	/*
	 * close can always block
	 */
	set_bit(BSG_F_BLOCK, &bd->flags);

	/*
	 * correct error detection baddies here again. it's the responsibility
	 * of the app to properly reap commands before close() if it wants
	 * fool-proof error detection
	 */
	ret = bsg_complete_all_commands(bd);

	kfree(bd);
out:
	kref_put(&q->bsg_dev.ref, bsg_kref_release_function);
	if (do_free)
		blk_put_queue(q);
	return ret;
}
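
/*
 * create a bsg_device for this queue/inode pair and hash it for later
 * lookup by minor number
 */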
static struct bsg_device *bsg_add_device(struct inode *inode,
					 struct request_queue *rq,
					 struct file *file)
{
	struct bsg_device *bd;
	int ret;
#ifdef BSG_DEBUG
	unsigned char buf[32];
#endif
	ret = blk_get_queue(rq);
	if (ret)
		return ERR_PTR(-ENXIO);

	bd = bsg_alloc_device();
	if (!bd) {
		blk_put_queue(rq);
		return ERR_PTR(-ENOMEM);
	}

	bd->queue = rq;

	bsg_set_block(bd, file);

	atomic_set(&bd->ref_count, 1);
	mutex_lock(&bsg_mutex);
	hlist_add_head(&bd->dev_list, bsg_dev_idx_hash(iminor(inode)));

	strncpy(bd->name, rq->bsg_dev.class_dev->bus_id, sizeof(bd->name) - 1);
	dprintk("bound to <%s>, max queue %d\n",
		format_dev_t(buf, inode->i_rdev), bd->max_queue);

	mutex_unlock(&bsg_mutex);
	return bd;
}

static struct bsg_device *__bsg_get_device(int minor, struct request_queue *q)
{
	struct bsg_device *bd;
	struct hlist_node *entry;

	mutex_lock(&bsg_mutex);

	hlist_for_each_entry(bd, entry, bsg_dev_idx_hash(minor), dev_list) {
		if (bd->queue == q) {
			atomic_inc(&bd->ref_count);
			goto found;
		}
	}
	bd = NULL;
found:
	mutex_unlock(&bsg_mutex);
	return bd;
}

static struct bsg_device *bsg_get_device(struct inode *inode, struct file *file)
{
	struct bsg_device *bd;
	struct bsg_class_device *bcd;

	/*
	 * find the class device
	 */
	mutex_lock(&bsg_mutex);
	bcd = idr_find(&bsg_minor_idr, iminor(inode));
	if (bcd)
		kref_get(&bcd->ref);
	mutex_unlock(&bsg_mutex);

	if (!bcd)
		return ERR_PTR(-ENODEV);

	bd = __bsg_get_device(iminor(inode), bcd->queue);
	if (bd)
		return bd;

	bd = bsg_add_device(inode, bcd->queue, file);
	if (IS_ERR(bd))
		kref_put(&bcd->ref, bsg_kref_release_function);

	return bd;
}

static int bsg_open(struct inode *inode, struct file *file)
{
	struct bsg_device *bd;

	lock_kernel();
	bd = bsg_get_device(inode, file);
	unlock_kernel();

	if (IS_ERR(bd))
		return PTR_ERR(bd);

	file->private_data = bd;
	return 0;
}

static int bsg_release(struct inode *inode, struct file *file)
{
	struct bsg_device *bd = file->private_data;

	file->private_data = NULL;
	return bsg_put_device(bd);
}

static unsigned int bsg_poll(struct file *file, poll_table *wait)
{
	struct bsg_device *bd = file->private_data;
	unsigned int mask = 0;

	poll_wait(file, &bd->wq_done, wait);
	poll_wait(file, &bd->wq_free, wait);

	spin_lock_irq(&bd->lock);
	if (!list_empty(&bd->done_list))
		mask |= POLLIN | POLLRDNORM;
	if (bd->queued_cmds >= bd->max_queue)
		mask |= POLLOUT;
	spin_unlock_irq(&bd->lock);

	return mask;
}

static long bsg_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct bsg_device *bd = file->private_data;
	int __user *uarg = (int __user *) arg;
	int ret;

	switch (cmd) {
		/*
		 * our own ioctls
		 */
	case SG_GET_COMMAND_Q:
		return put_user(bd->max_queue, uarg);
	case SG_SET_COMMAND_Q: {
		int queue;

		if (get_user(queue, uarg))
			return -EFAULT;
		if (queue < 1)
			return -EINVAL;

		spin_lock_irq(&bd->lock);
		bd->max_queue = queue;
		spin_unlock_irq(&bd->lock);
		return 0;
	}

	/*
	 * SCSI/sg ioctls
	 */
	case SG_GET_VERSION_NUM:
	case SCSI_IOCTL_GET_IDLUN:
	case SCSI_IOCTL_GET_BUS_NUMBER:
	case SG_SET_TIMEOUT:
	case SG_GET_TIMEOUT:
	case SG_GET_RESERVED_SIZE:
	case SG_SET_RESERVED_SIZE:
	case SG_EMULATED_HOST:
	case SCSI_IOCTL_SEND_COMMAND: {
		void __user *uarg = (void __user *) arg;
		return scsi_cmd_ioctl(file, bd->queue, NULL, cmd, uarg);
	}
	case SG_IO: {
		struct request *rq;
		struct bio *bio, *bidi_bio = NULL;
		struct sg_io_v4 hdr;

		if (copy_from_user(&hdr, uarg, sizeof(hdr)))
			return -EFAULT;

		rq = bsg_map_hdr(bd, &hdr, file->f_mode & FMODE_WRITE);
		if (IS_ERR(rq))
			return PTR_ERR(rq);

		bio = rq->bio;
		if (rq->next_rq)
			bidi_bio = rq->next_rq->bio;
		blk_execute_rq(bd->queue, NULL, rq, 0);
		ret = blk_complete_sgv4_hdr_rq(rq, &hdr, bio, bidi_bio);

		if (copy_to_user(uarg, &hdr, sizeof(hdr)))
			return -EFAULT;

		return ret;
	}
	/*
	 * block device ioctls
	 */
	default:
#if 0
		return ioctl_by_bdev(bd->bdev, cmd, arg);
#else
		return -ENOTTY;
#endif
	}
}

static const struct file_operations bsg_fops = {
	.read		=	bsg_read,
	.write		=	bsg_write,
	.poll		=	bsg_poll,
	.open		=	bsg_open,
	.release	=	bsg_release,
	.unlocked_ioctl	=	bsg_ioctl,
	.owner		=	THIS_MODULE,
};
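
/*
 * remove the bsg interface for a queue: release the minor, the sysfs link
 * and the class device, then drop our reference on the class device
 */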
void bsg_unregister_queue(struct request_queue *q)
{
	struct bsg_class_device *bcd = &q->bsg_dev;

	if (!bcd->class_dev)
		return;

	mutex_lock(&bsg_mutex);
	idr_remove(&bsg_minor_idr, bcd->minor);
	sysfs_remove_link(&q->kobj, "bsg");
	device_unregister(bcd->class_dev);
	bcd->class_dev = NULL;
	kref_put(&bcd->ref, bsg_kref_release_function);
	mutex_unlock(&bsg_mutex);
}
EXPORT_SYMBOL_GPL(bsg_unregister_queue);

int bsg_register_queue(struct request_queue *q, struct device *parent,
		       const char *name, void (*release)(struct device *))
{
	struct bsg_class_device *bcd;
	dev_t dev;
	int ret, minor;
	struct device *class_dev = NULL;
	const char *devname;

	if (name)
		devname = name;
	else
		devname = parent->bus_id;

	/*
	 * we need a proper transport to send commands, not a stacked device
	 */
	if (!q->request_fn)
		return 0;

	bcd = &q->bsg_dev;
	memset(bcd, 0, sizeof(*bcd));

	mutex_lock(&bsg_mutex);

	ret = idr_pre_get(&bsg_minor_idr, GFP_KERNEL);
	if (!ret) {
		ret = -ENOMEM;
		goto unlock;
	}

	ret = idr_get_new(&bsg_minor_idr, bcd, &minor);
	if (ret < 0)
		goto unlock;

	if (minor >= BSG_MAX_DEVS) {
		printk(KERN_ERR "bsg: too many bsg devices\n");
		ret = -EINVAL;
		goto remove_idr;
	}

	bcd->minor = minor;
	bcd->queue = q;
	bcd->parent = get_device(parent);
	bcd->release = release;
	kref_init(&bcd->ref);
	dev = MKDEV(bsg_major, bcd->minor);
	class_dev = device_create_drvdata(bsg_class, parent, dev, NULL,
					  "%s", devname);
	if (IS_ERR(class_dev)) {
		ret = PTR_ERR(class_dev);
		goto put_dev;
	}
	bcd->class_dev = class_dev;

	if (q->kobj.sd) {
		ret = sysfs_create_link(&q->kobj, &bcd->class_dev->kobj, "bsg");
		if (ret)
			goto unregister_class_dev;
	}

	mutex_unlock(&bsg_mutex);
	return 0;

unregister_class_dev:
	device_unregister(class_dev);
put_dev:
	put_device(parent);
remove_idr:
	idr_remove(&bsg_minor_idr, minor);
unlock:
	mutex_unlock(&bsg_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(bsg_register_queue);

static struct cdev bsg_cdev;

static int __init bsg_init(void)
{
	int ret, i;
	dev_t devid;

	bsg_cmd_cachep = kmem_cache_create("bsg_cmd",
				sizeof(struct bsg_command), 0, 0, NULL);
	if (!bsg_cmd_cachep) {
		printk(KERN_ERR "bsg: failed creating slab cache\n");
		return -ENOMEM;
	}

	for (i = 0; i < BSG_LIST_ARRAY_SIZE; i++)
		INIT_HLIST_HEAD(&bsg_device_list[i]);

	bsg_class = class_create(THIS_MODULE, "bsg");
	if (IS_ERR(bsg_class)) {
		ret = PTR_ERR(bsg_class);
		goto destroy_kmemcache;
	}

	ret = alloc_chrdev_region(&devid, 0, BSG_MAX_DEVS, "bsg");
	if (ret)
		goto destroy_bsg_class;

	bsg_major = MAJOR(devid);

	cdev_init(&bsg_cdev, &bsg_fops);
	ret = cdev_add(&bsg_cdev, MKDEV(bsg_major, 0), BSG_MAX_DEVS);
	if (ret)
		goto unregister_chrdev;

	printk(KERN_INFO BSG_DESCRIPTION " version " BSG_VERSION
	       " loaded (major %d)\n", bsg_major);
	return 0;
unregister_chrdev:
	unregister_chrdev_region(MKDEV(bsg_major, 0), BSG_MAX_DEVS);
destroy_bsg_class:
	class_destroy(bsg_class);
destroy_kmemcache:
	kmem_cache_destroy(bsg_cmd_cachep);
	return ret;
}

MODULE_AUTHOR("Jens Axboe");
MODULE_DESCRIPTION(BSG_DESCRIPTION);
MODULE_LICENSE("GPL");

device_initcall(bsg_init);
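
#if 0
/*
 * Usage sketch, kept out of the build: a minimal userspace program that
 * drives this interface by issuing a SCSI INQUIRY through a bsg node with
 * the SG_IO ioctl and an sg_io_v4 header. The device path /dev/bsg/0:0:0:0
 * is an assumption for illustration; adjust to a node present on the
 * system. Note guard must be 'Q' and the timeout is in milliseconds, as
 * checked/consumed by bsg_validate_sgv4_hdr() and blk_fill_sgv4_hdr_rq()
 * above.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/bsg.h>
#include <scsi/sg.h>

int main(void)
{
	unsigned char cdb[6] = { 0x12, 0, 0, 0, 96, 0 };  /* INQUIRY, 96 bytes */
	unsigned char buf[96], sense[32];
	struct sg_io_v4 hdr;
	int fd, ret;

	fd = open("/dev/bsg/0:0:0:0", O_RDWR);	/* assumed device node */
	if (fd < 0)
		return 1;

	memset(&hdr, 0, sizeof(hdr));
	hdr.guard = 'Q';			/* mandatory for the v4 interface */
	hdr.protocol = BSG_PROTOCOL_SCSI;
	hdr.subprotocol = BSG_SUB_PROTOCOL_SCSI_CMD;
	hdr.request = (unsigned long) cdb;	/* CDB pointer */
	hdr.request_len = sizeof(cdb);
	hdr.din_xferp = (unsigned long) buf;	/* data-in: device to host */
	hdr.din_xfer_len = sizeof(buf);
	hdr.response = (unsigned long) sense;	/* sense data on CHECK CONDITION */
	hdr.max_response_len = sizeof(sense);
	hdr.timeout = 20000;			/* milliseconds */

	ret = ioctl(fd, SG_IO, &hdr);
	if (ret < 0 || (hdr.info & SG_INFO_CHECK))
		fprintf(stderr, "INQUIRY failed\n");
	else
		printf("vendor: %.8s\n", buf + 8);

	close(fd);
	return 0;
}
#endif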