/*
 * bsg.c - block layer implementation of the sg v4 interface
 *
 * Copyright (C) 2004 Jens Axboe <axboe@suse.de> SUSE Labs
 * Copyright (C) 2004 Peter M. Jones <pjones@redhat.com>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License version 2.  See the file "COPYING" in the main directory of this
 * archive for more details.
 *
 */
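/*
 * For orientation, a minimal user-space sketch of the synchronous SG_IO
 * path this driver implements (see bsg_ioctl() below).  The device node
 * name is an assumption -- it depends on how udev names the bsg class
 * device -- and the CDB is just TEST UNIT READY.
 *
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/bsg.h>			// struct sg_io_v4
 *	#include <scsi/sg.h>			// SG_IO
 *
 *	int fd = open("/dev/bsg/0:0:0:0", O_RDWR);	// hypothetical node
 *	unsigned char cdb[6] = { 0 };			// TEST UNIT READY
 *	struct sg_io_v4 hdr;
 *	memset(&hdr, 0, sizeof(hdr));
 *	hdr.guard = 'Q';				// required magic
 *	hdr.protocol = BSG_PROTOCOL_SCSI;
 *	hdr.subprotocol = BSG_SUB_PROTOCOL_SCSI_CMD;
 *	hdr.request = (unsigned long) cdb;
 *	hdr.request_len = sizeof(cdb);
 *	ioctl(fd, SG_IO, &hdr);		// returns once the command completes
 */
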
#include <linux/module.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/blkdev.h>
#include <linux/poll.h>
#include <linux/cdev.h>
#include <linux/percpu.h>
#include <linux/uio.h>
#include <linux/idr.h>
#include <linux/bsg.h>
#include <linux/smp_lock.h>

#include <scsi/scsi.h>
#include <scsi/scsi_ioctl.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/sg.h>

#define BSG_DESCRIPTION	"Block layer SCSI generic (bsg) driver"
#define BSG_VERSION	"0.4"

struct bsg_device {
	struct request_queue *queue;
	spinlock_t lock;
	struct list_head busy_list;
	struct list_head done_list;
	struct hlist_node dev_list;
	atomic_t ref_count;
	int queued_cmds;
	int done_cmds;
	wait_queue_head_t wq_done;
	wait_queue_head_t wq_free;
	char name[20];
	int max_queue;
	unsigned long flags;
};

enum {
	BSG_F_BLOCK	= 1,
};

#define BSG_DEFAULT_CMDS	64
#define BSG_MAX_DEVS		32768

#undef BSG_DEBUG

#ifdef BSG_DEBUG
#define dprintk(fmt, args...) printk(KERN_ERR "%s: " fmt, __func__, ##args)
#else
#define dprintk(fmt, args...)
#endif

static DEFINE_MUTEX(bsg_mutex);
static DEFINE_IDR(bsg_minor_idr);

#define BSG_LIST_ARRAY_SIZE	8
static struct hlist_head bsg_device_list[BSG_LIST_ARRAY_SIZE];

static struct class *bsg_class;
static int bsg_major;

static struct kmem_cache *bsg_cmd_cachep;

/*
 * our internal command type
 */
struct bsg_command {
	struct bsg_device *bd;
	struct list_head list;
	struct request *rq;
	struct bio *bio;
	struct bio *bidi_bio;
	int err;
	struct sg_io_v4 hdr;
	char sense[SCSI_SENSE_BUFFERSIZE];
};

static void bsg_free_command(struct bsg_command *bc)
{
	struct bsg_device *bd = bc->bd;
	unsigned long flags;

	kmem_cache_free(bsg_cmd_cachep, bc);

	spin_lock_irqsave(&bd->lock, flags);
	bd->queued_cmds--;
	spin_unlock_irqrestore(&bd->lock, flags);

	wake_up(&bd->wq_free);
}

static struct bsg_command *bsg_alloc_command(struct bsg_device *bd)
{
	struct bsg_command *bc = ERR_PTR(-EINVAL);

	spin_lock_irq(&bd->lock);

	if (bd->queued_cmds >= bd->max_queue)
		goto out;

	bd->queued_cmds++;
	spin_unlock_irq(&bd->lock);

	bc = kmem_cache_zalloc(bsg_cmd_cachep, GFP_KERNEL);
	if (unlikely(!bc)) {
		spin_lock_irq(&bd->lock);
		bd->queued_cmds--;
		bc = ERR_PTR(-ENOMEM);
		goto out;
	}

	bc->bd = bd;
	INIT_LIST_HEAD(&bc->list);
	dprintk("%s: returning free cmd %p\n", bd->name, bc);
	return bc;
out:
	spin_unlock_irq(&bd->lock);
	return bc;
}

static inline struct hlist_head *bsg_dev_idx_hash(int index)
{
	return &bsg_device_list[index & (BSG_LIST_ARRAY_SIZE - 1)];
}

static int bsg_io_schedule(struct bsg_device *bd)
{
	DEFINE_WAIT(wait);
	int ret = 0;

	spin_lock_irq(&bd->lock);

	BUG_ON(bd->done_cmds > bd->queued_cmds);

	/*
	 * -ENOSPC or -ENODATA?  I'm going for -ENODATA, meaning "I have no
	 * work to do", even though we return -ENOSPC after this same test
	 * during bsg_write() -- there, it means our buffer can't have more
	 * bsg_commands added to it, thus has no space left.
	 */
	if (bd->done_cmds == bd->queued_cmds) {
		ret = -ENODATA;
		goto unlock;
	}

	if (!test_bit(BSG_F_BLOCK, &bd->flags)) {
		ret = -EAGAIN;
		goto unlock;
	}

	prepare_to_wait(&bd->wq_done, &wait, TASK_UNINTERRUPTIBLE);
	spin_unlock_irq(&bd->lock);
	io_schedule();
	finish_wait(&bd->wq_done, &wait);

	return ret;
unlock:
	spin_unlock_irq(&bd->lock);
	return ret;
}

static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
				struct sg_io_v4 *hdr, struct bsg_device *bd,
				fmode_t has_write_perm)
{
	if (hdr->request_len > BLK_MAX_CDB) {
		rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
		if (!rq->cmd)
			return -ENOMEM;
	}

	if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request,
			   hdr->request_len))
		return -EFAULT;

	if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
		if (blk_verify_command(&q->cmd_filter, rq->cmd, has_write_perm))
			return -EPERM;
	} else if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	/*
	 * fill in request structure
	 */
	rq->cmd_len = hdr->request_len;
	rq->cmd_type = REQ_TYPE_BLOCK_PC;

	rq->timeout = (hdr->timeout * HZ) / 1000;
	if (!rq->timeout)
		rq->timeout = q->sg_timeout;
	if (!rq->timeout)
		rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
	if (rq->timeout < BLK_MIN_SG_TIMEOUT)
		rq->timeout = BLK_MIN_SG_TIMEOUT;

	return 0;
}

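/*
 * Note for callers filling the header: per the conversion above,
 * hdr->timeout is taken in milliseconds, e.g. (sketch)
 *
 *	hdr.timeout = 30 * 1000;	// 30 second command timeout
 *
 * and a zero timeout falls back to the queue or global default.
 */
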
/*
 * Check if sg_io_v4 from user is allowed and valid
 */
static int
bsg_validate_sgv4_hdr(struct request_queue *q, struct sg_io_v4 *hdr, int *rw)
{
	int ret = 0;

	if (hdr->guard != 'Q')
		return -EINVAL;

	switch (hdr->protocol) {
	case BSG_PROTOCOL_SCSI:
		switch (hdr->subprotocol) {
		case BSG_SUB_PROTOCOL_SCSI_CMD:
		case BSG_SUB_PROTOCOL_SCSI_TRANSPORT:
			break;
		default:
			ret = -EINVAL;
		}
		break;
	default:
		ret = -EINVAL;
	}

	*rw = hdr->dout_xfer_len ? WRITE : READ;
	return ret;
}

/*
 * map sg_io_v4 to a request.
 */
static struct request *
bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm,
	    u8 *sense)
{
	struct request_queue *q = bd->queue;
	struct request *rq, *next_rq = NULL;
	int ret, rw;
	unsigned int dxfer_len;
	void *dxferp = NULL;

	dprintk("map hdr %llx/%u %llx/%u\n", (unsigned long long) hdr->dout_xferp,
		hdr->dout_xfer_len, (unsigned long long) hdr->din_xferp,
		hdr->din_xfer_len);

	ret = bsg_validate_sgv4_hdr(q, hdr, &rw);
	if (ret)
		return ERR_PTR(ret);

	/*
	 * map scatter-gather elements separately and string them to request
	 */
	rq = blk_get_request(q, rw, GFP_KERNEL);
	if (!rq)
		return ERR_PTR(-ENOMEM);
	ret = blk_fill_sgv4_hdr_rq(q, rq, hdr, bd, has_write_perm);
	if (ret)
		goto out;

	if (rw == WRITE && hdr->din_xfer_len) {
		if (!test_bit(QUEUE_FLAG_BIDI, &q->queue_flags)) {
			ret = -EOPNOTSUPP;
			goto out;
		}

		next_rq = blk_get_request(q, READ, GFP_KERNEL);
		if (!next_rq) {
			ret = -ENOMEM;
			goto out;
		}
		rq->next_rq = next_rq;
		next_rq->cmd_type = rq->cmd_type;

		dxferp = (void*)(unsigned long)hdr->din_xferp;
		ret = blk_rq_map_user(q, next_rq, NULL, dxferp,
				      hdr->din_xfer_len, GFP_KERNEL);
		if (ret)
			goto out;
	}

	if (hdr->dout_xfer_len) {
		dxfer_len = hdr->dout_xfer_len;
		dxferp = (void*)(unsigned long)hdr->dout_xferp;
	} else if (hdr->din_xfer_len) {
		dxfer_len = hdr->din_xfer_len;
		dxferp = (void*)(unsigned long)hdr->din_xferp;
	} else
		dxfer_len = 0;

	if (dxfer_len) {
		ret = blk_rq_map_user(q, rq, NULL, dxferp, dxfer_len,
				      GFP_KERNEL);
		if (ret)
			goto out;
	}

	rq->sense = sense;
	rq->sense_len = 0;

	return rq;
out:
	if (rq->cmd != rq->__cmd)
		kfree(rq->cmd);
	blk_put_request(rq);
	if (next_rq) {
		blk_rq_unmap_user(next_rq->bio);
		next_rq->bio = NULL;
		blk_put_request(next_rq);
	}
	return ERR_PTR(ret);
}

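/*
 * Sketch of a bidirectional header as bsg_map_hdr() accepts it: both the
 * dout_* and din_* sides set, which maps dout onto 'rq' and din onto
 * 'next_rq' above.  Field values are illustrative only; the queue must
 * have QUEUE_FLAG_BIDI set or -EOPNOTSUPP is returned.
 *
 *	struct sg_io_v4 hdr = { .guard = 'Q', ... };
 *	hdr.dout_xferp = (unsigned long) out_buf;	// data to the device
 *	hdr.dout_xfer_len = out_len;
 *	hdr.din_xferp = (unsigned long) in_buf;		// data back from it
 *	hdr.din_xfer_len = in_len;
 */
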
/*
 * async completion call-back from the block layer, when scsi/ide/whatever
 * calls end_that_request_last() on a request
 */
static void bsg_rq_end_io(struct request *rq, int uptodate)
{
	struct bsg_command *bc = rq->end_io_data;
	struct bsg_device *bd = bc->bd;
	unsigned long flags;

	dprintk("%s: finished rq %p bc %p, bio %p stat %d\n",
		bd->name, rq, bc, bc->bio, uptodate);

	bc->hdr.duration = jiffies_to_msecs(jiffies - bc->hdr.duration);

	spin_lock_irqsave(&bd->lock, flags);
	list_move_tail(&bc->list, &bd->done_list);
	bd->done_cmds++;
	spin_unlock_irqrestore(&bd->lock, flags);

	wake_up(&bd->wq_done);
}

/*
 * do final setup of a 'bc' and submit the matching 'rq' to the block
 * layer for io
 */
static void bsg_add_command(struct bsg_device *bd, struct request_queue *q,
			    struct bsg_command *bc, struct request *rq)
{
	int at_head = (0 == (bc->hdr.flags & BSG_FLAG_Q_AT_TAIL));

	/*
	 * add bc command to busy queue and submit rq for io
	 */
	bc->rq = rq;
	bc->bio = rq->bio;
	if (rq->next_rq)
		bc->bidi_bio = rq->next_rq->bio;
	bc->hdr.duration = jiffies;
	spin_lock_irq(&bd->lock);
	list_add_tail(&bc->list, &bd->busy_list);
	spin_unlock_irq(&bd->lock);

	dprintk("%s: queueing rq %p, bc %p\n", bd->name, rq, bc);

	rq->end_io_data = bc;
	blk_execute_rq_nowait(q, NULL, rq, at_head, bsg_rq_end_io);
}

static struct bsg_command *bsg_next_done_cmd(struct bsg_device *bd)
{
	struct bsg_command *bc = NULL;

	spin_lock_irq(&bd->lock);
	if (bd->done_cmds) {
		bc = list_first_entry(&bd->done_list, struct bsg_command, list);
		list_del(&bc->list);
		bd->done_cmds--;
	}
	spin_unlock_irq(&bd->lock);

	return bc;
}

/*
 * Get a finished command from the done list
 */
static struct bsg_command *bsg_get_done_cmd(struct bsg_device *bd)
{
	struct bsg_command *bc;
	int ret;

	do {
		bc = bsg_next_done_cmd(bd);
		if (bc)
			break;

		if (!test_bit(BSG_F_BLOCK, &bd->flags)) {
			bc = ERR_PTR(-EAGAIN);
			break;
		}

		ret = wait_event_interruptible(bd->wq_done, bd->done_cmds);
		if (ret) {
			bc = ERR_PTR(-ERESTARTSYS);
			break;
		}
	} while (1);

	dprintk("%s: returning done %p\n", bd->name, bc);

	return bc;
}

static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
				    struct bio *bio, struct bio *bidi_bio)
{
	int ret = 0;

	dprintk("rq %p bio %p 0x%x\n", rq, bio, rq->errors);
	/*
	 * fill in all the output members
	 */
	hdr->device_status = status_byte(rq->errors);
	hdr->transport_status = host_byte(rq->errors);
	hdr->driver_status = driver_byte(rq->errors);
	hdr->info = 0;
	if (hdr->device_status || hdr->transport_status || hdr->driver_status)
		hdr->info |= SG_INFO_CHECK;
	hdr->response_len = 0;

	if (rq->sense_len && hdr->response) {
		int len = min_t(unsigned int, hdr->max_response_len,
					rq->sense_len);

		ret = copy_to_user((void __user *)(unsigned long)hdr->response,
				   rq->sense, len);
		if (!ret)
			hdr->response_len = len;
		else
			ret = -EFAULT;
	}

	if (rq->next_rq) {
		hdr->dout_resid = rq->resid_len;
		hdr->din_resid = rq->next_rq->resid_len;
		blk_rq_unmap_user(bidi_bio);
		rq->next_rq->bio = NULL;
		blk_put_request(rq->next_rq);
	} else if (rq_data_dir(rq) == READ)
		hdr->din_resid = rq->resid_len;
	else
		hdr->dout_resid = rq->resid_len;

	/*
	 * If the request generated a negative error number, return it
	 * (providing we aren't already returning an error); if it's
	 * just a protocol response (i.e. non negative), that gets
	 * processed above.
	 */
	if (!ret && rq->errors < 0)
		ret = rq->errors;

	blk_rq_unmap_user(bio);
	if (rq->cmd != rq->__cmd)
		kfree(rq->cmd);
	rq->bio = NULL;
	blk_put_request(rq);

	return ret;
}

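/*
 * On the user-space side, the output members filled in above are typically
 * checked like this (sketch; 'hdr' is the reaped sg_io_v4):
 *
 *	if (hdr.info & SG_INFO_CHECK) {
 *		// command failed; sense data, if any, was copied to the
 *		// buffer at hdr.response (hdr.response_len bytes)
 *	}
 *	// hdr.din_resid / hdr.dout_resid hold untransferred byte counts
 */
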
static int bsg_complete_all_commands(struct bsg_device *bd)
{
	struct bsg_command *bc;
	int ret, tret;

	dprintk("%s: entered\n", bd->name);

	/*
	 * wait for all commands to complete
	 */
	ret = 0;
	do {
		ret = bsg_io_schedule(bd);
		/*
		 * look for -ENODATA specifically -- we'll sometimes get
		 * -ERESTARTSYS when we've taken a signal, but we can't
		 * return until we're done freeing the queue, so ignore
		 * it.  The signal will get handled when we're done freeing
		 * the bsg_device.
		 */
	} while (ret != -ENODATA);

	/*
	 * discard done commands
	 */
	ret = 0;
	do {
		spin_lock_irq(&bd->lock);
		if (!bd->queued_cmds) {
			spin_unlock_irq(&bd->lock);
			break;
		}
		spin_unlock_irq(&bd->lock);

		bc = bsg_get_done_cmd(bd);
		if (IS_ERR(bc))
			break;

		tret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio,
						bc->bidi_bio);
		if (!ret)
			ret = tret;

		bsg_free_command(bc);
	} while (1);

	return ret;
}

static int
__bsg_read(char __user *buf, size_t count, struct bsg_device *bd,
	   const struct iovec *iov, ssize_t *bytes_read)
{
	struct bsg_command *bc;
	int nr_commands, ret;

	if (count % sizeof(struct sg_io_v4))
		return -EINVAL;

	ret = 0;
	nr_commands = count / sizeof(struct sg_io_v4);
	while (nr_commands) {
		bc = bsg_get_done_cmd(bd);
		if (IS_ERR(bc)) {
			ret = PTR_ERR(bc);
			break;
		}

		/*
		 * this is the only case where we need to copy data back
		 * after completing the request.  so do that here,
		 * bsg_complete_work() cannot do that for us
		 */
		ret = blk_complete_sgv4_hdr_rq(bc->rq, &bc->hdr, bc->bio,
					       bc->bidi_bio);

		if (copy_to_user(buf, &bc->hdr, sizeof(bc->hdr)))
			ret = -EFAULT;

		bsg_free_command(bc);

		if (ret)
			break;

		buf += sizeof(struct sg_io_v4);
		*bytes_read += sizeof(struct sg_io_v4);
		nr_commands--;
	}

	return ret;
}

static inline void bsg_set_block(struct bsg_device *bd, struct file *file)
{
	if (file->f_flags & O_NONBLOCK)
		clear_bit(BSG_F_BLOCK, &bd->flags);
	else
		set_bit(BSG_F_BLOCK, &bd->flags);
}

/*
 * Check if the error is a "real" error that we should return.
 */
static inline int err_block_err(int ret)
{
	if (ret && ret != -ENOSPC && ret != -ENODATA && ret != -EAGAIN)
		return 1;

	return 0;
}

static ssize_t
bsg_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	struct bsg_device *bd = file->private_data;
	int ret;
	ssize_t bytes_read;

	dprintk("%s: read %Zd bytes\n", bd->name, count);

	bsg_set_block(bd, file);

	bytes_read = 0;
	ret = __bsg_read(buf, count, bd, NULL, &bytes_read);
	*ppos = bytes_read;

	if (!bytes_read || (bytes_read && err_block_err(ret)))
		bytes_read = ret;

	return bytes_read;
}

static int __bsg_write(struct bsg_device *bd, const char __user *buf,
		       size_t count, ssize_t *bytes_written,
		       fmode_t has_write_perm)
{
	struct bsg_command *bc;
	struct request *rq;
	int ret, nr_commands;

	if (count % sizeof(struct sg_io_v4))
		return -EINVAL;

	nr_commands = count / sizeof(struct sg_io_v4);
	rq = NULL;
	bc = NULL;
	ret = 0;
	while (nr_commands) {
		struct request_queue *q = bd->queue;

		bc = bsg_alloc_command(bd);
		if (IS_ERR(bc)) {
			ret = PTR_ERR(bc);
			bc = NULL;
			break;
		}

		if (copy_from_user(&bc->hdr, buf, sizeof(bc->hdr))) {
			ret = -EFAULT;
			break;
		}

		/*
		 * get a request, fill in the blanks, and add to request queue
		 */
		rq = bsg_map_hdr(bd, &bc->hdr, has_write_perm, bc->sense);
		if (IS_ERR(rq)) {
			ret = PTR_ERR(rq);
			rq = NULL;
			break;
		}

		bsg_add_command(bd, q, bc, rq);
		bc = NULL;
		rq = NULL;
		nr_commands--;
		buf += sizeof(struct sg_io_v4);
		*bytes_written += sizeof(struct sg_io_v4);
	}

	if (bc)
		bsg_free_command(bc);

	return ret;
}

static ssize_t
bsg_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
	struct bsg_device *bd = file->private_data;
	ssize_t bytes_written;
	int ret;

	dprintk("%s: write %Zd bytes\n", bd->name, count);

	bsg_set_block(bd, file);

	bytes_written = 0;
	ret = __bsg_write(bd, buf, count, &bytes_written,
			  file->f_mode & FMODE_WRITE);

	*ppos = bytes_written;

	/*
	 * return bytes written on non-fatal errors
	 */
	if (!bytes_written || (bytes_written && err_block_err(ret)))
		bytes_written = ret;

	dprintk("%s: returning %Zd\n", bd->name, bytes_written);
	return bytes_written;
}

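/*
 * A sketch of the asynchronous path implemented by bsg_write() and
 * bsg_read() above: queue a command by write()ing an sg_io_v4, wait with
 * poll(), then reap the completed header with read().  'fd' is an already
 * open bsg node, as in the example at the top of this file.
 *
 *	struct sg_io_v4 hdr = ...;		// filled in as for SG_IO
 *	write(fd, &hdr, sizeof(hdr));		// submit, don't wait
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *	poll(&pfd, 1, -1);			// wait for a completion
 *	read(fd, &hdr, sizeof(hdr));		// reap status and residuals
 */
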
static struct bsg_device *bsg_alloc_device(void)
{
	struct bsg_device *bd;

	bd = kzalloc(sizeof(struct bsg_device), GFP_KERNEL);
	if (unlikely(!bd))
		return NULL;

	spin_lock_init(&bd->lock);

	bd->max_queue = BSG_DEFAULT_CMDS;

	INIT_LIST_HEAD(&bd->busy_list);
	INIT_LIST_HEAD(&bd->done_list);
	INIT_HLIST_NODE(&bd->dev_list);

	init_waitqueue_head(&bd->wq_free);
	init_waitqueue_head(&bd->wq_done);
	return bd;
}

static void bsg_kref_release_function(struct kref *kref)
{
	struct bsg_class_device *bcd =
		container_of(kref, struct bsg_class_device, ref);
	struct device *parent = bcd->parent;

	if (bcd->release)
		bcd->release(bcd->parent);

	put_device(parent);
}

static int bsg_put_device(struct bsg_device *bd)
{
	int ret = 0, do_free;
	struct request_queue *q = bd->queue;

	mutex_lock(&bsg_mutex);

	do_free = atomic_dec_and_test(&bd->ref_count);
	if (!do_free) {
		mutex_unlock(&bsg_mutex);
		goto out;
	}

	hlist_del(&bd->dev_list);
	mutex_unlock(&bsg_mutex);

	dprintk("%s: tearing down\n", bd->name);

	/*
	 * close can always block
	 */
	set_bit(BSG_F_BLOCK, &bd->flags);

	/*
	 * correct error detection baddies here again.  it's the responsibility
	 * of the app to properly reap commands before close() if it wants
	 * fool-proof error detection
	 */
	ret = bsg_complete_all_commands(bd);

	kfree(bd);
out:
	kref_put(&q->bsg_dev.ref, bsg_kref_release_function);
	if (do_free)
		blk_put_queue(q);
	return ret;
}

static struct bsg_device *bsg_add_device(struct inode *inode,
					 struct request_queue *rq,
					 struct file *file)
{
	struct bsg_device *bd;
	int ret;
#ifdef BSG_DEBUG
	unsigned char buf[32];
#endif
	ret = blk_get_queue(rq);
	if (ret)
		return ERR_PTR(-ENXIO);

	bd = bsg_alloc_device();
	if (!bd) {
		blk_put_queue(rq);
		return ERR_PTR(-ENOMEM);
	}

	bd->queue = rq;

	bsg_set_block(bd, file);

	atomic_set(&bd->ref_count, 1);
	mutex_lock(&bsg_mutex);
	hlist_add_head(&bd->dev_list, bsg_dev_idx_hash(iminor(inode)));

	strncpy(bd->name, dev_name(rq->bsg_dev.class_dev), sizeof(bd->name) - 1);
	dprintk("bound to <%s>, max queue %d\n",
		format_dev_t(buf, inode->i_rdev), bd->max_queue);

	mutex_unlock(&bsg_mutex);
	return bd;
}

static struct bsg_device *__bsg_get_device(int minor, struct request_queue *q)
{
	struct bsg_device *bd;
	struct hlist_node *entry;

	mutex_lock(&bsg_mutex);

	hlist_for_each_entry(bd, entry, bsg_dev_idx_hash(minor), dev_list) {
		if (bd->queue == q) {
			atomic_inc(&bd->ref_count);
			goto found;
		}
	}
	bd = NULL;
found:
	mutex_unlock(&bsg_mutex);
	return bd;
}

static struct bsg_device *bsg_get_device(struct inode *inode, struct file *file)
{
	struct bsg_device *bd;
	struct bsg_class_device *bcd;

	/*
	 * find the class device
	 */
	mutex_lock(&bsg_mutex);
	bcd = idr_find(&bsg_minor_idr, iminor(inode));
	if (bcd)
		kref_get(&bcd->ref);
	mutex_unlock(&bsg_mutex);

	if (!bcd)
		return ERR_PTR(-ENODEV);

	bd = __bsg_get_device(iminor(inode), bcd->queue);
	if (bd)
		return bd;

	bd = bsg_add_device(inode, bcd->queue, file);
	if (IS_ERR(bd))
		kref_put(&bcd->ref, bsg_kref_release_function);

	return bd;
}

static int bsg_open(struct inode *inode, struct file *file)
{
	struct bsg_device *bd;

	lock_kernel();
	bd = bsg_get_device(inode, file);
	unlock_kernel();

	if (IS_ERR(bd))
		return PTR_ERR(bd);

	file->private_data = bd;
	return 0;
}

static int bsg_release(struct inode *inode, struct file *file)
{
	struct bsg_device *bd = file->private_data;

	file->private_data = NULL;
	return bsg_put_device(bd);
}

static unsigned int bsg_poll(struct file *file, poll_table *wait)
{
	struct bsg_device *bd = file->private_data;
	unsigned int mask = 0;

	poll_wait(file, &bd->wq_done, wait);
	poll_wait(file, &bd->wq_free, wait);

	spin_lock_irq(&bd->lock);
	if (!list_empty(&bd->done_list))
		mask |= POLLIN | POLLRDNORM;
	if (bd->queued_cmds < bd->max_queue)
		mask |= POLLOUT;
	spin_unlock_irq(&bd->lock);

	return mask;
}

static long bsg_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct bsg_device *bd = file->private_data;
	int __user *uarg = (int __user *) arg;
	int ret;

	switch (cmd) {
		/*
		 * our own ioctls
		 */
	case SG_GET_COMMAND_Q:
		return put_user(bd->max_queue, uarg);
	case SG_SET_COMMAND_Q: {
		int queue;

		if (get_user(queue, uarg))
			return -EFAULT;
		if (queue < 1)
			return -EINVAL;

		spin_lock_irq(&bd->lock);
		bd->max_queue = queue;
		spin_unlock_irq(&bd->lock);
		return 0;
	}

	/*
	 * SCSI/sg ioctls
	 */
	case SG_GET_VERSION_NUM:
	case SCSI_IOCTL_GET_IDLUN:
	case SCSI_IOCTL_GET_BUS_NUMBER:
	case SG_SET_TIMEOUT:
	case SG_GET_TIMEOUT:
	case SG_GET_RESERVED_SIZE:
	case SG_SET_RESERVED_SIZE:
	case SG_EMULATED_HOST:
	case SCSI_IOCTL_SEND_COMMAND: {
		void __user *uarg = (void __user *) arg;
		return scsi_cmd_ioctl(bd->queue, NULL, file->f_mode, cmd, uarg);
	}
	case SG_IO: {
		struct request *rq;
		struct bio *bio, *bidi_bio = NULL;
		struct sg_io_v4 hdr;
		int at_head;
		u8 sense[SCSI_SENSE_BUFFERSIZE];

		if (copy_from_user(&hdr, uarg, sizeof(hdr)))
			return -EFAULT;

		rq = bsg_map_hdr(bd, &hdr, file->f_mode & FMODE_WRITE, sense);
		if (IS_ERR(rq))
			return PTR_ERR(rq);

		bio = rq->bio;
		if (rq->next_rq)
			bidi_bio = rq->next_rq->bio;

		at_head = (0 == (hdr.flags & BSG_FLAG_Q_AT_TAIL));
		blk_execute_rq(bd->queue, NULL, rq, at_head);
		ret = blk_complete_sgv4_hdr_rq(rq, &hdr, bio, bidi_bio);

		if (copy_to_user(uarg, &hdr, sizeof(hdr)))
			return -EFAULT;

		return ret;
	}
	/*
	 * block device ioctls
	 */
	default:
#if 0
		return ioctl_by_bdev(bd->bdev, cmd, arg);
#else
		return -ENOTTY;
#endif
	}
}

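/*
 * Illustrative user-space use of the queue-depth ioctls handled above;
 * the depth value is an arbitrary example.
 *
 *	int depth = 16;
 *	ioctl(fd, SG_SET_COMMAND_Q, &depth);	// allow 16 queued commands
 *	ioctl(fd, SG_GET_COMMAND_Q, &depth);	// read the current limit back
 */
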
static const struct file_operations bsg_fops = {
	.read		=	bsg_read,
	.write		=	bsg_write,
	.poll		=	bsg_poll,
	.open		=	bsg_open,
	.release	=	bsg_release,
	.unlocked_ioctl	=	bsg_ioctl,
	.owner		=	THIS_MODULE,
};

void bsg_unregister_queue(struct request_queue *q)
{
	struct bsg_class_device *bcd = &q->bsg_dev;

	if (!bcd->class_dev)
		return;

	mutex_lock(&bsg_mutex);
	idr_remove(&bsg_minor_idr, bcd->minor);
	sysfs_remove_link(&q->kobj, "bsg");
	device_unregister(bcd->class_dev);
	bcd->class_dev = NULL;
	kref_put(&bcd->ref, bsg_kref_release_function);
	mutex_unlock(&bsg_mutex);
}
EXPORT_SYMBOL_GPL(bsg_unregister_queue);

int bsg_register_queue(struct request_queue *q, struct device *parent,
		       const char *name, void (*release)(struct device *))
{
	struct bsg_class_device *bcd;
	dev_t dev;
	int ret, minor;
	struct device *class_dev = NULL;
	const char *devname;

	if (name)
		devname = name;
	else
		devname = dev_name(parent);

	/*
	 * we need a proper transport to send commands, not a stacked device
	 */
	if (!q->request_fn)
		return 0;

	bcd = &q->bsg_dev;
	memset(bcd, 0, sizeof(*bcd));

	mutex_lock(&bsg_mutex);

	ret = idr_pre_get(&bsg_minor_idr, GFP_KERNEL);
	if (!ret) {
		ret = -ENOMEM;
		goto unlock;
	}

	ret = idr_get_new(&bsg_minor_idr, bcd, &minor);
	if (ret < 0)
		goto unlock;

	if (minor >= BSG_MAX_DEVS) {
		printk(KERN_ERR "bsg: too many bsg devices\n");
		ret = -EINVAL;
		goto remove_idr;
	}

	bcd->minor = minor;
	bcd->queue = q;
	bcd->parent = get_device(parent);
	bcd->release = release;
	kref_init(&bcd->ref);
	dev = MKDEV(bsg_major, bcd->minor);
	class_dev = device_create(bsg_class, parent, dev, NULL, "%s", devname);
	if (IS_ERR(class_dev)) {
		ret = PTR_ERR(class_dev);
		goto put_dev;
	}
	bcd->class_dev = class_dev;

	if (q->kobj.sd) {
		ret = sysfs_create_link(&q->kobj, &bcd->class_dev->kobj, "bsg");
		if (ret)
			goto unregister_class_dev;
	}

	mutex_unlock(&bsg_mutex);
	return 0;

unregister_class_dev:
	device_unregister(class_dev);
put_dev:
	put_device(parent);
remove_idr:
	idr_remove(&bsg_minor_idr, minor);
unlock:
	mutex_unlock(&bsg_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(bsg_register_queue);

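/*
 * A sketch of how a transport driver might hook its request queue up to
 * bsg; 'sdev' and my_release() are hypothetical here (the real callers
 * are the SCSI midlayer and the transport classes).
 *
 *	static void my_release(struct device *dev)
 *	{
 *		// drop whatever reference was taken before registering
 *	}
 *
 *	ret = bsg_register_queue(sdev->request_queue, &sdev->sdev_gendev,
 *				 NULL, my_release);
 *	// a NULL name means the node is named after dev_name(parent)
 */
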
static struct cdev bsg_cdev;

static char *bsg_nodename(struct device *dev)
{
	return kasprintf(GFP_KERNEL, "bsg/%s", dev_name(dev));
}

static int __init bsg_init(void)
{
	int ret, i;
	dev_t devid;

	bsg_cmd_cachep = kmem_cache_create("bsg_cmd",
				sizeof(struct bsg_command), 0, 0, NULL);
	if (!bsg_cmd_cachep) {
		printk(KERN_ERR "bsg: failed creating slab cache\n");
		return -ENOMEM;
	}

	for (i = 0; i < BSG_LIST_ARRAY_SIZE; i++)
		INIT_HLIST_HEAD(&bsg_device_list[i]);

	bsg_class = class_create(THIS_MODULE, "bsg");
	if (IS_ERR(bsg_class)) {
		ret = PTR_ERR(bsg_class);
		goto destroy_kmemcache;
	}
	bsg_class->nodename = bsg_nodename;

	ret = alloc_chrdev_region(&devid, 0, BSG_MAX_DEVS, "bsg");
	if (ret)
		goto destroy_bsg_class;

	bsg_major = MAJOR(devid);

	cdev_init(&bsg_cdev, &bsg_fops);
	ret = cdev_add(&bsg_cdev, MKDEV(bsg_major, 0), BSG_MAX_DEVS);
	if (ret)
		goto unregister_chrdev;

	printk(KERN_INFO BSG_DESCRIPTION " version " BSG_VERSION
	       " loaded (major %d)\n", bsg_major);
	return 0;
unregister_chrdev:
	unregister_chrdev_region(MKDEV(bsg_major, 0), BSG_MAX_DEVS);
destroy_bsg_class:
	class_destroy(bsg_class);
destroy_kmemcache:
	kmem_cache_destroy(bsg_cmd_cachep);
	return ret;
}

MODULE_AUTHOR("Jens Axboe");
MODULE_DESCRIPTION(BSG_DESCRIPTION);
MODULE_LICENSE("GPL");

device_initcall(bsg_init);