/*
 * Virtio SCSI HBA driver
 *
 * Copyright IBM Corp. 2010
 * Copyright Red Hat, Inc. 2011
 *
 * Authors:
 *   Stefan Hajnoczi   <stefanha@linux.vnet.ibm.com>
 *   Paolo Bonzini     <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mempool.h>
#include <linux/virtio.h>
#include <linux/virtio_ids.h>
#include <linux/virtio_config.h>
#include <linux/virtio_scsi.h>
#include <linux/cpu.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>

#define VIRTIO_SCSI_MEMPOOL_SZ 64
#define VIRTIO_SCSI_EVENT_LEN 8
#define VIRTIO_SCSI_VQ_BASE 2
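
/*
 * Request virtqueues start at VIRTIO_SCSI_VQ_BASE because index 0 is the
 * control queue and index 1 is the event queue (see the names[] array set
 * up in virtscsi_init below).
 */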

/* Command queue element */
struct virtio_scsi_cmd {
        struct scsi_cmnd *sc;
        struct completion *comp;
        union {
                struct virtio_scsi_cmd_req       cmd;
                struct virtio_scsi_ctrl_tmf_req  tmf;
                struct virtio_scsi_ctrl_an_req   an;
        } req;
        union {
                struct virtio_scsi_cmd_resp      cmd;
                struct virtio_scsi_ctrl_tmf_resp tmf;
                struct virtio_scsi_ctrl_an_resp  an;
                struct virtio_scsi_event         evt;
        } resp;
} ____cacheline_aligned_in_smp;

struct virtio_scsi_event_node {
        struct virtio_scsi *vscsi;
        struct virtio_scsi_event event;
        struct work_struct work;
};

struct virtio_scsi_vq {
        /* Protects vq */
        spinlock_t vq_lock;

        struct virtqueue *vq;
};
/*
 * Per-target queue state.
 *
 * This struct holds the data needed by the queue steering policy. When a
 * target is sent multiple requests, we need to drive them to the same queue so
 * that FIFO processing order is kept. However, if a target was idle, we can
 * choose a queue arbitrarily. In this case the queue is chosen according to
 * the current VCPU, so the driver expects the number of request queues to be
 * equal to the number of VCPUs. This makes it easy and fast to select the
 * queue, and also lets the driver optimize the IRQ affinity for the virtqueues
 * (each virtqueue's affinity is set to the CPU that "owns" the queue).
 *
 * An interesting effect of this policy is that only writes to req_vq need to
 * take the tgt_lock. Reads can be done outside the lock because:
 *
 * - writes of req_vq only occur when atomic_inc_return(&tgt->reqs) returns 1.
 *   In that case, no other CPU is reading req_vq: even if they were in
 *   virtscsi_queuecommand_multi, they would be spinning on tgt_lock.
 *
 * - reads of req_vq only occur when the target is not idle (reqs != 0).
 *   A CPU that enters virtscsi_queuecommand_multi will not modify req_vq.
 *
 * Similarly, decrements of reqs are never concurrent with writes of req_vq.
 * Thus they can happen outside the tgt_lock, provided of course we make reqs
 * an atomic_t.
 */
struct virtio_scsi_target_state {
        /* This spinlock is never held at the same time as vq_lock. */
        spinlock_t tgt_lock;

        /* Count of outstanding requests. */
        atomic_t reqs;

        /* Currently active virtqueue for requests sent to this target. */
        struct virtio_scsi_vq *req_vq;
};

/* Driver instance state */
struct virtio_scsi {
        struct virtio_device *vdev;

        /* Get some buffers ready for event vq */
        struct virtio_scsi_event_node event_list[VIRTIO_SCSI_EVENT_LEN];

        u32 num_queues;

        /* If the affinity hint is set for virtqueues */
        bool affinity_hint_set;

        /* CPU hotplug notifier */
        struct notifier_block nb;

        struct virtio_scsi_vq ctrl_vq;
        struct virtio_scsi_vq event_vq;
        struct virtio_scsi_vq req_vqs[];
};

static struct kmem_cache *virtscsi_cmd_cache;
static mempool_t *virtscsi_cmd_pool;

static inline struct Scsi_Host *virtio_scsi_host(struct virtio_device *vdev)
{
        return vdev->priv;
}
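
/*
 * Residual handling: for a bidirectional command, the single resid value
 * reported by the device is split between the data-in and data-out buffers
 * below. For example, with resid = 300 and a 100-byte data-in buffer, the
 * data-in side absorbs 100 and the remaining 200 are charged to data-out.
 */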
static void virtscsi_compute_resid(struct scsi_cmnd *sc, u32 resid)
{
        if (!resid)
                return;

        if (!scsi_bidi_cmnd(sc)) {
                scsi_set_resid(sc, resid);
                return;
        }

        scsi_in(sc)->resid = min(resid, scsi_in(sc)->length);
        scsi_out(sc)->resid = resid - scsi_in(sc)->resid;
}

/**
 * virtscsi_complete_cmd - finish a scsi_cmd and invoke scsi_done
 *
 * Called with vq_lock held.
 */
static void virtscsi_complete_cmd(struct virtio_scsi *vscsi, void *buf)
{
        struct virtio_scsi_cmd *cmd = buf;
        struct scsi_cmnd *sc = cmd->sc;
        struct virtio_scsi_cmd_resp *resp = &cmd->resp.cmd;
        struct virtio_scsi_target_state *tgt =
                                scsi_target(sc->device)->hostdata;

        dev_dbg(&sc->device->sdev_gendev,
                "cmd %p response %u status %#02x sense_len %u\n",
                sc, resp->response, resp->status, resp->sense_len);

        sc->result = resp->status;
        virtscsi_compute_resid(sc, resp->resid);
        switch (resp->response) {
        case VIRTIO_SCSI_S_OK:
                set_host_byte(sc, DID_OK);
                break;
        case VIRTIO_SCSI_S_OVERRUN:
                set_host_byte(sc, DID_ERROR);
                break;
        case VIRTIO_SCSI_S_ABORTED:
                set_host_byte(sc, DID_ABORT);
                break;
        case VIRTIO_SCSI_S_BAD_TARGET:
                set_host_byte(sc, DID_BAD_TARGET);
                break;
        case VIRTIO_SCSI_S_RESET:
                set_host_byte(sc, DID_RESET);
                break;
        case VIRTIO_SCSI_S_BUSY:
                set_host_byte(sc, DID_BUS_BUSY);
                break;
        case VIRTIO_SCSI_S_TRANSPORT_FAILURE:
                set_host_byte(sc, DID_TRANSPORT_DISRUPTED);
                break;
        case VIRTIO_SCSI_S_TARGET_FAILURE:
                set_host_byte(sc, DID_TARGET_FAILURE);
                break;
        case VIRTIO_SCSI_S_NEXUS_FAILURE:
                set_host_byte(sc, DID_NEXUS_FAILURE);
                break;
        default:
                scmd_printk(KERN_WARNING, sc, "Unknown response %d",
                            resp->response);
                /* fall through */
        case VIRTIO_SCSI_S_FAILURE:
                set_host_byte(sc, DID_ERROR);
                break;
        }

        WARN_ON(resp->sense_len > VIRTIO_SCSI_SENSE_SIZE);
        if (sc->sense_buffer) {
                memcpy(sc->sense_buffer, resp->sense,
                       min_t(u32, resp->sense_len, VIRTIO_SCSI_SENSE_SIZE));
                if (resp->sense_len)
                        set_driver_byte(sc, DRIVER_SENSE);
        }

        mempool_free(cmd, virtscsi_cmd_pool);
        sc->scsi_done(sc);
        atomic_dec(&tgt->reqs);
}
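
/*
 * Drain a virtqueue with callbacks disabled, then re-enable them and check
 * once more: virtqueue_enable_cb() returns false if more buffers arrived
 * while callbacks were off, which closes the race between the final
 * virtqueue_get_buf() and re-enabling the interrupt.
 */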
static void virtscsi_vq_done(struct virtio_scsi *vscsi,
                             struct virtio_scsi_vq *virtscsi_vq,
                             void (*fn)(struct virtio_scsi *vscsi, void *buf))
{
        void *buf;
        unsigned int len;
        unsigned long flags;
        struct virtqueue *vq = virtscsi_vq->vq;

        spin_lock_irqsave(&virtscsi_vq->vq_lock, flags);
        do {
                virtqueue_disable_cb(vq);
                while ((buf = virtqueue_get_buf(vq, &len)) != NULL)
                        fn(vscsi, buf);
        } while (!virtqueue_enable_cb(vq));
        spin_unlock_irqrestore(&virtscsi_vq->vq_lock, flags);
}

static void virtscsi_req_done(struct virtqueue *vq)
{
        struct Scsi_Host *sh = virtio_scsi_host(vq->vdev);
        struct virtio_scsi *vscsi = shost_priv(sh);
        int index = vq->index - VIRTIO_SCSI_VQ_BASE;
        struct virtio_scsi_vq *req_vq = &vscsi->req_vqs[index];

        /*
         * Read req_vq before decrementing the reqs field in
         * virtscsi_complete_cmd.
         *
         * With barriers:
         *
         *      CPU #0                  virtscsi_queuecommand_multi (CPU #1)
         *      ------------------------------------------------------------
         *      lock vq_lock
         *      read req_vq
         *      read reqs (reqs = 1)
         *      write reqs (reqs = 0)
         *                              increment reqs (reqs = 1)
         *                              write req_vq
         *
         * Possible reordering without barriers:
         *
         *      CPU #0                  virtscsi_queuecommand_multi (CPU #1)
         *      ------------------------------------------------------------
         *      lock vq_lock
         *      read reqs (reqs = 1)
         *      write reqs (reqs = 0)
         *                              increment reqs (reqs = 1)
         *                              write req_vq
         *      read (wrong) req_vq
         *
         * We do not need a full smp_rmb, because req_vq is required to get
         * to tgt->reqs: tgt is &vscsi->tgt[sc->device->id], where sc is stored
         * in the virtqueue as the user token.
         */
        smp_read_barrier_depends();

        virtscsi_vq_done(vscsi, req_vq, virtscsi_complete_cmd);
}

static void virtscsi_complete_free(struct virtio_scsi *vscsi, void *buf)
{
        struct virtio_scsi_cmd *cmd = buf;

        if (cmd->comp)
                complete_all(cmd->comp);
        else
                mempool_free(cmd, virtscsi_cmd_pool);
}

static void virtscsi_ctrl_done(struct virtqueue *vq)
{
        struct Scsi_Host *sh = virtio_scsi_host(vq->vdev);
        struct virtio_scsi *vscsi = shost_priv(sh);

        virtscsi_vq_done(vscsi, &vscsi->ctrl_vq, virtscsi_complete_free);
}

static int virtscsi_kick_event(struct virtio_scsi *vscsi,
                               struct virtio_scsi_event_node *event_node)
{
        int err;
        struct scatterlist sg;
        unsigned long flags;

        sg_init_one(&sg, &event_node->event, sizeof(struct virtio_scsi_event));

        spin_lock_irqsave(&vscsi->event_vq.vq_lock, flags);

        err = virtqueue_add_inbuf(vscsi->event_vq.vq, &sg, 1, event_node,
                                  GFP_ATOMIC);
        if (!err)
                virtqueue_kick(vscsi->event_vq.vq);

        spin_unlock_irqrestore(&vscsi->event_vq.vq_lock, flags);

        return err;
}

static int virtscsi_kick_event_all(struct virtio_scsi *vscsi)
{
        int i;

        for (i = 0; i < VIRTIO_SCSI_EVENT_LEN; i++) {
                vscsi->event_list[i].vscsi = vscsi;
                virtscsi_kick_event(vscsi, &vscsi->event_list[i]);
        }

        return 0;
}

static void virtscsi_cancel_event_work(struct virtio_scsi *vscsi)
{
        int i;

        for (i = 0; i < VIRTIO_SCSI_EVENT_LEN; i++)
                cancel_work_sync(&vscsi->event_list[i].work);
}
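
/*
 * Events address the affected device through event->lun; per the virtio-scsi
 * spec this uses the same format as requests (byte 1 is the target, bytes
 * 2-3 the flat-format LUN), so the decoded LUN falls in the 0x4000-offset
 * range that max_lun accounts for in virtscsi_probe().
 */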
static void virtscsi_handle_transport_reset(struct virtio_scsi *vscsi,
                                            struct virtio_scsi_event *event)
{
        struct scsi_device *sdev;
        struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
        unsigned int target = event->lun[1];
        unsigned int lun = (event->lun[2] << 8) | event->lun[3];

        switch (event->reason) {
        case VIRTIO_SCSI_EVT_RESET_RESCAN:
                scsi_add_device(shost, 0, target, lun);
                break;
        case VIRTIO_SCSI_EVT_RESET_REMOVED:
                sdev = scsi_device_lookup(shost, 0, target, lun);
                if (sdev) {
                        scsi_remove_device(sdev);
                        scsi_device_put(sdev);
                } else {
                        pr_err("SCSI device %d 0 %d %d not found\n",
                               shost->host_no, target, lun);
                }
                break;
        default:
                pr_info("Unsupported virtio scsi event reason %x\n", event->reason);
        }
}

static void virtscsi_handle_param_change(struct virtio_scsi *vscsi,
                                         struct virtio_scsi_event *event)
{
        struct scsi_device *sdev;
        struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
        unsigned int target = event->lun[1];
        unsigned int lun = (event->lun[2] << 8) | event->lun[3];
        u8 asc = event->reason & 255;
        u8 ascq = event->reason >> 8;

        sdev = scsi_device_lookup(shost, 0, target, lun);
        if (!sdev) {
                pr_err("SCSI device %d 0 %d %d not found\n",
                       shost->host_no, target, lun);
                return;
        }

        /* Handle "Parameters changed", "Mode parameters changed", and
         * "Capacity data has changed". */
        if (asc == 0x2a && (ascq == 0x00 || ascq == 0x01 || ascq == 0x09))
                scsi_rescan_device(&sdev->sdev_gendev);

        scsi_device_put(sdev);
}

static void virtscsi_handle_event(struct work_struct *work)
{
        struct virtio_scsi_event_node *event_node =
                container_of(work, struct virtio_scsi_event_node, work);
        struct virtio_scsi *vscsi = event_node->vscsi;
        struct virtio_scsi_event *event = &event_node->event;

        if (event->event & VIRTIO_SCSI_T_EVENTS_MISSED) {
                event->event &= ~VIRTIO_SCSI_T_EVENTS_MISSED;
                scsi_scan_host(virtio_scsi_host(vscsi->vdev));
        }

        switch (event->event) {
        case VIRTIO_SCSI_T_NO_EVENT:
                break;
        case VIRTIO_SCSI_T_TRANSPORT_RESET:
                virtscsi_handle_transport_reset(vscsi, event);
                break;
        case VIRTIO_SCSI_T_PARAM_CHANGE:
                virtscsi_handle_param_change(vscsi, event);
                break;
        default:
                pr_err("Unsupported virtio scsi event %x\n", event->event);
        }
        virtscsi_kick_event(vscsi, event_node);
}

static void virtscsi_complete_event(struct virtio_scsi *vscsi, void *buf)
{
        struct virtio_scsi_event_node *event_node = buf;

        INIT_WORK(&event_node->work, virtscsi_handle_event);
        schedule_work(&event_node->work);
}

static void virtscsi_event_done(struct virtqueue *vq)
{
        struct Scsi_Host *sh = virtio_scsi_host(vq->vdev);
        struct virtio_scsi *vscsi = shost_priv(sh);

        virtscsi_vq_done(vscsi, &vscsi->event_vq, virtscsi_complete_event);
}

/**
 * virtscsi_add_cmd - add a virtio_scsi_cmd to a virtqueue
 * @vq        : the struct virtqueue we're talking about
 * @cmd       : command structure
 * @req_size  : size of the request buffer
 * @resp_size : size of the response buffer
 * @gfp       : flags to use for memory allocations
 */
static int virtscsi_add_cmd(struct virtqueue *vq,
                            struct virtio_scsi_cmd *cmd,
                            size_t req_size, size_t resp_size, gfp_t gfp)
{
        struct scsi_cmnd *sc = cmd->sc;
        struct scatterlist *sgs[4], req, resp;
        struct sg_table *out, *in;
        unsigned out_num = 0, in_num = 0;

        out = in = NULL;

        if (sc && sc->sc_data_direction != DMA_NONE) {
                if (sc->sc_data_direction != DMA_FROM_DEVICE)
                        out = &scsi_out(sc)->table;
                if (sc->sc_data_direction != DMA_TO_DEVICE)
                        in = &scsi_in(sc)->table;
        }
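
        /*
         * Build sgs[] in the order virtqueue_add_sgs() requires: all
         * driver-readable entries (request header, then data-out) before
         * all driver-writable ones (response header, then data-in).
         */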
        /* Request header. */
        sg_init_one(&req, &cmd->req, req_size);
        sgs[out_num++] = &req;

        /* Data-out buffer. */
        if (out)
                sgs[out_num++] = out->sgl;

        /* Response header. */
        sg_init_one(&resp, &cmd->resp, resp_size);
        sgs[out_num + in_num++] = &resp;

        /* Data-in buffer */
        if (in)
                sgs[out_num + in_num++] = in->sgl;

        return virtqueue_add_sgs(vq, sgs, out_num, in_num, cmd, gfp);
}

static int virtscsi_kick_cmd(struct virtio_scsi_vq *vq,
                             struct virtio_scsi_cmd *cmd,
                             size_t req_size, size_t resp_size, gfp_t gfp)
{
        unsigned long flags;
        int err;
        bool needs_kick = false;

        spin_lock_irqsave(&vq->vq_lock, flags);
        err = virtscsi_add_cmd(vq->vq, cmd, req_size, resp_size, gfp);
        if (!err)
                needs_kick = virtqueue_kick_prepare(vq->vq);

        spin_unlock_irqrestore(&vq->vq_lock, flags);

        if (needs_kick)
                virtqueue_notify(vq->vq);
        return err;
}

static int virtscsi_queuecommand(struct virtio_scsi *vscsi,
                                 struct virtio_scsi_vq *req_vq,
                                 struct scsi_cmnd *sc)
{
        struct virtio_scsi_cmd *cmd;
        int ret;

        struct Scsi_Host *shost = virtio_scsi_host(vscsi->vdev);
        BUG_ON(scsi_sg_count(sc) > shost->sg_tablesize);

        /* TODO: check feature bit and fail if unsupported? */
        BUG_ON(sc->sc_data_direction == DMA_BIDIRECTIONAL);

        dev_dbg(&sc->device->sdev_gendev,
                "cmd %p CDB: %#02x\n", sc, sc->cmnd[0]);

        ret = SCSI_MLQUEUE_HOST_BUSY;
        cmd = mempool_alloc(virtscsi_cmd_pool, GFP_ATOMIC);
        if (!cmd)
                goto out;

        memset(cmd, 0, sizeof(*cmd));
        cmd->sc = sc;
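
        /*
         * Per the virtio-scsi spec, the 8-byte LUN field starts with 1,
         * byte 1 is the target, and bytes 2-3 encode the LUN in the SAM
         * flat-space format, hence the 0x40 flag OR'd into lun[2].
         */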
        cmd->req.cmd = (struct virtio_scsi_cmd_req){
                .lun[0] = 1,
                .lun[1] = sc->device->id,
                .lun[2] = (sc->device->lun >> 8) | 0x40,
                .lun[3] = sc->device->lun & 0xff,
                .tag = (unsigned long)sc,
                .task_attr = VIRTIO_SCSI_S_SIMPLE,
                .prio = 0,
                .crn = 0,
        };

        BUG_ON(sc->cmd_len > VIRTIO_SCSI_CDB_SIZE);
        memcpy(cmd->req.cmd.cdb, sc->cmnd, sc->cmd_len);

        if (virtscsi_kick_cmd(req_vq, cmd,
                              sizeof cmd->req.cmd, sizeof cmd->resp.cmd,
                              GFP_ATOMIC) == 0)
                ret = 0;
        else
                mempool_free(cmd, virtscsi_cmd_pool);

out:
        return ret;
}

static int virtscsi_queuecommand_single(struct Scsi_Host *sh,
                                        struct scsi_cmnd *sc)
{
        struct virtio_scsi *vscsi = shost_priv(sh);
        struct virtio_scsi_target_state *tgt =
                                scsi_target(sc->device)->hostdata;

        atomic_inc(&tgt->reqs);
        return virtscsi_queuecommand(vscsi, &vscsi->req_vqs[0], sc);
}
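
/*
 * Pick the request virtqueue for a target: if the target already has
 * requests in flight, reuse its current queue to preserve FIFO order;
 * otherwise choose one keyed to the current CPU. The subtraction loop
 * reduces smp_processor_id() modulo num_queues for the (unlikely) case
 * where there are more CPUs than request queues.
 */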
static struct virtio_scsi_vq *virtscsi_pick_vq(struct virtio_scsi *vscsi,
                                               struct virtio_scsi_target_state *tgt)
{
        struct virtio_scsi_vq *vq;
        unsigned long flags;
        u32 queue_num;

        spin_lock_irqsave(&tgt->tgt_lock, flags);

        /*
         * The memory barrier after atomic_inc_return matches
         * the smp_read_barrier_depends() in virtscsi_req_done.
         */
        if (atomic_inc_return(&tgt->reqs) > 1)
                vq = ACCESS_ONCE(tgt->req_vq);
        else {
                queue_num = smp_processor_id();
                while (unlikely(queue_num >= vscsi->num_queues))
                        queue_num -= vscsi->num_queues;

                tgt->req_vq = vq = &vscsi->req_vqs[queue_num];
        }

        spin_unlock_irqrestore(&tgt->tgt_lock, flags);
        return vq;
}

static int virtscsi_queuecommand_multi(struct Scsi_Host *sh,
                                       struct scsi_cmnd *sc)
{
        struct virtio_scsi *vscsi = shost_priv(sh);
        struct virtio_scsi_target_state *tgt =
                                scsi_target(sc->device)->hostdata;
        struct virtio_scsi_vq *req_vq = virtscsi_pick_vq(vscsi, tgt);

        return virtscsi_queuecommand(vscsi, req_vq, sc);
}
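
/*
 * Task management functions complete through cmd->comp: the control-queue
 * callback (virtscsi_complete_free) signals the on-stack completion instead
 * of freeing the command, and the command is freed here once the response
 * has been read.
 */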
static int virtscsi_tmf(struct virtio_scsi *vscsi, struct virtio_scsi_cmd *cmd)
{
        DECLARE_COMPLETION_ONSTACK(comp);
        int ret = FAILED;

        cmd->comp = &comp;
        if (virtscsi_kick_cmd(&vscsi->ctrl_vq, cmd,
                              sizeof cmd->req.tmf, sizeof cmd->resp.tmf,
                              GFP_NOIO) < 0)
                goto out;

        wait_for_completion(&comp);
        if (cmd->resp.tmf.response == VIRTIO_SCSI_S_OK ||
            cmd->resp.tmf.response == VIRTIO_SCSI_S_FUNCTION_SUCCEEDED)
                ret = SUCCESS;

out:
        mempool_free(cmd, virtscsi_cmd_pool);
        return ret;
}

static int virtscsi_device_reset(struct scsi_cmnd *sc)
{
        struct virtio_scsi *vscsi = shost_priv(sc->device->host);
        struct virtio_scsi_cmd *cmd;

        sdev_printk(KERN_INFO, sc->device, "device reset\n");
        cmd = mempool_alloc(virtscsi_cmd_pool, GFP_NOIO);
        if (!cmd)
                return FAILED;

        memset(cmd, 0, sizeof(*cmd));
        cmd->sc = sc;
        cmd->req.tmf = (struct virtio_scsi_ctrl_tmf_req){
                .type = VIRTIO_SCSI_T_TMF,
                .subtype = VIRTIO_SCSI_T_TMF_LOGICAL_UNIT_RESET,
                .lun[0] = 1,
                .lun[1] = sc->device->id,
                .lun[2] = (sc->device->lun >> 8) | 0x40,
                .lun[3] = sc->device->lun & 0xff,
        };
        return virtscsi_tmf(vscsi, cmd);
}

static int virtscsi_abort(struct scsi_cmnd *sc)
{
        struct virtio_scsi *vscsi = shost_priv(sc->device->host);
        struct virtio_scsi_cmd *cmd;

        scmd_printk(KERN_INFO, sc, "abort\n");
        cmd = mempool_alloc(virtscsi_cmd_pool, GFP_NOIO);
        if (!cmd)
                return FAILED;

        memset(cmd, 0, sizeof(*cmd));
        cmd->sc = sc;
        cmd->req.tmf = (struct virtio_scsi_ctrl_tmf_req){
                .type = VIRTIO_SCSI_T_TMF,
                .subtype = VIRTIO_SCSI_T_TMF_ABORT_TASK,
                .lun[0] = 1,
                .lun[1] = sc->device->id,
                .lun[2] = (sc->device->lun >> 8) | 0x40,
                .lun[3] = sc->device->lun & 0xff,
                .tag = (unsigned long)sc,
        };
        return virtscsi_tmf(vscsi, cmd);
}

static int virtscsi_target_alloc(struct scsi_target *starget)
{
        struct virtio_scsi_target_state *tgt =
                                kmalloc(sizeof(*tgt), GFP_KERNEL);
        if (!tgt)
                return -ENOMEM;

        spin_lock_init(&tgt->tgt_lock);
        atomic_set(&tgt->reqs, 0);
        tgt->req_vq = NULL;

        starget->hostdata = tgt;
        return 0;
}

static void virtscsi_target_destroy(struct scsi_target *starget)
{
        struct virtio_scsi_target_state *tgt = starget->hostdata;
        kfree(tgt);
}

static struct scsi_host_template virtscsi_host_template_single = {
        .module = THIS_MODULE,
        .name = "Virtio SCSI HBA",
        .proc_name = "virtio_scsi",
        .this_id = -1,
        .queuecommand = virtscsi_queuecommand_single,
        .eh_abort_handler = virtscsi_abort,
        .eh_device_reset_handler = virtscsi_device_reset,

        .can_queue = 1024,
        .dma_boundary = UINT_MAX,
        .use_clustering = ENABLE_CLUSTERING,
        .target_alloc = virtscsi_target_alloc,
        .target_destroy = virtscsi_target_destroy,
};

static struct scsi_host_template virtscsi_host_template_multi = {
        .module = THIS_MODULE,
        .name = "Virtio SCSI HBA",
        .proc_name = "virtio_scsi",
        .this_id = -1,
        .queuecommand = virtscsi_queuecommand_multi,
        .eh_abort_handler = virtscsi_abort,
        .eh_device_reset_handler = virtscsi_device_reset,

        .can_queue = 1024,
        .dma_boundary = UINT_MAX,
        .use_clustering = ENABLE_CLUSTERING,
        .target_alloc = virtscsi_target_alloc,
        .target_destroy = virtscsi_target_destroy,
};

#define virtscsi_config_get(vdev, fld) \
        ({ \
                typeof(((struct virtio_scsi_config *)0)->fld) __val; \
                virtio_cread(vdev, struct virtio_scsi_config, fld, &__val); \
                __val; \
        })

#define virtscsi_config_set(vdev, fld, val) \
        do { \
                typeof(((struct virtio_scsi_config *)0)->fld) __val = (val); \
                virtio_cwrite(vdev, struct virtio_scsi_config, fld, &__val); \
        } while (0)
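
/*
 * Usage, as in virtscsi_init() and virtscsi_probe() below:
 *
 *      u32 num_queues = virtscsi_config_get(vdev, num_queues) ?: 1;
 *      virtscsi_config_set(vdev, sense_size, VIRTIO_SCSI_SENSE_SIZE);
 */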

static void __virtscsi_set_affinity(struct virtio_scsi *vscsi, bool affinity)
{
        int i;
        int cpu;

        /* In multiqueue mode, when the number of CPUs is equal to the
         * number of request queues, we let each queue be private to one
         * CPU by setting the affinity hint, to eliminate contention.
         */
        if ((vscsi->num_queues == 1 ||
             vscsi->num_queues != num_online_cpus()) && affinity) {
                if (vscsi->affinity_hint_set)
                        affinity = false;
                else
                        return;
        }

        if (affinity) {
                i = 0;
                for_each_online_cpu(cpu) {
                        virtqueue_set_affinity(vscsi->req_vqs[i].vq, cpu);
                        i++;
                }

                vscsi->affinity_hint_set = true;
        } else {
                for (i = 0; i < vscsi->num_queues; i++)
                        virtqueue_set_affinity(vscsi->req_vqs[i].vq, -1);

                vscsi->affinity_hint_set = false;
        }
}

static void virtscsi_set_affinity(struct virtio_scsi *vscsi, bool affinity)
{
        get_online_cpus();
        __virtscsi_set_affinity(vscsi, affinity);
        put_online_cpus();
}

static int virtscsi_cpu_callback(struct notifier_block *nfb,
                                 unsigned long action, void *hcpu)
{
        struct virtio_scsi *vscsi = container_of(nfb, struct virtio_scsi, nb);

        switch (action) {
        case CPU_ONLINE:
        case CPU_ONLINE_FROZEN:
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
                __virtscsi_set_affinity(vscsi, true);
                break;
        default:
                break;
        }
        return NOTIFY_OK;
}

static void virtscsi_init_vq(struct virtio_scsi_vq *virtscsi_vq,
                             struct virtqueue *vq)
{
        spin_lock_init(&virtscsi_vq->vq_lock);
        virtscsi_vq->vq = vq;
}

static void virtscsi_scan(struct virtio_device *vdev)
{
        struct Scsi_Host *shost = (struct Scsi_Host *)vdev->priv;

        scsi_scan_host(shost);
}

static void virtscsi_remove_vqs(struct virtio_device *vdev)
{
        struct Scsi_Host *sh = virtio_scsi_host(vdev);
        struct virtio_scsi *vscsi = shost_priv(sh);

        virtscsi_set_affinity(vscsi, false);

        /* Stop all the virtqueues. */
        vdev->config->reset(vdev);

        vdev->config->del_vqs(vdev);
}

static int virtscsi_init(struct virtio_device *vdev,
                         struct virtio_scsi *vscsi)
{
        int err;
        u32 i;
        u32 num_vqs;
        vq_callback_t **callbacks;
        const char **names;
        struct virtqueue **vqs;

        num_vqs = vscsi->num_queues + VIRTIO_SCSI_VQ_BASE;
        vqs = kmalloc(num_vqs * sizeof(struct virtqueue *), GFP_KERNEL);
        callbacks = kmalloc(num_vqs * sizeof(vq_callback_t *), GFP_KERNEL);
        names = kmalloc(num_vqs * sizeof(char *), GFP_KERNEL);

        if (!callbacks || !vqs || !names) {
                err = -ENOMEM;
                goto out;
        }

        callbacks[0] = virtscsi_ctrl_done;
        callbacks[1] = virtscsi_event_done;
        names[0] = "control";
        names[1] = "event";
        for (i = VIRTIO_SCSI_VQ_BASE; i < num_vqs; i++) {
                callbacks[i] = virtscsi_req_done;
                names[i] = "request";
        }

        /* Discover virtqueues and write information to configuration. */
        err = vdev->config->find_vqs(vdev, num_vqs, vqs, callbacks, names);
        if (err)
                goto out;

        virtscsi_init_vq(&vscsi->ctrl_vq, vqs[0]);
        virtscsi_init_vq(&vscsi->event_vq, vqs[1]);
        for (i = VIRTIO_SCSI_VQ_BASE; i < num_vqs; i++)
                virtscsi_init_vq(&vscsi->req_vqs[i - VIRTIO_SCSI_VQ_BASE],
                                 vqs[i]);

        virtscsi_set_affinity(vscsi, true);

        virtscsi_config_set(vdev, cdb_size, VIRTIO_SCSI_CDB_SIZE);
        virtscsi_config_set(vdev, sense_size, VIRTIO_SCSI_SENSE_SIZE);

        if (virtio_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG))
                virtscsi_kick_event_all(vscsi);

        err = 0;

out:
        kfree(names);
        kfree(callbacks);
        kfree(vqs);
        if (err)
                virtscsi_remove_vqs(vdev);
        return err;
}

static int virtscsi_probe(struct virtio_device *vdev)
{
        struct Scsi_Host *shost;
        struct virtio_scsi *vscsi;
        int err;
        u32 sg_elems, num_targets;
        u32 cmd_per_lun;
        u32 num_queues;
        struct scsi_host_template *hostt;

        /* We need to know how many queues before we allocate. */
        num_queues = virtscsi_config_get(vdev, num_queues) ? : 1;

        num_targets = virtscsi_config_get(vdev, max_target) + 1;

        if (num_queues == 1)
                hostt = &virtscsi_host_template_single;
        else
                hostt = &virtscsi_host_template_multi;

        shost = scsi_host_alloc(hostt,
                sizeof(*vscsi) + sizeof(vscsi->req_vqs[0]) * num_queues);
        if (!shost)
                return -ENOMEM;

        sg_elems = virtscsi_config_get(vdev, seg_max) ?: 1;
        shost->sg_tablesize = sg_elems;
        vscsi = shost_priv(shost);
        vscsi->vdev = vdev;
        vscsi->num_queues = num_queues;
        vdev->priv = shost;

        err = virtscsi_init(vdev, vscsi);
        if (err)
                goto virtscsi_init_failed;

        vscsi->nb.notifier_call = &virtscsi_cpu_callback;
        err = register_hotcpu_notifier(&vscsi->nb);
        if (err) {
                pr_err("registering cpu notifier failed\n");
                goto scsi_add_host_failed;
        }

        cmd_per_lun = virtscsi_config_get(vdev, cmd_per_lun) ?: 1;
        shost->cmd_per_lun = min_t(u32, cmd_per_lun, shost->can_queue);
        shost->max_sectors = virtscsi_config_get(vdev, max_sectors) ?: 0xFFFF;

        /* LUNs > 256 are reported with format 1, so they go in the range
         * 16640-32767.
         */
        shost->max_lun = virtscsi_config_get(vdev, max_lun) + 1 + 0x4000;
        shost->max_id = num_targets;
        shost->max_channel = 0;
        shost->max_cmd_len = VIRTIO_SCSI_CDB_SIZE;

        err = scsi_add_host(shost, &vdev->dev);
        if (err)
                goto scsi_add_host_failed;

        /*
         * scsi_scan_host() happens in virtscsi_scan() via virtio_driver->scan()
         * after VIRTIO_CONFIG_S_DRIVER_OK has been set.
         */
        return 0;

scsi_add_host_failed:
        vdev->config->del_vqs(vdev);
virtscsi_init_failed:
        scsi_host_put(shost);
        return err;
}

static void virtscsi_remove(struct virtio_device *vdev)
{
        struct Scsi_Host *shost = virtio_scsi_host(vdev);
        struct virtio_scsi *vscsi = shost_priv(shost);

        if (virtio_has_feature(vdev, VIRTIO_SCSI_F_HOTPLUG))
                virtscsi_cancel_event_work(vscsi);

        scsi_remove_host(shost);

        unregister_hotcpu_notifier(&vscsi->nb);

        virtscsi_remove_vqs(vdev);
        scsi_host_put(shost);
}

#ifdef CONFIG_PM_SLEEP
static int virtscsi_freeze(struct virtio_device *vdev)
{
        virtscsi_remove_vqs(vdev);
        return 0;
}

static int virtscsi_restore(struct virtio_device *vdev)
{
        struct Scsi_Host *sh = virtio_scsi_host(vdev);
        struct virtio_scsi *vscsi = shost_priv(sh);

        return virtscsi_init(vdev, vscsi);
}
#endif

static struct virtio_device_id id_table[] = {
        { VIRTIO_ID_SCSI, VIRTIO_DEV_ANY_ID },
        { 0 },
};

static unsigned int features[] = {
        VIRTIO_SCSI_F_HOTPLUG,
        VIRTIO_SCSI_F_CHANGE,
};

static struct virtio_driver virtio_scsi_driver = {
        .feature_table = features,
        .feature_table_size = ARRAY_SIZE(features),
        .driver.name = KBUILD_MODNAME,
        .driver.owner = THIS_MODULE,
        .id_table = id_table,
        .probe = virtscsi_probe,
        .scan = virtscsi_scan,
#ifdef CONFIG_PM_SLEEP
        .freeze = virtscsi_freeze,
        .restore = virtscsi_restore,
#endif
        .remove = virtscsi_remove,
};

static int __init init(void)
{
        int ret = -ENOMEM;

        virtscsi_cmd_cache = KMEM_CACHE(virtio_scsi_cmd, 0);
        if (!virtscsi_cmd_cache) {
                pr_err("kmem_cache_create() for virtscsi_cmd_cache failed\n");
                goto error;
        }

        virtscsi_cmd_pool =
                mempool_create_slab_pool(VIRTIO_SCSI_MEMPOOL_SZ,
                                         virtscsi_cmd_cache);
        if (!virtscsi_cmd_pool) {
                pr_err("mempool_create() for virtscsi_cmd_pool failed\n");
                goto error;
        }

        ret = register_virtio_driver(&virtio_scsi_driver);
        if (ret < 0)
                goto error;

        return 0;

error:
        if (virtscsi_cmd_pool) {
                mempool_destroy(virtscsi_cmd_pool);
                virtscsi_cmd_pool = NULL;
        }
        if (virtscsi_cmd_cache) {
                kmem_cache_destroy(virtscsi_cmd_cache);
                virtscsi_cmd_cache = NULL;
        }
        return ret;
}

static void __exit fini(void)
{
        unregister_virtio_driver(&virtio_scsi_driver);
        mempool_destroy(virtscsi_cmd_pool);
        kmem_cache_destroy(virtscsi_cmd_cache);
}

module_init(init);
module_exit(fini);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio SCSI HBA driver");
MODULE_LICENSE("GPL");