/*
 * Linux driver for VMware's para-virtualized SCSI HBA.
 *
 * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; version 2 of the License and no later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Maintained by: Alok N Kataria <akataria@vmware.com>
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/pci.h>

#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>

#include "vmw_pvscsi.h"

#define PVSCSI_LINUX_DRIVER_DESC "VMware PVSCSI driver"

MODULE_DESCRIPTION(PVSCSI_LINUX_DRIVER_DESC);
MODULE_AUTHOR("VMware, Inc.");
MODULE_LICENSE("GPL");
MODULE_VERSION(PVSCSI_DRIVER_VERSION_STRING);

#define PVSCSI_DEFAULT_NUM_PAGES_PER_RING	8
#define PVSCSI_DEFAULT_NUM_PAGES_MSG_RING	1
#define PVSCSI_DEFAULT_QUEUE_DEPTH		64
#define SGL_SIZE				PAGE_SIZE
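
/*
 * Each command context owns one page of scatter/gather elements. Keeping
 * the list to a single page (SGL_SIZE) lets it be mapped for DMA with a
 * single pci_map_single() call in pvscsi_map_buffers() below.
 */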
struct pvscsi_sg_list {
        struct PVSCSISGElement sge[PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT];
};

struct pvscsi_ctx {
        /*
         * The index of the context in cmd_map serves as the context ID for a
         * 1-to-1 mapping of completions back to requests.
         */
        struct scsi_cmnd        *cmd;
        struct pvscsi_sg_list   *sgl;
        struct list_head        list;
        dma_addr_t              dataPA;
        dma_addr_t              sensePA;
        dma_addr_t              sglPA;
};

struct pvscsi_adapter {
        char                            *mmioBase;
        unsigned int                    irq;
        u8                              rev;
        bool                            use_msi;
        bool                            use_msix;
        bool                            use_msg;

        spinlock_t                      hw_lock;

        struct workqueue_struct         *workqueue;
        struct work_struct              work;

        struct PVSCSIRingReqDesc        *req_ring;
        unsigned                        req_pages;
        unsigned                        req_depth;
        dma_addr_t                      reqRingPA;

        struct PVSCSIRingCmpDesc        *cmp_ring;
        unsigned                        cmp_pages;
        dma_addr_t                      cmpRingPA;

        struct PVSCSIRingMsgDesc        *msg_ring;
        unsigned                        msg_pages;
        dma_addr_t                      msgRingPA;

        struct PVSCSIRingsState         *rings_state;
        dma_addr_t                      ringStatePA;

        struct pci_dev                  *dev;
        struct Scsi_Host                *host;

        struct list_head                cmd_pool;
        struct pvscsi_ctx               *cmd_map;
};


/* Command line parameters */
static int pvscsi_ring_pages     = PVSCSI_DEFAULT_NUM_PAGES_PER_RING;
static int pvscsi_msg_ring_pages = PVSCSI_DEFAULT_NUM_PAGES_MSG_RING;
static int pvscsi_cmd_per_lun    = PVSCSI_DEFAULT_QUEUE_DEPTH;
static bool pvscsi_disable_msi;
static bool pvscsi_disable_msix;
static bool pvscsi_use_msg       = true;

#define PVSCSI_RW (S_IRUSR | S_IWUSR)

module_param_named(ring_pages, pvscsi_ring_pages, int, PVSCSI_RW);
MODULE_PARM_DESC(ring_pages, "Number of pages per req/cmp ring - (default="
                 __stringify(PVSCSI_DEFAULT_NUM_PAGES_PER_RING) ")");

module_param_named(msg_ring_pages, pvscsi_msg_ring_pages, int, PVSCSI_RW);
MODULE_PARM_DESC(msg_ring_pages, "Number of pages for the msg ring - (default="
                 __stringify(PVSCSI_DEFAULT_NUM_PAGES_MSG_RING) ")");

module_param_named(cmd_per_lun, pvscsi_cmd_per_lun, int, PVSCSI_RW);
MODULE_PARM_DESC(cmd_per_lun, "Maximum commands per lun - (default="
                 __stringify(PVSCSI_DEFAULT_QUEUE_DEPTH) ")");

module_param_named(disable_msi, pvscsi_disable_msi, bool, PVSCSI_RW);
MODULE_PARM_DESC(disable_msi, "Disable MSI use in driver - (default=0)");

module_param_named(disable_msix, pvscsi_disable_msix, bool, PVSCSI_RW);
MODULE_PARM_DESC(disable_msix, "Disable MSI-X use in driver - (default=0)");

module_param_named(use_msg, pvscsi_use_msg, bool, PVSCSI_RW);
MODULE_PARM_DESC(use_msg, "Use msg ring when available - (default=1)");
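
/*
 * Example (illustrative, not part of the driver): loading the module with
 * a larger request/completion ring and the message ring disabled:
 *
 *	modprobe vmw_pvscsi ring_pages=16 use_msg=0
 *
 * ring_pages is clamped to PVSCSI_MAX_NUM_PAGES_REQ_RING/_CMP_RING at
 * probe time, see pvscsi_allocate_rings().
 */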
static const struct pci_device_id pvscsi_pci_tbl[] = {
        { PCI_VDEVICE(VMWARE, PCI_DEVICE_ID_VMWARE_PVSCSI) },
        { 0 }
};

MODULE_DEVICE_TABLE(pci, pvscsi_pci_tbl);

static struct device *
pvscsi_dev(const struct pvscsi_adapter *adapter)
{
        return &(adapter->dev->dev);
}

static struct pvscsi_ctx *
pvscsi_find_context(const struct pvscsi_adapter *adapter, struct scsi_cmnd *cmd)
{
        struct pvscsi_ctx *ctx, *end;

        end = &adapter->cmd_map[adapter->req_depth];
        for (ctx = adapter->cmd_map; ctx < end; ctx++)
                if (ctx->cmd == cmd)
                        return ctx;

        return NULL;
}

static struct pvscsi_ctx *
pvscsi_acquire_context(struct pvscsi_adapter *adapter, struct scsi_cmnd *cmd)
{
        struct pvscsi_ctx *ctx;

        if (list_empty(&adapter->cmd_pool))
                return NULL;

        ctx = list_first_entry(&adapter->cmd_pool, struct pvscsi_ctx, list);
        ctx->cmd = cmd;
        list_del(&ctx->list);

        return ctx;
}

static void pvscsi_release_context(struct pvscsi_adapter *adapter,
                                   struct pvscsi_ctx *ctx)
{
        ctx->cmd = NULL;
        list_add(&ctx->list, &adapter->cmd_pool);
}
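
/*
 * Contexts are recycled through a per-adapter free list (cmd_pool). Both
 * acquire and release run with hw_lock held, so no further locking is
 * needed here. Running out of contexts simply makes pvscsi_queue() return
 * SCSI_MLQUEUE_HOST_BUSY, and the mid-layer retries the command later.
 */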
/*
 * Map a pvscsi_ctx struct to a context ID field value; we map to a simple
 * non-zero integer. ctx always points to an entry in the cmd_map array,
 * hence the return value is always >= 1.
 */
static u64 pvscsi_map_context(const struct pvscsi_adapter *adapter,
                              const struct pvscsi_ctx *ctx)
{
        return ctx - adapter->cmd_map + 1;
}

static struct pvscsi_ctx *
pvscsi_get_context(const struct pvscsi_adapter *adapter, u64 context)
{
        return &adapter->cmd_map[context - 1];
}

static void pvscsi_reg_write(const struct pvscsi_adapter *adapter,
                             u32 offset, u32 val)
{
        writel(val, adapter->mmioBase + offset);
}

static u32 pvscsi_reg_read(const struct pvscsi_adapter *adapter, u32 offset)
{
        return readl(adapter->mmioBase + offset);
}

static u32 pvscsi_read_intr_status(const struct pvscsi_adapter *adapter)
{
        return pvscsi_reg_read(adapter, PVSCSI_REG_OFFSET_INTR_STATUS);
}

static void pvscsi_write_intr_status(const struct pvscsi_adapter *adapter,
                                     u32 val)
{
        pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_INTR_STATUS, val);
}

static void pvscsi_unmask_intr(const struct pvscsi_adapter *adapter)
{
        u32 intr_bits;

        intr_bits = PVSCSI_INTR_CMPL_MASK;
        if (adapter->use_msg)
                intr_bits |= PVSCSI_INTR_MSG_MASK;

        pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_INTR_MASK, intr_bits);
}

static void pvscsi_mask_intr(const struct pvscsi_adapter *adapter)
{
        pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_INTR_MASK, 0);
}

static void pvscsi_write_cmd_desc(const struct pvscsi_adapter *adapter,
                                  u32 cmd, const void *desc, size_t len)
{
        const u32 *ptr = desc;
        size_t i;

        len /= sizeof(*ptr);
        pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_COMMAND, cmd);
        for (i = 0; i < len; i++)
                pvscsi_reg_write(adapter,
                                 PVSCSI_REG_OFFSET_COMMAND_DATA, ptr[i]);
}
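
/*
 * The abort/reset helpers below all go through pvscsi_write_cmd_desc():
 * the opcode lands in the COMMAND register and the descriptor is then
 * streamed into COMMAND_DATA one 32-bit word at a time.
 */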
static void pvscsi_abort_cmd(const struct pvscsi_adapter *adapter,
                             const struct pvscsi_ctx *ctx)
{
        struct PVSCSICmdDescAbortCmd cmd = { 0 };

        cmd.target = ctx->cmd->device->id;
        cmd.context = pvscsi_map_context(adapter, ctx);

        pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_ABORT_CMD, &cmd, sizeof(cmd));
}

static void pvscsi_kick_rw_io(const struct pvscsi_adapter *adapter)
{
        pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_KICK_RW_IO, 0);
}

static void pvscsi_process_request_ring(const struct pvscsi_adapter *adapter)
{
        pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_KICK_NON_RW_IO, 0);
}

static int scsi_is_rw(unsigned char op)
{
        return op == READ_6  || op == WRITE_6  ||
               op == READ_10 || op == WRITE_10 ||
               op == READ_12 || op == WRITE_12 ||
               op == READ_16 || op == WRITE_16;
}

static void pvscsi_kick_io(const struct pvscsi_adapter *adapter,
                           unsigned char op)
{
        if (scsi_is_rw(op))
                pvscsi_kick_rw_io(adapter);
        else
                pvscsi_process_request_ring(adapter);
}
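
/*
 * The device exposes two doorbells: read/write commands go through
 * KICK_RW_IO, everything else through KICK_NON_RW_IO, which forces the
 * emulation to process the request ring right away. (The split exists so
 * that, in principle, the hypervisor can handle plain I/O kicks more
 * cheaply.)
 */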
static void ll_adapter_reset(const struct pvscsi_adapter *adapter)
{
        dev_dbg(pvscsi_dev(adapter), "Adapter Reset on %p\n", adapter);

        pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_ADAPTER_RESET, NULL, 0);
}

static void ll_bus_reset(const struct pvscsi_adapter *adapter)
{
        dev_dbg(pvscsi_dev(adapter), "Resetting bus on %p\n", adapter);

        pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_RESET_BUS, NULL, 0);
}

static void ll_device_reset(const struct pvscsi_adapter *adapter, u32 target)
{
        struct PVSCSICmdDescResetDevice cmd = { 0 };

        dev_dbg(pvscsi_dev(adapter), "Resetting device: target=%u\n", target);

        cmd.target = target;

        pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_RESET_DEVICE,
                              &cmd, sizeof(cmd));
}

static void pvscsi_create_sg(struct pvscsi_ctx *ctx,
                             struct scatterlist *sg, unsigned count)
{
        unsigned i;
        struct PVSCSISGElement *sge;

        BUG_ON(count > PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT);

        sge = &ctx->sgl->sge[0];
        for (i = 0; i < count; i++, sg++) {
                sge[i].addr   = sg_dma_address(sg);
                sge[i].length = sg_dma_len(sg);
                sge[i].flags  = 0;
        }
}

/*
 * Map all data buffers for a command into PCI space and
 * set up the scatter/gather list if needed.
 */
static void pvscsi_map_buffers(struct pvscsi_adapter *adapter,
                               struct pvscsi_ctx *ctx, struct scsi_cmnd *cmd,
                               struct PVSCSIRingReqDesc *e)
{
        unsigned count;
        unsigned bufflen = scsi_bufflen(cmd);
        struct scatterlist *sg;

        e->dataLen = bufflen;
        e->dataAddr = 0;
        if (bufflen == 0)
                return;

        sg = scsi_sglist(cmd);
        count = scsi_sg_count(cmd);
        if (count != 0) {
                int segs = scsi_dma_map(cmd);
                if (segs > 1) {
                        pvscsi_create_sg(ctx, sg, segs);

                        e->flags |= PVSCSI_FLAG_CMD_WITH_SG_LIST;
                        ctx->sglPA = pci_map_single(adapter->dev, ctx->sgl,
                                                    SGL_SIZE, PCI_DMA_TODEVICE);
                        e->dataAddr = ctx->sglPA;
                } else
                        e->dataAddr = sg_dma_address(sg);
        } else {
                /*
                 * In case there is no S/G list, scsi_sglist points
                 * directly to the buffer.
                 */
                ctx->dataPA = pci_map_single(adapter->dev, sg, bufflen,
                                             cmd->sc_data_direction);
                e->dataAddr = ctx->dataPA;
        }
}

static void pvscsi_unmap_buffers(const struct pvscsi_adapter *adapter,
                                 struct pvscsi_ctx *ctx)
{
        struct scsi_cmnd *cmd;
        unsigned bufflen;

        cmd = ctx->cmd;
        bufflen = scsi_bufflen(cmd);

        if (bufflen != 0) {
                unsigned count = scsi_sg_count(cmd);

                if (count != 0) {
                        scsi_dma_unmap(cmd);
                        if (ctx->sglPA) {
                                pci_unmap_single(adapter->dev, ctx->sglPA,
                                                 SGL_SIZE, PCI_DMA_TODEVICE);
                                ctx->sglPA = 0;
                        }
                } else
                        pci_unmap_single(adapter->dev, ctx->dataPA, bufflen,
                                         cmd->sc_data_direction);
        }
        if (cmd->sense_buffer)
                pci_unmap_single(adapter->dev, ctx->sensePA,
                                 SCSI_SENSE_BUFFERSIZE, PCI_DMA_FROMDEVICE);
}

static int __devinit pvscsi_allocate_rings(struct pvscsi_adapter *adapter)
{
        adapter->rings_state = pci_alloc_consistent(adapter->dev, PAGE_SIZE,
                                                    &adapter->ringStatePA);
        if (!adapter->rings_state)
                return -ENOMEM;

        adapter->req_pages = min(PVSCSI_MAX_NUM_PAGES_REQ_RING,
                                 pvscsi_ring_pages);
        adapter->req_depth = adapter->req_pages
                             * PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE;
        adapter->req_ring = pci_alloc_consistent(adapter->dev,
                                                 adapter->req_pages * PAGE_SIZE,
                                                 &adapter->reqRingPA);
        if (!adapter->req_ring)
                return -ENOMEM;

        adapter->cmp_pages = min(PVSCSI_MAX_NUM_PAGES_CMP_RING,
                                 pvscsi_ring_pages);
        adapter->cmp_ring = pci_alloc_consistent(adapter->dev,
                                                 adapter->cmp_pages * PAGE_SIZE,
                                                 &adapter->cmpRingPA);
        if (!adapter->cmp_ring)
                return -ENOMEM;

        BUG_ON(!IS_ALIGNED(adapter->ringStatePA, PAGE_SIZE));
        BUG_ON(!IS_ALIGNED(adapter->reqRingPA, PAGE_SIZE));
        BUG_ON(!IS_ALIGNED(adapter->cmpRingPA, PAGE_SIZE));

        if (!adapter->use_msg)
                return 0;

        adapter->msg_pages = min(PVSCSI_MAX_NUM_PAGES_MSG_RING,
                                 pvscsi_msg_ring_pages);
        adapter->msg_ring = pci_alloc_consistent(adapter->dev,
                                                 adapter->msg_pages * PAGE_SIZE,
                                                 &adapter->msgRingPA);
        if (!adapter->msg_ring)
                return -ENOMEM;
        BUG_ON(!IS_ALIGNED(adapter->msgRingPA, PAGE_SIZE));

        return 0;
}
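
/*
 * Ring addresses are handed to the device as physical page numbers (PPNs),
 * which is why pvscsi_allocate_rings() insists on page-aligned DMA
 * addresses: the low bits are shifted away by PAGE_SHIFT below.
 */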
static void pvscsi_setup_all_rings(const struct pvscsi_adapter *adapter)
{
        struct PVSCSICmdDescSetupRings cmd = { 0 };
        dma_addr_t base;
        unsigned i;

        cmd.ringsStatePPN   = adapter->ringStatePA >> PAGE_SHIFT;
        cmd.reqRingNumPages = adapter->req_pages;
        cmd.cmpRingNumPages = adapter->cmp_pages;

        base = adapter->reqRingPA;
        for (i = 0; i < adapter->req_pages; i++) {
                cmd.reqRingPPNs[i] = base >> PAGE_SHIFT;
                base += PAGE_SIZE;
        }

        base = adapter->cmpRingPA;
        for (i = 0; i < adapter->cmp_pages; i++) {
                cmd.cmpRingPPNs[i] = base >> PAGE_SHIFT;
                base += PAGE_SIZE;
        }

        memset(adapter->rings_state, 0, PAGE_SIZE);
        memset(adapter->req_ring, 0, adapter->req_pages * PAGE_SIZE);
        memset(adapter->cmp_ring, 0, adapter->cmp_pages * PAGE_SIZE);

        pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_SETUP_RINGS,
                              &cmd, sizeof(cmd));

        if (adapter->use_msg) {
                struct PVSCSICmdDescSetupMsgRing cmd_msg = { 0 };

                cmd_msg.numPages = adapter->msg_pages;

                base = adapter->msgRingPA;
                for (i = 0; i < adapter->msg_pages; i++) {
                        cmd_msg.ringPPNs[i] = base >> PAGE_SHIFT;
                        base += PAGE_SIZE;
                }
                memset(adapter->msg_ring, 0, adapter->msg_pages * PAGE_SIZE);

                pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_SETUP_MSG_RING,
                                      &cmd_msg, sizeof(cmd_msg));
        }
}

/*
 * Pull a completion descriptor off the ring and pass the completion back
 * to the SCSI mid layer.
 */
static void pvscsi_complete_request(struct pvscsi_adapter *adapter,
                                    const struct PVSCSIRingCmpDesc *e)
{
        struct pvscsi_ctx *ctx;
        struct scsi_cmnd *cmd;
        u32 btstat = e->hostStatus;
        u32 sdstat = e->scsiStatus;

        ctx = pvscsi_get_context(adapter, e->context);
        cmd = ctx->cmd;
        pvscsi_unmap_buffers(adapter, ctx);
        pvscsi_release_context(adapter, ctx);
        cmd->result = 0;

        if (sdstat != SAM_STAT_GOOD &&
            (btstat == BTSTAT_SUCCESS ||
             btstat == BTSTAT_LINKED_COMMAND_COMPLETED ||
             btstat == BTSTAT_LINKED_COMMAND_COMPLETED_WITH_FLAG)) {
                cmd->result = (DID_OK << 16) | sdstat;
                if (sdstat == SAM_STAT_CHECK_CONDITION && cmd->sense_buffer)
                        cmd->result |= (DRIVER_SENSE << 24);
        } else
                switch (btstat) {
                case BTSTAT_SUCCESS:
                case BTSTAT_LINKED_COMMAND_COMPLETED:
                case BTSTAT_LINKED_COMMAND_COMPLETED_WITH_FLAG:
                        /* If everything went fine, let's move on.. */
                        cmd->result = (DID_OK << 16);
                        break;

                case BTSTAT_DATARUN:
                case BTSTAT_DATA_UNDERRUN:
                        /* Report residual data in underruns */
                        scsi_set_resid(cmd, scsi_bufflen(cmd) - e->dataLen);
                        cmd->result = (DID_ERROR << 16);
                        break;

                case BTSTAT_SELTIMEO:
                        /* Our emulation returns this for non-connected devs */
                        cmd->result = (DID_BAD_TARGET << 16);
                        break;

                case BTSTAT_LUNMISMATCH:
                case BTSTAT_TAGREJECT:
                case BTSTAT_BADMSG:
                        cmd->result = (DRIVER_INVALID << 24);
                        /* fall through */

                case BTSTAT_HAHARDWARE:
                case BTSTAT_INVPHASE:
                case BTSTAT_HATIMEOUT:
                case BTSTAT_NORESPONSE:
                case BTSTAT_DISCONNECT:
                case BTSTAT_HASOFTWARE:
                case BTSTAT_BUSFREE:
                case BTSTAT_SENSFAILED:
                        cmd->result |= (DID_ERROR << 16);
                        break;

                case BTSTAT_SENTRST:
                case BTSTAT_RECVRST:
                case BTSTAT_BUSRESET:
                        cmd->result = (DID_RESET << 16);
                        break;

                case BTSTAT_ABORTQUEUE:
                        cmd->result = (DID_ABORT << 16);
                        break;

                case BTSTAT_SCSIPARITY:
                        cmd->result = (DID_PARITY << 16);
                        break;

                default:
                        cmd->result = (DID_ERROR << 16);
                        scmd_printk(KERN_DEBUG, cmd,
                                    "Unknown completion status: 0x%x\n",
                                    btstat);
                }

        dev_dbg(&cmd->device->sdev_gendev,
                "cmd=%p %x ctx=%p result=0x%x status=0x%x,%x\n",
                cmd, cmd->cmnd[0], ctx, cmd->result, btstat, sdstat);

        cmd->scsi_done(cmd);
}

/*
 * barrier usage : Since the PVSCSI device is emulated, there could be cases
 * where we may want to serialize some accesses between the driver and the
 * emulation layer. We use compiler barriers instead of the more expensive
 * memory barriers because PVSCSI is only supported on x86, which has strong
 * memory access ordering.
 */
static void pvscsi_process_completion_ring(struct pvscsi_adapter *adapter)
{
        struct PVSCSIRingsState *s = adapter->rings_state;
        struct PVSCSIRingCmpDesc *ring = adapter->cmp_ring;
        u32 cmp_entries = s->cmpNumEntriesLog2;

        while (s->cmpConsIdx != s->cmpProdIdx) {
                struct PVSCSIRingCmpDesc *e = ring + (s->cmpConsIdx &
                                                      MASK(cmp_entries));
                /*
                 * This barrier() ensures that *e is not dereferenced while
                 * the device emulation still writes data into the slot.
                 * Since the device emulation advances s->cmpProdIdx only
                 * after updating the slot, we want to check it first.
                 */
                barrier();
                pvscsi_complete_request(adapter, e);
                /*
                 * This barrier() ensures that the compiler doesn't reorder
                 * the write to s->cmpConsIdx before the read of (*e) inside
                 * pvscsi_complete_request. Otherwise, the device emulation
                 * may overwrite *e before we had a chance to read it.
                 */
                barrier();
                s->cmpConsIdx++;
        }
}

/*
 * Translate a Linux SCSI request into a request ring entry.
 */
static int pvscsi_queue_ring(struct pvscsi_adapter *adapter,
                             struct pvscsi_ctx *ctx, struct scsi_cmnd *cmd)
{
        struct PVSCSIRingsState *s;
        struct PVSCSIRingReqDesc *e;
        struct scsi_device *sdev;
        u32 req_entries;

        s = adapter->rings_state;
        sdev = cmd->device;
        req_entries = s->reqNumEntriesLog2;

        /*
         * If this condition holds, we might have room on the request ring, but
         * we might not have room on the completion ring for the response.
         * However, we have already ruled out this possibility - we would not
         * have successfully allocated a context if it were true, since we only
         * have one context per request entry. Check for it anyway, since it
         * would be a serious bug.
         */
        if (s->reqProdIdx - s->cmpConsIdx >= 1 << req_entries) {
                scmd_printk(KERN_ERR, cmd, "vmw_pvscsi: "
                            "ring full: reqProdIdx=%d cmpConsIdx=%d\n",
                            s->reqProdIdx, s->cmpConsIdx);
                return -1;
        }

        e = adapter->req_ring + (s->reqProdIdx & MASK(req_entries));

        e->bus    = sdev->channel;
        e->target = sdev->id;
        memset(e->lun, 0, sizeof(e->lun));
        e->lun[1] = sdev->lun;

        if (cmd->sense_buffer) {
                ctx->sensePA = pci_map_single(adapter->dev, cmd->sense_buffer,
                                              SCSI_SENSE_BUFFERSIZE,
                                              PCI_DMA_FROMDEVICE);
                e->senseAddr = ctx->sensePA;
                e->senseLen = SCSI_SENSE_BUFFERSIZE;
        } else {
                e->senseLen  = 0;
                e->senseAddr = 0;
        }
        e->cdbLen   = cmd->cmd_len;
        e->vcpuHint = smp_processor_id();
        memcpy(e->cdb, cmd->cmnd, e->cdbLen);

        e->tag = SIMPLE_QUEUE_TAG;
        if (sdev->tagged_supported &&
            (cmd->tag == HEAD_OF_QUEUE_TAG ||
             cmd->tag == ORDERED_QUEUE_TAG))
                e->tag = cmd->tag;

        if (cmd->sc_data_direction == DMA_FROM_DEVICE)
                e->flags = PVSCSI_FLAG_CMD_DIR_TOHOST;
        else if (cmd->sc_data_direction == DMA_TO_DEVICE)
                e->flags = PVSCSI_FLAG_CMD_DIR_TODEVICE;
        else if (cmd->sc_data_direction == DMA_NONE)
                e->flags = PVSCSI_FLAG_CMD_DIR_NONE;
        else
                e->flags = 0;

        pvscsi_map_buffers(adapter, ctx, cmd, e);

        e->context = pvscsi_map_context(adapter, ctx);

        barrier();

        s->reqProdIdx++;

        return 0;
}
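
/*
 * queuecommand entry point. A NULL context or a full ring makes us return
 * SCSI_MLQUEUE_HOST_BUSY, which tells the mid-layer to back off and retry
 * the command later. The doorbell kick happens after hw_lock is dropped.
 */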
static int pvscsi_queue(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
{
        struct Scsi_Host *host = cmd->device->host;
        struct pvscsi_adapter *adapter = shost_priv(host);
        struct pvscsi_ctx *ctx;
        unsigned long flags;

        spin_lock_irqsave(&adapter->hw_lock, flags);

        ctx = pvscsi_acquire_context(adapter, cmd);
        if (!ctx || pvscsi_queue_ring(adapter, ctx, cmd) != 0) {
                if (ctx)
                        pvscsi_release_context(adapter, ctx);
                spin_unlock_irqrestore(&adapter->hw_lock, flags);
                return SCSI_MLQUEUE_HOST_BUSY;
        }

        cmd->scsi_done = done;

        dev_dbg(&cmd->device->sdev_gendev,
                "queued cmd %p, ctx %p, op=%x\n", cmd, ctx, cmd->cmnd[0]);

        spin_unlock_irqrestore(&adapter->hw_lock, flags);

        pvscsi_kick_io(adapter, cmd->cmnd[0]);

        return 0;
}

static int pvscsi_abort(struct scsi_cmnd *cmd)
{
        struct pvscsi_adapter *adapter = shost_priv(cmd->device->host);
        struct pvscsi_ctx *ctx;
        unsigned long flags;

        scmd_printk(KERN_DEBUG, cmd, "task abort on host %u, %p\n",
                    adapter->host->host_no, cmd);

        spin_lock_irqsave(&adapter->hw_lock, flags);

        /*
         * Poll the completion ring first - we might be trying to abort
         * a command that is waiting to be dispatched in the completion ring.
         */
        pvscsi_process_completion_ring(adapter);

        /*
         * If there is no context for the command, it either already succeeded
         * or else was never properly issued. Not our problem.
         */
        ctx = pvscsi_find_context(adapter, cmd);
        if (!ctx) {
                scmd_printk(KERN_DEBUG, cmd, "Failed to abort cmd %p\n", cmd);
                goto out;
        }

        pvscsi_abort_cmd(adapter, ctx);

        pvscsi_process_completion_ring(adapter);

out:
        spin_unlock_irqrestore(&adapter->hw_lock, flags);
        return SUCCESS;
}

/*
 * Abort all outstanding requests. This is only safe to use if the completion
 * ring will never be walked again or the device has been reset, because it
 * destroys the 1-1 mapping between the context field passed to the emulation
 * and our request structure.
 */
static void pvscsi_reset_all(struct pvscsi_adapter *adapter)
{
        unsigned i;

        for (i = 0; i < adapter->req_depth; i++) {
                struct pvscsi_ctx *ctx = &adapter->cmd_map[i];
                struct scsi_cmnd *cmd = ctx->cmd;
                if (cmd) {
                        scmd_printk(KERN_ERR, cmd,
                                    "Forced reset on cmd %p\n", cmd);
                        pvscsi_unmap_buffers(adapter, ctx);
                        pvscsi_release_context(adapter, ctx);
                        cmd->result = (DID_RESET << 16);
                        cmd->scsi_done(cmd);
                }
        }
}

static int pvscsi_host_reset(struct scsi_cmnd *cmd)
{
        struct Scsi_Host *host = cmd->device->host;
        struct pvscsi_adapter *adapter = shost_priv(host);
        unsigned long flags;
        bool use_msg;

        scmd_printk(KERN_INFO, cmd, "SCSI Host reset\n");

        spin_lock_irqsave(&adapter->hw_lock, flags);

        use_msg = adapter->use_msg;

        if (use_msg) {
                adapter->use_msg = 0;
                spin_unlock_irqrestore(&adapter->hw_lock, flags);

                /*
                 * Now that we know that the ISR won't add more work on the
                 * workqueue we can safely flush any outstanding work.
                 */
                flush_workqueue(adapter->workqueue);
                spin_lock_irqsave(&adapter->hw_lock, flags);
        }

        /*
         * We're going to tear down the entire ring structure and set it back
         * up, so we stall new requests until all completions are flushed and
         * the rings are back in place.
         */
        pvscsi_process_request_ring(adapter);

        ll_adapter_reset(adapter);

        /*
         * Now process any completions. Note we do this AFTER adapter reset,
         * which is strange, but stops races where completions get posted
         * between processing the ring and issuing the reset. The backend will
         * not touch the ring memory after reset, so the immediately pre-reset
         * completion ring state is still valid.
         */
        pvscsi_process_completion_ring(adapter);

        pvscsi_reset_all(adapter);

        adapter->use_msg = use_msg;

        pvscsi_setup_all_rings(adapter);

        pvscsi_unmask_intr(adapter);

        spin_unlock_irqrestore(&adapter->hw_lock, flags);

        return SUCCESS;
}

static int pvscsi_bus_reset(struct scsi_cmnd *cmd)
{
        struct Scsi_Host *host = cmd->device->host;
        struct pvscsi_adapter *adapter = shost_priv(host);
        unsigned long flags;

        scmd_printk(KERN_INFO, cmd, "SCSI Bus reset\n");

        /*
         * We don't want to queue new requests for this bus after
         * flushing all pending requests to emulation, since new
         * requests could then sneak in during this bus reset phase,
         * so take the lock now.
         */
        spin_lock_irqsave(&adapter->hw_lock, flags);

        pvscsi_process_request_ring(adapter);
        ll_bus_reset(adapter);
        pvscsi_process_completion_ring(adapter);

        spin_unlock_irqrestore(&adapter->hw_lock, flags);

        return SUCCESS;
}

static int pvscsi_device_reset(struct scsi_cmnd *cmd)
{
        struct Scsi_Host *host = cmd->device->host;
        struct pvscsi_adapter *adapter = shost_priv(host);
        unsigned long flags;

        scmd_printk(KERN_INFO, cmd, "SCSI device reset on scsi%u:%u\n",
                    host->host_no, cmd->device->id);

        /*
         * We don't want to queue new requests for this device after flushing
         * all pending requests to emulation, since new requests could then
         * sneak in during this device reset phase, so take the lock now.
         */
        spin_lock_irqsave(&adapter->hw_lock, flags);

        pvscsi_process_request_ring(adapter);
        ll_device_reset(adapter, cmd->device->id);
        pvscsi_process_completion_ring(adapter);

        spin_unlock_irqrestore(&adapter->hw_lock, flags);

        return SUCCESS;
}

static struct scsi_host_template pvscsi_template;

static const char *pvscsi_info(struct Scsi_Host *host)
{
        struct pvscsi_adapter *adapter = shost_priv(host);
        static char buf[256];

        sprintf(buf, "VMware PVSCSI storage adapter rev %d, req/cmp/msg rings: "
                "%u/%u/%u pages, cmd_per_lun=%u", adapter->rev,
                adapter->req_pages, adapter->cmp_pages, adapter->msg_pages,
                pvscsi_template.cmd_per_lun);

        return buf;
}

static struct scsi_host_template pvscsi_template = {
        .module                         = THIS_MODULE,
        .name                           = "VMware PVSCSI Host Adapter",
        .proc_name                      = "vmw_pvscsi",
        .info                           = pvscsi_info,
        .queuecommand                   = pvscsi_queue,
        .this_id                        = -1,
        .sg_tablesize                   = PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT,
        .dma_boundary                   = UINT_MAX,
        .max_sectors                    = 0xffff,
        .use_clustering                 = ENABLE_CLUSTERING,
        .eh_abort_handler               = pvscsi_abort,
        .eh_device_reset_handler        = pvscsi_device_reset,
        .eh_bus_reset_handler           = pvscsi_bus_reset,
        .eh_host_reset_handler          = pvscsi_host_reset,
};

static void pvscsi_process_msg(const struct pvscsi_adapter *adapter,
                               const struct PVSCSIRingMsgDesc *e)
{
        struct PVSCSIRingsState *s = adapter->rings_state;
        struct Scsi_Host *host = adapter->host;
        struct scsi_device *sdev;

        printk(KERN_INFO "vmw_pvscsi: msg type: 0x%x - MSG RING: %u/%u (%u)\n",
               e->type, s->msgProdIdx, s->msgConsIdx, s->msgNumEntriesLog2);

        BUILD_BUG_ON(PVSCSI_MSG_LAST != 2);

        if (e->type == PVSCSI_MSG_DEV_ADDED) {
                struct PVSCSIMsgDescDevStatusChanged *desc;
                desc = (struct PVSCSIMsgDescDevStatusChanged *)e;

                printk(KERN_INFO
                       "vmw_pvscsi: msg: device added at scsi%u:%u:%u\n",
                       desc->bus, desc->target, desc->lun[1]);

                if (!scsi_host_get(host))
                        return;

                sdev = scsi_device_lookup(host, desc->bus, desc->target,
                                          desc->lun[1]);
                if (sdev) {
                        printk(KERN_INFO "vmw_pvscsi: device already exists\n");
                        scsi_device_put(sdev);
                } else
                        scsi_add_device(adapter->host, desc->bus,
                                        desc->target, desc->lun[1]);

                scsi_host_put(host);
        } else if (e->type == PVSCSI_MSG_DEV_REMOVED) {
                struct PVSCSIMsgDescDevStatusChanged *desc;
                desc = (struct PVSCSIMsgDescDevStatusChanged *)e;

                printk(KERN_INFO
                       "vmw_pvscsi: msg: device removed at scsi%u:%u:%u\n",
                       desc->bus, desc->target, desc->lun[1]);

                if (!scsi_host_get(host))
                        return;

                sdev = scsi_device_lookup(host, desc->bus, desc->target,
                                          desc->lun[1]);
                if (sdev) {
                        scsi_remove_device(sdev);
                        scsi_device_put(sdev);
                } else
                        printk(KERN_INFO
                               "vmw_pvscsi: failed to lookup scsi%u:%u:%u\n",
                               desc->bus, desc->target, desc->lun[1]);

                scsi_host_put(host);
        }
}

static int pvscsi_msg_pending(const struct pvscsi_adapter *adapter)
{
        struct PVSCSIRingsState *s = adapter->rings_state;

        return s->msgProdIdx != s->msgConsIdx;
}

static void pvscsi_process_msg_ring(const struct pvscsi_adapter *adapter)
{
        struct PVSCSIRingsState *s = adapter->rings_state;
        struct PVSCSIRingMsgDesc *ring = adapter->msg_ring;
        u32 msg_entries = s->msgNumEntriesLog2;

        while (pvscsi_msg_pending(adapter)) {
                struct PVSCSIRingMsgDesc *e = ring + (s->msgConsIdx &
                                                      MASK(msg_entries));
                barrier();
                pvscsi_process_msg(adapter, e);
                barrier();
                s->msgConsIdx++;
        }
}
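
/*
 * Message-ring entries are handled from a workqueue rather than from the
 * ISR because scsi_add_device() and scsi_remove_device() can sleep; the
 * interrupt handler only queues the work (see pvscsi_isr()).
 */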
static void pvscsi_msg_workqueue_handler(struct work_struct *data)
{
        struct pvscsi_adapter *adapter;

        adapter = container_of(data, struct pvscsi_adapter, work);

        pvscsi_process_msg_ring(adapter);
}
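
/*
 * Probe for message-ring support: issue a bare SETUP_MSG_RING command and
 * read COMMAND_STATUS back. A value of -1 means the device doesn't
 * recognize the command, so we quietly run without a msg ring.
 */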
static int pvscsi_setup_msg_workqueue(struct pvscsi_adapter *adapter)
{
        char name[32];

        if (!pvscsi_use_msg)
                return 0;

        pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_COMMAND,
                         PVSCSI_CMD_SETUP_MSG_RING);

        if (pvscsi_reg_read(adapter, PVSCSI_REG_OFFSET_COMMAND_STATUS) == -1)
                return 0;

        snprintf(name, sizeof(name),
                 "vmw_pvscsi_wq_%u", adapter->host->host_no);

        adapter->workqueue = create_singlethread_workqueue(name);
        if (!adapter->workqueue) {
                printk(KERN_ERR "vmw_pvscsi: failed to create work queue\n");
                return 0;
        }
        INIT_WORK(&adapter->work, pvscsi_msg_workqueue_handler);

        return 1;
}
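
/*
 * Interrupt handling: with MSI or MSI-X the interrupt is ours by
 * construction, so no status check is needed. For legacy INTx the line
 * may be shared, so we claim the interrupt only if INTR_STATUS shows a
 * supported source, and acknowledge it by writing the bits back.
 */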
static irqreturn_t pvscsi_isr(int irq, void *devp)
{
        struct pvscsi_adapter *adapter = devp;
        int handled;

        if (adapter->use_msi || adapter->use_msix)
                handled = true;
        else {
                u32 val = pvscsi_read_intr_status(adapter);
                handled = (val & PVSCSI_INTR_ALL_SUPPORTED) != 0;
                if (handled)
                        pvscsi_write_intr_status(devp, val);
        }

        if (handled) {
                unsigned long flags;

                spin_lock_irqsave(&adapter->hw_lock, flags);

                pvscsi_process_completion_ring(adapter);
                if (adapter->use_msg && pvscsi_msg_pending(adapter))
                        queue_work(adapter->workqueue, &adapter->work);

                spin_unlock_irqrestore(&adapter->hw_lock, flags);
        }

        return IRQ_RETVAL(handled);
}

static void pvscsi_free_sgls(const struct pvscsi_adapter *adapter)
{
        struct pvscsi_ctx *ctx = adapter->cmd_map;
        unsigned i;

        for (i = 0; i < adapter->req_depth; ++i, ++ctx)
                free_pages((unsigned long)ctx->sgl, get_order(SGL_SIZE));
}

static int pvscsi_setup_msix(const struct pvscsi_adapter *adapter,
                             unsigned int *irq)
{
        struct msix_entry entry = { 0, PVSCSI_VECTOR_COMPLETION };
        int ret;

        ret = pci_enable_msix(adapter->dev, &entry, 1);
        if (ret)
                return ret;

        *irq = entry.vector;

        return 0;
}

static void pvscsi_shutdown_intr(struct pvscsi_adapter *adapter)
{
        if (adapter->irq) {
                free_irq(adapter->irq, adapter);
                adapter->irq = 0;
        }
        if (adapter->use_msi) {
                pci_disable_msi(adapter->dev);
                adapter->use_msi = 0;
        } else if (adapter->use_msix) {
                pci_disable_msix(adapter->dev);
                adapter->use_msix = 0;
        }
}

static void pvscsi_release_resources(struct pvscsi_adapter *adapter)
{
        pvscsi_shutdown_intr(adapter);

        if (adapter->workqueue)
                destroy_workqueue(adapter->workqueue);

        if (adapter->mmioBase)
                pci_iounmap(adapter->dev, adapter->mmioBase);

        pci_release_regions(adapter->dev);

        if (adapter->cmd_map) {
                pvscsi_free_sgls(adapter);
                kfree(adapter->cmd_map);
        }

        if (adapter->rings_state)
                pci_free_consistent(adapter->dev, PAGE_SIZE,
                                    adapter->rings_state,
                                    adapter->ringStatePA);

        if (adapter->req_ring)
                pci_free_consistent(adapter->dev,
                                    adapter->req_pages * PAGE_SIZE,
                                    adapter->req_ring, adapter->reqRingPA);

        if (adapter->cmp_ring)
                pci_free_consistent(adapter->dev,
                                    adapter->cmp_pages * PAGE_SIZE,
                                    adapter->cmp_ring, adapter->cmpRingPA);

        if (adapter->msg_ring)
                pci_free_consistent(adapter->dev,
                                    adapter->msg_pages * PAGE_SIZE,
                                    adapter->msg_ring, adapter->msgRingPA);
}

/*
 * Allocate scatter gather lists.
 *
 * These are statically allocated. Trying to be clever was not worth it.
 *
 * Dynamic allocation can fail, and we can't go deep into the memory
 * allocator, since we're a SCSI driver, and trying too hard to allocate
 * memory might generate disk I/O. We also don't want to fail disk I/O
 * in that case because we can't get an allocation - the I/O could be
 * trying to swap out data to free memory. Since that is pathological,
 * just use a statically allocated scatter list.
 */
static int __devinit pvscsi_allocate_sg(struct pvscsi_adapter *adapter)
{
        struct pvscsi_ctx *ctx;
        int i;

        ctx = adapter->cmd_map;
        BUILD_BUG_ON(sizeof(struct pvscsi_sg_list) > SGL_SIZE);

        for (i = 0; i < adapter->req_depth; ++i, ++ctx) {
                ctx->sgl = (void *)__get_free_pages(GFP_KERNEL,
                                                    get_order(SGL_SIZE));
                ctx->sglPA = 0;
                BUG_ON(!IS_ALIGNED(((unsigned long)ctx->sgl), PAGE_SIZE));
                if (!ctx->sgl) {
                        for (; i >= 0; --i, --ctx) {
                                free_pages((unsigned long)ctx->sgl,
                                           get_order(SGL_SIZE));
                                ctx->sgl = NULL;
                        }
                        return -ENOMEM;
                }
        }

        return 0;
}

static int __devinit pvscsi_probe(struct pci_dev *pdev,
                                  const struct pci_device_id *id)
{
        struct pvscsi_adapter *adapter;
        struct Scsi_Host *host;
        unsigned int i;
        unsigned long flags = 0;
        int error;

        error = -ENODEV;

        if (pci_enable_device(pdev))
                return error;

        if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0 &&
            pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
                printk(KERN_INFO "vmw_pvscsi: using 64bit dma\n");
        } else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) == 0 &&
                   pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) == 0) {
                printk(KERN_INFO "vmw_pvscsi: using 32bit dma\n");
        } else {
                printk(KERN_ERR "vmw_pvscsi: failed to set DMA mask\n");
                goto out_disable_device;
        }

        pvscsi_template.can_queue =
                min(PVSCSI_MAX_NUM_PAGES_REQ_RING, pvscsi_ring_pages) *
                PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE;
        pvscsi_template.cmd_per_lun =
                min(pvscsi_template.can_queue, pvscsi_cmd_per_lun);
        host = scsi_host_alloc(&pvscsi_template, sizeof(struct pvscsi_adapter));
        if (!host) {
                printk(KERN_ERR "vmw_pvscsi: failed to allocate host\n");
                goto out_disable_device;
        }

        adapter = shost_priv(host);
        memset(adapter, 0, sizeof(*adapter));
        adapter->dev  = pdev;
        adapter->host = host;

        spin_lock_init(&adapter->hw_lock);

        host->max_channel = 0;
        host->max_id      = 16;
        host->max_lun     = 1;
        host->max_cmd_len = 16;

        adapter->rev = pdev->revision;

        if (pci_request_regions(pdev, "vmw_pvscsi")) {
                printk(KERN_ERR "vmw_pvscsi: pci memory selection failed\n");
                goto out_free_host;
        }

        for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
                if ((pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE_IO))
                        continue;

                if (pci_resource_len(pdev, i) < PVSCSI_MEM_SPACE_SIZE)
                        continue;

                break;
        }

        if (i == DEVICE_COUNT_RESOURCE) {
                printk(KERN_ERR
                       "vmw_pvscsi: adapter has no suitable MMIO region\n");
                goto out_release_resources;
        }

        adapter->mmioBase = pci_iomap(pdev, i, PVSCSI_MEM_SPACE_SIZE);

        if (!adapter->mmioBase) {
                printk(KERN_ERR
                       "vmw_pvscsi: can't iomap for BAR %d memsize %lu\n",
                       i, PVSCSI_MEM_SPACE_SIZE);
                goto out_release_resources;
        }

        pci_set_master(pdev);
        pci_set_drvdata(pdev, host);

        ll_adapter_reset(adapter);

        adapter->use_msg = pvscsi_setup_msg_workqueue(adapter);

        error = pvscsi_allocate_rings(adapter);
        if (error) {
                printk(KERN_ERR "vmw_pvscsi: unable to allocate ring memory\n");
                goto out_release_resources;
        }

        /*
         * From this point on we should reset the adapter if anything goes
         * wrong.
         */
        pvscsi_setup_all_rings(adapter);

        adapter->cmd_map = kcalloc(adapter->req_depth,
                                   sizeof(struct pvscsi_ctx), GFP_KERNEL);
        if (!adapter->cmd_map) {
                printk(KERN_ERR "vmw_pvscsi: failed to allocate memory.\n");
                error = -ENOMEM;
                goto out_reset_adapter;
        }

        INIT_LIST_HEAD(&adapter->cmd_pool);
        for (i = 0; i < adapter->req_depth; i++) {
                struct pvscsi_ctx *ctx = adapter->cmd_map + i;
                list_add(&ctx->list, &adapter->cmd_pool);
        }

        error = pvscsi_allocate_sg(adapter);
        if (error) {
                printk(KERN_ERR "vmw_pvscsi: unable to allocate s/g table\n");
                goto out_reset_adapter;
        }

        if (!pvscsi_disable_msix &&
            pvscsi_setup_msix(adapter, &adapter->irq) == 0) {
                printk(KERN_INFO "vmw_pvscsi: using MSI-X\n");
                adapter->use_msix = 1;
        } else if (!pvscsi_disable_msi && pci_enable_msi(pdev) == 0) {
                printk(KERN_INFO "vmw_pvscsi: using MSI\n");
                adapter->use_msi = 1;
                adapter->irq = pdev->irq;
        } else {
                printk(KERN_INFO "vmw_pvscsi: using INTx\n");
                adapter->irq = pdev->irq;
                flags = IRQF_SHARED;
        }

        error = request_irq(adapter->irq, pvscsi_isr, flags,
                            "vmw_pvscsi", adapter);
        if (error) {
                printk(KERN_ERR
                       "vmw_pvscsi: unable to request IRQ: %d\n", error);
                adapter->irq = 0;
                goto out_reset_adapter;
        }

        error = scsi_add_host(host, &pdev->dev);
        if (error) {
                printk(KERN_ERR
                       "vmw_pvscsi: scsi_add_host failed: %d\n", error);
                goto out_reset_adapter;
        }

        dev_info(&pdev->dev, "VMware PVSCSI rev %d host #%u\n",
                 adapter->rev, host->host_no);

        pvscsi_unmask_intr(adapter);

        scsi_scan_host(host);

        return 0;

out_reset_adapter:
        ll_adapter_reset(adapter);
out_release_resources:
        pvscsi_release_resources(adapter);
out_free_host:
        scsi_host_put(host);
out_disable_device:
        pci_set_drvdata(pdev, NULL);
        pci_disable_device(pdev);

        return error;
}

static void __pvscsi_shutdown(struct pvscsi_adapter *adapter)
{
        pvscsi_mask_intr(adapter);

        if (adapter->workqueue)
                flush_workqueue(adapter->workqueue);

        pvscsi_shutdown_intr(adapter);

        pvscsi_process_request_ring(adapter);
        pvscsi_process_completion_ring(adapter);
        ll_adapter_reset(adapter);
}

static void pvscsi_shutdown(struct pci_dev *dev)
{
        struct Scsi_Host *host = pci_get_drvdata(dev);
        struct pvscsi_adapter *adapter = shost_priv(host);

        __pvscsi_shutdown(adapter);
}

static void pvscsi_remove(struct pci_dev *pdev)
{
        struct Scsi_Host *host = pci_get_drvdata(pdev);
        struct pvscsi_adapter *adapter = shost_priv(host);

        scsi_remove_host(host);

        __pvscsi_shutdown(adapter);
        pvscsi_release_resources(adapter);
        scsi_host_put(host);

        pci_set_drvdata(pdev, NULL);

        pci_disable_device(pdev);
}

static struct pci_driver pvscsi_pci_driver = {
        .name           = "vmw_pvscsi",
        .id_table       = pvscsi_pci_tbl,
        .probe          = pvscsi_probe,
        .remove         = __devexit_p(pvscsi_remove),
        .shutdown       = pvscsi_shutdown,
};

static int __init pvscsi_init(void)
{
        pr_info("%s - version %s\n",
                PVSCSI_LINUX_DRIVER_DESC, PVSCSI_DRIVER_VERSION_STRING);
        return pci_register_driver(&pvscsi_pci_driver);
}

static void __exit pvscsi_exit(void)
{
        pci_unregister_driver(&pvscsi_pci_driver);
}

module_init(pvscsi_init);
module_exit(pvscsi_exit);