fnic_scsi.c
/*
 * Copyright 2008 Cisco Systems, Inc. All rights reserved.
 * Copyright 2007 Nuova Systems, Inc. All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/mempool.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/workqueue.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/delay.h>
#include <linux/gfp.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_tcq.h>
#include <scsi/fc/fc_els.h>
#include <scsi/fc/fc_fcoe.h>
#include <scsi/libfc.h>
#include <scsi/fc_frame.h>
#include "fnic_io.h"
#include "fnic.h"

const char *fnic_state_str[] = {
	[FNIC_IN_FC_MODE] = "FNIC_IN_FC_MODE",
	[FNIC_IN_FC_TRANS_ETH_MODE] = "FNIC_IN_FC_TRANS_ETH_MODE",
	[FNIC_IN_ETH_MODE] = "FNIC_IN_ETH_MODE",
	[FNIC_IN_ETH_TRANS_FC_MODE] = "FNIC_IN_ETH_TRANS_FC_MODE",
};

static const char *fnic_ioreq_state_str[] = {
	[FNIC_IOREQ_CMD_PENDING] = "FNIC_IOREQ_CMD_PENDING",
	[FNIC_IOREQ_ABTS_PENDING] = "FNIC_IOREQ_ABTS_PENDING",
	[FNIC_IOREQ_ABTS_COMPLETE] = "FNIC_IOREQ_ABTS_COMPLETE",
	[FNIC_IOREQ_CMD_COMPLETE] = "FNIC_IOREQ_CMD_COMPLETE",
};

static const char *fcpio_status_str[] = {
	[FCPIO_SUCCESS] = "FCPIO_SUCCESS", /* 0x0 */
	[FCPIO_INVALID_HEADER] = "FCPIO_INVALID_HEADER",
	[FCPIO_OUT_OF_RESOURCE] = "FCPIO_OUT_OF_RESOURCE",
	[FCPIO_INVALID_PARAM] = "FCPIO_INVALID_PARAM",
	[FCPIO_REQ_NOT_SUPPORTED] = "FCPIO_REQ_NOT_SUPPORTED",
	[FCPIO_IO_NOT_FOUND] = "FCPIO_IO_NOT_FOUND",
	[FCPIO_ABORTED] = "FCPIO_ABORTED", /* 0x41 */
	[FCPIO_TIMEOUT] = "FCPIO_TIMEOUT",
	[FCPIO_SGL_INVALID] = "FCPIO_SGL_INVALID",
	[FCPIO_MSS_INVALID] = "FCPIO_MSS_INVALID",
	[FCPIO_DATA_CNT_MISMATCH] = "FCPIO_DATA_CNT_MISMATCH",
	[FCPIO_FW_ERR] = "FCPIO_FW_ERR",
	[FCPIO_ITMF_REJECTED] = "FCPIO_ITMF_REJECTED",
	[FCPIO_ITMF_FAILED] = "FCPIO_ITMF_FAILED",
	[FCPIO_ITMF_INCORRECT_LUN] = "FCPIO_ITMF_INCORRECT_LUN",
	[FCPIO_CMND_REJECTED] = "FCPIO_CMND_REJECTED",
	[FCPIO_NO_PATH_AVAIL] = "FCPIO_NO_PATH_AVAIL",
	[FCPIO_PATH_FAILED] = "FCPIO_PATH_FAILED",
	[FCPIO_LUNMAP_CHNG_PEND] = "FCPIO_LUNMAP_CHNG_PEND",
};

const char *fnic_state_to_str(unsigned int state)
{
	if (state >= ARRAY_SIZE(fnic_state_str) || !fnic_state_str[state])
		return "unknown";

	return fnic_state_str[state];
}

static const char *fnic_ioreq_state_to_str(unsigned int state)
{
	if (state >= ARRAY_SIZE(fnic_ioreq_state_str) ||
	    !fnic_ioreq_state_str[state])
		return "unknown";

	return fnic_ioreq_state_str[state];
}

static const char *fnic_fcpio_status_to_str(unsigned int status)
{
	if (status >= ARRAY_SIZE(fcpio_status_str) || !fcpio_status_str[status])
		return "unknown";

	return fcpio_status_str[status];
}

static void fnic_cleanup_io(struct fnic *fnic, int exclude_id);
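
/*
 * Map a SCSI command's tag to one of the FNIC_IO_LOCKS per-IO spinlocks.
 * All accesses to the per-command io_req state (CMD_SP, CMD_STATE, etc.)
 * are serialized by the hashed lock returned here.
 */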
static inline spinlock_t *fnic_io_lock_hash(struct fnic *fnic,
					    struct scsi_cmnd *sc)
{
	u32 hash = sc->request->tag & (FNIC_IO_LOCKS - 1);

	return &fnic->io_req_lock[hash];
}

/*
 * Unmap the data buffer and sense buffer for an io_req,
 * also unmap and free the device-private scatter/gather list.
 */
static void fnic_release_ioreq_buf(struct fnic *fnic,
				   struct fnic_io_req *io_req,
				   struct scsi_cmnd *sc)
{
	if (io_req->sgl_list_pa)
		pci_unmap_single(fnic->pdev, io_req->sgl_list_pa,
				 sizeof(io_req->sgl_list[0]) * io_req->sgl_cnt,
				 PCI_DMA_TODEVICE);
	scsi_dma_unmap(sc);

	if (io_req->sgl_cnt)
		mempool_free(io_req->sgl_list_alloc,
			     fnic->io_sgl_pool[io_req->sgl_type]);
	if (io_req->sense_buf_pa)
		pci_unmap_single(fnic->pdev, io_req->sense_buf_pa,
				 SCSI_SENSE_BUFFERSIZE, PCI_DMA_FROMDEVICE);
}

/* Free up Copy Wq descriptors. Called with copy_wq lock held */
static int free_wq_copy_descs(struct fnic *fnic, struct vnic_wq_copy *wq)
{
	/* if no Ack received from firmware, then nothing to clean */
	if (!fnic->fw_ack_recd[0])
		return 1;

	/*
	 * Update desc_available count based on number of freed descriptors
	 * Account for wraparound
	 */
	if (wq->to_clean_index <= fnic->fw_ack_index[0])
		wq->ring.desc_avail += (fnic->fw_ack_index[0]
					- wq->to_clean_index + 1);
	else
		wq->ring.desc_avail += (wq->ring.desc_count
					- wq->to_clean_index
					+ fnic->fw_ack_index[0] + 1);

	/*
	 * just bump clean index to ack_index+1 accounting for wraparound
	 * this will essentially free up all descriptors between
	 * to_clean_index and fw_ack_index, both inclusive
	 */
	wq->to_clean_index =
		(fnic->fw_ack_index[0] + 1) % wq->ring.desc_count;

	/* we have processed the acks received so far */
	fnic->fw_ack_recd[0] = 0;
	return 0;
}

/*
 * fnic_fw_reset_handler
 * Routine to send reset msg to fw
 */
int fnic_fw_reset_handler(struct fnic *fnic)
{
	struct vnic_wq_copy *wq = &fnic->wq_copy[0];
	int ret = 0;
	unsigned long flags;

	skb_queue_purge(&fnic->frame_queue);
	skb_queue_purge(&fnic->tx_queue);

	spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);

	if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
		free_wq_copy_descs(fnic, wq);

	if (!vnic_wq_copy_desc_avail(wq))
		ret = -EAGAIN;
	else
		fnic_queue_wq_copy_desc_fw_reset(wq, SCSI_NO_TAG);

	spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);

	if (!ret)
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "Issued fw reset\n");
	else
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "Failed to issue fw reset\n");
	return ret;
}

/*
 * fnic_flogi_reg_handler
 * Routine to send flogi register msg to fw
 */
int fnic_flogi_reg_handler(struct fnic *fnic, u32 fc_id)
{
	struct vnic_wq_copy *wq = &fnic->wq_copy[0];
	enum fcpio_flogi_reg_format_type format;
	struct fc_lport *lp = fnic->lport;
	u8 gw_mac[ETH_ALEN];
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);

	if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
		free_wq_copy_descs(fnic, wq);

	if (!vnic_wq_copy_desc_avail(wq)) {
		ret = -EAGAIN;
		goto flogi_reg_ioreq_end;
	}

	if (fnic->ctlr.map_dest) {
		memset(gw_mac, 0xff, ETH_ALEN);
		format = FCPIO_FLOGI_REG_DEF_DEST;
	} else {
		memcpy(gw_mac, fnic->ctlr.dest_addr, ETH_ALEN);
		format = FCPIO_FLOGI_REG_GW_DEST;
	}

	if ((fnic->config.flags & VFCF_FIP_CAPABLE) && !fnic->ctlr.map_dest) {
		fnic_queue_wq_copy_desc_fip_reg(wq, SCSI_NO_TAG,
						fc_id, gw_mac,
						fnic->data_src_addr,
						lp->r_a_tov, lp->e_d_tov);
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "FLOGI FIP reg issued fcid %x src %pM dest %pM\n",
			      fc_id, fnic->data_src_addr, gw_mac);
	} else {
		fnic_queue_wq_copy_desc_flogi_reg(wq, SCSI_NO_TAG,
						  format, fc_id, gw_mac);
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "FLOGI reg issued fcid %x map %d dest %pM\n",
			      fc_id, fnic->ctlr.map_dest, gw_mac);
	}

flogi_reg_ioreq_end:
	spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
	return ret;
}

/*
 * fnic_queue_wq_copy_desc
 * Routine to enqueue a wq copy desc
 */
static inline int fnic_queue_wq_copy_desc(struct fnic *fnic,
					  struct vnic_wq_copy *wq,
					  struct fnic_io_req *io_req,
					  struct scsi_cmnd *sc,
					  int sg_count)
{
	struct scatterlist *sg;
	struct fc_rport *rport = starget_to_rport(scsi_target(sc->device));
	struct fc_rport_libfc_priv *rp = rport->dd_data;
	struct host_sg_desc *desc;
	u8 pri_tag = 0;
	unsigned int i;
	unsigned long intr_flags;
	int flags;
	u8 exch_flags;
	struct scsi_lun fc_lun;
	char msg[2];

	if (sg_count) {
		/* For each SGE, create a device desc entry */
		desc = io_req->sgl_list;
		for_each_sg(scsi_sglist(sc), sg, sg_count, i) {
			desc->addr = cpu_to_le64(sg_dma_address(sg));
			desc->len = cpu_to_le32(sg_dma_len(sg));
			desc->_resvd = 0;
			desc++;
		}

		io_req->sgl_list_pa = pci_map_single
			(fnic->pdev,
			 io_req->sgl_list,
			 sizeof(io_req->sgl_list[0]) * sg_count,
			 PCI_DMA_TODEVICE);
	}

	io_req->sense_buf_pa = pci_map_single(fnic->pdev,
					      sc->sense_buffer,
					      SCSI_SENSE_BUFFERSIZE,
					      PCI_DMA_FROMDEVICE);

	int_to_scsilun(sc->device->lun, &fc_lun);

	pri_tag = FCPIO_ICMND_PTA_SIMPLE;
	msg[0] = MSG_SIMPLE_TAG;
	scsi_populate_tag_msg(sc, msg);
	if (msg[0] == MSG_ORDERED_TAG)
		pri_tag = FCPIO_ICMND_PTA_ORDERED;

	/* Enqueue the descriptor in the Copy WQ */
	spin_lock_irqsave(&fnic->wq_copy_lock[0], intr_flags);

	if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
		free_wq_copy_descs(fnic, wq);

	if (unlikely(!vnic_wq_copy_desc_avail(wq))) {
		spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags);
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	flags = 0;
	if (sc->sc_data_direction == DMA_FROM_DEVICE)
		flags = FCPIO_ICMND_RDDATA;
	else if (sc->sc_data_direction == DMA_TO_DEVICE)
		flags = FCPIO_ICMND_WRDATA;

	exch_flags = 0;
	if ((fnic->config.flags & VFCF_FCP_SEQ_LVL_ERR) &&
	    (rp->flags & FC_RP_FLAGS_RETRY))
		exch_flags |= FCPIO_ICMND_SRFLAG_RETRY;

	fnic_queue_wq_copy_desc_icmnd_16(wq, sc->request->tag,
					 0, exch_flags, io_req->sgl_cnt,
					 SCSI_SENSE_BUFFERSIZE,
					 io_req->sgl_list_pa,
					 io_req->sense_buf_pa,
					 0, /* scsi cmd ref, always 0 */
					 pri_tag, /* scsi pri and tag */
					 flags,	/* command flags */
					 sc->cmnd, sc->cmd_len,
					 scsi_bufflen(sc),
					 fc_lun.scsi_lun, io_req->port_id,
					 rport->maxframe_size, rp->r_a_tov,
					 rp->e_d_tov);

	spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags);
	return 0;
}

/*
 * fnic_queuecommand
 * Routine to send a scsi cdb
 * Called with host_lock held and interrupts disabled.
 */
int fnic_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
{
	struct fc_lport *lp;
	struct fc_rport *rport;
	struct fnic_io_req *io_req;
	struct fnic *fnic;
	struct vnic_wq_copy *wq;
	int ret;
	int sg_count;
	unsigned long flags;
	unsigned long ptr;

	rport = starget_to_rport(scsi_target(sc->device));
	ret = fc_remote_port_chkready(rport);
	if (ret) {
		sc->result = ret;
		done(sc);
		return 0;
	}

	lp = shost_priv(sc->device->host);
	if (lp->state != LPORT_ST_READY || !(lp->link_up))
		return SCSI_MLQUEUE_HOST_BUSY;

	/*
	 * Release host lock, use driver resource specific locks from here.
	 * Don't re-enable interrupts in case they were disabled prior to the
	 * caller disabling them.
	 */
	spin_unlock(lp->host->host_lock);

	/* Get a new io_req for this SCSI IO */
	fnic = lport_priv(lp);

	io_req = mempool_alloc(fnic->io_req_pool, GFP_ATOMIC);
	if (!io_req) {
		ret = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}
	memset(io_req, 0, sizeof(*io_req));

	/* Map the data buffer */
	sg_count = scsi_dma_map(sc);
	if (sg_count < 0) {
		/* DMA mapping failed; ask the midlayer to requeue the IO */
		ret = SCSI_MLQUEUE_HOST_BUSY;
		mempool_free(io_req, fnic->io_req_pool);
		goto out;
	}

	/* Determine the type of scatter/gather list we need */
	io_req->sgl_cnt = sg_count;
	io_req->sgl_type = FNIC_SGL_CACHE_DFLT;
	if (sg_count > FNIC_DFLT_SG_DESC_CNT)
		io_req->sgl_type = FNIC_SGL_CACHE_MAX;

	if (sg_count) {
		io_req->sgl_list =
			mempool_alloc(fnic->io_sgl_pool[io_req->sgl_type],
				      GFP_ATOMIC | GFP_DMA);
		if (!io_req->sgl_list) {
			ret = SCSI_MLQUEUE_HOST_BUSY;
			scsi_dma_unmap(sc);
			mempool_free(io_req, fnic->io_req_pool);
			goto out;
		}

		/* Cache sgl list allocated address before alignment */
		io_req->sgl_list_alloc = io_req->sgl_list;
		ptr = (unsigned long) io_req->sgl_list;
		if (ptr % FNIC_SG_DESC_ALIGN) {
			io_req->sgl_list = (struct host_sg_desc *)
				(((unsigned long) ptr
				  + FNIC_SG_DESC_ALIGN - 1)
				 & ~(FNIC_SG_DESC_ALIGN - 1));
		}
	}

	/* initialize rest of io_req */
	io_req->port_id = rport->port_id;
	CMD_STATE(sc) = FNIC_IOREQ_CMD_PENDING;
	CMD_SP(sc) = (char *)io_req;
	sc->scsi_done = done;

	/* create copy wq desc and enqueue it */
	wq = &fnic->wq_copy[0];
	ret = fnic_queue_wq_copy_desc(fnic, wq, io_req, sc, sg_count);
	if (ret) {
		/*
		 * In case another thread cancelled the request,
		 * refetch the pointer under the lock.
		 */
		spinlock_t *io_lock = fnic_io_lock_hash(fnic, sc);

		spin_lock_irqsave(io_lock, flags);
		io_req = (struct fnic_io_req *)CMD_SP(sc);
		CMD_SP(sc) = NULL;
		CMD_STATE(sc) = FNIC_IOREQ_CMD_COMPLETE;
		spin_unlock_irqrestore(io_lock, flags);
		if (io_req) {
			fnic_release_ioreq_buf(fnic, io_req, sc);
			mempool_free(io_req, fnic->io_req_pool);
		}
	}
out:
	/* acquire host lock before returning to SCSI */
	spin_lock(lp->host->host_lock);
	return ret;
}

/*
 * fnic_fcpio_fw_reset_cmpl_handler
 * Routine to handle fw reset completion
 */
static int fnic_fcpio_fw_reset_cmpl_handler(struct fnic *fnic,
					    struct fcpio_fw_req *desc)
{
	u8 type;
	u8 hdr_status;
	struct fcpio_tag tag;
	int ret = 0;
	unsigned long flags;

	fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);

	/* Clean up all outstanding io requests */
	fnic_cleanup_io(fnic, SCSI_NO_TAG);

	spin_lock_irqsave(&fnic->fnic_lock, flags);

	/* fnic should be in FC_TRANS_ETH_MODE */
	if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE) {
		/* Check status of reset completion */
		if (!hdr_status) {
			FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
				      "reset cmpl success\n");
			/* Ready to send flogi out */
			fnic->state = FNIC_IN_ETH_MODE;
		} else {
			FNIC_SCSI_DBG(KERN_DEBUG,
				      fnic->lport->host,
				      "fnic fw_reset : failed %s\n",
				      fnic_fcpio_status_to_str(hdr_status));

			/*
			 * Unable to change to eth mode, cannot send out flogi
			 * Change state to fc mode, so that subsequent Flogi
			 * requests from libFC will cause more attempts to
			 * reset the firmware. Free the cached flogi
			 */
			fnic->state = FNIC_IN_FC_MODE;
			ret = -1;
		}
	} else {
		FNIC_SCSI_DBG(KERN_DEBUG,
			      fnic->lport->host,
			      "Unexpected state %s while processing"
			      " reset cmpl\n", fnic_state_to_str(fnic->state));
		ret = -1;
	}

	/* Thread removing device blocks till firmware reset is complete */
	if (fnic->remove_wait)
		complete(fnic->remove_wait);

	/*
	 * If fnic is being removed, or fw reset failed
	 * free the flogi frame. Else, send it out
	 */
	if (fnic->remove_wait || ret) {
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		skb_queue_purge(&fnic->tx_queue);
		goto reset_cmpl_handler_end;
	}

	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	fnic_flush_tx(fnic);

reset_cmpl_handler_end:
	return ret;
}

/*
 * fnic_fcpio_flogi_reg_cmpl_handler
 * Routine to handle flogi register completion
 */
static int fnic_fcpio_flogi_reg_cmpl_handler(struct fnic *fnic,
					     struct fcpio_fw_req *desc)
{
	u8 type;
	u8 hdr_status;
	struct fcpio_tag tag;
	int ret = 0;
	unsigned long flags;

	fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);

	/* Update fnic state based on status of flogi reg completion */
	spin_lock_irqsave(&fnic->fnic_lock, flags);

	if (fnic->state == FNIC_IN_ETH_TRANS_FC_MODE) {
		/* Check flogi registration completion status */
		if (!hdr_status) {
			FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
				      "flogi reg succeeded\n");
			fnic->state = FNIC_IN_FC_MODE;
		} else {
			FNIC_SCSI_DBG(KERN_DEBUG,
				      fnic->lport->host,
				      "fnic flogi reg: failed %s\n",
				      fnic_fcpio_status_to_str(hdr_status));
			fnic->state = FNIC_IN_ETH_MODE;
			ret = -1;
		}
	} else {
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "Unexpected fnic state %s while"
			      " processing flogi reg completion\n",
			      fnic_state_to_str(fnic->state));
		ret = -1;
	}

	if (!ret) {
		if (fnic->stop_rx_link_events) {
			spin_unlock_irqrestore(&fnic->fnic_lock, flags);
			goto reg_cmpl_handler_end;
		}
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);

		fnic_flush_tx(fnic);
		queue_work(fnic_event_queue, &fnic->frame_work);
	} else {
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
	}

reg_cmpl_handler_end:
	return ret;
}
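
/*
 * Validate a request_out index reported by firmware against the live
 * window of the copy WQ ring. Returns 1 if the index lies between
 * to_clean_index and to_use_index (accounting for ring wraparound),
 * 0 if it is stale and must be ignored.
 */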
static inline int is_ack_index_in_range(struct vnic_wq_copy *wq,
					u16 request_out)
{
	if (wq->to_clean_index <= wq->to_use_index) {
		/* out of range, stale request_out index */
		if (request_out < wq->to_clean_index ||
		    request_out >= wq->to_use_index)
			return 0;
	} else {
		/* out of range, stale request_out index */
		if (request_out < wq->to_clean_index &&
		    request_out >= wq->to_use_index)
			return 0;
	}
	/* request_out index is in range */
	return 1;
}

/*
 * Mark that ack received and store the Ack index. If there are multiple
 * acks received before Tx thread cleans it up, the latest value will be
 * used which is correct behavior. This state should be in the copy Wq
 * instead of in the fnic
 */
static inline void fnic_fcpio_ack_handler(struct fnic *fnic,
					  unsigned int cq_index,
					  struct fcpio_fw_req *desc)
{
	struct vnic_wq_copy *wq;
	u16 request_out = desc->u.ack.request_out;
	unsigned long flags;

	/* mark the ack state */
	wq = &fnic->wq_copy[cq_index - fnic->raw_wq_count - fnic->rq_count];
	spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);
	if (is_ack_index_in_range(wq, request_out)) {
		fnic->fw_ack_index[0] = request_out;
		fnic->fw_ack_recd[0] = 1;
	}
	spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
}

/*
 * fnic_fcpio_icmnd_cmpl_handler
 * Routine to handle icmnd completions
 */
static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
					  struct fcpio_fw_req *desc)
{
	u8 type;
	u8 hdr_status;
	struct fcpio_tag tag;
	u32 id;
	u64 xfer_len = 0;
	struct fcpio_icmnd_cmpl *icmnd_cmpl;
	struct fnic_io_req *io_req;
	struct scsi_cmnd *sc;
	unsigned long flags;
	spinlock_t *io_lock;

	/* Decode the cmpl description to get the io_req id */
	fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
	fcpio_tag_id_dec(&tag, &id);

	if (id >= FNIC_MAX_IO_REQ)
		return;

	sc = scsi_host_find_tag(fnic->lport->host, id);
	WARN_ON_ONCE(!sc);
	if (!sc)
		return;

	io_lock = fnic_io_lock_hash(fnic, sc);
	spin_lock_irqsave(io_lock, flags);
	io_req = (struct fnic_io_req *)CMD_SP(sc);
	WARN_ON_ONCE(!io_req);
	if (!io_req) {
		spin_unlock_irqrestore(io_lock, flags);
		return;
	}

	/* firmware completed the io */
	io_req->io_completed = 1;

	/*
	 * if SCSI-ML has already issued abort on this command,
	 * ignore completion of the IO. The abts path will clean it up
	 */
	if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
		spin_unlock_irqrestore(io_lock, flags);
		return;
	}

	/* Mark the IO as complete */
	CMD_STATE(sc) = FNIC_IOREQ_CMD_COMPLETE;

	icmnd_cmpl = &desc->u.icmnd_cmpl;

	switch (hdr_status) {
	case FCPIO_SUCCESS:
		sc->result = (DID_OK << 16) | icmnd_cmpl->scsi_status;
		xfer_len = scsi_bufflen(sc);
		scsi_set_resid(sc, icmnd_cmpl->residual);

		if (icmnd_cmpl->flags & FCPIO_ICMND_CMPL_RESID_UNDER)
			xfer_len -= icmnd_cmpl->residual;

		/*
		 * If queue_full, then try to reduce queue depth for all
		 * LUNS on the target. Todo: this should be accompanied
		 * by a periodic queue_depth rampup based on successful
		 * IO completion.
		 */
		if (icmnd_cmpl->scsi_status == QUEUE_FULL) {
			struct scsi_device *t_sdev;
			int qd = 0;

			shost_for_each_device(t_sdev, sc->device->host) {
				if (t_sdev->id != sc->device->id)
					continue;

				if (t_sdev->queue_depth > 1) {
					qd = scsi_track_queue_full
						(t_sdev,
						 t_sdev->queue_depth - 1);
					if (qd == -1)
						qd = t_sdev->host->cmd_per_lun;
					shost_printk(KERN_INFO,
						     fnic->lport->host,
						     "scsi[%d:%d:%d:%d]"
						     " queue full detected,"
						     " new depth = %d\n",
						     t_sdev->host->host_no,
						     t_sdev->channel,
						     t_sdev->id, t_sdev->lun,
						     t_sdev->queue_depth);
				}
			}
		}
		break;

	case FCPIO_TIMEOUT:          /* request was timed out */
		sc->result = (DID_TIME_OUT << 16) | icmnd_cmpl->scsi_status;
		break;

	case FCPIO_ABORTED:          /* request was aborted */
		sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
		break;

	case FCPIO_DATA_CNT_MISMATCH: /* recv/sent more/less data than exp. */
		scsi_set_resid(sc, icmnd_cmpl->residual);
		sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
		break;

	case FCPIO_OUT_OF_RESOURCE:  /* out of resources to complete request */
		sc->result = (DID_REQUEUE << 16) | icmnd_cmpl->scsi_status;
		break;
	case FCPIO_INVALID_HEADER:   /* header contains invalid data */
	case FCPIO_INVALID_PARAM:    /* some parameter in request invalid */
	case FCPIO_REQ_NOT_SUPPORTED:/* request type is not supported */
	case FCPIO_IO_NOT_FOUND:     /* requested I/O was not found */
	case FCPIO_SGL_INVALID:      /* request was aborted due to sgl error */
	case FCPIO_MSS_INVALID:      /* request was aborted due to mss error */
	case FCPIO_FW_ERR:           /* request was terminated due to fw error */
	default:
		shost_printk(KERN_ERR, fnic->lport->host, "hdr status = %s\n",
			     fnic_fcpio_status_to_str(hdr_status));
		sc->result = (DID_ERROR << 16) | icmnd_cmpl->scsi_status;
		break;
	}

	/* Break link with the SCSI command */
	CMD_SP(sc) = NULL;

	spin_unlock_irqrestore(io_lock, flags);

	fnic_release_ioreq_buf(fnic, io_req, sc);
	mempool_free(io_req, fnic->io_req_pool);

	if (sc->sc_data_direction == DMA_FROM_DEVICE) {
		fnic->lport->host_stats.fcp_input_requests++;
		fnic->fcp_input_bytes += xfer_len;
	} else if (sc->sc_data_direction == DMA_TO_DEVICE) {
		fnic->lport->host_stats.fcp_output_requests++;
		fnic->fcp_output_bytes += xfer_len;
	} else
		fnic->lport->host_stats.fcp_control_requests++;

	/* Call SCSI completion function to complete the IO */
	if (sc->scsi_done)
		sc->scsi_done(sc);
}

/*
 * fnic_fcpio_itmf_cmpl_handler
 * Routine to handle itmf completions
 */
static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic,
					 struct fcpio_fw_req *desc)
{
	u8 type;
	u8 hdr_status;
	struct fcpio_tag tag;
	u32 id;
	struct scsi_cmnd *sc;
	struct fnic_io_req *io_req;
	unsigned long flags;
	spinlock_t *io_lock;

	fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag);
	fcpio_tag_id_dec(&tag, &id);

	if ((id & FNIC_TAG_MASK) >= FNIC_MAX_IO_REQ)
		return;

	sc = scsi_host_find_tag(fnic->lport->host, id & FNIC_TAG_MASK);
	WARN_ON_ONCE(!sc);
	if (!sc)
		return;

	io_lock = fnic_io_lock_hash(fnic, sc);
	spin_lock_irqsave(io_lock, flags);
	io_req = (struct fnic_io_req *)CMD_SP(sc);
	WARN_ON_ONCE(!io_req);
	if (!io_req) {
		spin_unlock_irqrestore(io_lock, flags);
		return;
	}

	if (id & FNIC_TAG_ABORT) {
		/* Completion of abort cmd */
		if (CMD_STATE(sc) != FNIC_IOREQ_ABTS_PENDING) {
			/* This is a late completion. Ignore it */
			spin_unlock_irqrestore(io_lock, flags);
			return;
		}
		CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE;
		CMD_ABTS_STATUS(sc) = hdr_status;

		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "abts cmpl recd. id %d status %s\n",
			      (int)(id & FNIC_TAG_MASK),
			      fnic_fcpio_status_to_str(hdr_status));

		/*
		 * If scsi_eh thread is blocked waiting for abts to complete,
		 * signal completion to it. IO will be cleaned in the thread
		 * else clean it in this context
		 */
		if (io_req->abts_done) {
			complete(io_req->abts_done);
			spin_unlock_irqrestore(io_lock, flags);
		} else {
			FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
				      "abts cmpl, completing IO\n");
			CMD_SP(sc) = NULL;
			sc->result = (DID_ERROR << 16);

			spin_unlock_irqrestore(io_lock, flags);

			fnic_release_ioreq_buf(fnic, io_req, sc);
			mempool_free(io_req, fnic->io_req_pool);
			if (sc->scsi_done)
				sc->scsi_done(sc);
		}

	} else if (id & FNIC_TAG_DEV_RST) {
		/* Completion of device reset */
		CMD_LR_STATUS(sc) = hdr_status;
		CMD_STATE(sc) = FNIC_IOREQ_CMD_COMPLETE;
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "dev reset cmpl recd. id %d status %s\n",
			      (int)(id & FNIC_TAG_MASK),
			      fnic_fcpio_status_to_str(hdr_status));
		if (io_req->dr_done)
			complete(io_req->dr_done);
		spin_unlock_irqrestore(io_lock, flags);

	} else {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Unexpected itmf io state %s tag %x\n",
			     fnic_ioreq_state_to_str(CMD_STATE(sc)), id);
		spin_unlock_irqrestore(io_lock, flags);
	}
}

/*
 * fnic_fcpio_cmpl_handler
 * Routine to service the cq for wq_copy
 */
static int fnic_fcpio_cmpl_handler(struct vnic_dev *vdev,
				   unsigned int cq_index,
				   struct fcpio_fw_req *desc)
{
	struct fnic *fnic = vnic_dev_priv(vdev);
	int ret = 0;

	switch (desc->hdr.type) {
	case FCPIO_ACK: /* fw copied copy wq desc to its queue */
		fnic_fcpio_ack_handler(fnic, cq_index, desc);
		break;

	case FCPIO_ICMND_CMPL: /* fw completed a command */
		fnic_fcpio_icmnd_cmpl_handler(fnic, desc);
		break;

	case FCPIO_ITMF_CMPL: /* fw completed itmf (abort cmd, lun reset) */
		fnic_fcpio_itmf_cmpl_handler(fnic, desc);
		break;

	case FCPIO_FLOGI_REG_CMPL: /* fw completed flogi_reg */
	case FCPIO_FLOGI_FIP_REG_CMPL: /* fw completed flogi_fip_reg */
		ret = fnic_fcpio_flogi_reg_cmpl_handler(fnic, desc);
		break;

	case FCPIO_RESET_CMPL: /* fw completed reset */
		ret = fnic_fcpio_fw_reset_cmpl_handler(fnic, desc);
		break;

	default:
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "firmware completion type %d\n",
			      desc->hdr.type);
		break;
	}

	return ret;
}

/*
 * fnic_wq_copy_cmpl_handler
 * Routine to process wq copy
 */
int fnic_wq_copy_cmpl_handler(struct fnic *fnic, int copy_work_to_do)
{
	unsigned int wq_work_done = 0;
	unsigned int i, cq_index;
	unsigned int cur_work_done;

	for (i = 0; i < fnic->wq_copy_count; i++) {
		cq_index = i + fnic->raw_wq_count + fnic->rq_count;
		cur_work_done = vnic_cq_copy_service(&fnic->cq[cq_index],
						     fnic_fcpio_cmpl_handler,
						     copy_work_to_do);
		wq_work_done += cur_work_done;
	}
	return wq_work_done;
}
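
/*
 * Free every outstanding io_req (except the tag exclude_id), release its
 * DMA mappings and buffers, and complete the command to the midlayer with
 * DID_TRANSPORT_DISRUPTED. Called from the fw reset completion path,
 * where firmware will no longer return completions for pending IOs.
 */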
static void fnic_cleanup_io(struct fnic *fnic, int exclude_id)
{
	unsigned int i;
	struct fnic_io_req *io_req;
	unsigned long flags = 0;
	struct scsi_cmnd *sc;
	spinlock_t *io_lock;

	for (i = 0; i < FNIC_MAX_IO_REQ; i++) {
		if (i == exclude_id)
			continue;

		sc = scsi_host_find_tag(fnic->lport->host, i);
		if (!sc)
			continue;

		io_lock = fnic_io_lock_hash(fnic, sc);
		spin_lock_irqsave(io_lock, flags);
		io_req = (struct fnic_io_req *)CMD_SP(sc);
		if (!io_req) {
			spin_unlock_irqrestore(io_lock, flags);
			goto cleanup_scsi_cmd;
		}

		CMD_SP(sc) = NULL;

		spin_unlock_irqrestore(io_lock, flags);

		/*
		 * If there is a scsi_cmnd associated with this io_req, then
		 * free the corresponding state
		 */
		fnic_release_ioreq_buf(fnic, io_req, sc);
		mempool_free(io_req, fnic->io_req_pool);

cleanup_scsi_cmd:
		sc->result = DID_TRANSPORT_DISRUPTED << 16;
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "fnic_cleanup_io:"
			      " DID_TRANSPORT_DISRUPTED\n");

		/* Complete the command to SCSI */
		if (sc->scsi_done)
			sc->scsi_done(sc);
	}
}
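
/*
 * Per-descriptor cleanup callback invoked while the copy WQ is being
 * drained (e.g. on queue teardown). Frees the io_req tied to the
 * descriptor's tag and completes the command with DID_NO_CONNECT.
 */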
void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq,
				  struct fcpio_host_req *desc)
{
	u32 id;
	struct fnic *fnic = vnic_dev_priv(wq->vdev);
	struct fnic_io_req *io_req;
	struct scsi_cmnd *sc;
	unsigned long flags;
	spinlock_t *io_lock;

	/* get the tag reference */
	fcpio_tag_id_dec(&desc->hdr.tag, &id);
	id &= FNIC_TAG_MASK;

	if (id >= FNIC_MAX_IO_REQ)
		return;

	sc = scsi_host_find_tag(fnic->lport->host, id);
	if (!sc)
		return;

	io_lock = fnic_io_lock_hash(fnic, sc);
	spin_lock_irqsave(io_lock, flags);

	/* Get the IO context which this desc refers to */
	io_req = (struct fnic_io_req *)CMD_SP(sc);

	/* fnic interrupts are turned off by now */

	if (!io_req) {
		spin_unlock_irqrestore(io_lock, flags);
		goto wq_copy_cleanup_scsi_cmd;
	}

	CMD_SP(sc) = NULL;

	spin_unlock_irqrestore(io_lock, flags);

	fnic_release_ioreq_buf(fnic, io_req, sc);
	mempool_free(io_req, fnic->io_req_pool);

wq_copy_cleanup_scsi_cmd:
	sc->result = DID_NO_CONNECT << 16;
	FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "wq_copy_cleanup_handler:"
		      " DID_NO_CONNECT\n");

	if (sc->scsi_done)
		sc->scsi_done(sc);
}
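
/*
 * Post an abort/terminate ITMF descriptor to the copy WQ for the IO
 * identified by tag. task_req selects FCPIO_ITMF_ABT_TASK (abort sent to
 * the remote port) or FCPIO_ITMF_ABT_TASK_TERM (local termination in
 * firmware). Returns 0 on success, 1 if no descriptor was available.
 */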
static inline int fnic_queue_abort_io_req(struct fnic *fnic, int tag,
					  u32 task_req, u8 *fc_lun,
					  struct fnic_io_req *io_req)
{
	struct vnic_wq_copy *wq = &fnic->wq_copy[0];
	unsigned long flags;

	spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);

	if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
		free_wq_copy_descs(fnic, wq);

	if (!vnic_wq_copy_desc_avail(wq)) {
		spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
		return 1;
	}

	fnic_queue_wq_copy_desc_itmf(wq, tag | FNIC_TAG_ABORT,
				     0, task_req, tag, fc_lun, io_req->port_id,
				     fnic->config.ra_tov, fnic->config.ed_tov);

	spin_unlock_irqrestore(&fnic->wq_copy_lock[0], flags);
	return 0;
}
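
/*
 * Terminate all IOs still pending with firmware for the given remote
 * port id. Each matching command is moved to ABTS_PENDING and a local
 * terminate is queued to firmware; the abort completion path (or a
 * later LUN reset/scsi_eh) finishes the cleanup.
 */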
void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id)
{
	int tag;
	struct fnic_io_req *io_req;
	spinlock_t *io_lock;
	unsigned long flags;
	struct scsi_cmnd *sc;
	struct scsi_lun fc_lun;
	enum fnic_ioreq_state old_ioreq_state;

	FNIC_SCSI_DBG(KERN_DEBUG,
		      fnic->lport->host,
		      "fnic_rport_exch_reset called portid 0x%06x\n",
		      port_id);

	if (fnic->in_remove)
		return;

	for (tag = 0; tag < FNIC_MAX_IO_REQ; tag++) {
		sc = scsi_host_find_tag(fnic->lport->host, tag);
		if (!sc)
			continue;

		io_lock = fnic_io_lock_hash(fnic, sc);
		spin_lock_irqsave(io_lock, flags);

		io_req = (struct fnic_io_req *)CMD_SP(sc);

		if (!io_req || io_req->port_id != port_id) {
			spin_unlock_irqrestore(io_lock, flags);
			continue;
		}

		/*
		 * Found IO that is still pending with firmware and
		 * belongs to rport that went away
		 */
		if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
			spin_unlock_irqrestore(io_lock, flags);
			continue;
		}
		old_ioreq_state = CMD_STATE(sc);
		CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
		CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;

		BUG_ON(io_req->abts_done);

		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "fnic_rport_exch_reset: Issuing abts\n");

		spin_unlock_irqrestore(io_lock, flags);

		/* Now queue the abort command to firmware */
		int_to_scsilun(sc->device->lun, &fc_lun);

		if (fnic_queue_abort_io_req(fnic, tag,
					    FCPIO_ITMF_ABT_TASK_TERM,
					    fc_lun.scsi_lun, io_req)) {
			/*
			 * Revert the cmd state back to old state, if
			 * it hasn't changed in between. This cmd will get
			 * aborted later by scsi_eh, or cleaned up during
			 * lun reset
			 */
			io_lock = fnic_io_lock_hash(fnic, sc);

			spin_lock_irqsave(io_lock, flags);
			if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
				CMD_STATE(sc) = old_ioreq_state;
			spin_unlock_irqrestore(io_lock, flags);
		}
	}
}
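
/*
 * FC transport terminate_rport_io hook: terminate all outstanding IOs
 * queued through the given rport. Mirrors fnic_rport_exch_reset(), but
 * matches commands by rport rather than by port id.
 */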
void fnic_terminate_rport_io(struct fc_rport *rport)
{
	int tag;
	struct fnic_io_req *io_req;
	spinlock_t *io_lock;
	unsigned long flags;
	struct scsi_cmnd *sc;
	struct scsi_lun fc_lun;
	struct fc_rport_libfc_priv *rdata = rport->dd_data;
	struct fc_lport *lport = rdata->local_port;
	struct fnic *fnic = lport_priv(lport);
	struct fc_rport *cmd_rport;
	enum fnic_ioreq_state old_ioreq_state;

	FNIC_SCSI_DBG(KERN_DEBUG,
		      fnic->lport->host, "fnic_terminate_rport_io called"
		      " wwpn 0x%llx, wwnn 0x%llx, portid 0x%06x\n",
		      rport->port_name, rport->node_name,
		      rport->port_id);

	if (fnic->in_remove)
		return;

	for (tag = 0; tag < FNIC_MAX_IO_REQ; tag++) {
		sc = scsi_host_find_tag(fnic->lport->host, tag);
		if (!sc)
			continue;

		cmd_rport = starget_to_rport(scsi_target(sc->device));
		if (rport != cmd_rport)
			continue;

		io_lock = fnic_io_lock_hash(fnic, sc);
		spin_lock_irqsave(io_lock, flags);

		io_req = (struct fnic_io_req *)CMD_SP(sc);

		if (!io_req || rport != cmd_rport) {
			spin_unlock_irqrestore(io_lock, flags);
			continue;
		}

		/*
		 * Found IO that is still pending with firmware and
		 * belongs to rport that went away
		 */
		if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
			spin_unlock_irqrestore(io_lock, flags);
			continue;
		}
		old_ioreq_state = CMD_STATE(sc);
		CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
		CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;

		BUG_ON(io_req->abts_done);

		FNIC_SCSI_DBG(KERN_DEBUG,
			      fnic->lport->host,
			      "fnic_terminate_rport_io: Issuing abts\n");

		spin_unlock_irqrestore(io_lock, flags);

		/* Now queue the abort command to firmware */
		int_to_scsilun(sc->device->lun, &fc_lun);

		if (fnic_queue_abort_io_req(fnic, tag,
					    FCPIO_ITMF_ABT_TASK_TERM,
					    fc_lun.scsi_lun, io_req)) {
			/*
			 * Revert the cmd state back to old state, if
			 * it hasn't changed in between. This cmd will get
			 * aborted later by scsi_eh, or cleaned up during
			 * lun reset
			 */
			io_lock = fnic_io_lock_hash(fnic, sc);

			spin_lock_irqsave(io_lock, flags);
			if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING)
				CMD_STATE(sc) = old_ioreq_state;
			spin_unlock_irqrestore(io_lock, flags);
		}
	}
}

/*
 * This function is exported to SCSI for sending abort cmnds.
 * A SCSI IO is represented by an io_req in the driver.
 * The ioreq is linked to the SCSI Cmd, thus a link with the ULP's IO.
 */
int fnic_abort_cmd(struct scsi_cmnd *sc)
{
	struct fc_lport *lp;
	struct fnic *fnic;
	struct fnic_io_req *io_req;
	struct fc_rport *rport;
	spinlock_t *io_lock;
	unsigned long flags;
	int ret = SUCCESS;
	u32 task_req;
	struct scsi_lun fc_lun;
	DECLARE_COMPLETION_ONSTACK(tm_done);

	/* Wait for rport to unblock */
	fc_block_scsi_eh(sc);

	/* Get local-port, check ready and link up */
	lp = shost_priv(sc->device->host);

	fnic = lport_priv(lp);
	rport = starget_to_rport(scsi_target(sc->device));
	FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
		      "Abort Cmd called FCID 0x%x, LUN 0x%x TAG %d\n",
		      rport->port_id, sc->device->lun, sc->request->tag);

	if (lp->state != LPORT_ST_READY || !(lp->link_up)) {
		ret = FAILED;
		goto fnic_abort_cmd_end;
	}

	/*
	 * Avoid a race between SCSI issuing the abort and the device
	 * completing the command.
	 *
	 * If the command is already completed by the fw cmpl code,
	 * we just return SUCCESS from here. This means that the abort
	 * succeeded. In the SCSI ML, since the timeout for command has
	 * happened, the completion won't actually complete the command
	 * and it will be considered as an aborted command
	 *
	 * The CMD_SP will not be cleared except while holding io_req_lock.
	 */
	io_lock = fnic_io_lock_hash(fnic, sc);
	spin_lock_irqsave(io_lock, flags);
	io_req = (struct fnic_io_req *)CMD_SP(sc);
	if (!io_req) {
		spin_unlock_irqrestore(io_lock, flags);
		goto fnic_abort_cmd_end;
	}

	io_req->abts_done = &tm_done;

	if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
		spin_unlock_irqrestore(io_lock, flags);
		goto wait_pending;
	}
	/*
	 * Command is still pending, need to abort it
	 * If the firmware completes the command after this point,
	 * the completion won't be done till mid-layer, since abort
	 * has already started.
	 */
	CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING;
	CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;

	spin_unlock_irqrestore(io_lock, flags);

	/*
	 * Check readiness of the remote port. If the path to remote
	 * port is up, then send abts to the remote port to terminate
	 * the IO. Else, just locally terminate the IO in the firmware
	 */
	if (fc_remote_port_chkready(rport) == 0)
		task_req = FCPIO_ITMF_ABT_TASK;
	else
		task_req = FCPIO_ITMF_ABT_TASK_TERM;

	/* Now queue the abort command to firmware */
	int_to_scsilun(sc->device->lun, &fc_lun);

	if (fnic_queue_abort_io_req(fnic, sc->request->tag, task_req,
				    fc_lun.scsi_lun, io_req)) {
		spin_lock_irqsave(io_lock, flags);
		io_req = (struct fnic_io_req *)CMD_SP(sc);
		if (io_req)
			io_req->abts_done = NULL;
		spin_unlock_irqrestore(io_lock, flags);
		ret = FAILED;
		goto fnic_abort_cmd_end;
	}

	/*
	 * We queued an abort IO, wait for its completion.
	 * Once the firmware completes the abort command, it will
	 * wake up this thread.
	 */
wait_pending:
	wait_for_completion_timeout(&tm_done,
				    msecs_to_jiffies
				    (2 * fnic->config.ra_tov +
				     fnic->config.ed_tov));

	/* Check the abort status */
	spin_lock_irqsave(io_lock, flags);

	io_req = (struct fnic_io_req *)CMD_SP(sc);
	if (!io_req) {
		spin_unlock_irqrestore(io_lock, flags);
		ret = FAILED;
		goto fnic_abort_cmd_end;
	}
	io_req->abts_done = NULL;

	/* fw did not complete abort, timed out */
	if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
		spin_unlock_irqrestore(io_lock, flags);
		ret = FAILED;
		goto fnic_abort_cmd_end;
	}

	/*
	 * firmware completed the abort, check the status,
	 * free the io_req irrespective of failure or success
	 */
	if (CMD_ABTS_STATUS(sc) != FCPIO_SUCCESS)
		ret = FAILED;

	CMD_SP(sc) = NULL;

	spin_unlock_irqrestore(io_lock, flags);

	fnic_release_ioreq_buf(fnic, io_req, sc);
	mempool_free(io_req, fnic->io_req_pool);

fnic_abort_cmd_end:
	FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
		      "Returning from abort cmd %s\n",
		      (ret == SUCCESS) ?
		      "SUCCESS" : "FAILED");
	return ret;
}
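
/*
 * Post a LUN reset ITMF descriptor to the copy WQ for the device that
 * sc is addressed to. Returns 0 on success, -EAGAIN if no copy WQ
 * descriptor is currently available.
 */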
static inline int fnic_queue_dr_io_req(struct fnic *fnic,
				       struct scsi_cmnd *sc,
				       struct fnic_io_req *io_req)
{
	struct vnic_wq_copy *wq = &fnic->wq_copy[0];
	struct scsi_lun fc_lun;
	int ret = 0;
	unsigned long intr_flags;

	spin_lock_irqsave(&fnic->wq_copy_lock[0], intr_flags);

	if (vnic_wq_copy_desc_avail(wq) <= fnic->wq_copy_desc_low[0])
		free_wq_copy_descs(fnic, wq);

	if (!vnic_wq_copy_desc_avail(wq)) {
		ret = -EAGAIN;
		goto lr_io_req_end;
	}

	/* fill in the lun info */
	int_to_scsilun(sc->device->lun, &fc_lun);

	fnic_queue_wq_copy_desc_itmf(wq, sc->request->tag | FNIC_TAG_DEV_RST,
				     0, FCPIO_ITMF_LUN_RESET, SCSI_NO_TAG,
				     fc_lun.scsi_lun, io_req->port_id,
				     fnic->config.ra_tov, fnic->config.ed_tov);

lr_io_req_end:
	spin_unlock_irqrestore(&fnic->wq_copy_lock[0], intr_flags);

	return ret;
}

/*
 * Clean up any pending aborts on the lun
 * For each outstanding IO on this lun, whose abort is not completed by fw,
 * issue a local abort. Wait for abort to complete. Return 0 if all commands
 * successfully aborted, 1 otherwise
 */
static int fnic_clean_pending_aborts(struct fnic *fnic,
				     struct scsi_cmnd *lr_sc)
{
	int tag;
	struct fnic_io_req *io_req;
	spinlock_t *io_lock;
	unsigned long flags;
	int ret = 0;
	struct scsi_cmnd *sc;
	struct scsi_lun fc_lun;
	struct scsi_device *lun_dev = lr_sc->device;
	DECLARE_COMPLETION_ONSTACK(tm_done);

	for (tag = 0; tag < FNIC_MAX_IO_REQ; tag++) {
		sc = scsi_host_find_tag(fnic->lport->host, tag);
		/*
		 * ignore this lun reset cmd or cmds that do not belong to
		 * this lun
		 */
		if (!sc || sc == lr_sc || sc->device != lun_dev)
			continue;

		io_lock = fnic_io_lock_hash(fnic, sc);
		spin_lock_irqsave(io_lock, flags);

		io_req = (struct fnic_io_req *)CMD_SP(sc);

		if (!io_req || sc->device != lun_dev) {
			spin_unlock_irqrestore(io_lock, flags);
			continue;
		}

		/*
		 * Found IO that is still pending with firmware and
		 * belongs to the LUN that we are resetting
		 */
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "Found IO in %s on lun\n",
			      fnic_ioreq_state_to_str(CMD_STATE(sc)));

		BUG_ON(CMD_STATE(sc) != FNIC_IOREQ_ABTS_PENDING);

		CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;
		io_req->abts_done = &tm_done;
		spin_unlock_irqrestore(io_lock, flags);

		/* Now queue the abort command to firmware */
		int_to_scsilun(sc->device->lun, &fc_lun);

		if (fnic_queue_abort_io_req(fnic, tag,
					    FCPIO_ITMF_ABT_TASK_TERM,
					    fc_lun.scsi_lun, io_req)) {
			spin_lock_irqsave(io_lock, flags);
			io_req = (struct fnic_io_req *)CMD_SP(sc);
			if (io_req)
				io_req->abts_done = NULL;
			spin_unlock_irqrestore(io_lock, flags);
			ret = 1;
			goto clean_pending_aborts_end;
		}

		wait_for_completion_timeout(&tm_done,
					    msecs_to_jiffies
					    (fnic->config.ed_tov));

		/* Recheck cmd state to check if it is now aborted */
		spin_lock_irqsave(io_lock, flags);
		io_req = (struct fnic_io_req *)CMD_SP(sc);
		if (!io_req) {
			spin_unlock_irqrestore(io_lock, flags);
			ret = 1;
			goto clean_pending_aborts_end;
		}
		io_req->abts_done = NULL;

		/* if abort is still pending with fw, fail */
		if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) {
			spin_unlock_irqrestore(io_lock, flags);
			ret = 1;
			goto clean_pending_aborts_end;
		}
		CMD_SP(sc) = NULL;
		spin_unlock_irqrestore(io_lock, flags);

		fnic_release_ioreq_buf(fnic, io_req, sc);
		mempool_free(io_req, fnic->io_req_pool);
	}

clean_pending_aborts_end:
	return ret;
}

/*
 * SCSI Eh thread issues a Lun Reset when one or more commands on a LUN
 * fail to get aborted. It calls driver's eh_device_reset with a SCSI command
 * on the LUN.
 */
int fnic_device_reset(struct scsi_cmnd *sc)
{
	struct fc_lport *lp;
	struct fnic *fnic;
	struct fnic_io_req *io_req;
	struct fc_rport *rport;
	int status;
	int ret = FAILED;
	spinlock_t *io_lock;
	unsigned long flags;
	DECLARE_COMPLETION_ONSTACK(tm_done);

	/* Wait for rport to unblock */
	fc_block_scsi_eh(sc);

	/* Get local-port, check ready and link up */
	lp = shost_priv(sc->device->host);

	fnic = lport_priv(lp);
	rport = starget_to_rport(scsi_target(sc->device));
	FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
		      "Device reset called FCID 0x%x, LUN 0x%x\n",
		      rport->port_id, sc->device->lun);

	if (lp->state != LPORT_ST_READY || !(lp->link_up))
		goto fnic_device_reset_end;

	/* Check if remote port up */
	if (fc_remote_port_chkready(rport))
		goto fnic_device_reset_end;

	io_lock = fnic_io_lock_hash(fnic, sc);
	spin_lock_irqsave(io_lock, flags);
	io_req = (struct fnic_io_req *)CMD_SP(sc);

	/*
	 * If there is an io_req attached to this command, then use it,
	 * else allocate a new one.
	 */
	if (!io_req) {
		io_req = mempool_alloc(fnic->io_req_pool, GFP_ATOMIC);
		if (!io_req) {
			spin_unlock_irqrestore(io_lock, flags);
			goto fnic_device_reset_end;
		}
		memset(io_req, 0, sizeof(*io_req));
		io_req->port_id = rport->port_id;
		CMD_SP(sc) = (char *)io_req;
	}
	io_req->dr_done = &tm_done;
	CMD_STATE(sc) = FNIC_IOREQ_CMD_PENDING;
	CMD_LR_STATUS(sc) = FCPIO_INVALID_CODE;
	spin_unlock_irqrestore(io_lock, flags);

	FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, "TAG %d\n",
		      sc->request->tag);

	/*
	 * issue the device reset, if enqueue failed, clean up the ioreq
	 * and break assoc with scsi cmd
	 */
	if (fnic_queue_dr_io_req(fnic, sc, io_req)) {
		spin_lock_irqsave(io_lock, flags);
		io_req = (struct fnic_io_req *)CMD_SP(sc);
		if (io_req)
			io_req->dr_done = NULL;
		goto fnic_device_reset_clean;
	}

	/*
	 * Wait on the local completion for LUN reset. The io_req may be
	 * freed while we wait since we hold no lock.
	 */
	wait_for_completion_timeout(&tm_done,
				    msecs_to_jiffies(FNIC_LUN_RESET_TIMEOUT));

	spin_lock_irqsave(io_lock, flags);
	io_req = (struct fnic_io_req *)CMD_SP(sc);
	if (!io_req) {
		spin_unlock_irqrestore(io_lock, flags);
		goto fnic_device_reset_end;
	}
	io_req->dr_done = NULL;

	status = CMD_LR_STATUS(sc);
	spin_unlock_irqrestore(io_lock, flags);

	/*
	 * If lun reset not completed, bail out with failed. io_req
	 * gets cleaned up during higher levels of EH
	 */
	if (status == FCPIO_INVALID_CODE) {
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "Device reset timed out\n");
		goto fnic_device_reset_end;
	}

	/* Completed, but not successful, clean up the io_req, return fail */
	if (status != FCPIO_SUCCESS) {
		spin_lock_irqsave(io_lock, flags);
		FNIC_SCSI_DBG(KERN_DEBUG,
			      fnic->lport->host,
			      "Device reset completed - failed\n");
		io_req = (struct fnic_io_req *)CMD_SP(sc);
		goto fnic_device_reset_clean;
	}

	/*
	 * Clean up any aborts on this lun that have still not
	 * completed. If any of these fail, then LUN reset fails.
	 * clean_pending_aborts cleans all cmds on this lun except
	 * the lun reset cmd. If all cmds get cleaned, the lun reset
	 * succeeds
	 */
	if (fnic_clean_pending_aborts(fnic, sc)) {
		spin_lock_irqsave(io_lock, flags);
		io_req = (struct fnic_io_req *)CMD_SP(sc);
		FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
			      "Device reset failed"
			      " since could not abort all IOs\n");
		goto fnic_device_reset_clean;
	}

	/* Clean lun reset command */
	spin_lock_irqsave(io_lock, flags);
	io_req = (struct fnic_io_req *)CMD_SP(sc);
	if (io_req)
		/* Completed, and successful */
		ret = SUCCESS;

fnic_device_reset_clean:
	if (io_req)
		CMD_SP(sc) = NULL;

	spin_unlock_irqrestore(io_lock, flags);

	if (io_req) {
		fnic_release_ioreq_buf(fnic, io_req, sc);
		mempool_free(io_req, fnic->io_req_pool);
	}

fnic_device_reset_end:
	FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
		      "Returning from device reset %s\n",
		      (ret == SUCCESS) ?
		      "SUCCESS" : "FAILED");
	return ret;
}

/* Clean up all IOs, clean up libFC local port */
int fnic_reset(struct Scsi_Host *shost)
{
	struct fc_lport *lp;
	struct fnic *fnic;
	int ret = SUCCESS;

	lp = shost_priv(shost);
	fnic = lport_priv(lp);

	FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
		      "fnic_reset called\n");

	/*
	 * Reset local port, this will clean up libFC exchanges,
	 * reset remote port sessions, and if link is up, begin flogi
	 */
	if (lp->tt.lport_reset(lp))
		ret = FAILED;

	FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
		      "Returning from fnic reset %s\n",
		      (ret == SUCCESS) ?
		      "SUCCESS" : "FAILED");

	return ret;
}

/*
 * SCSI Error handling calls driver's eh_host_reset if all prior
 * error handling levels return FAILED. If host reset completes
 * successfully, and if link is up, then Fabric login begins.
 *
 * Host Reset is the highest level of error recovery. If this fails, then
 * host is offlined by SCSI.
 */
int fnic_host_reset(struct scsi_cmnd *sc)
{
	int ret;
	unsigned long wait_host_tmo;
	struct Scsi_Host *shost = sc->device->host;
	struct fc_lport *lp = shost_priv(shost);

	/*
	 * If fnic_reset is successful, wait for fabric login to complete
	 * scsi-ml tries to send a TUR to every device if host reset is
	 * successful, so before returning to scsi, fabric should be up
	 */
	ret = fnic_reset(shost);
	if (ret == SUCCESS) {
		wait_host_tmo = jiffies + FNIC_HOST_RESET_SETTLE_TIME * HZ;
		ret = FAILED;
		while (time_before(jiffies, wait_host_tmo)) {
			if ((lp->state == LPORT_ST_READY) &&
			    (lp->link_up)) {
				ret = SUCCESS;
				break;
			}
			ssleep(1);
		}
	}

	return ret;
}

/*
 * This function is called from libFC when the host is being removed
 */
void fnic_scsi_abort_io(struct fc_lport *lp)
{
	int err = 0;
	unsigned long flags;
	enum fnic_state old_state;
	struct fnic *fnic = lport_priv(lp);
	DECLARE_COMPLETION_ONSTACK(remove_wait);

	/* Issue firmware reset for fnic, wait for reset to complete */
	spin_lock_irqsave(&fnic->fnic_lock, flags);
	fnic->remove_wait = &remove_wait;
	old_state = fnic->state;
	fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
	fnic_update_mac_locked(fnic, fnic->ctlr.ctl_src_addr);
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	err = fnic_fw_reset_handler(fnic);
	if (err) {
		spin_lock_irqsave(&fnic->fnic_lock, flags);
		if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)
			fnic->state = old_state;
		fnic->remove_wait = NULL;
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
		return;
	}

	/* Wait for firmware reset to complete */
	wait_for_completion_timeout(&remove_wait,
				    msecs_to_jiffies(FNIC_RMDEVICE_TIMEOUT));

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	fnic->remove_wait = NULL;
	FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
		      "fnic_scsi_abort_io %s\n",
		      (fnic->state == FNIC_IN_ETH_MODE) ?
		      "SUCCESS" : "FAILED");
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);
}

/*
 * This function is called from libFC to clean up driver IO state on link down
 */
void fnic_scsi_cleanup(struct fc_lport *lp)
{
	unsigned long flags;
	enum fnic_state old_state;
	struct fnic *fnic = lport_priv(lp);

	/* issue fw reset */
	spin_lock_irqsave(&fnic->fnic_lock, flags);
	old_state = fnic->state;
	fnic->state = FNIC_IN_FC_TRANS_ETH_MODE;
	fnic_update_mac_locked(fnic, fnic->ctlr.ctl_src_addr);
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	if (fnic_fw_reset_handler(fnic)) {
		spin_lock_irqsave(&fnic->fnic_lock, flags);
		if (fnic->state == FNIC_IN_FC_TRANS_ETH_MODE)
			fnic->state = old_state;
		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
	}
}
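
/*
 * Intentionally empty libFC cleanup hook: fnic drives its IO cleanup
 * from fnic_exch_mgr_reset() below, so there is nothing to do here.
 */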
void fnic_empty_scsi_cleanup(struct fc_lport *lp)
{
}
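
/*
 * libFC exchange manager reset hook. For a full reset (sid == 0 and
 * did == 0), clean up driver IO state first (normal cleanup, or the
 * abort path if the host is being removed); for a did-only reset,
 * terminate IOs to that remote port. Then let libFC reset its exchanges.
 */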
void fnic_exch_mgr_reset(struct fc_lport *lp, u32 sid, u32 did)
{
	struct fnic *fnic = lport_priv(lp);

	/* Non-zero sid, nothing to do */
	if (sid)
		goto call_fc_exch_mgr_reset;

	if (did) {
		fnic_rport_exch_reset(fnic, did);
		goto call_fc_exch_mgr_reset;
	}

	/*
	 * sid = 0, did = 0
	 * link down or device being removed
	 */
	if (!fnic->in_remove)
		fnic_scsi_cleanup(lp);
	else
		fnic_scsi_abort_io(lp);

	/* call libFC exch mgr reset to reset its exchanges */
call_fc_exch_mgr_reset:
	fc_exch_mgr_reset(lp, sid, did);
}