/* lpfc_bsg.c */
/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2009 Emulex.  All rights reserved.                *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/
  20. #include <linux/interrupt.h>
  21. #include <linux/mempool.h>
  22. #include <linux/pci.h>
  23. #include <scsi/scsi.h>
  24. #include <scsi/scsi_host.h>
  25. #include <scsi/scsi_transport_fc.h>
  26. #include <scsi/scsi_bsg_fc.h>
  27. #include <scsi/fc/fc_fs.h>
  28. #include "lpfc_hw4.h"
  29. #include "lpfc_hw.h"
  30. #include "lpfc_sli.h"
  31. #include "lpfc_sli4.h"
  32. #include "lpfc_nl.h"
  33. #include "lpfc_disc.h"
  34. #include "lpfc_scsi.h"
  35. #include "lpfc.h"
  36. #include "lpfc_logmsg.h"
  37. #include "lpfc_crtn.h"
  38. #include "lpfc_vport.h"
  39. #include "lpfc_version.h"
/**
 * lpfc_bsg_rport_ct - send a CT command from a bsg request
 * @job: fc_bsg_job to handle
 *
 * Builds a GEN_REQUEST64_CR iocb whose buffer pointer list (BPL) maps the
 * job's request payload (the CT command) followed by its reply payload
 * (the CT response), issues it on the ELS ring, and waits for completion.
 * The job is always completed here via job->job_done(); the outcome is
 * reported to userspace in job->reply->result.  Always returns 0.
 */
static int
lpfc_bsg_rport_ct(struct fc_bsg_job *job)
{
	struct Scsi_Host *shost = job->shost;
	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_rport_data *rdata = job->rport->dd_data;
	struct lpfc_nodelist *ndlp = rdata->pnode;
	struct ulp_bde64 *bpl = NULL;
	uint32_t timeout;
	struct lpfc_iocbq *cmdiocbq = NULL;
	struct lpfc_iocbq *rspiocbq = NULL;
	IOCB_t *cmd;
	IOCB_t *rsp;
	struct lpfc_dmabuf *bmp = NULL;
	int request_nseg;
	int reply_nseg;
	struct scatterlist *sgel = NULL;
	int numbde;
	dma_addr_t busaddr;
	int rc = 0;

	/* in case no data is transferred */
	job->reply->reply_payload_rcv_len = 0;

	/* hold a node reference for the duration of the command */
	if (!lpfc_nlp_get(ndlp)) {
		job->reply->result = -ENODEV;
		return 0;
	}

	/* node already busy with an ELS exchange - refuse the request */
	if (ndlp->nlp_flag & NLP_ELS_SND_MASK) {
		rc = -ENODEV;
		goto free_ndlp_exit;
	}

	spin_lock_irq(shost->host_lock);
	cmdiocbq = lpfc_sli_get_iocbq(phba);
	if (!cmdiocbq) {
		rc = -ENOMEM;
		spin_unlock_irq(shost->host_lock);
		goto free_ndlp_exit;
	}
	cmd = &cmdiocbq->iocb;

	rspiocbq = lpfc_sli_get_iocbq(phba);
	if (!rspiocbq) {
		rc = -ENOMEM;
		goto free_cmdiocbq;
	}
	spin_unlock_irq(shost->host_lock);

	rsp = &rspiocbq->iocb;

	/* host_lock is dropped across the GFP_KERNEL allocation */
	bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!bmp) {
		rc = -ENOMEM;
		/* the error labels below expect host_lock to be held */
		spin_lock_irq(shost->host_lock);
		goto free_rspiocbq;
	}

	spin_lock_irq(shost->host_lock);
	bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
	if (!bmp->virt) {
		rc = -ENOMEM;
		goto free_bmp;
	}
	spin_unlock_irq(shost->host_lock);

	INIT_LIST_HEAD(&bmp->list);
	bpl = (struct ulp_bde64 *) bmp->virt;

	/* BDEs for the outgoing (command) payload come first ... */
	request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
				  job->request_payload.sg_cnt, DMA_TO_DEVICE);
	for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
		busaddr = sg_dma_address(sgel);
		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		bpl->tus.f.bdeSize = sg_dma_len(sgel);
		bpl->tus.w = cpu_to_le32(bpl->tus.w);
		bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
		bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
		bpl++;
	}

	/* ... followed by BDEs for the incoming (response) payload */
	reply_nseg = pci_map_sg(phba->pcidev, job->reply_payload.sg_list,
				job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	for_each_sg(job->reply_payload.sg_list, sgel, reply_nseg, numbde) {
		busaddr = sg_dma_address(sgel);
		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
		bpl->tus.f.bdeSize = sg_dma_len(sgel);
		bpl->tus.w = cpu_to_le32(bpl->tus.w);
		bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
		bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
		bpl++;
	}

	/* fill in the GEN_REQUEST64_CR iocb; its BDL points at the BPL */
	cmd->un.genreq64.bdl.ulpIoTag32 = 0;
	cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
	cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
	cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
	cmd->un.genreq64.bdl.bdeSize =
		(request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
	cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
	cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
	cmd->un.genreq64.w5.hcsw.Dfctl = 0;
	cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
	cmd->un.genreq64.w5.hcsw.Type = FC_TYPE_CT;
	cmd->ulpBdeCount = 1;
	cmd->ulpLe = 1;
	cmd->ulpClass = CLASS3;
	cmd->ulpContext = ndlp->nlp_rpi;
	cmd->ulpOwner = OWN_CHIP;
	cmdiocbq->vport = phba->pport;
	cmdiocbq->context1 = NULL;
	cmdiocbq->context2 = NULL;
	cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;

	timeout = phba->fc_ratov * 2;
	job->dd_data = cmdiocbq;

	rc = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq, rspiocbq,
				      timeout + LPFC_DRVR_TIMEOUT);

	if (rc != IOCB_TIMEDOUT) {
		pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
			     job->request_payload.sg_cnt, DMA_TO_DEVICE);
		pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
			     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	}

	if (rc == IOCB_TIMEDOUT) {
		/* On timeout the adapter still owns cmdiocbq and the BPL it
		 * references, so they are deliberately not released here.
		 * NOTE(review): bmp and its mbuf are not freed on this path
		 * either - looks like a leak, but freeing memory the hardware
		 * may still DMA into would be worse; confirm against a later
		 * driver version before changing.
		 */
		lpfc_sli_release_iocbq(phba, rspiocbq);
		rc = -EACCES;
		goto free_ndlp_exit;
	}

	if (rc != IOCB_SUCCESS) {
		rc = -EACCES;
		goto free_outdmp;
	}

	if (rsp->ulpStatus) {
		if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
			/* map the local-reject sub-status (word 4) to errno */
			switch (rsp->un.ulpWord[4] & 0xff) {
			case IOERR_SEQUENCE_TIMEOUT:
				rc = -ETIMEDOUT;
				break;
			case IOERR_INVALID_RPI:
				rc = -EFAULT;
				break;
			default:
				rc = -EACCES;
				break;
			}
			goto free_outdmp;
		}
	} else
		job->reply->reply_payload_rcv_len =
			rsp->un.genreq64.bdl.bdeSize;

free_outdmp:
	spin_lock_irq(shost->host_lock);
	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
free_bmp:
	kfree(bmp);
free_rspiocbq:
	lpfc_sli_release_iocbq(phba, rspiocbq);
free_cmdiocbq:
	lpfc_sli_release_iocbq(phba, cmdiocbq);
	spin_unlock_irq(shost->host_lock);
free_ndlp_exit:
	lpfc_nlp_put(ndlp);

	/* make error code available to userspace */
	job->reply->result = rc;
	/* complete the job back to userspace */
	job->job_done(job);
	return 0;
}
  202. /**
  203. * lpfc_bsg_rport_els - send an ELS command from a bsg request
  204. * @job: fc_bsg_job to handle
  205. */
  206. static int
  207. lpfc_bsg_rport_els(struct fc_bsg_job *job)
  208. {
  209. struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
  210. struct lpfc_hba *phba = vport->phba;
  211. struct lpfc_rport_data *rdata = job->rport->dd_data;
  212. struct lpfc_nodelist *ndlp = rdata->pnode;
  213. uint32_t elscmd;
  214. uint32_t cmdsize;
  215. uint32_t rspsize;
  216. struct lpfc_iocbq *rspiocbq;
  217. struct lpfc_iocbq *cmdiocbq;
  218. IOCB_t *rsp;
  219. uint16_t rpi = 0;
  220. struct lpfc_dmabuf *pcmd;
  221. struct lpfc_dmabuf *prsp;
  222. struct lpfc_dmabuf *pbuflist = NULL;
  223. struct ulp_bde64 *bpl;
  224. int iocb_status;
  225. int request_nseg;
  226. int reply_nseg;
  227. struct scatterlist *sgel = NULL;
  228. int numbde;
  229. dma_addr_t busaddr;
  230. int rc = 0;
  231. /* in case no data is transferred */
  232. job->reply->reply_payload_rcv_len = 0;
  233. if (!lpfc_nlp_get(ndlp)) {
  234. rc = -ENODEV;
  235. goto out;
  236. }
  237. elscmd = job->request->rqst_data.r_els.els_code;
  238. cmdsize = job->request_payload.payload_len;
  239. rspsize = job->reply_payload.payload_len;
  240. rspiocbq = lpfc_sli_get_iocbq(phba);
  241. if (!rspiocbq) {
  242. lpfc_nlp_put(ndlp);
  243. rc = -ENOMEM;
  244. goto out;
  245. }
  246. rsp = &rspiocbq->iocb;
  247. rpi = ndlp->nlp_rpi;
  248. cmdiocbq = lpfc_prep_els_iocb(phba->pport, 1, cmdsize, 0, ndlp,
  249. ndlp->nlp_DID, elscmd);
  250. if (!cmdiocbq) {
  251. lpfc_sli_release_iocbq(phba, rspiocbq);
  252. return -EIO;
  253. }
  254. job->dd_data = cmdiocbq;
  255. pcmd = (struct lpfc_dmabuf *) cmdiocbq->context2;
  256. prsp = (struct lpfc_dmabuf *) pcmd->list.next;
  257. lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
  258. kfree(pcmd);
  259. lpfc_mbuf_free(phba, prsp->virt, prsp->phys);
  260. kfree(prsp);
  261. cmdiocbq->context2 = NULL;
  262. pbuflist = (struct lpfc_dmabuf *) cmdiocbq->context3;
  263. bpl = (struct ulp_bde64 *) pbuflist->virt;
  264. request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
  265. job->request_payload.sg_cnt, DMA_TO_DEVICE);
  266. for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
  267. busaddr = sg_dma_address(sgel);
  268. bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
  269. bpl->tus.f.bdeSize = sg_dma_len(sgel);
  270. bpl->tus.w = cpu_to_le32(bpl->tus.w);
  271. bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
  272. bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
  273. bpl++;
  274. }
  275. reply_nseg = pci_map_sg(phba->pcidev, job->reply_payload.sg_list,
  276. job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
  277. for_each_sg(job->reply_payload.sg_list, sgel, reply_nseg, numbde) {
  278. busaddr = sg_dma_address(sgel);
  279. bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
  280. bpl->tus.f.bdeSize = sg_dma_len(sgel);
  281. bpl->tus.w = cpu_to_le32(bpl->tus.w);
  282. bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
  283. bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
  284. bpl++;
  285. }
  286. cmdiocbq->iocb.un.elsreq64.bdl.bdeSize =
  287. (request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
  288. cmdiocbq->iocb.ulpContext = rpi;
  289. cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
  290. cmdiocbq->context1 = NULL;
  291. cmdiocbq->context2 = NULL;
  292. iocb_status = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
  293. rspiocbq, (phba->fc_ratov * 2)
  294. + LPFC_DRVR_TIMEOUT);
  295. /* release the new ndlp once the iocb completes */
  296. lpfc_nlp_put(ndlp);
  297. if (iocb_status != IOCB_TIMEDOUT) {
  298. pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
  299. job->request_payload.sg_cnt, DMA_TO_DEVICE);
  300. pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
  301. job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
  302. }
  303. if (iocb_status == IOCB_SUCCESS) {
  304. if (rsp->ulpStatus == IOSTAT_SUCCESS) {
  305. job->reply->reply_payload_rcv_len =
  306. rsp->un.elsreq64.bdl.bdeSize;
  307. rc = 0;
  308. } else if (rsp->ulpStatus == IOSTAT_LS_RJT) {
  309. struct fc_bsg_ctels_reply *els_reply;
  310. /* LS_RJT data returned in word 4 */
  311. uint8_t *rjt_data = (uint8_t *)&rsp->un.ulpWord[4];
  312. els_reply = &job->reply->reply_data.ctels_reply;
  313. job->reply->result = 0;
  314. els_reply->status = FC_CTELS_STATUS_REJECT;
  315. els_reply->rjt_data.action = rjt_data[0];
  316. els_reply->rjt_data.reason_code = rjt_data[1];
  317. els_reply->rjt_data.reason_explanation = rjt_data[2];
  318. els_reply->rjt_data.vendor_unique = rjt_data[3];
  319. } else
  320. rc = -EIO;
  321. } else
  322. rc = -EIO;
  323. if (iocb_status != IOCB_TIMEDOUT)
  324. lpfc_els_free_iocb(phba, cmdiocbq);
  325. lpfc_sli_release_iocbq(phba, rspiocbq);
  326. out:
  327. /* make error code available to userspace */
  328. job->reply->result = rc;
  329. /* complete the job back to userspace */
  330. job->job_done(job);
  331. return 0;
  332. }
/*
 * A registered waiter for unsolicited CT events.  Entries live on
 * phba->ct_ev_waiters and are reference counted via lpfc_ct_event_ref()
 * and lpfc_ct_event_unref(); the waiter is freed when the count drops
 * below zero.
 */
struct lpfc_ct_event {
	struct list_head node;		/* link on phba->ct_ev_waiters */
	int ref;			/* reference count */
	wait_queue_head_t wq;		/* woken when an event is queued */

	/* Event type and waiter identifiers */
	uint32_t type_mask;
	uint32_t req_id;		/* CT FsType this waiter matches */
	uint32_t reg_id;		/* caller-chosen registration id */

	/* next two flags are here for the auto-delete logic */
	unsigned long wait_time_stamp;
	int waiting;

	/* seen and not seen events */
	struct list_head events_to_get;	/* claimed, ready for GET_EVENT */
	struct list_head events_to_see;	/* queued, not yet claimed */
};
/* One received CT event, queued on a waiter's event lists. */
struct event_data {
	struct list_head node;	/* link on events_to_see / events_to_get */
	uint32_t type;		/* set to FC_REG_CT_EVENT on receipt */
	uint32_t immed_dat;	/* ulpContext (SLI3) or ct_ctx index (SLI4) */
	void *data;		/* kzalloc'd copy of the received payload */
	uint32_t len;		/* byte length of @data */
};
  355. static struct lpfc_ct_event *
  356. lpfc_ct_event_new(int ev_reg_id, uint32_t ev_req_id)
  357. {
  358. struct lpfc_ct_event *evt = kzalloc(sizeof(*evt), GFP_KERNEL);
  359. if (!evt)
  360. return NULL;
  361. INIT_LIST_HEAD(&evt->events_to_get);
  362. INIT_LIST_HEAD(&evt->events_to_see);
  363. evt->req_id = ev_req_id;
  364. evt->reg_id = ev_reg_id;
  365. evt->wait_time_stamp = jiffies;
  366. init_waitqueue_head(&evt->wq);
  367. return evt;
  368. }
  369. static void
  370. lpfc_ct_event_free(struct lpfc_ct_event *evt)
  371. {
  372. struct event_data *ed;
  373. list_del(&evt->node);
  374. while (!list_empty(&evt->events_to_get)) {
  375. ed = list_entry(evt->events_to_get.next, typeof(*ed), node);
  376. list_del(&ed->node);
  377. kfree(ed->data);
  378. kfree(ed);
  379. }
  380. while (!list_empty(&evt->events_to_see)) {
  381. ed = list_entry(evt->events_to_see.next, typeof(*ed), node);
  382. list_del(&ed->node);
  383. kfree(ed->data);
  384. kfree(ed);
  385. }
  386. kfree(evt);
  387. }
/* Take an additional reference on @evt.  All callers in this file hold
 * phba->ct_event_mutex around this.
 */
static inline void
lpfc_ct_event_ref(struct lpfc_ct_event *evt)
{
	evt->ref++;
}
/* Drop a reference on @evt and free it once the count goes below zero.
 * Note the convention: a waiter resting on phba->ct_ev_waiters holds
 * ref == 0, so freeing happens on the unref that takes it to -1.
 */
static inline void
lpfc_ct_event_unref(struct lpfc_ct_event *evt)
{
	if (--evt->ref < 0)
		lpfc_ct_event_free(evt);
}
/* CT FsType used by the driver's diagnostic loopback path */
#define SLI_CT_ELX_LOOPBACK 0x10

/* Emulex loopback CT command codes (CommandResponse.bits.CmdRsp values) */
enum ELX_LOOPBACK_CMD {
	ELX_LOOPBACK_XRI_SETUP,
	ELX_LOOPBACK_DATA,
};
/**
 * lpfc_bsg_ct_unsol_event - process an unsolicited CT command
 * @phba: pointer to the HBA structure
 * @pring: ring the unsolicited sequence arrived on
 * @piocbq: head iocb of the (possibly multi-iocb) unsolicited sequence
 *
 * This function is called when an unsolicited CT command is received.  It
 * forwards the event to any processes registered to receive CT events:
 * the received payload is copied into a freshly allocated event_data,
 * queued on every matching waiter's events_to_see list, and the waiters
 * are woken.
 */
void
lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			struct lpfc_iocbq *piocbq)
{
	uint32_t evt_req_id = 0;
	uint32_t cmd;
	uint32_t len;
	struct lpfc_dmabuf *dmabuf = NULL;
	struct lpfc_ct_event *evt;
	struct event_data *evt_dat = NULL;
	struct lpfc_iocbq *iocbq;
	size_t offset = 0;
	struct list_head head;
	struct ulp_bde64 *bde;
	dma_addr_t dma_addr;
	int i;
	struct lpfc_dmabuf *bdeBuf1 = piocbq->context2;
	struct lpfc_dmabuf *bdeBuf2 = piocbq->context3;
	struct lpfc_hbq_entry *hbqe;
	struct lpfc_sli_ct_request *ct_req;

	/* walk piocbq and the iocbs chained to it via a local list head */
	INIT_LIST_HEAD(&head);
	list_add_tail(&head, &piocbq->list);

	if (piocbq->iocb.ulpBdeCount == 0 ||
	    piocbq->iocb.un.cont64[0].tus.f.bdeSize == 0)
		goto error_ct_unsol_exit;

	/* locate the buffer holding the CT request header */
	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
		dmabuf = bdeBuf1;
	else {
		dma_addr = getPaddr(piocbq->iocb.un.cont64[0].addrHigh,
				    piocbq->iocb.un.cont64[0].addrLow);
		dmabuf = lpfc_sli_ringpostbuf_get(phba, pring, dma_addr);
	}

	ct_req = (struct lpfc_sli_ct_request *)dmabuf->virt;
	evt_req_id = ct_req->FsType;
	cmd = ct_req->CommandResponse.bits.CmdRsp;
	len = ct_req->CommandResponse.bits.Size;
	if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
		lpfc_sli_ringpostbuf_put(phba, pring, dmabuf);

	mutex_lock(&phba->ct_event_mutex);
	list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
		if (evt->req_id != evt_req_id)
			continue;

		/* pin the waiter while the mutex is dropped below */
		lpfc_ct_event_ref(evt);

		evt_dat = kzalloc(sizeof(*evt_dat), GFP_KERNEL);
		if (!evt_dat) {
			lpfc_ct_event_unref(evt);
			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
					"2614 Memory allocation failed for "
					"CT event\n");
			break;
		}

		mutex_unlock(&phba->ct_event_mutex);

		if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
			/* take accumulated byte count from the last iocbq */
			iocbq = list_entry(head.prev, typeof(*iocbq), list);
			evt_dat->len = iocbq->iocb.unsli3.rcvsli3.acc_len;
		} else {
			/* sum the BDE sizes of every chained iocb */
			list_for_each_entry(iocbq, &head, list) {
				for (i = 0; i < iocbq->iocb.ulpBdeCount; i++)
					evt_dat->len +=
					iocbq->iocb.un.cont64[i].tus.f.bdeSize;
			}
		}

		evt_dat->data = kzalloc(evt_dat->len, GFP_KERNEL);
		if (!evt_dat->data) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
					"2615 Memory allocation failed for "
					"CT event data, size %d\n",
					evt_dat->len);
			kfree(evt_dat);
			mutex_lock(&phba->ct_event_mutex);
			lpfc_ct_event_unref(evt);
			mutex_unlock(&phba->ct_event_mutex);
			goto error_ct_unsol_exit;
		}

		/* copy every BDE of every chained iocb into evt_dat->data */
		list_for_each_entry(iocbq, &head, list) {
			if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
				bdeBuf1 = iocbq->context2;
				bdeBuf2 = iocbq->context3;
			}
			for (i = 0; i < iocbq->iocb.ulpBdeCount; i++) {
				int size = 0;

				if (phba->sli3_options &
				    LPFC_SLI3_HBQ_ENABLED) {
					/* HBQ mode: at most two buffers per
					 * iocb, sizes come from the embedded
					 * HBQ entries
					 */
					if (i == 0) {
						hbqe = (struct lpfc_hbq_entry *)
						  &iocbq->iocb.un.ulpWord[0];
						size = hbqe->bde.tus.f.bdeSize;
						dmabuf = bdeBuf1;
					} else if (i == 1) {
						hbqe = (struct lpfc_hbq_entry *)
							&iocbq->iocb.unsli3.
							sli3Words[4];
						size = hbqe->bde.tus.f.bdeSize;
						dmabuf = bdeBuf2;
					}
					/* clamp to the total computed above */
					if ((offset + size) > evt_dat->len)
						size = evt_dat->len - offset;
				} else {
					size = iocbq->iocb.un.cont64[i].
						tus.f.bdeSize;
					bde = &iocbq->iocb.un.cont64[i];
					dma_addr = getPaddr(bde->addrHigh,
							    bde->addrLow);
					dmabuf = lpfc_sli_ringpostbuf_get(phba,
							pring, dma_addr);
				}
				if (!dmabuf) {
					lpfc_printf_log(phba, KERN_ERR,
						LOG_LIBDFC, "2616 No dmabuf "
						"found for iocbq 0x%p\n",
						iocbq);
					kfree(evt_dat->data);
					kfree(evt_dat);
					mutex_lock(&phba->ct_event_mutex);
					lpfc_ct_event_unref(evt);
					mutex_unlock(&phba->ct_event_mutex);
					goto error_ct_unsol_exit;
				}
				memcpy((char *)(evt_dat->data) + offset,
				       dmabuf->virt, size);
				offset += size;
				/* recycle or free the consumed buffer */
				if (evt_req_id != SLI_CT_ELX_LOOPBACK &&
				    !(phba->sli3_options &
				      LPFC_SLI3_HBQ_ENABLED)) {
					lpfc_sli_ringpostbuf_put(phba, pring,
								 dmabuf);
				} else {
					switch (cmd) {
					case ELX_LOOPBACK_XRI_SETUP:
						if (!(phba->sli3_options &
						      LPFC_SLI3_HBQ_ENABLED))
							lpfc_post_buffer(phba,
									 pring,
									 1);
						else
							lpfc_in_buf_free(phba,
									dmabuf);
						break;
					default:
						if (!(phba->sli3_options &
						      LPFC_SLI3_HBQ_ENABLED))
							lpfc_post_buffer(phba,
									 pring,
									 1);
						break;
					}
				}
			}
		}

		mutex_lock(&phba->ct_event_mutex);
		if (phba->sli_rev == LPFC_SLI_REV4) {
			/* SLI4: stash oxid/SID in the 64-entry ct_ctx ring
			 * and hand userspace the ring index
			 */
			evt_dat->immed_dat = phba->ctx_idx;
			phba->ctx_idx = (phba->ctx_idx + 1) % 64;
			phba->ct_ctx[evt_dat->immed_dat].oxid =
						piocbq->iocb.ulpContext;
			phba->ct_ctx[evt_dat->immed_dat].SID =
				piocbq->iocb.un.rcvels.remoteID;
		} else
			evt_dat->immed_dat = piocbq->iocb.ulpContext;

		evt_dat->type = FC_REG_CT_EVENT;
		list_add(&evt_dat->node, &evt->events_to_see);
		wake_up_interruptible(&evt->wq);
		lpfc_ct_event_unref(evt);
		if (evt_req_id == SLI_CT_ELX_LOOPBACK)
			break;
	}
	mutex_unlock(&phba->ct_event_mutex);

error_ct_unsol_exit:
	/* remove the local head from piocbq's chain */
	if (!list_empty(&head))
		list_del(&head);

	return;
}
/**
 * lpfc_bsg_set_event - process a SET_EVENT bsg vendor command
 * @job: SET_EVENT fc_bsg_job
 *
 * Registers (or re-arms) a CT event waiter for the caller's reg_id and
 * blocks until lpfc_bsg_ct_unsol_event() queues a matching event or the
 * wait is interrupted by a signal.  On success the newest queued event
 * is moved to the waiter's events_to_get list for a later GET_EVENT.
 */
static int
lpfc_bsg_set_event(struct fc_bsg_job *job)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct set_ct_event *event_req;
	struct lpfc_ct_event *evt;
	int rc = 0;

	if (job->request_len <
	    sizeof(struct fc_bsg_request) + sizeof(struct set_ct_event)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2612 Received SET_CT_EVENT below minimum "
				"size\n");
		return -EINVAL;
	}

	event_req = (struct set_ct_event *)
		job->request->rqst_data.h_vendor.vendor_cmd;

	/* look for an existing waiter with this reg_id */
	mutex_lock(&phba->ct_event_mutex);
	list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
		if (evt->reg_id == event_req->ev_reg_id) {
			lpfc_ct_event_ref(evt);
			evt->wait_time_stamp = jiffies;
			break;
		}
	}
	mutex_unlock(&phba->ct_event_mutex);

	/* loop ran to completion (evt points at the list head sentinel):
	 * no waiter for this reg_id exists yet
	 */
	if (&evt->node == &phba->ct_ev_waiters) {
		/* no event waiting struct yet - first call */
		evt = lpfc_ct_event_new(event_req->ev_reg_id,
					event_req->ev_req_id);
		if (!evt) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
					"2617 Failed allocation of event "
					"waiter\n");
			return -ENOMEM;
		}

		mutex_lock(&phba->ct_event_mutex);
		list_add(&evt->node, &phba->ct_ev_waiters);
		lpfc_ct_event_ref(evt);
		mutex_unlock(&phba->ct_event_mutex);
	}

	evt->waiting = 1;
	if (wait_event_interruptible(evt->wq,
				     !list_empty(&evt->events_to_see))) {
		/* interrupted by a signal: drop both the wait reference and
		 * the list reference so the waiter is torn down
		 */
		mutex_lock(&phba->ct_event_mutex);
		lpfc_ct_event_unref(evt); /* release ref */
		lpfc_ct_event_unref(evt); /* delete */
		mutex_unlock(&phba->ct_event_mutex);
		rc = -EINTR;
		goto set_event_out;
	}

	evt->wait_time_stamp = jiffies;
	evt->waiting = 0;

	/* hand the newest seen event over to the GET_EVENT side */
	mutex_lock(&phba->ct_event_mutex);
	list_move(evt->events_to_see.prev, &evt->events_to_get);
	lpfc_ct_event_unref(evt); /* release ref */
	mutex_unlock(&phba->ct_event_mutex);

set_event_out:
	/* set_event carries no reply payload */
	job->reply->reply_payload_rcv_len = 0;
	/* make error code available to userspace */
	job->reply->result = rc;
	/* complete the job back to userspace */
	job->job_done(job);
	return 0;
}
  656. /**
  657. * lpfc_bsg_get_event - process a GET_EVENT bsg vendor command
  658. * @job: GET_EVENT fc_bsg_job
  659. */
  660. static int
  661. lpfc_bsg_get_event(struct fc_bsg_job *job)
  662. {
  663. struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
  664. struct lpfc_hba *phba = vport->phba;
  665. struct get_ct_event *event_req;
  666. struct get_ct_event_reply *event_reply;
  667. struct lpfc_ct_event *evt;
  668. struct event_data *evt_dat = NULL;
  669. int rc = 0;
  670. if (job->request_len <
  671. sizeof(struct fc_bsg_request) + sizeof(struct get_ct_event)) {
  672. lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
  673. "2613 Received GET_CT_EVENT request below "
  674. "minimum size\n");
  675. return -EINVAL;
  676. }
  677. event_req = (struct get_ct_event *)
  678. job->request->rqst_data.h_vendor.vendor_cmd;
  679. event_reply = (struct get_ct_event_reply *)
  680. job->reply->reply_data.vendor_reply.vendor_rsp;
  681. mutex_lock(&phba->ct_event_mutex);
  682. list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
  683. if (evt->reg_id == event_req->ev_reg_id) {
  684. if (list_empty(&evt->events_to_get))
  685. break;
  686. lpfc_ct_event_ref(evt);
  687. evt->wait_time_stamp = jiffies;
  688. evt_dat = list_entry(evt->events_to_get.prev,
  689. struct event_data, node);
  690. list_del(&evt_dat->node);
  691. break;
  692. }
  693. }
  694. mutex_unlock(&phba->ct_event_mutex);
  695. if (!evt_dat) {
  696. job->reply->reply_payload_rcv_len = 0;
  697. rc = -ENOENT;
  698. goto error_get_event_exit;
  699. }
  700. if (evt_dat->len > job->reply_payload.payload_len) {
  701. evt_dat->len = job->reply_payload.payload_len;
  702. lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
  703. "2618 Truncated event data at %d "
  704. "bytes\n",
  705. job->reply_payload.payload_len);
  706. }
  707. event_reply->immed_data = evt_dat->immed_dat;
  708. if (evt_dat->len > 0)
  709. job->reply->reply_payload_rcv_len =
  710. sg_copy_from_buffer(job->reply_payload.sg_list,
  711. job->reply_payload.sg_cnt,
  712. evt_dat->data, evt_dat->len);
  713. else
  714. job->reply->reply_payload_rcv_len = 0;
  715. rc = 0;
  716. if (evt_dat)
  717. kfree(evt_dat->data);
  718. kfree(evt_dat);
  719. mutex_lock(&phba->ct_event_mutex);
  720. lpfc_ct_event_unref(evt);
  721. mutex_unlock(&phba->ct_event_mutex);
  722. error_get_event_exit:
  723. /* make error code available to userspace */
  724. job->reply->result = rc;
  725. /* complete the job back to userspace */
  726. job->job_done(job);
  727. return rc;
  728. }
  729. /**
  730. * lpfc_bsg_hst_vendor - process a vendor-specific fc_bsg_job
  731. * @job: fc_bsg_job to handle
  732. */
  733. static int
  734. lpfc_bsg_hst_vendor(struct fc_bsg_job *job)
  735. {
  736. int command = job->request->rqst_data.h_vendor.vendor_cmd[0];
  737. switch (command) {
  738. case LPFC_BSG_VENDOR_SET_CT_EVENT:
  739. return lpfc_bsg_set_event(job);
  740. break;
  741. case LPFC_BSG_VENDOR_GET_CT_EVENT:
  742. return lpfc_bsg_get_event(job);
  743. break;
  744. default:
  745. return -EINVAL;
  746. }
  747. }
  748. /**
  749. * lpfc_bsg_request - handle a bsg request from the FC transport
  750. * @job: fc_bsg_job to handle
  751. */
  752. int
  753. lpfc_bsg_request(struct fc_bsg_job *job)
  754. {
  755. uint32_t msgcode;
  756. int rc = -EINVAL;
  757. msgcode = job->request->msgcode;
  758. switch (msgcode) {
  759. case FC_BSG_HST_VENDOR:
  760. rc = lpfc_bsg_hst_vendor(job);
  761. break;
  762. case FC_BSG_RPT_ELS:
  763. rc = lpfc_bsg_rport_els(job);
  764. break;
  765. case FC_BSG_RPT_CT:
  766. rc = lpfc_bsg_rport_ct(job);
  767. break;
  768. default:
  769. break;
  770. }
  771. return rc;
  772. }
  773. /**
  774. * lpfc_bsg_timeout - handle timeout of a bsg request from the FC transport
  775. * @job: fc_bsg_job that has timed out
  776. *
  777. * This function just aborts the job's IOCB. The aborted IOCB will return to
  778. * the waiting function which will handle passing the error back to userspace
  779. */
  780. int
  781. lpfc_bsg_timeout(struct fc_bsg_job *job)
  782. {
  783. struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
  784. struct lpfc_hba *phba = vport->phba;
  785. struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)job->dd_data;
  786. struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
  787. if (cmdiocb)
  788. lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
  789. return 0;
  790. }