lpfc_bsg.c

/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2009-2010 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/interrupt.h>
#include <linux/mempool.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/delay.h>

#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/scsi_bsg_fc.h>
#include <scsi/fc/fc_fs.h>

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_bsg.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_logmsg.h"
#include "lpfc_crtn.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"

struct lpfc_bsg_event {
	struct list_head node;
	struct kref kref;
	wait_queue_head_t wq;

	/* Event type and waiter identifiers */
	uint32_t type_mask;
	uint32_t req_id;
	uint32_t reg_id;

	/* next two flags are here for the auto-delete logic */
	unsigned long wait_time_stamp;
	int waiting;

	/* seen and not seen events */
	struct list_head events_to_get;
	struct list_head events_to_see;

	/* job waiting for this event to finish */
	struct fc_bsg_job *set_job;
};

struct lpfc_bsg_iocb {
	struct lpfc_iocbq *cmdiocbq;
	struct lpfc_iocbq *rspiocbq;
	struct lpfc_dmabuf *bmp;
	struct lpfc_nodelist *ndlp;

	/* job waiting for this iocb to finish */
	struct fc_bsg_job *set_job;
};

struct lpfc_bsg_mbox {
	LPFC_MBOXQ_t *pmboxq;
	MAILBOX_t *mb;
	struct lpfc_dmabuf *rxbmp; /* for BIU diags */
	struct lpfc_dmabufext *dmp; /* for BIU diags */
	uint8_t *ext; /* extended mailbox data */
	uint32_t mbOffset; /* from app */
	uint32_t inExtWLen; /* from app */
	uint32_t outExtWLen; /* from app */

	/* job waiting for this mbox command to finish */
	struct fc_bsg_job *set_job;
};

#define MENLO_DID 0x0000FC0E

struct lpfc_bsg_menlo {
	struct lpfc_iocbq *cmdiocbq;
	struct lpfc_iocbq *rspiocbq;
	struct lpfc_dmabuf *bmp;

	/* job waiting for this iocb to finish */
	struct fc_bsg_job *set_job;
};

#define TYPE_EVT	1
#define TYPE_IOCB	2
#define TYPE_MBOX	3
#define TYPE_MENLO	4
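
/*
 * Per-job tracking structure.  A pointer to one of these is stashed in
 * the command iocb's context1 (and in job->dd_data) while a request is
 * in flight; whichever of the completion handler or the fc transport
 * timeout handler runs first clears the pointer under phba->ct_ev_lock,
 * so the job is completed back to the bsg layer exactly once.
 */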
struct bsg_job_data {
	uint32_t type;
	union {
		struct lpfc_bsg_event *evt;
		struct lpfc_bsg_iocb iocb;
		struct lpfc_bsg_mbox mbox;
		struct lpfc_bsg_menlo menlo;
	} context_un;
};

struct event_data {
	struct list_head node;
	uint32_t type;
	uint32_t immed_dat;
	void *data;
	uint32_t len;
};

#define BUF_SZ_4K 4096
#define SLI_CT_ELX_LOOPBACK 0x10

enum ELX_LOOPBACK_CMD {
	ELX_LOOPBACK_XRI_SETUP,
	ELX_LOOPBACK_DATA,
};
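
/*
 * Size of the CT request header up to (but not including) the 'un'
 * payload union; the NULL-pointer idiom below is equivalent to
 * offsetof(struct lpfc_sli_ct_request, un).
 */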
#define ELX_LOOPBACK_HEADER_SZ \
	(size_t)(&((struct lpfc_sli_ct_request *)NULL)->un)

struct lpfc_dmabufext {
	struct lpfc_dmabuf dma;
	uint32_t size;
	uint32_t flag;
};

/**
 * lpfc_bsg_send_mgmt_cmd_cmp - lpfc_bsg_send_mgmt_cmd's completion handler
 * @phba: Pointer to HBA context object.
 * @cmdiocbq: Pointer to command iocb.
 * @rspiocbq: Pointer to response iocb.
 *
 * This function is the completion handler for iocbs issued using
 * the lpfc_bsg_send_mgmt_cmd function. This function is called by the
 * ring event handler function without any lock held. This function
 * can be called from both worker thread context and interrupt
 * context. This function also can be called from another thread which
 * cleans up the SLI layer objects.
 * This function copies the contents of the response iocb to the
 * response iocb memory object provided by the caller of
 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
 * sleeps for the iocb completion.
 **/
static void
lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
			struct lpfc_iocbq *cmdiocbq,
			struct lpfc_iocbq *rspiocbq)
{
	unsigned long iflags;
	struct bsg_job_data *dd_data;
	struct fc_bsg_job *job;
	IOCB_t *rsp;
	struct lpfc_dmabuf *bmp;
	struct lpfc_nodelist *ndlp;
	struct lpfc_bsg_iocb *iocb;
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	dd_data = cmdiocbq->context1;
	if (!dd_data) {
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		return;
	}

	iocb = &dd_data->context_un.iocb;
	job = iocb->set_job;
	job->dd_data = NULL; /* so timeout handler does not reply */

	spin_lock_irqsave(&phba->hbalock, iflags);
	cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
	if (cmdiocbq->context2 && rspiocbq)
		memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
		       &rspiocbq->iocb, sizeof(IOCB_t));
	spin_unlock_irqrestore(&phba->hbalock, iflags);

	bmp = iocb->bmp;
	rspiocbq = iocb->rspiocbq;
	rsp = &rspiocbq->iocb;
	ndlp = iocb->ndlp;

	pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
		     job->request_payload.sg_cnt, DMA_TO_DEVICE);
	pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
		     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);

	if (rsp->ulpStatus) {
		if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
			switch (rsp->un.ulpWord[4] & 0xff) {
			case IOERR_SEQUENCE_TIMEOUT:
				rc = -ETIMEDOUT;
				break;
			case IOERR_INVALID_RPI:
				rc = -EFAULT;
				break;
			default:
				rc = -EACCES;
				break;
			}
		} else
			rc = -EACCES;
	} else
		job->reply->reply_payload_rcv_len =
			rsp->un.genreq64.bdl.bdeSize;

	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
	lpfc_sli_release_iocbq(phba, rspiocbq);
	lpfc_sli_release_iocbq(phba, cmdiocbq);
	lpfc_nlp_put(ndlp);
	kfree(bmp);
	kfree(dd_data);
	/* make error code available to userspace */
	job->reply->result = rc;
	/* complete the job back to userspace */
	job->job_done(job);
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
	return;
}

/**
 * lpfc_bsg_send_mgmt_cmd - send a CT command from a bsg request
 * @job: fc_bsg_job to handle
 **/
static int
lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_rport_data *rdata = job->rport->dd_data;
	struct lpfc_nodelist *ndlp = rdata->pnode;
	struct ulp_bde64 *bpl = NULL;
	uint32_t timeout;
	struct lpfc_iocbq *cmdiocbq = NULL;
	struct lpfc_iocbq *rspiocbq = NULL;
	IOCB_t *cmd;
	IOCB_t *rsp;
	struct lpfc_dmabuf *bmp = NULL;
	int request_nseg;
	int reply_nseg;
	struct scatterlist *sgel = NULL;
	int numbde;
	dma_addr_t busaddr;
	struct bsg_job_data *dd_data;
	uint32_t creg_val;
	int rc = 0;

	/* in case no data is transferred */
	job->reply->reply_payload_rcv_len = 0;

	/* allocate our bsg tracking structure */
	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
	if (!dd_data) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2733 Failed allocation of dd_data\n");
		rc = -ENOMEM;
		goto no_dd_data;
	}

	if (!lpfc_nlp_get(ndlp)) {
		rc = -ENODEV;
		goto no_ndlp;
	}

	bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!bmp) {
		rc = -ENOMEM;
		goto free_ndlp;
	}

	if (ndlp->nlp_flag & NLP_ELS_SND_MASK) {
		rc = -ENODEV;
		goto free_bmp;
	}

	cmdiocbq = lpfc_sli_get_iocbq(phba);
	if (!cmdiocbq) {
		rc = -ENOMEM;
		goto free_bmp;
	}

	cmd = &cmdiocbq->iocb;
	rspiocbq = lpfc_sli_get_iocbq(phba);
	if (!rspiocbq) {
		rc = -ENOMEM;
		goto free_cmdiocbq;
	}

	rsp = &rspiocbq->iocb;
	bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
	if (!bmp->virt) {
		rc = -ENOMEM;
		goto free_rspiocbq;
	}

	INIT_LIST_HEAD(&bmp->list);
	bpl = (struct ulp_bde64 *) bmp->virt;
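
	/*
	 * The request and reply scatterlists are DMA-mapped and
	 * translated, entry by entry, into 64-bit buffer descriptors
	 * (BDEs) laid out back to back in the single mbuf allocated
	 * above.  The iocb's BDL then points the hardware at this
	 * buffer pointer list: request BDEs first, reply BDEs
	 * immediately after.
	 */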
	request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
				  job->request_payload.sg_cnt, DMA_TO_DEVICE);
	for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
		busaddr = sg_dma_address(sgel);
		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		bpl->tus.f.bdeSize = sg_dma_len(sgel);
		bpl->tus.w = cpu_to_le32(bpl->tus.w);
		bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
		bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
		bpl++;
	}

	reply_nseg = pci_map_sg(phba->pcidev, job->reply_payload.sg_list,
				job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	for_each_sg(job->reply_payload.sg_list, sgel, reply_nseg, numbde) {
		busaddr = sg_dma_address(sgel);
		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
		bpl->tus.f.bdeSize = sg_dma_len(sgel);
		bpl->tus.w = cpu_to_le32(bpl->tus.w);
		bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
		bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
		bpl++;
	}

	cmd->un.genreq64.bdl.ulpIoTag32 = 0;
	cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
	cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
	cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
	cmd->un.genreq64.bdl.bdeSize =
		(request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
	cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
	cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
	cmd->un.genreq64.w5.hcsw.Dfctl = 0;
	cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
	cmd->un.genreq64.w5.hcsw.Type = FC_TYPE_CT;
	cmd->ulpBdeCount = 1;
	cmd->ulpLe = 1;
	cmd->ulpClass = CLASS3;
	cmd->ulpContext = ndlp->nlp_rpi;
	cmd->ulpOwner = OWN_CHIP;
	cmdiocbq->vport = phba->pport;
	cmdiocbq->context3 = bmp;
	cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
	timeout = phba->fc_ratov * 2;
	cmd->ulpTimeout = timeout;

	cmdiocbq->iocb_cmpl = lpfc_bsg_send_mgmt_cmd_cmp;
	cmdiocbq->context1 = dd_data;
	cmdiocbq->context2 = rspiocbq;
	dd_data->type = TYPE_IOCB;
	dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
	dd_data->context_un.iocb.rspiocbq = rspiocbq;
	dd_data->context_un.iocb.set_job = job;
	dd_data->context_un.iocb.bmp = bmp;
	dd_data->context_un.iocb.ndlp = ndlp;
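
	/*
	 * If FCP ring interrupts were disabled for polled mode,
	 * re-enable the ring's attention bit in the Host Control
	 * register before issuing; the trailing readl() flushes the
	 * posted PCI write.
	 */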
	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
		creg_val = readl(phba->HCregaddr);
		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
		writel(creg_val, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
	if (rc == IOCB_SUCCESS)
		return 0; /* done for now */

	/* iocb failed so cleanup */
	pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
		     job->request_payload.sg_cnt, DMA_TO_DEVICE);
	pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
		     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);

	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);

free_rspiocbq:
	lpfc_sli_release_iocbq(phba, rspiocbq);
free_cmdiocbq:
	lpfc_sli_release_iocbq(phba, cmdiocbq);
free_bmp:
	kfree(bmp);
free_ndlp:
	lpfc_nlp_put(ndlp);
no_ndlp:
	kfree(dd_data);
no_dd_data:
	/* make error code available to userspace */
	job->reply->result = rc;
	job->dd_data = NULL;
	return rc;
}

/**
 * lpfc_bsg_rport_els_cmp - lpfc_bsg_rport_els's completion handler
 * @phba: Pointer to HBA context object.
 * @cmdiocbq: Pointer to command iocb.
 * @rspiocbq: Pointer to response iocb.
 *
 * This function is the completion handler for iocbs issued using
 * the lpfc_bsg_rport_els function. This function is called by the
 * ring event handler function without any lock held. This function
 * can be called from both worker thread context and interrupt
 * context. This function also can be called from another thread which
 * cleans up the SLI layer objects.
 * This function copies the contents of the response iocb to the
 * response iocb memory object provided by the caller of
 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
 * sleeps for the iocb completion.
 **/
static void
lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba,
			struct lpfc_iocbq *cmdiocbq,
			struct lpfc_iocbq *rspiocbq)
{
	struct bsg_job_data *dd_data;
	struct fc_bsg_job *job;
	IOCB_t *rsp;
	struct lpfc_nodelist *ndlp;
	struct lpfc_dmabuf *pbuflist = NULL;
	struct fc_bsg_ctels_reply *els_reply;
	uint8_t *rjt_data;
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	dd_data = cmdiocbq->context1;
	/* normal completion and timeout crossed paths, already done */
	if (!dd_data) {
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		return;
	}

	cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
	if (cmdiocbq->context2 && rspiocbq)
		memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
		       &rspiocbq->iocb, sizeof(IOCB_t));

	job = dd_data->context_un.iocb.set_job;
	cmdiocbq = dd_data->context_un.iocb.cmdiocbq;
	rspiocbq = dd_data->context_un.iocb.rspiocbq;
	rsp = &rspiocbq->iocb;
	ndlp = dd_data->context_un.iocb.ndlp;

	pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
		     job->request_payload.sg_cnt, DMA_TO_DEVICE);
	pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
		     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);

	if (job->reply->result == -EAGAIN)
		rc = -EAGAIN;
	else if (rsp->ulpStatus == IOSTAT_SUCCESS)
		job->reply->reply_payload_rcv_len =
			rsp->un.elsreq64.bdl.bdeSize;
	else if (rsp->ulpStatus == IOSTAT_LS_RJT) {
		job->reply->reply_payload_rcv_len =
			sizeof(struct fc_bsg_ctels_reply);
		/* LS_RJT data returned in word 4 */
		rjt_data = (uint8_t *)&rsp->un.ulpWord[4];
		els_reply = &job->reply->reply_data.ctels_reply;
		els_reply->status = FC_CTELS_STATUS_REJECT;
		els_reply->rjt_data.action = rjt_data[3];
		els_reply->rjt_data.reason_code = rjt_data[2];
		els_reply->rjt_data.reason_explanation = rjt_data[1];
		els_reply->rjt_data.vendor_unique = rjt_data[0];
	} else
		rc = -EIO;

	pbuflist = (struct lpfc_dmabuf *) cmdiocbq->context3;
	lpfc_mbuf_free(phba, pbuflist->virt, pbuflist->phys);
	lpfc_sli_release_iocbq(phba, rspiocbq);
	lpfc_sli_release_iocbq(phba, cmdiocbq);
	lpfc_nlp_put(ndlp);
	kfree(dd_data);
	/* make error code available to userspace */
	job->reply->result = rc;
	job->dd_data = NULL;
	/* complete the job back to userspace */
	job->job_done(job);
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
	return;
}
/**
 * lpfc_bsg_rport_els - send an ELS command from a bsg request
 * @job: fc_bsg_job to handle
 **/
static int
lpfc_bsg_rport_els(struct fc_bsg_job *job)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_rport_data *rdata = job->rport->dd_data;
	struct lpfc_nodelist *ndlp = rdata->pnode;
	uint32_t elscmd;
	uint32_t cmdsize;
	uint32_t rspsize;
	struct lpfc_iocbq *rspiocbq;
	struct lpfc_iocbq *cmdiocbq;
	IOCB_t *rsp;
	uint16_t rpi = 0;
	struct lpfc_dmabuf *pcmd;
	struct lpfc_dmabuf *prsp;
	struct lpfc_dmabuf *pbuflist = NULL;
	struct ulp_bde64 *bpl;
	int request_nseg;
	int reply_nseg;
	struct scatterlist *sgel = NULL;
	int numbde;
	dma_addr_t busaddr;
	struct bsg_job_data *dd_data;
	uint32_t creg_val;
	int rc = 0;

	/* in case no data is transferred */
	job->reply->reply_payload_rcv_len = 0;

	/* allocate our bsg tracking structure */
	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
	if (!dd_data) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2735 Failed allocation of dd_data\n");
		rc = -ENOMEM;
		goto no_dd_data;
	}

	if (!lpfc_nlp_get(ndlp)) {
		rc = -ENODEV;
		goto free_dd_data;
	}

	elscmd = job->request->rqst_data.r_els.els_code;
	cmdsize = job->request_payload.payload_len;
	rspsize = job->reply_payload.payload_len;
	rspiocbq = lpfc_sli_get_iocbq(phba);
	if (!rspiocbq) {
		lpfc_nlp_put(ndlp);
		rc = -ENOMEM;
		goto free_dd_data;
	}

	rsp = &rspiocbq->iocb;
	rpi = ndlp->nlp_rpi;

	cmdiocbq = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp,
				      ndlp->nlp_DID, elscmd);
	if (!cmdiocbq) {
		rc = -EIO;
		goto free_rspiocbq;
	}

	/* prep els iocb set context1 to the ndlp, context2 to the command
	 * dmabuf, context3 holds the data dmabuf
	 */
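	/*
	 * lpfc_prep_els_iocb allocated its own command and response
	 * dmabufs (hanging off context2); they are freed here because
	 * the payload for this ELS comes from the bsg request's
	 * scatterlists, which are mapped into the BPL below instead.
	 */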
	pcmd = (struct lpfc_dmabuf *) cmdiocbq->context2;
	prsp = (struct lpfc_dmabuf *) pcmd->list.next;
	lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
	kfree(pcmd);
	lpfc_mbuf_free(phba, prsp->virt, prsp->phys);
	kfree(prsp);
	cmdiocbq->context2 = NULL;

	pbuflist = (struct lpfc_dmabuf *) cmdiocbq->context3;
	bpl = (struct ulp_bde64 *) pbuflist->virt;

	request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
				  job->request_payload.sg_cnt, DMA_TO_DEVICE);
	for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
		busaddr = sg_dma_address(sgel);
		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		bpl->tus.f.bdeSize = sg_dma_len(sgel);
		bpl->tus.w = cpu_to_le32(bpl->tus.w);
		bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
		bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
		bpl++;
	}

	reply_nseg = pci_map_sg(phba->pcidev, job->reply_payload.sg_list,
				job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	for_each_sg(job->reply_payload.sg_list, sgel, reply_nseg, numbde) {
		busaddr = sg_dma_address(sgel);
		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
		bpl->tus.f.bdeSize = sg_dma_len(sgel);
		bpl->tus.w = cpu_to_le32(bpl->tus.w);
		bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
		bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
		bpl++;
	}

	cmdiocbq->iocb.un.elsreq64.bdl.bdeSize =
		(request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
	cmdiocbq->iocb.ulpContext = rpi;
	cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
	cmdiocbq->context1 = NULL;
	cmdiocbq->context2 = NULL;

	cmdiocbq->iocb_cmpl = lpfc_bsg_rport_els_cmp;
	cmdiocbq->context1 = dd_data;
	cmdiocbq->context2 = rspiocbq;
	dd_data->type = TYPE_IOCB;
	dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
	dd_data->context_un.iocb.rspiocbq = rspiocbq;
	dd_data->context_un.iocb.set_job = job;
	dd_data->context_un.iocb.bmp = NULL;
	dd_data->context_un.iocb.ndlp = ndlp;

	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
		creg_val = readl(phba->HCregaddr);
		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
		writel(creg_val, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}
	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
	lpfc_nlp_put(ndlp);
	if (rc == IOCB_SUCCESS)
		return 0; /* done for now */

	pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
		     job->request_payload.sg_cnt, DMA_TO_DEVICE);
	pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
		     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);

	lpfc_mbuf_free(phba, pbuflist->virt, pbuflist->phys);

	lpfc_sli_release_iocbq(phba, cmdiocbq);

free_rspiocbq:
	lpfc_sli_release_iocbq(phba, rspiocbq);

free_dd_data:
	kfree(dd_data);

no_dd_data:
	/* make error code available to userspace */
	job->reply->result = rc;
	job->dd_data = NULL;
	return rc;
}
/**
 * lpfc_bsg_event_free - frees an allocated event structure
 * @kref: Pointer to a kref.
 *
 * Called from kref_put. Back cast the kref into an event structure address.
 * Free any events to get, delete associated nodes, free any events to see,
 * free any data then free the event itself.
 **/
static void
lpfc_bsg_event_free(struct kref *kref)
{
	struct lpfc_bsg_event *evt = container_of(kref, struct lpfc_bsg_event,
						  kref);
	struct event_data *ed;

	list_del(&evt->node);

	while (!list_empty(&evt->events_to_get)) {
		ed = list_entry(evt->events_to_get.next, typeof(*ed), node);
		list_del(&ed->node);
		kfree(ed->data);
		kfree(ed);
	}

	while (!list_empty(&evt->events_to_see)) {
		ed = list_entry(evt->events_to_see.next, typeof(*ed), node);
		list_del(&ed->node);
		kfree(ed->data);
		kfree(ed);
	}

	kfree(evt);
}

/**
 * lpfc_bsg_event_ref - increments the kref for an event
 * @evt: Pointer to an event structure.
 **/
static inline void
lpfc_bsg_event_ref(struct lpfc_bsg_event *evt)
{
	kref_get(&evt->kref);
}

/**
 * lpfc_bsg_event_unref - Uses kref_put to free an event structure
 * @evt: Pointer to an event structure.
 **/
static inline void
lpfc_bsg_event_unref(struct lpfc_bsg_event *evt)
{
	kref_put(&evt->kref, lpfc_bsg_event_free);
}

/**
 * lpfc_bsg_event_new - allocate and initialize an event structure
 * @ev_mask: Mask of events.
 * @ev_reg_id: Event reg id.
 * @ev_req_id: Event request id.
 **/
static struct lpfc_bsg_event *
lpfc_bsg_event_new(uint32_t ev_mask, int ev_reg_id, uint32_t ev_req_id)
{
	struct lpfc_bsg_event *evt = kzalloc(sizeof(*evt), GFP_KERNEL);

	if (!evt)
		return NULL;

	INIT_LIST_HEAD(&evt->events_to_get);
	INIT_LIST_HEAD(&evt->events_to_see);
	evt->type_mask = ev_mask;
	evt->req_id = ev_req_id;
	evt->reg_id = ev_reg_id;
	evt->wait_time_stamp = jiffies;
	init_waitqueue_head(&evt->wq);
	kref_init(&evt->kref);
	return evt;
}
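
/*
 * A minimal sketch of the intended reference lifecycle (illustrative
 * only, not a call sequence from this file):
 *
 *	evt = lpfc_bsg_event_new(FC_REG_CT_EVENT, reg_id, req_id);
 *	                               kref starts at 1
 *	lpfc_bsg_event_ref(evt);    // +1 for each additional user
 *	...
 *	lpfc_bsg_event_unref(evt);  // final put runs lpfc_bsg_event_free
 */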
/**
 * diag_cmd_data_free - Frees an lpfc dma buffer extension
 * @phba: Pointer to HBA context object.
 * @mlist: Pointer to an lpfc dma buffer extension.
 **/
static int
diag_cmd_data_free(struct lpfc_hba *phba, struct lpfc_dmabufext *mlist)
{
	struct lpfc_dmabufext *mlast;
	struct pci_dev *pcidev;
	struct list_head head, *curr, *next;

	if ((!mlist) || (!lpfc_is_link_up(phba) &&
		(phba->link_flag & LS_LOOPBACK_MODE))) {
		return 0;
	}

	pcidev = phba->pcidev;
	list_add_tail(&head, &mlist->dma.list);

	list_for_each_safe(curr, next, &head) {
		mlast = list_entry(curr, struct lpfc_dmabufext, dma.list);
		if (mlast->dma.virt)
			dma_free_coherent(&pcidev->dev,
					  mlast->size,
					  mlast->dma.virt,
					  mlast->dma.phys);
		kfree(mlast);
	}
	return 0;
}
/**
 * lpfc_bsg_ct_unsol_event - process an unsolicited CT command
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to the driver SLI ring object.
 * @piocbq: Pointer to the unsolicited receive iocb.
 *
 * This function is called when an unsolicited CT command is received. It
 * forwards the event to any processes registered to receive CT events.
 **/
int
lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			struct lpfc_iocbq *piocbq)
{
	uint32_t evt_req_id = 0;
	uint32_t cmd;
	uint32_t len;
	struct lpfc_dmabuf *dmabuf = NULL;
	struct lpfc_bsg_event *evt;
	struct event_data *evt_dat = NULL;
	struct lpfc_iocbq *iocbq;
	size_t offset = 0;
	struct list_head head;
	struct ulp_bde64 *bde;
	dma_addr_t dma_addr;
	int i;
	struct lpfc_dmabuf *bdeBuf1 = piocbq->context2;
	struct lpfc_dmabuf *bdeBuf2 = piocbq->context3;
	struct lpfc_hbq_entry *hbqe;
	struct lpfc_sli_ct_request *ct_req;
	struct fc_bsg_job *job = NULL;
	unsigned long flags;
	int size = 0;

	INIT_LIST_HEAD(&head);
	list_add_tail(&head, &piocbq->list);

	if (piocbq->iocb.ulpBdeCount == 0 ||
	    piocbq->iocb.un.cont64[0].tus.f.bdeSize == 0)
		goto error_ct_unsol_exit;

	if (phba->link_state == LPFC_HBA_ERROR ||
		(!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)))
		goto error_ct_unsol_exit;
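
	/*
	 * With HBQs enabled the receive buffer is already attached to
	 * the iocb (context2/context3); otherwise it must be looked up
	 * on the ring's posted-buffer list by its DMA address.
	 */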
	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
		dmabuf = bdeBuf1;
	else {
		dma_addr = getPaddr(piocbq->iocb.un.cont64[0].addrHigh,
				    piocbq->iocb.un.cont64[0].addrLow);
		dmabuf = lpfc_sli_ringpostbuf_get(phba, pring, dma_addr);
	}
	if (dmabuf == NULL)
		goto error_ct_unsol_exit;
	ct_req = (struct lpfc_sli_ct_request *)dmabuf->virt;
	evt_req_id = ct_req->FsType;
	cmd = ct_req->CommandResponse.bits.CmdRsp;
	len = ct_req->CommandResponse.bits.Size;
	if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
		lpfc_sli_ringpostbuf_put(phba, pring, dmabuf);

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
		if (!(evt->type_mask & FC_REG_CT_EVENT) ||
			evt->req_id != evt_req_id)
			continue;

		lpfc_bsg_event_ref(evt);
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		evt_dat = kzalloc(sizeof(*evt_dat), GFP_KERNEL);
		if (evt_dat == NULL) {
			spin_lock_irqsave(&phba->ct_ev_lock, flags);
			lpfc_bsg_event_unref(evt);
			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
					"2614 Memory allocation failed for "
					"CT event\n");
			break;
		}

		if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
			/* take accumulated byte count from the last iocbq */
			iocbq = list_entry(head.prev, typeof(*iocbq), list);
			evt_dat->len = iocbq->iocb.unsli3.rcvsli3.acc_len;
		} else {
			list_for_each_entry(iocbq, &head, list) {
				for (i = 0; i < iocbq->iocb.ulpBdeCount; i++)
					evt_dat->len +=
					iocbq->iocb.un.cont64[i].tus.f.bdeSize;
			}
		}

		evt_dat->data = kzalloc(evt_dat->len, GFP_KERNEL);
		if (evt_dat->data == NULL) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
					"2615 Memory allocation failed for "
					"CT event data, size %d\n",
					evt_dat->len);
			kfree(evt_dat);
			spin_lock_irqsave(&phba->ct_ev_lock, flags);
			lpfc_bsg_event_unref(evt);
			spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
			goto error_ct_unsol_exit;
		}

		list_for_each_entry(iocbq, &head, list) {
			size = 0;
			if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
				bdeBuf1 = iocbq->context2;
				bdeBuf2 = iocbq->context3;
			}
			for (i = 0; i < iocbq->iocb.ulpBdeCount; i++) {
				if (phba->sli3_options &
				    LPFC_SLI3_HBQ_ENABLED) {
					if (i == 0) {
						hbqe = (struct lpfc_hbq_entry *)
						  &iocbq->iocb.un.ulpWord[0];
						size = hbqe->bde.tus.f.bdeSize;
						dmabuf = bdeBuf1;
					} else if (i == 1) {
						hbqe = (struct lpfc_hbq_entry *)
							&iocbq->iocb.unsli3.
							sli3Words[4];
						size = hbqe->bde.tus.f.bdeSize;
						dmabuf = bdeBuf2;
					}
					if ((offset + size) > evt_dat->len)
						size = evt_dat->len - offset;
				} else {
					size = iocbq->iocb.un.cont64[i].
						tus.f.bdeSize;
					bde = &iocbq->iocb.un.cont64[i];
					dma_addr = getPaddr(bde->addrHigh,
							    bde->addrLow);
					dmabuf = lpfc_sli_ringpostbuf_get(phba,
							pring, dma_addr);
				}
				if (!dmabuf) {
					lpfc_printf_log(phba, KERN_ERR,
						LOG_LIBDFC, "2616 No dmabuf "
						"found for iocbq 0x%p\n",
						iocbq);
					kfree(evt_dat->data);
					kfree(evt_dat);
					spin_lock_irqsave(&phba->ct_ev_lock,
						flags);
					lpfc_bsg_event_unref(evt);
					spin_unlock_irqrestore(
						&phba->ct_ev_lock, flags);
					goto error_ct_unsol_exit;
				}
				memcpy((char *)(evt_dat->data) + offset,
				       dmabuf->virt, size);
				offset += size;
				if (evt_req_id != SLI_CT_ELX_LOOPBACK &&
				    !(phba->sli3_options &
				      LPFC_SLI3_HBQ_ENABLED)) {
					lpfc_sli_ringpostbuf_put(phba, pring,
								 dmabuf);
				} else {
					switch (cmd) {
					case ELX_LOOPBACK_DATA:
						diag_cmd_data_free(phba,
						(struct lpfc_dmabufext *)
							dmabuf);
						break;
					case ELX_LOOPBACK_XRI_SETUP:
						if ((phba->sli_rev ==
							LPFC_SLI_REV2) ||
							(phba->sli3_options &
							LPFC_SLI3_HBQ_ENABLED
							)) {
							lpfc_in_buf_free(phba,
									dmabuf);
						} else {
							lpfc_post_buffer(phba,
									 pring,
									 1);
						}
						break;
					default:
						if (!(phba->sli3_options &
						      LPFC_SLI3_HBQ_ENABLED))
							lpfc_post_buffer(phba,
									 pring,
									 1);
						break;
					}
				}
			}
		}

		spin_lock_irqsave(&phba->ct_ev_lock, flags);
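		/*
		 * On SLI4 the OX_ID and source ID of the exchange are
		 * saved in the ct_ctx[] array and only the array index
		 * is handed to the application via immed_dat; the app
		 * passes that index back as the tag when it responds
		 * through lpfc_issue_ct_rsp().
		 */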
		if (phba->sli_rev == LPFC_SLI_REV4) {
			evt_dat->immed_dat = phba->ctx_idx;
			phba->ctx_idx = (phba->ctx_idx + 1) % 64;
			phba->ct_ctx[evt_dat->immed_dat].oxid =
						piocbq->iocb.ulpContext;
			phba->ct_ctx[evt_dat->immed_dat].SID =
				piocbq->iocb.un.rcvels.remoteID;
		} else
			evt_dat->immed_dat = piocbq->iocb.ulpContext;

		evt_dat->type = FC_REG_CT_EVENT;
		list_add(&evt_dat->node, &evt->events_to_see);
		if (evt_req_id == SLI_CT_ELX_LOOPBACK) {
			wake_up_interruptible(&evt->wq);
			lpfc_bsg_event_unref(evt);
			break;
		}

		list_move(evt->events_to_see.prev, &evt->events_to_get);
		lpfc_bsg_event_unref(evt);

		job = evt->set_job;
		evt->set_job = NULL;
		if (job) {
			job->reply->reply_payload_rcv_len = size;
			/* make error code available to userspace */
			job->reply->result = 0;
			job->dd_data = NULL;
			/* complete the job back to userspace */
			spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
			job->job_done(job);
			spin_lock_irqsave(&phba->ct_ev_lock, flags);
		}
	}
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

error_ct_unsol_exit:
	if (!list_empty(&head))
		list_del(&head);
	if (evt_req_id == SLI_CT_ELX_LOOPBACK)
		return 0;
	return 1;
}
/**
 * lpfc_bsg_hba_set_event - process a SET_EVENT bsg vendor command
 * @job: SET_EVENT fc_bsg_job
 **/
static int
lpfc_bsg_hba_set_event(struct fc_bsg_job *job)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct set_ct_event *event_req;
	struct lpfc_bsg_event *evt;
	int rc = 0;
	struct bsg_job_data *dd_data = NULL;
	uint32_t ev_mask;
	unsigned long flags;

	if (job->request_len <
	    sizeof(struct fc_bsg_request) + sizeof(struct set_ct_event)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2612 Received SET_CT_EVENT below minimum "
				"size\n");
		rc = -EINVAL;
		goto job_error;
	}

	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
	if (dd_data == NULL) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2734 Failed allocation of dd_data\n");
		rc = -ENOMEM;
		goto job_error;
	}

	event_req = (struct set_ct_event *)
		job->request->rqst_data.h_vendor.vendor_cmd;
	ev_mask = ((uint32_t)(unsigned long)event_req->type_mask &
				FC_REG_EVENT_MASK);
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
		if (evt->reg_id == event_req->ev_reg_id) {
			lpfc_bsg_event_ref(evt);
			evt->wait_time_stamp = jiffies;
			break;
		}
	}
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	if (&evt->node == &phba->ct_ev_waiters) {
		/* no event waiting struct yet - first call */
		evt = lpfc_bsg_event_new(ev_mask, event_req->ev_reg_id,
					 event_req->ev_req_id);
		if (!evt) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
					"2617 Failed allocation of event "
					"waiter\n");
			rc = -ENOMEM;
			goto job_error;
		}

		spin_lock_irqsave(&phba->ct_ev_lock, flags);
		list_add(&evt->node, &phba->ct_ev_waiters);
		lpfc_bsg_event_ref(evt);
		evt->wait_time_stamp = jiffies;
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
	}

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	evt->waiting = 1;
	dd_data->type = TYPE_EVT;
	dd_data->context_un.evt = evt;
	evt->set_job = job; /* for unsolicited command */
	job->dd_data = dd_data; /* for fc transport timeout callback*/
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
	return 0; /* call job done later */

job_error:
	if (dd_data != NULL)
		kfree(dd_data);

	job->dd_data = NULL;
	return rc;
}
/**
 * lpfc_bsg_hba_get_event - process a GET_EVENT bsg vendor command
 * @job: GET_EVENT fc_bsg_job
 **/
static int
lpfc_bsg_hba_get_event(struct fc_bsg_job *job)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct get_ct_event *event_req;
	struct get_ct_event_reply *event_reply;
	struct lpfc_bsg_event *evt;
	struct event_data *evt_dat = NULL;
	unsigned long flags;
	uint32_t rc = 0;

	if (job->request_len <
	    sizeof(struct fc_bsg_request) + sizeof(struct get_ct_event)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2613 Received GET_CT_EVENT request below "
				"minimum size\n");
		rc = -EINVAL;
		goto job_error;
	}

	event_req = (struct get_ct_event *)
		job->request->rqst_data.h_vendor.vendor_cmd;

	event_reply = (struct get_ct_event_reply *)
		job->reply->reply_data.vendor_reply.vendor_rsp;
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
		if (evt->reg_id == event_req->ev_reg_id) {
			if (list_empty(&evt->events_to_get))
				break;
			lpfc_bsg_event_ref(evt);
			evt->wait_time_stamp = jiffies;
			evt_dat = list_entry(evt->events_to_get.prev,
					     struct event_data, node);
			list_del(&evt_dat->node);
			break;
		}
	}
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	/* The app may continue to ask for event data until it gets
	 * an error indicating that there isn't anymore
	 */
	if (evt_dat == NULL) {
		job->reply->reply_payload_rcv_len = 0;
		rc = -ENOENT;
		goto job_error;
	}

	if (evt_dat->len > job->request_payload.payload_len) {
		evt_dat->len = job->request_payload.payload_len;
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2618 Truncated event data at %d "
				"bytes\n",
				job->request_payload.payload_len);
	}

	event_reply->type = evt_dat->type;
	event_reply->immed_data = evt_dat->immed_dat;
	if (evt_dat->len > 0)
		job->reply->reply_payload_rcv_len =
			sg_copy_from_buffer(job->request_payload.sg_list,
					    job->request_payload.sg_cnt,
					    evt_dat->data, evt_dat->len);
	else
		job->reply->reply_payload_rcv_len = 0;

	if (evt_dat) {
		kfree(evt_dat->data);
		kfree(evt_dat);
	}

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	lpfc_bsg_event_unref(evt);
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
	job->dd_data = NULL;
	job->reply->result = 0;
	job->job_done(job);
	return 0;

job_error:
	job->dd_data = NULL;
	job->reply->result = rc;
	return rc;
}
/**
 * lpfc_issue_ct_rsp_cmp - lpfc_issue_ct_rsp's completion handler
 * @phba: Pointer to HBA context object.
 * @cmdiocbq: Pointer to command iocb.
 * @rspiocbq: Pointer to response iocb.
 *
 * This function is the completion handler for iocbs issued using
 * the lpfc_issue_ct_rsp function. This function is called by the
 * ring event handler function without any lock held. This function
 * can be called from both worker thread context and interrupt
 * context. This function also can be called from another thread which
 * cleans up the SLI layer objects.
 * This function copies the contents of the response iocb to the
 * response iocb memory object provided by the caller of
 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
 * sleeps for the iocb completion.
 **/
static void
lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba,
			struct lpfc_iocbq *cmdiocbq,
			struct lpfc_iocbq *rspiocbq)
{
	struct bsg_job_data *dd_data;
	struct fc_bsg_job *job;
	IOCB_t *rsp;
	struct lpfc_dmabuf *bmp;
	struct lpfc_nodelist *ndlp;
	unsigned long flags;
	int rc = 0;

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	dd_data = cmdiocbq->context1;
	/* normal completion and timeout crossed paths, already done */
	if (!dd_data) {
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		return;
	}

	job = dd_data->context_un.iocb.set_job;
	bmp = dd_data->context_un.iocb.bmp;
	rsp = &rspiocbq->iocb;
	ndlp = dd_data->context_un.iocb.ndlp;

	pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
		     job->request_payload.sg_cnt, DMA_TO_DEVICE);

	if (rsp->ulpStatus) {
		if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
			switch (rsp->un.ulpWord[4] & 0xff) {
			case IOERR_SEQUENCE_TIMEOUT:
				rc = -ETIMEDOUT;
				break;
			case IOERR_INVALID_RPI:
				rc = -EFAULT;
				break;
			default:
				rc = -EACCES;
				break;
			}
		} else
			rc = -EACCES;
	} else
		job->reply->reply_payload_rcv_len =
			rsp->un.genreq64.bdl.bdeSize;

	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
	lpfc_sli_release_iocbq(phba, cmdiocbq);
	lpfc_nlp_put(ndlp);
	kfree(bmp);
	kfree(dd_data);
	/* make error code available to userspace */
	job->reply->result = rc;
	job->dd_data = NULL;
	/* complete the job back to userspace */
	job->job_done(job);
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
	return;
}
/**
 * lpfc_issue_ct_rsp - issue a ct response
 * @phba: Pointer to HBA context object.
 * @job: Pointer to the job object.
 * @tag: tag index value into the ports context exchange array.
 * @bmp: Pointer to a dma buffer descriptor.
 * @num_entry: Number of entries in the bde.
 **/
static int
lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
		  struct lpfc_dmabuf *bmp, int num_entry)
{
	IOCB_t *icmd;
	struct lpfc_iocbq *ctiocb = NULL;
	int rc = 0;
	struct lpfc_nodelist *ndlp = NULL;
	struct bsg_job_data *dd_data;
	uint32_t creg_val;

	/* allocate our bsg tracking structure */
	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
	if (!dd_data) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2736 Failed allocation of dd_data\n");
		rc = -ENOMEM;
		goto no_dd_data;
	}

	/* Allocate buffer for command iocb */
	ctiocb = lpfc_sli_get_iocbq(phba);
	if (!ctiocb) {
		rc = -ENOMEM;
		goto no_ctiocb;
	}

	icmd = &ctiocb->iocb;
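
	/*
	 * The CT response goes out as an XMIT_SEQUENCE64_CX on the
	 * original exchange rather than as a new GEN_REQUEST: the BDL
	 * below points at the caller-built buffer pointer list, and
	 * ulpContext is set to the saved exchange id further down.
	 */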
	icmd->un.xseq64.bdl.ulpIoTag32 = 0;
	icmd->un.xseq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
	icmd->un.xseq64.bdl.addrLow = putPaddrLow(bmp->phys);
	icmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
	icmd->un.xseq64.bdl.bdeSize = (num_entry * sizeof(struct ulp_bde64));
	icmd->un.xseq64.w5.hcsw.Fctl = (LS | LA);
	icmd->un.xseq64.w5.hcsw.Dfctl = 0;
	icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_SOL_CTL;
	icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;

	/* Fill in rest of iocb */
	icmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX;
	icmd->ulpBdeCount = 1;
	icmd->ulpLe = 1;
	icmd->ulpClass = CLASS3;
	if (phba->sli_rev == LPFC_SLI_REV4) {
		/* Do not issue unsol response if oxid not marked as valid */
		if (!(phba->ct_ctx[tag].flags & UNSOL_VALID)) {
			rc = IOCB_ERROR;
			goto issue_ct_rsp_exit;
		}
		icmd->ulpContext = phba->ct_ctx[tag].oxid;
		ndlp = lpfc_findnode_did(phba->pport, phba->ct_ctx[tag].SID);
		if (!ndlp) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
				 "2721 ndlp null for oxid %x SID %x\n",
					icmd->ulpContext,
					phba->ct_ctx[tag].SID);
			rc = IOCB_ERROR;
			goto issue_ct_rsp_exit;
		}
		icmd->un.ulpWord[3] = ndlp->nlp_rpi;

		/* The exchange is done, mark the entry as invalid */
		phba->ct_ctx[tag].flags &= ~UNSOL_VALID;
	} else
		icmd->ulpContext = (ushort) tag;

	icmd->ulpTimeout = phba->fc_ratov * 2;

	/* Xmit CT response on exchange <xid> */
	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
		"2722 Xmit CT response on exchange x%x Data: x%x x%x\n",
		icmd->ulpContext, icmd->ulpIoTag, phba->link_state);

	ctiocb->iocb_cmpl = NULL;
	ctiocb->iocb_flag |= LPFC_IO_LIBDFC;
	ctiocb->vport = phba->pport;
	ctiocb->context3 = bmp;

	ctiocb->iocb_cmpl = lpfc_issue_ct_rsp_cmp;
	ctiocb->context1 = dd_data;
	ctiocb->context2 = NULL;
	dd_data->type = TYPE_IOCB;
	dd_data->context_un.iocb.cmdiocbq = ctiocb;
	dd_data->context_un.iocb.rspiocbq = NULL;
	dd_data->context_un.iocb.set_job = job;
	dd_data->context_un.iocb.bmp = bmp;
	dd_data->context_un.iocb.ndlp = ndlp;

	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
		creg_val = readl(phba->HCregaddr);
		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
		writel(creg_val, phba->HCregaddr);
		readl(phba->HCregaddr); /* flush */
	}

	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
	if (rc == IOCB_SUCCESS)
		return 0; /* done for now */

issue_ct_rsp_exit:
	lpfc_sli_release_iocbq(phba, ctiocb);
no_ctiocb:
	kfree(dd_data);
no_dd_data:
	return rc;
}
/**
 * lpfc_bsg_send_mgmt_rsp - process a SEND_MGMT_RESP bsg vendor command
 * @job: SEND_MGMT_RESP fc_bsg_job
 **/
static int
lpfc_bsg_send_mgmt_rsp(struct fc_bsg_job *job)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct send_mgmt_resp *mgmt_resp = (struct send_mgmt_resp *)
		job->request->rqst_data.h_vendor.vendor_cmd;
	struct ulp_bde64 *bpl;
	struct lpfc_dmabuf *bmp = NULL;
	struct scatterlist *sgel = NULL;
	int request_nseg;
	int numbde;
	dma_addr_t busaddr;
	uint32_t tag = mgmt_resp->tag;
	unsigned long reqbfrcnt =
			(unsigned long)job->request_payload.payload_len;
	int rc = 0;

	/* in case no data is transferred */
	job->reply->reply_payload_rcv_len = 0;
  1246. if (!reqbfrcnt || (reqbfrcnt > (80 * BUF_SZ_4K))) {
  1247. rc = -ERANGE;
  1248. goto send_mgmt_rsp_exit;
  1249. }
  1250. bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
  1251. if (!bmp) {
  1252. rc = -ENOMEM;
  1253. goto send_mgmt_rsp_exit;
  1254. }
  1255. bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
  1256. if (!bmp->virt) {
  1257. rc = -ENOMEM;
  1258. goto send_mgmt_rsp_free_bmp;
  1259. }
  1260. INIT_LIST_HEAD(&bmp->list);
  1261. bpl = (struct ulp_bde64 *) bmp->virt;
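
	/*
	 * Map the user scatterlist for DMA and translate each mapped
	 * segment into a 64-bit BDE in the buffer pointer list; the
	 * BDE words are stored little-endian for the hardware.
	 */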
	request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
				  job->request_payload.sg_cnt, DMA_TO_DEVICE);
	for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
		busaddr = sg_dma_address(sgel);
		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		bpl->tus.f.bdeSize = sg_dma_len(sgel);
		bpl->tus.w = cpu_to_le32(bpl->tus.w);
		bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
		bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
		bpl++;
	}

	rc = lpfc_issue_ct_rsp(phba, job, tag, bmp, request_nseg);

	if (rc == IOCB_SUCCESS)
		return 0; /* done for now */

	/* TBD need to handle a timeout */
	pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
		     job->request_payload.sg_cnt, DMA_TO_DEVICE);
	rc = -EACCES;
	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);

send_mgmt_rsp_free_bmp:
	kfree(bmp);
send_mgmt_rsp_exit:
	/* make error code available to userspace */
	job->reply->result = rc;
	job->dd_data = NULL;
	return rc;
}

/**
 * lpfc_bsg_diag_mode - process a LPFC_BSG_VENDOR_DIAG_MODE bsg vendor command
 * @job: LPFC_BSG_VENDOR_DIAG_MODE
 *
 * This function is responsible for placing a port into diagnostic loopback
 * mode in order to perform a diagnostic loopback test.
 * All new scsi requests are blocked, a small delay is used to allow the
 * scsi requests to complete, then the link is brought down. Once the link
 * is placed in loopback mode, scsi requests are again allowed
 * so the scsi mid-layer doesn't give up on the port.
 * All of this is done in-line.
 */
static int
lpfc_bsg_diag_mode(struct fc_bsg_job *job)
{
	struct Scsi_Host *shost = job->shost;
	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct diag_mode_set *loopback_mode;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring = &psli->ring[LPFC_FCP_RING];
	uint32_t link_flags;
	uint32_t timeout;
	struct lpfc_vport **vports;
	LPFC_MBOXQ_t *pmboxq;
	int mbxstatus;
	int i = 0;
	int rc = 0;

	/* no data to return just the return code */
	job->reply->reply_payload_rcv_len = 0;

	if (job->request_len <
	    sizeof(struct fc_bsg_request) + sizeof(struct diag_mode_set)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2738 Received DIAG MODE request below minimum "
				"size\n");
		rc = -EINVAL;
		goto job_error;
	}

	loopback_mode = (struct diag_mode_set *)
		job->request->rqst_data.h_vendor.vendor_cmd;
	link_flags = loopback_mode->type;
	timeout = loopback_mode->timeout;

	if ((phba->link_state == LPFC_HBA_ERROR) ||
	    (psli->sli_flag & LPFC_BLOCK_MGMT_IO) ||
	    (!(psli->sli_flag & LPFC_SLI_ACTIVE))) {
		rc = -EACCES;
		goto job_error;
	}

	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmboxq) {
		rc = -ENOMEM;
		goto job_error;
	}

	vports = lpfc_create_vport_work_array(phba);
	if (vports) {
		for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			scsi_block_requests(shost);
		}
		lpfc_destroy_vport_work_array(phba, vports);
	} else {
		shost = lpfc_shost_from_vport(phba->pport);
		scsi_block_requests(shost);
	}
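
	/*
	 * With new requests blocked, give commands already handed to the
	 * FCP ring up to roughly five seconds to complete (500 polls of
	 * the txcmplq at 10 ms each) before the link is taken down.
	 */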
	while (pring->txcmplq_cnt) {
		if (i++ > 500)	/* wait up to 5 seconds */
			break;
		msleep(10);
	}

	memset((void *)pmboxq, 0, sizeof(LPFC_MBOXQ_t));
	pmboxq->u.mb.mbxCommand = MBX_DOWN_LINK;
	pmboxq->u.mb.mbxOwner = OWN_HOST;

	mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);

	if ((mbxstatus == MBX_SUCCESS) && (pmboxq->u.mb.mbxStatus == 0)) {
		/* wait for link down before proceeding */
		i = 0;
		while (phba->link_state != LPFC_LINK_DOWN) {
			if (i++ > timeout) {
				rc = -ETIMEDOUT;
				goto loopback_mode_exit;
			}
			msleep(10);
		}

		memset((void *)pmboxq, 0, sizeof(LPFC_MBOXQ_t));
		if (link_flags == INTERNAL_LOOP_BACK)
			pmboxq->u.mb.un.varInitLnk.link_flags = FLAGS_LOCAL_LB;
		else
			pmboxq->u.mb.un.varInitLnk.link_flags =
				FLAGS_TOPOLOGY_MODE_LOOP;

		pmboxq->u.mb.mbxCommand = MBX_INIT_LINK;
		pmboxq->u.mb.mbxOwner = OWN_HOST;

		mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq,
						     LPFC_MBOX_TMO);

		if ((mbxstatus != MBX_SUCCESS) || (pmboxq->u.mb.mbxStatus))
			rc = -ENODEV;
		else {
			phba->link_flag |= LS_LOOPBACK_MODE;
			/* wait for the link attention interrupt */
			msleep(100);

			i = 0;
			while (phba->link_state != LPFC_HBA_READY) {
				if (i++ > timeout) {
					rc = -ETIMEDOUT;
					break;
				}
				msleep(10);
			}
		}
	} else
		rc = -ENODEV;

loopback_mode_exit:
	vports = lpfc_create_vport_work_array(phba);
	if (vports) {
		for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
			shost = lpfc_shost_from_vport(vports[i]);
			scsi_unblock_requests(shost);
		}
		lpfc_destroy_vport_work_array(phba, vports);
	} else {
		shost = lpfc_shost_from_vport(phba->pport);
		scsi_unblock_requests(shost);
	}

	/*
	 * Let SLI layer release mboxq if mbox command completed after timeout.
	 */
	if (mbxstatus != MBX_TIMEOUT)
		mempool_free(pmboxq, phba->mbox_mem_pool);

job_error:
	/* make error code available to userspace */
	job->reply->result = rc;
	/* complete the job back to userspace if no error */
	if (rc == 0)
		job->job_done(job);
	return rc;
}

/**
 * lpfcdiag_loop_self_reg - obtains a remote port login id
 * @phba: Pointer to HBA context object
 * @rpi: Pointer to a remote port login id
 *
 * This function obtains a remote port login id so the diag loopback test
 * can send and receive its own unsolicited CT command.
 **/
static int lpfcdiag_loop_self_reg(struct lpfc_hba *phba, uint16_t *rpi)
{
	LPFC_MBOXQ_t *mbox;
	struct lpfc_dmabuf *dmabuff;
	int status;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return ENOMEM;
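
	/*
	 * Register the port's own DID with its own service parameters;
	 * the resulting rpi lets the loopback test address CT frames
	 * to itself on the looped-back link.
	 */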
	status = lpfc_reg_rpi(phba, 0, phba->pport->fc_myDID,
			      (uint8_t *)&phba->pport->fc_sparam, mbox, 0);
	if (status) {
		mempool_free(mbox, phba->mbox_mem_pool);
		return ENOMEM;
	}

	dmabuff = (struct lpfc_dmabuf *) mbox->context1;
	mbox->context1 = NULL;
	status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);

	if ((status != MBX_SUCCESS) || (mbox->u.mb.mbxStatus)) {
		lpfc_mbuf_free(phba, dmabuff->virt, dmabuff->phys);
		kfree(dmabuff);
		if (status != MBX_TIMEOUT)
			mempool_free(mbox, phba->mbox_mem_pool);
		return ENODEV;
	}

	*rpi = mbox->u.mb.un.varWords[0];

	lpfc_mbuf_free(phba, dmabuff->virt, dmabuff->phys);
	kfree(dmabuff);
	mempool_free(mbox, phba->mbox_mem_pool);
	return 0;
}

/**
 * lpfcdiag_loop_self_unreg - unregs from the rpi
 * @phba: Pointer to HBA context object
 * @rpi: Remote port login id
 *
 * This function unregisters the rpi obtained in lpfcdiag_loop_self_reg
 **/
static int lpfcdiag_loop_self_unreg(struct lpfc_hba *phba, uint16_t rpi)
{
	LPFC_MBOXQ_t *mbox;
	int status;

	/* Allocate mboxq structure */
	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (mbox == NULL)
		return ENOMEM;

	lpfc_unreg_login(phba, 0, rpi, mbox);
	status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);

	if ((status != MBX_SUCCESS) || (mbox->u.mb.mbxStatus)) {
		if (status != MBX_TIMEOUT)
			mempool_free(mbox, phba->mbox_mem_pool);
		return EIO;
	}

	mempool_free(mbox, phba->mbox_mem_pool);
	return 0;
}

/**
 * lpfcdiag_loop_get_xri - obtains the transmit and receive ids
 * @phba: Pointer to HBA context object
 * @rpi: Remote port login id
 * @txxri: Pointer to transmit exchange id
 * @rxxri: Pointer to response exchange id
 *
 * This function obtains the transmit and receive ids required to send
 * an unsolicited ct command with a payload. A special lpfc FsType and CmdRsp
 * are used so the unsolicited response handler is able to process
 * the ct command sent on the same port.
 **/
static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi,
				 uint16_t *txxri, uint16_t *rxxri)
{
	struct lpfc_bsg_event *evt;
	struct lpfc_iocbq *cmdiocbq, *rspiocbq;
	IOCB_t *cmd, *rsp;
	struct lpfc_dmabuf *dmabuf;
	struct ulp_bde64 *bpl = NULL;
	struct lpfc_sli_ct_request *ctreq = NULL;
	int ret_val = 0;
	unsigned long flags;

	*txxri = 0;
	*rxxri = 0;
	evt = lpfc_bsg_event_new(FC_REG_CT_EVENT, current->pid,
				 SLI_CT_ELX_LOOPBACK);
	if (!evt)
		return ENOMEM;

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	list_add(&evt->node, &phba->ct_ev_waiters);
	lpfc_bsg_event_ref(evt);
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	cmdiocbq = lpfc_sli_get_iocbq(phba);
	rspiocbq = lpfc_sli_get_iocbq(phba);

	dmabuf = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (dmabuf) {
		dmabuf->virt = lpfc_mbuf_alloc(phba, 0, &dmabuf->phys);
		if (dmabuf->virt) {
			INIT_LIST_HEAD(&dmabuf->list);
			bpl = (struct ulp_bde64 *) dmabuf->virt;
			memset(bpl, 0, sizeof(*bpl));
			ctreq = (struct lpfc_sli_ct_request *)(bpl + 1);
			bpl->addrHigh = le32_to_cpu(
				putPaddrHigh(dmabuf->phys + sizeof(*bpl)));
			bpl->addrLow = le32_to_cpu(
				putPaddrLow(dmabuf->phys + sizeof(*bpl)));
			bpl->tus.f.bdeFlags = 0;
			bpl->tus.f.bdeSize = ELX_LOOPBACK_HEADER_SZ;
			bpl->tus.w = le32_to_cpu(bpl->tus.w);
		}
	}

	if (cmdiocbq == NULL || rspiocbq == NULL ||
	    dmabuf == NULL || bpl == NULL || ctreq == NULL) {
		ret_val = ENOMEM;
		goto err_get_xri_exit;
	}
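
	/*
	 * Build an ELX_LOOPBACK_XRI_SETUP CT request addressed to our
	 * own rpi. The transmit xri comes back in the response iocb;
	 * the receive xri is reported by the unsolicited-CT event
	 * handler, which recognizes the SLI_CT_ELX_LOOPBACK FsType.
	 */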
	cmd = &cmdiocbq->iocb;
	rsp = &rspiocbq->iocb;

	memset(ctreq, 0, ELX_LOOPBACK_HEADER_SZ);

	ctreq->RevisionId.bits.Revision = SLI_CT_REVISION;
	ctreq->RevisionId.bits.InId = 0;
	ctreq->FsType = SLI_CT_ELX_LOOPBACK;
	ctreq->FsSubType = 0;
	ctreq->CommandResponse.bits.CmdRsp = ELX_LOOPBACK_XRI_SETUP;
	ctreq->CommandResponse.bits.Size = 0;

	cmd->un.xseq64.bdl.addrHigh = putPaddrHigh(dmabuf->phys);
	cmd->un.xseq64.bdl.addrLow = putPaddrLow(dmabuf->phys);
	cmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
	cmd->un.xseq64.bdl.bdeSize = sizeof(*bpl);

	cmd->un.xseq64.w5.hcsw.Fctl = LA;
	cmd->un.xseq64.w5.hcsw.Dfctl = 0;
	cmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
	cmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;

	cmd->ulpCommand = CMD_XMIT_SEQUENCE64_CR;
	cmd->ulpBdeCount = 1;
	cmd->ulpLe = 1;
	cmd->ulpClass = CLASS3;
	cmd->ulpContext = rpi;

	cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
	cmdiocbq->vport = phba->pport;

	ret_val = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
					   rspiocbq,
					   (phba->fc_ratov * 2)
					   + LPFC_DRVR_TIMEOUT);
	if (ret_val)
		goto err_get_xri_exit;

	*txxri = rsp->ulpContext;

	evt->waiting = 1;
	evt->wait_time_stamp = jiffies;
	ret_val = wait_event_interruptible_timeout(
		evt->wq, !list_empty(&evt->events_to_see),
		((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT) * HZ);
	if (list_empty(&evt->events_to_see))
		ret_val = (ret_val) ? EINTR : ETIMEDOUT;
	else {
		ret_val = IOCB_SUCCESS;
		spin_lock_irqsave(&phba->ct_ev_lock, flags);
		list_move(evt->events_to_see.prev, &evt->events_to_get);
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		*rxxri = (list_entry(evt->events_to_get.prev,
				     typeof(struct event_data),
				     node))->immed_dat;
	}
	evt->waiting = 0;

err_get_xri_exit:
	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	lpfc_bsg_event_unref(evt); /* release ref */
	lpfc_bsg_event_unref(evt); /* delete */
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	if (dmabuf) {
		if (dmabuf->virt)
			lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
		kfree(dmabuf);
	}

	if (cmdiocbq && (ret_val != IOCB_TIMEDOUT))
		lpfc_sli_release_iocbq(phba, cmdiocbq);
	if (rspiocbq)
		lpfc_sli_release_iocbq(phba, rspiocbq);
	return ret_val;
}

/**
 * diag_cmd_data_alloc - fills in a bde struct with dma buffers
 * @phba: Pointer to HBA context object
 * @bpl: Pointer to 64 bit bde structure
 * @size: Number of bytes to process
 * @nocopydata: When set, the buffers already hold caller data and are
 * not zeroed
 *
 * This function allocates page size buffers and populates an lpfc_dmabufext.
 * Unless @nocopydata is set, each buffer is zeroed before use. The chained
 * list of page size buffers is returned.
 **/
static struct lpfc_dmabufext *
diag_cmd_data_alloc(struct lpfc_hba *phba,
		    struct ulp_bde64 *bpl, uint32_t size,
		    int nocopydata)
{
	struct lpfc_dmabufext *mlist = NULL;
	struct lpfc_dmabufext *dmp;
	int cnt, offset = 0, i = 0;
	struct pci_dev *pcidev;

	pcidev = phba->pcidev;

	while (size) {
		/* We get chunks of 4K */
		if (size > BUF_SZ_4K)
			cnt = BUF_SZ_4K;
		else
			cnt = size;

		/* allocate struct lpfc_dmabufext buffer header */
		dmp = kmalloc(sizeof(struct lpfc_dmabufext), GFP_KERNEL);
		if (!dmp)
			goto out;

		INIT_LIST_HEAD(&dmp->dma.list);

		/* Queue it to a linked list */
		if (mlist)
			list_add_tail(&dmp->dma.list, &mlist->dma.list);
		else
			mlist = dmp;

		/* allocate buffer */
		dmp->dma.virt = dma_alloc_coherent(&pcidev->dev,
						   cnt,
						   &(dmp->dma.phys),
						   GFP_KERNEL);
		if (!dmp->dma.virt)
			goto out;

		dmp->size = cnt;

		if (nocopydata) {
			bpl->tus.f.bdeFlags = 0;
			pci_dma_sync_single_for_device(phba->pcidev,
				dmp->dma.phys, LPFC_BPL_SIZE, PCI_DMA_TODEVICE);
		} else {
			memset((uint8_t *)dmp->dma.virt, 0, cnt);
			bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
		}

		/* build buffer ptr list for IOCB */
		bpl->addrLow = le32_to_cpu(putPaddrLow(dmp->dma.phys));
		bpl->addrHigh = le32_to_cpu(putPaddrHigh(dmp->dma.phys));
		bpl->tus.f.bdeSize = (ushort) cnt;
		bpl->tus.w = le32_to_cpu(bpl->tus.w);
		bpl++;
		i++;
		offset += cnt;
		size -= cnt;
	}
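
	/*
	 * Record how many BDEs were built; callers read this back from
	 * the first element's flag field as the BDE count for the iocb.
	 */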
	mlist->flag = i;
	return mlist;
out:
	diag_cmd_data_free(phba, mlist);
	return NULL;
}

/**
 * lpfcdiag_loop_post_rxbufs - post the receive buffers for an unsol CT cmd
 * @phba: Pointer to HBA context object
 * @rxxri: Receive exchange id
 * @len: Number of data bytes
 *
 * This function allocates and posts a data buffer of sufficient size to
 * receive an unsolicited CT command.
 **/
static int lpfcdiag_loop_post_rxbufs(struct lpfc_hba *phba, uint16_t rxxri,
				     size_t len)
{
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
	struct lpfc_iocbq *cmdiocbq;
	IOCB_t *cmd = NULL;
	struct list_head head, *curr, *next;
	struct lpfc_dmabuf *rxbmp;
	struct lpfc_dmabuf *dmp;
	struct lpfc_dmabuf *mp[2] = {NULL, NULL};
	struct ulp_bde64 *rxbpl = NULL;
	uint32_t num_bde;
	struct lpfc_dmabufext *rxbuffer = NULL;
	int ret_val = 0;
	int i = 0;

	cmdiocbq = lpfc_sli_get_iocbq(phba);
	rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (rxbmp != NULL) {
		rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
		INIT_LIST_HEAD(&rxbmp->list);
		rxbpl = (struct ulp_bde64 *) rxbmp->virt;
		if (rxbpl)
			rxbuffer = diag_cmd_data_alloc(phba, rxbpl, len, 0);
	}

	if (!cmdiocbq || !rxbmp || !rxbpl || !rxbuffer) {
		ret_val = ENOMEM;
		goto err_post_rxbufs_exit;
	}

	/* Queue buffers for the receive exchange */
	num_bde = (uint32_t)rxbuffer->flag;
	dmp = &rxbuffer->dma;

	cmd = &cmdiocbq->iocb;
	i = 0;

	INIT_LIST_HEAD(&head);
	list_add_tail(&head, &dmp->list);
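
	/*
	 * Walk the chained DMA buffers and post them on the ELS ring for
	 * the receive exchange. Without HBQs, a QUE_XRI_BUF64 iocb carries
	 * at most two buffer descriptors, so buffers are posted in pairs
	 * and a fresh iocb is fetched for each batch.
	 */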
	list_for_each_safe(curr, next, &head) {
		mp[i] = list_entry(curr, struct lpfc_dmabuf, list);
		list_del(curr);

		if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
			mp[i]->buffer_tag = lpfc_sli_get_buffer_tag(phba);
			cmd->un.quexri64cx.buff.bde.addrHigh =
				putPaddrHigh(mp[i]->phys);
			cmd->un.quexri64cx.buff.bde.addrLow =
				putPaddrLow(mp[i]->phys);
			cmd->un.quexri64cx.buff.bde.tus.f.bdeSize =
				((struct lpfc_dmabufext *)mp[i])->size;
			cmd->un.quexri64cx.buff.buffer_tag = mp[i]->buffer_tag;
			cmd->ulpCommand = CMD_QUE_XRI64_CX;
			cmd->ulpPU = 0;
			cmd->ulpLe = 1;
			cmd->ulpBdeCount = 1;
			cmd->unsli3.que_xri64cx_ext_words.ebde_count = 0;
		} else {
			cmd->un.cont64[i].addrHigh = putPaddrHigh(mp[i]->phys);
			cmd->un.cont64[i].addrLow = putPaddrLow(mp[i]->phys);
			cmd->un.cont64[i].tus.f.bdeSize =
				((struct lpfc_dmabufext *)mp[i])->size;
			cmd->ulpBdeCount = ++i;

			if ((--num_bde > 0) && (i < 2))
				continue;

			cmd->ulpCommand = CMD_QUE_XRI_BUF64_CX;
			cmd->ulpLe = 1;
		}

		cmd->ulpClass = CLASS3;
		cmd->ulpContext = rxxri;

		ret_val = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);

		if (ret_val == IOCB_ERROR) {
			diag_cmd_data_free(phba,
					   (struct lpfc_dmabufext *)mp[0]);
			if (mp[1])
				diag_cmd_data_free(phba,
					   (struct lpfc_dmabufext *)mp[1]);
			dmp = list_entry(next, struct lpfc_dmabuf, list);
			ret_val = EIO;
			goto err_post_rxbufs_exit;
		}

		lpfc_sli_ringpostbuf_put(phba, pring, mp[0]);
		if (mp[1]) {
			lpfc_sli_ringpostbuf_put(phba, pring, mp[1]);
			mp[1] = NULL;
		}

		/* The iocb was freed by lpfc_sli_issue_iocb */
		cmdiocbq = lpfc_sli_get_iocbq(phba);
		if (!cmdiocbq) {
			dmp = list_entry(next, struct lpfc_dmabuf, list);
			ret_val = EIO;
			goto err_post_rxbufs_exit;
		}

		cmd = &cmdiocbq->iocb;
		i = 0;
	}
	list_del(&head);

err_post_rxbufs_exit:

	if (rxbmp) {
		if (rxbmp->virt)
			lpfc_mbuf_free(phba, rxbmp->virt, rxbmp->phys);
		kfree(rxbmp);
	}

	if (cmdiocbq)
		lpfc_sli_release_iocbq(phba, cmdiocbq);
	return ret_val;
}

/**
 * lpfc_bsg_diag_test - with a port in loopback issues a Ct cmd to itself
 * @job: LPFC_BSG_VENDOR_DIAG_TEST fc_bsg_job
 *
 * This function receives a user data buffer to be transmitted and received on
 * the same port, the link must be up and in loopback mode prior
 * to being called.
 * 1. A kernel buffer is allocated to copy the user data into.
 * 2. The port registers with "itself".
 * 3. The transmit and receive exchange ids are obtained.
 * 4. The receive exchange id is posted.
 * 5. A new els loopback event is created.
 * 6. The command and response iocbs are allocated.
 * 7. The cmd iocb FsType is set to elx loopback and the CmdRsp to loopback.
 *
 * This function is meant to be called n times while the port is in loopback
 * so it is the app's responsibility to issue a reset to take the port out
 * of loopback mode.
 **/
static int
lpfc_bsg_diag_test(struct fc_bsg_job *job)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct diag_mode_test *diag_mode;
	struct lpfc_bsg_event *evt;
	struct event_data *evdat;
	struct lpfc_sli *psli = &phba->sli;
	uint32_t size;
	uint32_t full_size;
	size_t segment_len = 0, segment_offset = 0, current_offset = 0;
	uint16_t rpi;
	struct lpfc_iocbq *cmdiocbq, *rspiocbq;
	IOCB_t *cmd, *rsp;
	struct lpfc_sli_ct_request *ctreq;
	struct lpfc_dmabuf *txbmp;
	struct ulp_bde64 *txbpl = NULL;
	struct lpfc_dmabufext *txbuffer = NULL;
	struct list_head head;
	struct lpfc_dmabuf *curr;
	uint16_t txxri, rxxri;
	uint32_t num_bde;
	uint8_t *ptr = NULL, *rx_databuf = NULL;
	int rc = 0;
	unsigned long flags;
	void *dataout = NULL;
	uint32_t total_mem;

	/* in case no data is returned return just the return code */
	job->reply->reply_payload_rcv_len = 0;

	if (job->request_len <
	    sizeof(struct fc_bsg_request) + sizeof(struct diag_mode_test)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2739 Received DIAG TEST request below minimum "
				"size\n");
		rc = -EINVAL;
		goto loopback_test_exit;
	}

	if (job->request_payload.payload_len !=
	    job->reply_payload.payload_len) {
		rc = -EINVAL;
		goto loopback_test_exit;
	}

	diag_mode = (struct diag_mode_test *)
		job->request->rqst_data.h_vendor.vendor_cmd;

	if ((phba->link_state == LPFC_HBA_ERROR) ||
	    (psli->sli_flag & LPFC_BLOCK_MGMT_IO) ||
	    (!(psli->sli_flag & LPFC_SLI_ACTIVE))) {
		rc = -EACCES;
		goto loopback_test_exit;
	}

	if (!lpfc_is_link_up(phba) || !(phba->link_flag & LS_LOOPBACK_MODE)) {
		rc = -EACCES;
		goto loopback_test_exit;
	}

	size = job->request_payload.payload_len;
	full_size = size + ELX_LOOPBACK_HEADER_SZ; /* plus the header */

	if ((size == 0) || (size > 80 * BUF_SZ_4K)) {
		rc = -ERANGE;
		goto loopback_test_exit;
	}

	if (size >= BUF_SZ_4K) {
		/*
		 * Allocate memory for ioctl data. If buffer is bigger than 64k,
		 * then we allocate 64k and re-use that buffer over and over to
		 * xfer the whole block. This is because Linux kernel has a
		 * problem allocating more than 120k of kernel space memory. Saw
		 * problem with GET_FCPTARGETMAPPING...
		 */
		if (size <= (64 * 1024))
			total_mem = size;
		else
			total_mem = 64 * 1024;
	} else
		/* Allocate memory for ioctl data */
		total_mem = BUF_SZ_4K;

	dataout = kmalloc(total_mem, GFP_KERNEL);
	if (dataout == NULL) {
		rc = -ENOMEM;
		goto loopback_test_exit;
	}

	ptr = dataout;
	ptr += ELX_LOOPBACK_HEADER_SZ;
	sg_copy_to_buffer(job->request_payload.sg_list,
			  job->request_payload.sg_cnt,
			  ptr, size);
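
	/*
	 * Loopback prologue: register an rpi against our own DID, obtain
	 * the transmit/receive exchange ids, and post receive buffers
	 * large enough for the header plus payload before transmitting.
	 */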
	rc = lpfcdiag_loop_self_reg(phba, &rpi);
	if (rc) {
		rc = -ENOMEM;
		goto loopback_test_exit;
	}

	rc = lpfcdiag_loop_get_xri(phba, rpi, &txxri, &rxxri);
	if (rc) {
		lpfcdiag_loop_self_unreg(phba, rpi);
		rc = -ENOMEM;
		goto loopback_test_exit;
	}

	rc = lpfcdiag_loop_post_rxbufs(phba, rxxri, full_size);
	if (rc) {
		lpfcdiag_loop_self_unreg(phba, rpi);
		rc = -ENOMEM;
		goto loopback_test_exit;
	}

	evt = lpfc_bsg_event_new(FC_REG_CT_EVENT, current->pid,
				 SLI_CT_ELX_LOOPBACK);
	if (!evt) {
		lpfcdiag_loop_self_unreg(phba, rpi);
		rc = -ENOMEM;
		goto loopback_test_exit;
	}

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	list_add(&evt->node, &phba->ct_ev_waiters);
	lpfc_bsg_event_ref(evt);
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	cmdiocbq = lpfc_sli_get_iocbq(phba);
	rspiocbq = lpfc_sli_get_iocbq(phba);
	txbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);

	if (txbmp) {
		txbmp->virt = lpfc_mbuf_alloc(phba, 0, &txbmp->phys);
		INIT_LIST_HEAD(&txbmp->list);
		txbpl = (struct ulp_bde64 *) txbmp->virt;
		if (txbpl)
			txbuffer = diag_cmd_data_alloc(phba,
						       txbpl, full_size, 0);
	}

	if (!cmdiocbq || !rspiocbq || !txbmp || !txbpl || !txbuffer) {
		rc = -ENOMEM;
		goto err_loopback_test_exit;
	}

	cmd = &cmdiocbq->iocb;
	rsp = &rspiocbq->iocb;

	INIT_LIST_HEAD(&head);
	list_add_tail(&head, &txbuffer->dma.list);
	list_for_each_entry(curr, &head, list) {
		segment_len = ((struct lpfc_dmabufext *)curr)->size;
		if (current_offset == 0) {
			ctreq = curr->virt;
			memset(ctreq, 0, ELX_LOOPBACK_HEADER_SZ);
			ctreq->RevisionId.bits.Revision = SLI_CT_REVISION;
			ctreq->RevisionId.bits.InId = 0;
			ctreq->FsType = SLI_CT_ELX_LOOPBACK;
			ctreq->FsSubType = 0;
			ctreq->CommandResponse.bits.CmdRsp = ELX_LOOPBACK_DATA;
			ctreq->CommandResponse.bits.Size = size;
			segment_offset = ELX_LOOPBACK_HEADER_SZ;
		} else
			segment_offset = 0;

		BUG_ON(segment_offset >= segment_len);
		memcpy(curr->virt + segment_offset,
		       ptr + current_offset,
		       segment_len - segment_offset);

		current_offset += segment_len - segment_offset;
		BUG_ON(current_offset > size);
	}
	list_del(&head);

	/* Build the XMIT_SEQUENCE iocb */
	num_bde = (uint32_t)txbuffer->flag;

	cmd->un.xseq64.bdl.addrHigh = putPaddrHigh(txbmp->phys);
	cmd->un.xseq64.bdl.addrLow = putPaddrLow(txbmp->phys);
	cmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
	cmd->un.xseq64.bdl.bdeSize = (num_bde * sizeof(struct ulp_bde64));

	cmd->un.xseq64.w5.hcsw.Fctl = (LS | LA);
	cmd->un.xseq64.w5.hcsw.Dfctl = 0;
	cmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
	cmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;

	cmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX;
	cmd->ulpBdeCount = 1;
	cmd->ulpLe = 1;
	cmd->ulpClass = CLASS3;
	cmd->ulpContext = txxri;

	cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
	cmdiocbq->vport = phba->pport;

	rc = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq, rspiocbq,
				      (phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT);

	if ((rc != IOCB_SUCCESS) || (rsp->ulpStatus != IOCB_SUCCESS)) {
		rc = -EIO;
		goto err_loopback_test_exit;
	}

	evt->waiting = 1;
	rc = wait_event_interruptible_timeout(
		evt->wq, !list_empty(&evt->events_to_see),
		((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT) * HZ);
	evt->waiting = 0;
	if (list_empty(&evt->events_to_see))
		rc = (rc) ? -EINTR : -ETIMEDOUT;
	else {
		spin_lock_irqsave(&phba->ct_ev_lock, flags);
		list_move(evt->events_to_see.prev, &evt->events_to_get);
		evdat = list_entry(evt->events_to_get.prev,
				   typeof(*evdat), node);
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		rx_databuf = evdat->data;
		if (evdat->len != full_size) {
			lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
				"1603 Loopback test did not receive expected "
				"data length. actual length 0x%x expected "
				"length 0x%x\n",
				evdat->len, full_size);
			rc = -EIO;
		} else if (rx_databuf == NULL)
			rc = -EIO;
		else {
			rc = IOCB_SUCCESS;
			/* skip over elx loopback header */
			rx_databuf += ELX_LOOPBACK_HEADER_SZ;
			job->reply->reply_payload_rcv_len =
				sg_copy_from_buffer(job->reply_payload.sg_list,
						    job->reply_payload.sg_cnt,
						    rx_databuf, size);
			job->reply->reply_payload_rcv_len = size;
		}
	}

err_loopback_test_exit:
	lpfcdiag_loop_self_unreg(phba, rpi);

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	lpfc_bsg_event_unref(evt); /* release ref */
	lpfc_bsg_event_unref(evt); /* delete */
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	if (cmdiocbq != NULL)
		lpfc_sli_release_iocbq(phba, cmdiocbq);

	if (rspiocbq != NULL)
		lpfc_sli_release_iocbq(phba, rspiocbq);

	if (txbmp != NULL) {
		if (txbpl != NULL) {
			if (txbuffer != NULL)
				diag_cmd_data_free(phba, txbuffer);
			lpfc_mbuf_free(phba, txbmp->virt, txbmp->phys);
		}
		kfree(txbmp);
	}

loopback_test_exit:
	kfree(dataout);
	/* make error code available to userspace */
	job->reply->result = rc;
	job->dd_data = NULL;
	/* complete the job back to userspace if no error */
	if (rc == 0)
		job->job_done(job);
	return rc;
}

/**
 * lpfc_bsg_get_dfc_rev - process a GET_DFC_REV bsg vendor command
 * @job: GET_DFC_REV fc_bsg_job
 **/
static int
lpfc_bsg_get_dfc_rev(struct fc_bsg_job *job)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct get_mgmt_rev *event_req;
	struct get_mgmt_rev_reply *event_reply;
	int rc = 0;

	if (job->request_len <
	    sizeof(struct fc_bsg_request) + sizeof(struct get_mgmt_rev)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2740 Received GET_DFC_REV request below "
				"minimum size\n");
		rc = -EINVAL;
		goto job_error;
	}

	event_req = (struct get_mgmt_rev *)
		job->request->rqst_data.h_vendor.vendor_cmd;

	event_reply = (struct get_mgmt_rev_reply *)
		job->reply->reply_data.vendor_reply.vendor_rsp;

	if (job->reply_len <
	    sizeof(struct fc_bsg_request) + sizeof(struct get_mgmt_rev_reply)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2741 Received GET_DFC_REV reply below "
				"minimum size\n");
		rc = -EINVAL;
		goto job_error;
	}

	event_reply->info.a_Major = MANAGEMENT_MAJOR_REV;
	event_reply->info.a_Minor = MANAGEMENT_MINOR_REV;
job_error:
	job->reply->result = rc;
	if (rc == 0)
		job->job_done(job);
	return rc;
}

/**
 * lpfc_bsg_wake_mbox_wait - lpfc_bsg_issue_mbox mbox completion handler
 * @phba: Pointer to HBA context object.
 * @pmboxq: Pointer to mailbox command.
 *
 * This is the completion handler function for mailbox commands issued from
 * the lpfc_bsg_issue_mbox function. It is called by the mailbox event
 * handler function with no lock held. It copies the mailbox response into
 * the reply payload and completes the bsg job tracked by context1 of the
 * mailbox.
 **/
void
lpfc_bsg_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	struct bsg_job_data *dd_data;
	struct fc_bsg_job *job;
	uint32_t size;
	unsigned long flags;
	uint8_t *to;
	uint8_t *from;

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	dd_data = pmboxq->context1;
	/* job already timed out? */
	if (!dd_data) {
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		return;
	}
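
	/*
	 * ct_ev_lock serializes this completion against the bsg timeout
	 * handler: a NULL context1 above means the job has already timed
	 * out and its tracking structure must not be touched here.
	 */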
	/* build the outgoing buffer to do an sg copy
	 * the format is the response mailbox followed by any extended
	 * mailbox data
	 */
	from = (uint8_t *)&pmboxq->u.mb;
	to = (uint8_t *)dd_data->context_un.mbox.mb;
	memcpy(to, from, sizeof(MAILBOX_t));
	/* copy the extended data if any, count is in words */
	if (dd_data->context_un.mbox.outWxtWLen) {
		from = (uint8_t *)dd_data->context_un.mbox.ext;
		to += sizeof(MAILBOX_t);
		memcpy(to, from,
		       dd_data->context_un.mbox.outWxtWLen * sizeof(uint32_t));
	}

	from = (uint8_t *)dd_data->context_un.mbox.mb;
	job = dd_data->context_un.mbox.set_job;
	size = job->reply_payload.payload_len;
	job->reply->reply_payload_rcv_len =
		sg_copy_from_buffer(job->reply_payload.sg_list,
				    job->reply_payload.sg_cnt,
				    from, size);
	job->reply->result = 0;

	dd_data->context_un.mbox.set_job = NULL;
	job->dd_data = NULL;
	job->job_done(job);
	/* need to hold the lock until we call job done to hold off
	 * the timeout handler returning to the midlayer while
	 * we are still processing the job
	 */
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);

	kfree(dd_data->context_un.mbox.mb);
	mempool_free(dd_data->context_un.mbox.pmboxq, phba->mbox_mem_pool);
	kfree(dd_data->context_un.mbox.ext);
	if (dd_data->context_un.mbox.dmp) {
		dma_free_coherent(&phba->pcidev->dev,
				  dd_data->context_un.mbox.dmp->size,
				  dd_data->context_un.mbox.dmp->dma.virt,
				  dd_data->context_un.mbox.dmp->dma.phys);
		kfree(dd_data->context_un.mbox.dmp);
	}
	if (dd_data->context_un.mbox.rxbmp) {
		lpfc_mbuf_free(phba, dd_data->context_un.mbox.rxbmp->virt,
			       dd_data->context_un.mbox.rxbmp->phys);
		kfree(dd_data->context_un.mbox.rxbmp);
	}
	kfree(dd_data);
	return;
}

/**
 * lpfc_bsg_check_cmd_access - test for a supported mailbox command
 * @phba: Pointer to HBA context object.
 * @mb: Pointer to a mailbox object.
 * @vport: Pointer to a vport object.
 *
 * Some commands require the port to be offline, some may not be called from
 * the application.
 **/
static int lpfc_bsg_check_cmd_access(struct lpfc_hba *phba,
				     MAILBOX_t *mb, struct lpfc_vport *vport)
{
	/* return negative error values for bsg job */
	switch (mb->mbxCommand) {
	/* Offline only */
	case MBX_INIT_LINK:
	case MBX_DOWN_LINK:
	case MBX_CONFIG_LINK:
	case MBX_CONFIG_RING:
	case MBX_RESET_RING:
	case MBX_UNREG_LOGIN:
	case MBX_CLEAR_LA:
	case MBX_DUMP_CONTEXT:
	case MBX_RUN_DIAGS:
	case MBX_RESTART:
	case MBX_SET_MASK:
		if (!(vport->fc_flag & FC_OFFLINE_MODE)) {
			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
					"2743 Command 0x%x is illegal in on-line "
					"state\n",
					mb->mbxCommand);
			return -EPERM;
		}
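		/* else fall through to the commands allowed in any state */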
	case MBX_WRITE_NV:
	case MBX_WRITE_VPARMS:
	case MBX_LOAD_SM:
	case MBX_READ_NV:
	case MBX_READ_CONFIG:
	case MBX_READ_RCONFIG:
	case MBX_READ_STATUS:
	case MBX_READ_XRI:
	case MBX_READ_REV:
	case MBX_READ_LNK_STAT:
	case MBX_DUMP_MEMORY:
	case MBX_DOWN_LOAD:
	case MBX_UPDATE_CFG:
	case MBX_KILL_BOARD:
	case MBX_LOAD_AREA:
	case MBX_LOAD_EXP_ROM:
	case MBX_BEACON:
	case MBX_DEL_LD_ENTRY:
	case MBX_SET_DEBUG:
	case MBX_WRITE_WWN:
	case MBX_SLI4_CONFIG:
	case MBX_READ_EVENT_LOG_STATUS:
	case MBX_WRITE_EVENT_LOG:
	case MBX_PORT_CAPABILITIES:
	case MBX_PORT_IOV_CONTROL:
	case MBX_RUN_BIU_DIAG64:
		break;
	case MBX_SET_VARIABLE:
		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
			"1226 mbox: set_variable 0x%x, 0x%x\n",
			mb->un.varWords[0],
			mb->un.varWords[1]);
		if ((mb->un.varWords[0] == SETVAR_MLOMNT)
			&& (mb->un.varWords[1] == 1)) {
			phba->wait_4_mlo_maint_flg = 1;
		} else if (mb->un.varWords[0] == SETVAR_MLORST) {
			phba->link_flag &= ~LS_LOOPBACK_MODE;
			phba->fc_topology = TOPOLOGY_PT_PT;
		}
		break;
	case MBX_READ_EVENT_LOG:
	case MBX_READ_SPARM64:
	case MBX_READ_LA:
	case MBX_READ_LA64:
	case MBX_REG_LOGIN:
	case MBX_REG_LOGIN64:
	case MBX_CONFIG_PORT:
	case MBX_RUN_BIU_DIAG:
	default:
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2742 Unknown Command 0x%x\n",
				mb->mbxCommand);
		return -EPERM;
	}

	return 0; /* ok */
}

/**
 * lpfc_bsg_issue_mbox - issues a mailbox command on behalf of an app
 * @phba: Pointer to HBA context object.
 * @job: Pointer to the bsg job carrying the mailbox request.
 * @vport: Pointer to a vport object.
 *
 * Allocate a tracking object, mailbox command memory, get a mailbox
 * from the mailbox pool, copy the caller mailbox command.
 *
 * If offline and the sli is active we need to poll for the command (port is
 * being reset) and complete the job, otherwise issue the mailbox command and
 * let our completion handler finish the command.
 **/
static uint32_t
lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
		    struct lpfc_vport *vport)
{
	LPFC_MBOXQ_t *pmboxq = NULL; /* internal mailbox queue */
	MAILBOX_t *pmb; /* shortcut to the pmboxq mailbox */
	/* a 4k buffer to hold the mb and extended data from/to the bsg */
	MAILBOX_t *mb = NULL;
	struct bsg_job_data *dd_data = NULL; /* bsg data tracking structure */
	uint32_t size;
	struct lpfc_dmabuf *rxbmp = NULL; /* for biu diag */
	struct lpfc_dmabufext *dmp = NULL; /* for biu diag */
	struct ulp_bde64 *rxbpl = NULL;
	struct dfc_mbox_req *mbox_req = (struct dfc_mbox_req *)
		job->request->rqst_data.h_vendor.vendor_cmd;
	uint8_t *ext = NULL;
	int rc = 0;
	uint8_t *from;

	/* in case no data is transferred */
	job->reply->reply_payload_rcv_len = 0;

	/* check if requested extended data lengths are valid */
	if ((mbox_req->inExtWLen > MAILBOX_EXT_SIZE) ||
	    (mbox_req->outWxtWLen > MAILBOX_EXT_SIZE)) {
		rc = -ERANGE;
		goto job_done;
	}

	/* allocate our bsg tracking structure */
	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
	if (!dd_data) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2727 Failed allocation of dd_data\n");
		rc = -ENOMEM;
		goto job_done;
	}

	mb = kzalloc(BSG_MBOX_SIZE, GFP_KERNEL);
	if (!mb) {
		rc = -ENOMEM;
		goto job_done;
	}

	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmboxq) {
		rc = -ENOMEM;
		goto job_done;
	}
	memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));

	size = job->request_payload.payload_len;
	sg_copy_to_buffer(job->request_payload.sg_list,
			  job->request_payload.sg_cnt,
			  mb, size);

	rc = lpfc_bsg_check_cmd_access(phba, mb, vport);
	if (rc != 0)
		goto job_done; /* must be negative */

	pmb = &pmboxq->u.mb;
	memcpy(pmb, mb, sizeof(*pmb));
	pmb->mbxOwner = OWN_HOST;
	pmboxq->vport = vport;

	/* extended mailbox commands will need an extended buffer */
	if (mbox_req->inExtWLen || mbox_req->outWxtWLen) {
		ext = kzalloc(MAILBOX_EXT_SIZE, GFP_KERNEL);
		if (!ext) {
			rc = -ENOMEM;
			goto job_done;
		}

		/* any data for the device? */
		if (mbox_req->inExtWLen) {
			from = (uint8_t *)mb;
			from += sizeof(MAILBOX_t);
			memcpy((uint8_t *)ext, from,
			       mbox_req->inExtWLen * sizeof(uint32_t));
		}

		pmboxq->context2 = ext;
		pmboxq->in_ext_byte_len =
			mbox_req->inExtWLen * sizeof(uint32_t);
		pmboxq->out_ext_byte_len =
			mbox_req->outWxtWLen * sizeof(uint32_t);
		pmboxq->mbox_offset_word = mbox_req->mbOffset;
	}

	/* biu diag will need a kernel buffer to transfer the data
	 * allocate our own buffer and setup the mailbox command to
	 * use ours
	 */
	if (pmb->mbxCommand == MBX_RUN_BIU_DIAG64) {
		rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
		if (!rxbmp) {
			rc = -ENOMEM;
			goto job_done;
		}

		rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
		INIT_LIST_HEAD(&rxbmp->list);
		rxbpl = (struct ulp_bde64 *) rxbmp->virt;
		dmp = diag_cmd_data_alloc(phba, rxbpl, BSG_MBOX_SIZE, 0);
		if (!dmp) {
			rc = -ENOMEM;
			goto job_done;
		}

		INIT_LIST_HEAD(&dmp->dma.list);
		pmb->un.varBIUdiag.un.s2.xmit_bde64.addrHigh =
			putPaddrHigh(dmp->dma.phys);
		pmb->un.varBIUdiag.un.s2.xmit_bde64.addrLow =
			putPaddrLow(dmp->dma.phys);

		pmb->un.varBIUdiag.un.s2.rcv_bde64.addrHigh =
			putPaddrHigh(dmp->dma.phys +
				     pmb->un.varBIUdiag.un.s2.
				     xmit_bde64.tus.f.bdeSize);
		pmb->un.varBIUdiag.un.s2.rcv_bde64.addrLow =
			putPaddrLow(dmp->dma.phys +
				    pmb->un.varBIUdiag.un.s2.
				    xmit_bde64.tus.f.bdeSize);

		dd_data->context_un.mbox.rxbmp = rxbmp;
		dd_data->context_un.mbox.dmp = dmp;
	} else {
		dd_data->context_un.mbox.rxbmp = NULL;
		dd_data->context_un.mbox.dmp = NULL;
	}

	/* setup wake call as IOCB callback */
	pmboxq->mbox_cmpl = lpfc_bsg_wake_mbox_wait;

	/* setup context field to pass wait_queue pointer to wake function */
	pmboxq->context1 = dd_data;
	dd_data->type = TYPE_MBOX;
	dd_data->context_un.mbox.pmboxq = pmboxq;
	dd_data->context_un.mbox.mb = mb;
	dd_data->context_un.mbox.set_job = job;
	dd_data->context_un.mbox.ext = ext;
	dd_data->context_un.mbox.mbOffset = mbox_req->mbOffset;
	dd_data->context_un.mbox.inExtWLen = mbox_req->inExtWLen;
	dd_data->context_un.mbox.outWxtWLen = mbox_req->outWxtWLen;
	job->dd_data = dd_data;

	if ((vport->fc_flag & FC_OFFLINE_MODE) ||
	    (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE))) {
		rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			rc = (rc == MBX_TIMEOUT) ? -ETIME : -ENODEV;
			goto job_done;
		}

		/* job finished, copy the data */
		memcpy(mb, pmb, sizeof(*pmb));
		job->reply->reply_payload_rcv_len =
			sg_copy_from_buffer(job->reply_payload.sg_list,
					    job->reply_payload.sg_cnt,
					    mb, size);
		/* not waiting mbox already done */
		rc = 0;
		goto job_done;
	}
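
	/*
	 * Online path: issue without waiting. On MBX_SUCCESS or MBX_BUSY
	 * the command is queued and lpfc_bsg_wake_mbox_wait later copies
	 * the response and completes the job, so report "job started".
	 */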
	rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
	if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY))
		return 1; /* job started */

job_done:
	/* common exit for error or job completed inline */
	kfree(mb);
	if (pmboxq)
		mempool_free(pmboxq, phba->mbox_mem_pool);
	kfree(ext);
	if (dmp) {
		dma_free_coherent(&phba->pcidev->dev,
				  dmp->size, dmp->dma.virt,
				  dmp->dma.phys);
		kfree(dmp);
	}
	if (rxbmp) {
		lpfc_mbuf_free(phba, rxbmp->virt, rxbmp->phys);
		kfree(rxbmp);
	}
	kfree(dd_data);

	return rc;
}

/**
 * lpfc_bsg_mbox_cmd - process an fc bsg LPFC_BSG_VENDOR_MBOX command
 * @job: MBOX fc_bsg_job for LPFC_BSG_VENDOR_MBOX.
 **/
static int
lpfc_bsg_mbox_cmd(struct fc_bsg_job *job)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	int rc = 0;

	/* in case no data is transferred */
	job->reply->reply_payload_rcv_len = 0;
	if (job->request_len <
	    sizeof(struct fc_bsg_request) + sizeof(struct dfc_mbox_req)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2737 Received MBOX_REQ request below "
				"minimum size\n");
		rc = -EINVAL;
		goto job_error;
	}

	if (job->request_payload.payload_len != BSG_MBOX_SIZE) {
		rc = -EINVAL;
		goto job_error;
	}

	if (job->reply_payload.payload_len != BSG_MBOX_SIZE) {
		rc = -EINVAL;
		goto job_error;
	}

	if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) {
		rc = -EAGAIN;
		goto job_error;
	}

	rc = lpfc_bsg_issue_mbox(phba, job, vport);

job_error:
	if (rc == 0) {
		/* job done */
		job->reply->result = 0;
		job->dd_data = NULL;
		job->job_done(job);
	} else if (rc == 1)
		/* job submitted, will complete later */
		rc = 0; /* return zero, no error */
	else {
		/* some error occurred */
		job->reply->result = rc;
		job->dd_data = NULL;
	}

	return rc;
}

/**
 * lpfc_bsg_menlo_cmd_cmp - lpfc_menlo_cmd completion handler
 * @phba: Pointer to HBA context object.
 * @cmdiocbq: Pointer to command iocb.
 * @rspiocbq: Pointer to response iocb.
 *
 * This function is the completion handler for iocbs issued using
 * the lpfc_menlo_cmd function. This function is called by the
 * ring event handler function without any lock held. This function
 * can be called from both worker thread context and interrupt
 * context. This function also can be called from another thread which
 * cleans up the SLI layer objects.
 * This function copies the contents of the response iocb into the
 * response iocb memory object referenced by the command iocb, then
 * completes the bsg job back to the midlayer.
 **/
static void
lpfc_bsg_menlo_cmd_cmp(struct lpfc_hba *phba,
		       struct lpfc_iocbq *cmdiocbq,
		       struct lpfc_iocbq *rspiocbq)
{
	struct bsg_job_data *dd_data;
	struct fc_bsg_job *job;
	IOCB_t *rsp;
	struct lpfc_dmabuf *bmp;
	struct lpfc_bsg_menlo *menlo;
	unsigned long flags;
	unsigned long flags2; /* hbalock nests inside ct_ev_lock */
	struct menlo_response *menlo_resp;
	int rc = 0;

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	dd_data = cmdiocbq->context1;
	if (!dd_data) {
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		return;
	}

	menlo = &dd_data->context_un.menlo;
	job = menlo->set_job;
	job->dd_data = NULL; /* so timeout handler does not reply */

	spin_lock_irqsave(&phba->hbalock, flags2);
	cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
	if (cmdiocbq->context2 && rspiocbq)
		memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
		       &rspiocbq->iocb, sizeof(IOCB_t));
	spin_unlock_irqrestore(&phba->hbalock, flags2);

	bmp = menlo->bmp;
	rspiocbq = menlo->rspiocbq;
	rsp = &rspiocbq->iocb;

	pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
		     job->request_payload.sg_cnt, DMA_TO_DEVICE);
	pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
		     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);

	/* always return the xri, this would be used in the case
	 * of a menlo download to allow the data to be sent as a continuation
	 * of the exchange.
	 */
	menlo_resp = (struct menlo_response *)
		job->reply->reply_data.vendor_reply.vendor_rsp;
	menlo_resp->xri = rsp->ulpContext;
	if (rsp->ulpStatus) {
		if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
			switch (rsp->un.ulpWord[4] & 0xff) {
			case IOERR_SEQUENCE_TIMEOUT:
				rc = -ETIMEDOUT;
				break;
			case IOERR_INVALID_RPI:
				rc = -EFAULT;
				break;
			default:
				rc = -EACCES;
				break;
			}
		} else
			rc = -EACCES;
	} else
		job->reply->reply_payload_rcv_len =
			rsp->un.genreq64.bdl.bdeSize;

	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
	lpfc_sli_release_iocbq(phba, rspiocbq);
	lpfc_sli_release_iocbq(phba, cmdiocbq);
	kfree(bmp);
	kfree(dd_data);
	/* make error code available to userspace */
	job->reply->result = rc;
	/* complete the job back to userspace */
	job->job_done(job);
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
	return;
}

/**
 * lpfc_menlo_cmd - send an ioctl for menlo hardware
 * @job: fc_bsg_job to handle
 *
 * This function issues a gen request 64 CR ioctl for all menlo cmd requests,
 * all the command completions will return the xri for the command.
 * For menlo data requests a gen request 64 CX is used to continue the exchange
 * supplied in the menlo request header xri field.
 **/
static int
lpfc_menlo_cmd(struct fc_bsg_job *job)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *cmdiocbq, *rspiocbq;
	IOCB_t *cmd, *rsp;
	int rc = 0;
	struct menlo_command *menlo_cmd;
	struct menlo_response *menlo_resp;
	struct lpfc_dmabuf *bmp = NULL;
	int request_nseg;
	int reply_nseg;
	struct scatterlist *sgel = NULL;
	int numbde;
	dma_addr_t busaddr;
	struct bsg_job_data *dd_data;
	struct ulp_bde64 *bpl = NULL;

	/* in case no data is returned return just the return code */
	job->reply->reply_payload_rcv_len = 0;

	if (job->request_len <
	    sizeof(struct fc_bsg_request) +
	    sizeof(struct menlo_command)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2784 Received MENLO_CMD request below "
				"minimum size\n");
		rc = -ERANGE;
		goto no_dd_data;
	}

	if (job->reply_len <
	    sizeof(struct fc_bsg_request) + sizeof(struct menlo_response)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2785 Received MENLO_CMD reply below "
				"minimum size\n");
		rc = -ERANGE;
		goto no_dd_data;
	}

	if (!(phba->menlo_flag & HBA_MENLO_SUPPORT)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2786 Adapter does not support menlo "
				"commands\n");
		rc = -EPERM;
		goto no_dd_data;
	}

	menlo_cmd = (struct menlo_command *)
		job->request->rqst_data.h_vendor.vendor_cmd;

	menlo_resp = (struct menlo_response *)
		job->reply->reply_data.vendor_reply.vendor_rsp;

	/* allocate our bsg tracking structure */
	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
	if (!dd_data) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2787 Failed allocation of dd_data\n");
		rc = -ENOMEM;
		goto no_dd_data;
	}

	bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!bmp) {
		rc = -ENOMEM;
		goto free_dd;
	}

	cmdiocbq = lpfc_sli_get_iocbq(phba);
	if (!cmdiocbq) {
		rc = -ENOMEM;
		goto free_bmp;
	}

	rspiocbq = lpfc_sli_get_iocbq(phba);
	if (!rspiocbq) {
		rc = -ENOMEM;
		goto free_cmdiocbq;
	}

	rsp = &rspiocbq->iocb;

	bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
	if (!bmp->virt) {
		rc = -ENOMEM;
		goto free_rspiocbq;
	}

	INIT_LIST_HEAD(&bmp->list);
	bpl = (struct ulp_bde64 *) bmp->virt;
	request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
				  job->request_payload.sg_cnt, DMA_TO_DEVICE);
	for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
		busaddr = sg_dma_address(sgel);
		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		bpl->tus.f.bdeSize = sg_dma_len(sgel);
		bpl->tus.w = cpu_to_le32(bpl->tus.w);
		bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
		bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
		bpl++;
	}

	reply_nseg = pci_map_sg(phba->pcidev, job->reply_payload.sg_list,
				job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	for_each_sg(job->reply_payload.sg_list, sgel, reply_nseg, numbde) {
		busaddr = sg_dma_address(sgel);
		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
		bpl->tus.f.bdeSize = sg_dma_len(sgel);
		bpl->tus.w = cpu_to_le32(bpl->tus.w);
		bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
		bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
		bpl++;
	}
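
	/*
	 * Request and reply segments share one buffer pointer list; the
	 * BDL below therefore covers request_nseg + reply_nseg entries.
	 */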
  2685. cmd = &cmdiocbq->iocb;
  2686. cmd->un.genreq64.bdl.ulpIoTag32 = 0;
  2687. cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
  2688. cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
  2689. cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
  2690. cmd->un.genreq64.bdl.bdeSize =
  2691. (request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
  2692. cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
  2693. cmd->un.genreq64.w5.hcsw.Dfctl = 0;
  2694. cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CMD;
  2695. cmd->un.genreq64.w5.hcsw.Type = MENLO_TRANSPORT_TYPE; /* 0xfe */
  2696. cmd->ulpBdeCount = 1;
  2697. cmd->ulpClass = CLASS3;
  2698. cmd->ulpOwner = OWN_CHIP;
  2699. cmd->ulpLe = 1; /* Limited Edition */
  2700. cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
  2701. cmdiocbq->vport = phba->pport;
  2702. /* We want the firmware to timeout before we do */
  2703. cmd->ulpTimeout = MENLO_TIMEOUT - 5;
  2704. cmdiocbq->context3 = bmp;
  2705. cmdiocbq->context2 = rspiocbq;
  2706. cmdiocbq->iocb_cmpl = lpfc_bsg_menlo_cmd_cmp;
  2707. cmdiocbq->context1 = dd_data;
  2708. cmdiocbq->context2 = rspiocbq;
  2709. if (menlo_cmd->cmd == LPFC_BSG_VENDOR_MENLO_CMD) {
  2710. cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
  2711. cmd->ulpPU = MENLO_PU; /* 3 */
  2712. cmd->un.ulpWord[4] = MENLO_DID; /* 0x0000FC0E */
  2713. cmd->ulpContext = MENLO_CONTEXT; /* 0 */
  2714. } else {
  2715. cmd->ulpCommand = CMD_GEN_REQUEST64_CX;
  2716. cmd->ulpPU = 1;
  2717. cmd->un.ulpWord[4] = 0;
  2718. cmd->ulpContext = menlo_cmd->xri;
  2719. }
  2720. dd_data->type = TYPE_MENLO;
  2721. dd_data->context_un.menlo.cmdiocbq = cmdiocbq;
  2722. dd_data->context_un.menlo.rspiocbq = rspiocbq;
  2723. dd_data->context_un.menlo.set_job = job;
  2724. dd_data->context_un.menlo.bmp = bmp;
  2725. rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq,
  2726. MENLO_TIMEOUT - 5);
  2727. if (rc == IOCB_SUCCESS)
  2728. return 0; /* done for now */
  2729. /* iocb failed so cleanup */
  2730. pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
  2731. job->request_payload.sg_cnt, DMA_TO_DEVICE);
  2732. pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
  2733. job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
  2734. lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
  2735. free_rspiocbq:
  2736. lpfc_sli_release_iocbq(phba, rspiocbq);
  2737. free_cmdiocbq:
  2738. lpfc_sli_release_iocbq(phba, cmdiocbq);
  2739. free_bmp:
  2740. kfree(bmp);
  2741. free_dd:
  2742. kfree(dd_data);
  2743. no_dd_data:
  2744. /* make error code available to userspace */
  2745. job->reply->result = rc;
  2746. job->dd_data = NULL;
  2747. return rc;
  2748. }
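
/*
 * Editor's sketch (not part of the driver): each BPL entry built in the
 * loops above packs a 64-bit bus address into two little-endian 32-bit
 * words plus a combined size/flags word; that is all the
 * putPaddrLow()/putPaddrHigh() + cpu_to_le32() calls accomplish. A
 * stripped-down, hypothetical equivalent (the example_* names are
 * invented, and the 24-bit size field mirrors ulp_bde64's layout):
 */
#if 0	/* editor's illustration only -- not built */
struct example_bde64 {
	__le32 addr_low;	/* bus address bits 31:0, little-endian  */
	__le32 addr_high;	/* bus address bits 63:32, little-endian */
	__le32 size_flags;	/* 24-bit BDE length plus 8 flag bits    */
};

static void example_fill_bde(struct example_bde64 *bde, u64 busaddr, u32 len)
{
	/* same address split that putPaddrLow()/putPaddrHigh() perform */
	bde->addr_low = cpu_to_le32((u32)(busaddr & 0xffffffffULL));
	bde->addr_high = cpu_to_le32((u32)(busaddr >> 32));
	/* the length lives in the low 24 bits; flag bits left zero here */
	bde->size_flags = cpu_to_le32(len & 0x00ffffff);
}
#endif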

/**
 * lpfc_bsg_hst_vendor - process a vendor-specific fc_bsg_job
 * @job: fc_bsg_job to handle
 *
 * Dispatches the job to the handler matching the vendor command code in
 * vendor_cmd[0]; unknown command codes fail with -EINVAL.
 **/
static int
lpfc_bsg_hst_vendor(struct fc_bsg_job *job)
{
	int command = job->request->rqst_data.h_vendor.vendor_cmd[0];
	int rc;

	switch (command) {
	case LPFC_BSG_VENDOR_SET_CT_EVENT:
		rc = lpfc_bsg_hba_set_event(job);
		break;
	case LPFC_BSG_VENDOR_GET_CT_EVENT:
		rc = lpfc_bsg_hba_get_event(job);
		break;
	case LPFC_BSG_VENDOR_SEND_MGMT_RESP:
		rc = lpfc_bsg_send_mgmt_rsp(job);
		break;
	case LPFC_BSG_VENDOR_DIAG_MODE:
		rc = lpfc_bsg_diag_mode(job);
		break;
	case LPFC_BSG_VENDOR_DIAG_TEST:
		rc = lpfc_bsg_diag_test(job);
		break;
	case LPFC_BSG_VENDOR_GET_MGMT_REV:
		rc = lpfc_bsg_get_dfc_rev(job);
		break;
	case LPFC_BSG_VENDOR_MBOX:
		rc = lpfc_bsg_mbox_cmd(job);
		break;
	case LPFC_BSG_VENDOR_MENLO_CMD:
	case LPFC_BSG_VENDOR_MENLO_DATA:
		rc = lpfc_menlo_cmd(job);
		break;
	default:
		rc = -EINVAL;
		job->reply->reply_payload_rcv_len = 0;
		/* make error code available to userspace */
		job->reply->result = rc;
		break;
	}

	return rc;
}
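
/*
 * Editor's sketch (not part of the driver): vendor commands reach
 * lpfc_bsg_hst_vendor() from userspace through the bsg node the FC
 * transport registers for the host. Below is a minimal, hypothetical
 * submitter using the sg_io_v4 SG_IO ioctl; the example_* names, the
 * device path, and the zeroed vendor_id are assumptions for
 * illustration, not part of lpfc.
 */
#if 0	/* userspace illustration only -- never compiled into the driver */
#include <fcntl.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <scsi/sg.h>		/* SG_IO */
#include <linux/bsg.h>		/* struct sg_io_v4 */
#include <scsi/scsi_bsg_fc.h>	/* struct fc_bsg_request, FC_BSG_HST_VENDOR */

static int example_send_vendor_cmd(const char *bsg_path, uint32_t vendor_cmd,
				   void *din, uint32_t din_len)
{
	/* vendor_cmd[] is a flexible array member, so reserve room for it */
	unsigned char rqst[sizeof(struct fc_bsg_request) + sizeof(uint32_t)];
	struct fc_bsg_request *req = (struct fc_bsg_request *)rqst;
	struct fc_bsg_reply reply;
	struct sg_io_v4 io;
	int fd, rc;

	fd = open(bsg_path, O_RDWR);	/* a /dev/bsg/... node; path is host-specific */
	if (fd < 0)
		return -1;

	memset(rqst, 0, sizeof(rqst));
	req->msgcode = FC_BSG_HST_VENDOR;
	/* vendor_id identifies the recipient vendor; left zero in this sketch */
	req->rqst_data.h_vendor.vendor_cmd[0] = vendor_cmd;

	memset(&reply, 0, sizeof(reply));
	memset(&io, 0, sizeof(io));
	io.guard = 'Q';
	io.protocol = BSG_PROTOCOL_SCSI;
	io.subprotocol = BSG_SUB_PROTOCOL_SCSI_TRANSPORT;
	io.request = (uintptr_t)req;
	io.request_len = sizeof(rqst);
	io.response = (uintptr_t)&reply;
	io.max_response_len = sizeof(reply);
	io.din_xferp = (uintptr_t)din;	/* buffer for the reply payload */
	io.din_xfer_len = din_len;

	rc = ioctl(fd, SG_IO, &io);
	close(fd);
	return rc;
}
#endif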

/**
 * lpfc_bsg_request - handle a bsg request from the FC transport
 * @job: fc_bsg_job to handle
 *
 * Routes the job by msgcode: host vendor-specific requests, rport ELS
 * passthrough, and rport CT passthrough; anything else fails with -EINVAL.
 **/
int
lpfc_bsg_request(struct fc_bsg_job *job)
{
	uint32_t msgcode;
	int rc;

	msgcode = job->request->msgcode;
	switch (msgcode) {
	case FC_BSG_HST_VENDOR:
		rc = lpfc_bsg_hst_vendor(job);
		break;
	case FC_BSG_RPT_ELS:
		rc = lpfc_bsg_rport_els(job);
		break;
	case FC_BSG_RPT_CT:
		rc = lpfc_bsg_send_mgmt_cmd(job);
		break;
	default:
		rc = -EINVAL;
		job->reply->reply_payload_rcv_len = 0;
		/* make error code available to userspace */
		job->reply->result = rc;
		break;
	}

	return rc;
}
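
/*
 * Editor's note (not part of the driver): msgcode selects which member of
 * the fc_bsg_request rqst_data union is valid, which is why each handler
 * dispatched above reads a different member. A minimal sketch of that
 * relationship (the example_* names and locals are invented):
 */
#if 0	/* editor's illustration only -- not built */
static void example_peek_request(struct fc_bsg_job *job)
{
	uint32_t els_code, vendor_cmd0;

	switch (job->request->msgcode) {
	case FC_BSG_RPT_ELS:	/* rport ELS passthrough */
		els_code = job->request->rqst_data.r_els.els_code;
		break;
	case FC_BSG_HST_VENDOR:	/* vendor unique, as dispatched above */
		vendor_cmd0 = job->request->rqst_data.h_vendor.vendor_cmd[0];
		break;
	}
}
#endif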

/**
 * lpfc_bsg_timeout - handle timeout of a bsg request from the FC transport
 * @job: fc_bsg_job that has timed out
 *
 * This function just aborts the job's IOCB. The aborted IOCB will return to
 * the waiting function, which will handle passing the error back to
 * userspace.
 **/
int
lpfc_bsg_timeout(struct fc_bsg_job *job)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *cmdiocb;
	struct lpfc_bsg_event *evt;
	struct lpfc_bsg_iocb *iocb;
	struct lpfc_bsg_mbox *mbox;
	struct lpfc_bsg_menlo *menlo;
	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
	struct bsg_job_data *dd_data;
	unsigned long flags;

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	dd_data = (struct bsg_job_data *)job->dd_data;
	/* timeout and completion crossed paths if no dd_data */
	if (!dd_data) {
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		return 0;
	}

	switch (dd_data->type) {
	case TYPE_IOCB:
		iocb = &dd_data->context_un.iocb;
		cmdiocb = iocb->cmdiocbq;
		/* hint to completion handler that the job timed out */
		job->reply->result = -EAGAIN;
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		/* this will call our completion handler */
		spin_lock_irq(&phba->hbalock);
		lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
		spin_unlock_irq(&phba->hbalock);
		break;
	case TYPE_EVT:
		evt = dd_data->context_un.evt;
		/* this event has no job anymore */
		evt->set_job = NULL;
		job->dd_data = NULL;
		job->reply->reply_payload_rcv_len = 0;
		/* Return -EAGAIN, which is our way of signalling the
		 * app to retry.
		 */
		job->reply->result = -EAGAIN;
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		job->job_done(job);
		break;
	case TYPE_MBOX:
		mbox = &dd_data->context_un.mbox;
		/* this mbox has no job anymore */
		mbox->set_job = NULL;
		job->dd_data = NULL;
		job->reply->reply_payload_rcv_len = 0;
		job->reply->result = -EAGAIN;
		/* the mbox completion handler can now be run */
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		job->job_done(job);
		break;
	case TYPE_MENLO:
		menlo = &dd_data->context_un.menlo;
		cmdiocb = menlo->cmdiocbq;
		/* hint to completion handler that the job timed out */
		job->reply->result = -EAGAIN;
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		/* this will call our completion handler */
		spin_lock_irq(&phba->hbalock);
		lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
		spin_unlock_irq(&phba->hbalock);
		break;
	default:
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		break;
	}

	/* scsi transport fc fc_bsg_job_timeout expects a zero return code;
	 * otherwise an error message is printed on the console, so always
	 * return success (zero).
	 */
	return 0;
}
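
/*
 * Editor's sketch (not part of the driver): the "timeout and completion
 * crossed paths" test above is a lock-protected rendezvous -- whichever
 * of the two paths runs first consumes job->dd_data under ct_ev_lock,
 * and the later one sees NULL and backs off. A stripped-down,
 * hypothetical version of that pattern (the example_* names are invented):
 */
#if 0	/* editor's illustration only -- not built */
struct example_job {
	spinlock_t lock;
	void *token;		/* plays the role of job->dd_data */
};

/* called from both the timeout path and the completion path */
static void *example_claim(struct example_job *job)
{
	unsigned long flags;
	void *token;

	spin_lock_irqsave(&job->lock, flags);
	token = job->token;	/* NULL: the other path already claimed it */
	job->token = NULL;
	spin_unlock_irqrestore(&job->lock, flags);
	return token;		/* only the winner gets a non-NULL token */
}
#endif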