lpfc_bsg.c

  1. /*******************************************************************
  2. * This file is part of the Emulex Linux Device Driver for *
  3. * Fibre Channel Host Bus Adapters. *
  4. * Copyright (C) 2009-2010 Emulex. All rights reserved. *
  5. * EMULEX and SLI are trademarks of Emulex. *
  6. * www.emulex.com *
  7. * *
  8. * This program is free software; you can redistribute it and/or *
  9. * modify it under the terms of version 2 of the GNU General *
  10. * Public License as published by the Free Software Foundation. *
  11. * This program is distributed in the hope that it will be useful. *
  12. * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
  13. * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
  14. * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
  15. * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
  16. * TO BE LEGALLY INVALID. See the GNU General Public License for *
  17. * more details, a copy of which can be found in the file COPYING *
  18. * included with this package. *
  19. *******************************************************************/
  20. #include <linux/interrupt.h>
  21. #include <linux/mempool.h>
  22. #include <linux/pci.h>
  23. #include <linux/slab.h>
  24. #include <linux/delay.h>
  25. #include <scsi/scsi.h>
  26. #include <scsi/scsi_host.h>
  27. #include <scsi/scsi_transport_fc.h>
  28. #include <scsi/scsi_bsg_fc.h>
  29. #include <scsi/fc/fc_fs.h>
  30. #include "lpfc_hw4.h"
  31. #include "lpfc_hw.h"
  32. #include "lpfc_sli.h"
  33. #include "lpfc_sli4.h"
  34. #include "lpfc_nl.h"
  35. #include "lpfc_bsg.h"
  36. #include "lpfc_disc.h"
  37. #include "lpfc_scsi.h"
  38. #include "lpfc.h"
  39. #include "lpfc_logmsg.h"
  40. #include "lpfc_crtn.h"
  41. #include "lpfc_vport.h"
  42. #include "lpfc_version.h"
  43. struct lpfc_bsg_event {
  44. struct list_head node;
  45. struct kref kref;
  46. wait_queue_head_t wq;
  47. /* Event type and waiter identifiers */
  48. uint32_t type_mask;
  49. uint32_t req_id;
  50. uint32_t reg_id;
  51. /* next two flags are here for the auto-delete logic */
  52. unsigned long wait_time_stamp;
  53. int waiting;
  54. /* seen and not seen events */
  55. struct list_head events_to_get;
  56. struct list_head events_to_see;
  57. /* job waiting for this event to finish */
  58. struct fc_bsg_job *set_job;
  59. };
  60. struct lpfc_bsg_iocb {
  61. struct lpfc_iocbq *cmdiocbq;
  62. struct lpfc_iocbq *rspiocbq;
  63. struct lpfc_dmabuf *bmp;
  64. struct lpfc_nodelist *ndlp;
  65. /* job waiting for this iocb to finish */
  66. struct fc_bsg_job *set_job;
  67. };
  68. struct lpfc_bsg_mbox {
  69. LPFC_MBOXQ_t *pmboxq;
  70. MAILBOX_t *mb;
  71. struct lpfc_dmabuf *rxbmp; /* for BIU diags */
  72. struct lpfc_dmabufext *dmp; /* for BIU diags */
  73. uint8_t *ext; /* extended mailbox data */
  74. uint32_t mbOffset; /* from app */
  75. uint32_t inExtWLen; /* from app */
  76. uint32_t outExtWLen; /* from app */
  77. /* job waiting for this mbox command to finish */
  78. struct fc_bsg_job *set_job;
  79. };
  80. #define MENLO_DID 0x0000FC0E
  81. struct lpfc_bsg_menlo {
  82. struct lpfc_iocbq *cmdiocbq;
  83. struct lpfc_iocbq *rspiocbq;
  84. struct lpfc_dmabuf *bmp;
  85. /* job waiting for this iocb to finish */
  86. struct fc_bsg_job *set_job;
  87. };
  88. #define TYPE_EVT 1
  89. #define TYPE_IOCB 2
  90. #define TYPE_MBOX 3
  91. #define TYPE_MENLO 4
  92. struct bsg_job_data {
  93. uint32_t type;
  94. union {
  95. struct lpfc_bsg_event *evt;
  96. struct lpfc_bsg_iocb iocb;
  97. struct lpfc_bsg_mbox mbox;
  98. struct lpfc_bsg_menlo menlo;
  99. } context_un;
  100. };
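  /*
   * Note: dd_data->type records which member of context_un is live, so the
   * completion and timeout paths can recover their state from one opaque
   * pointer. Illustrative sketch only (not driver code):
   *
   *	switch (dd_data->type) {
   *	case TYPE_IOCB:  iocb  = &dd_data->context_un.iocb;  break;
   *	case TYPE_EVT:   evt   = dd_data->context_un.evt;    break;
   *	case TYPE_MBOX:  mbox  = &dd_data->context_un.mbox;  break;
   *	case TYPE_MENLO: menlo = &dd_data->context_un.menlo; break;
   *	}
   */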
  101. struct event_data {
  102. struct list_head node;
  103. uint32_t type;
  104. uint32_t immed_dat;
  105. void *data;
  106. uint32_t len;
  107. };
  108. #define BUF_SZ_4K 4096
  109. #define SLI_CT_ELX_LOOPBACK 0x10
  110. enum ELX_LOOPBACK_CMD {
  111. ELX_LOOPBACK_XRI_SETUP,
  112. ELX_LOOPBACK_DATA,
  113. };
  114. #define ELX_LOOPBACK_HEADER_SZ \
  115. (size_t)(&((struct lpfc_sli_ct_request *)NULL)->un)
  116. struct lpfc_dmabufext {
  117. struct lpfc_dmabuf dma;
  118. uint32_t size;
  119. uint32_t flag;
  120. };
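  /*
   * Note: ELX_LOOPBACK_HEADER_SZ above uses the classic null-pointer idiom
   * to compute the byte offset of the 'un' payload union inside
   * struct lpfc_sli_ct_request, i.e. the size of the CT header that
   * precedes the loopback payload. It should be equivalent to
   * offsetof(struct lpfc_sli_ct_request, un).
   */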
  121. /**
  122. * lpfc_bsg_send_mgmt_cmd_cmp - lpfc_bsg_send_mgmt_cmd's completion handler
  123. * @phba: Pointer to HBA context object.
  124. * @cmdiocbq: Pointer to command iocb.
  125. * @rspiocbq: Pointer to response iocb.
  126. *
  127. * This function is the completion handler for iocbs issued using
  128. * lpfc_bsg_send_mgmt_cmd function. This function is called by the
  129. * ring event handler function without any lock held. This function
  130. * can be called from both worker thread context and interrupt
  131. * context. This function also can be called from another thread which
  132. * cleans up the SLI layer objects.
  133. * This function copies the contents of the response iocb to the
  134. * response iocb memory object provided by the caller of
  135. * lpfc_sli_issue_iocb_wait and then wakes up the thread which
  136. * sleeps for the iocb completion.
  137. **/
  138. static void
  139. lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
  140. struct lpfc_iocbq *cmdiocbq,
  141. struct lpfc_iocbq *rspiocbq)
  142. {
  143. unsigned long iflags;
  144. struct bsg_job_data *dd_data;
  145. struct fc_bsg_job *job;
  146. IOCB_t *rsp;
  147. struct lpfc_dmabuf *bmp;
  148. struct lpfc_nodelist *ndlp;
  149. struct lpfc_bsg_iocb *iocb;
  150. unsigned long flags;
  151. int rc = 0;
  152. spin_lock_irqsave(&phba->ct_ev_lock, flags);
  153. dd_data = cmdiocbq->context1;
  154. if (!dd_data) {
  155. spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
  156. return;
  157. }
  158. iocb = &dd_data->context_un.iocb;
  159. job = iocb->set_job;
  160. job->dd_data = NULL; /* so timeout handler does not reply */
  161. spin_lock_irqsave(&phba->hbalock, iflags);
  162. cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
  163. if (cmdiocbq->context2 && rspiocbq)
  164. memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
  165. &rspiocbq->iocb, sizeof(IOCB_t));
  166. spin_unlock_irqrestore(&phba->hbalock, iflags);
  167. bmp = iocb->bmp;
  168. rspiocbq = iocb->rspiocbq;
  169. rsp = &rspiocbq->iocb;
  170. ndlp = iocb->ndlp;
  171. pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
  172. job->request_payload.sg_cnt, DMA_TO_DEVICE);
  173. pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
  174. job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
  175. if (rsp->ulpStatus) {
  176. if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
  177. switch (rsp->un.ulpWord[4] & 0xff) {
  178. case IOERR_SEQUENCE_TIMEOUT:
  179. rc = -ETIMEDOUT;
  180. break;
  181. case IOERR_INVALID_RPI:
  182. rc = -EFAULT;
  183. break;
  184. default:
  185. rc = -EACCES;
  186. break;
  187. }
  188. } else
  189. rc = -EACCES;
  190. } else
  191. job->reply->reply_payload_rcv_len =
  192. rsp->un.genreq64.bdl.bdeSize;
  193. lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
  194. lpfc_sli_release_iocbq(phba, rspiocbq);
  195. lpfc_sli_release_iocbq(phba, cmdiocbq);
  196. lpfc_nlp_put(ndlp);
  197. kfree(bmp);
  198. kfree(dd_data);
  199. /* make error code available to userspace */
  200. job->reply->result = rc;
  201. /* complete the job back to userspace */
  202. job->job_done(job);
  203. spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
  204. return;
  205. }
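  /*
   * Note: the dd_data NULL check above guards the race between normal
   * completion and the bsg timeout path. Both serialize on phba->ct_ev_lock;
   * the intent is that whichever runs first detaches the shared tracking
   * structure (job->dd_data / cmdiocbq->context1), so the other side sees
   * NULL and returns without completing the job a second time.
   */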
  206. /**
  207. * lpfc_bsg_send_mgmt_cmd - send a CT command from a bsg request
  208. * @job: fc_bsg_job to handle
  209. **/
  210. static int
  211. lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job)
  212. {
  213. struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
  214. struct lpfc_hba *phba = vport->phba;
  215. struct lpfc_rport_data *rdata = job->rport->dd_data;
  216. struct lpfc_nodelist *ndlp = rdata->pnode;
  217. struct ulp_bde64 *bpl = NULL;
  218. uint32_t timeout;
  219. struct lpfc_iocbq *cmdiocbq = NULL;
  220. struct lpfc_iocbq *rspiocbq = NULL;
  221. IOCB_t *cmd;
  222. IOCB_t *rsp;
  223. struct lpfc_dmabuf *bmp = NULL;
  224. int request_nseg;
  225. int reply_nseg;
  226. struct scatterlist *sgel = NULL;
  227. int numbde;
  228. dma_addr_t busaddr;
  229. struct bsg_job_data *dd_data;
  230. uint32_t creg_val;
  231. int rc = 0;
  232. /* in case no data is transferred */
  233. job->reply->reply_payload_rcv_len = 0;
  234. /* allocate our bsg tracking structure */
  235. dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
  236. if (!dd_data) {
  237. lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
  238. "2733 Failed allocation of dd_data\n");
  239. rc = -ENOMEM;
  240. goto no_dd_data;
  241. }
  242. if (!lpfc_nlp_get(ndlp)) {
  243. rc = -ENODEV;
  244. goto no_ndlp;
  245. }
  246. bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
  247. if (!bmp) {
  248. rc = -ENOMEM;
  249. goto free_ndlp;
  250. }
  251. if (ndlp->nlp_flag & NLP_ELS_SND_MASK) {
  252. rc = -ENODEV;
  253. goto free_bmp;
  254. }
  255. cmdiocbq = lpfc_sli_get_iocbq(phba);
  256. if (!cmdiocbq) {
  257. rc = -ENOMEM;
  258. goto free_bmp;
  259. }
  260. cmd = &cmdiocbq->iocb;
  261. rspiocbq = lpfc_sli_get_iocbq(phba);
  262. if (!rspiocbq) {
  263. rc = -ENOMEM;
  264. goto free_cmdiocbq;
  265. }
  266. rsp = &rspiocbq->iocb;
  267. bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
  268. if (!bmp->virt) {
  269. rc = -ENOMEM;
  270. goto free_rspiocbq;
  271. }
  272. INIT_LIST_HEAD(&bmp->list);
  273. bpl = (struct ulp_bde64 *) bmp->virt;
  274. request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
  275. job->request_payload.sg_cnt, DMA_TO_DEVICE);
  276. for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
  277. busaddr = sg_dma_address(sgel);
  278. bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
  279. bpl->tus.f.bdeSize = sg_dma_len(sgel);
  280. bpl->tus.w = cpu_to_le32(bpl->tus.w);
  281. bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
  282. bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
  283. bpl++;
  284. }
  285. reply_nseg = pci_map_sg(phba->pcidev, job->reply_payload.sg_list,
  286. job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
  287. for_each_sg(job->reply_payload.sg_list, sgel, reply_nseg, numbde) {
  288. busaddr = sg_dma_address(sgel);
  289. bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
  290. bpl->tus.f.bdeSize = sg_dma_len(sgel);
  291. bpl->tus.w = cpu_to_le32(bpl->tus.w);
  292. bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
  293. bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
  294. bpl++;
  295. }
  296. cmd->un.genreq64.bdl.ulpIoTag32 = 0;
  297. cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
  298. cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
  299. cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
  300. cmd->un.genreq64.bdl.bdeSize =
  301. (request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
  302. cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
  303. cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
  304. cmd->un.genreq64.w5.hcsw.Dfctl = 0;
  305. cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
  306. cmd->un.genreq64.w5.hcsw.Type = FC_TYPE_CT;
  307. cmd->ulpBdeCount = 1;
  308. cmd->ulpLe = 1;
  309. cmd->ulpClass = CLASS3;
  310. cmd->ulpContext = ndlp->nlp_rpi;
  311. cmd->ulpOwner = OWN_CHIP;
  312. cmdiocbq->vport = phba->pport;
  313. cmdiocbq->context3 = bmp;
  314. cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
  315. timeout = phba->fc_ratov * 2;
  316. cmd->ulpTimeout = timeout;
  317. cmdiocbq->iocb_cmpl = lpfc_bsg_send_mgmt_cmd_cmp;
  318. cmdiocbq->context1 = dd_data;
  319. cmdiocbq->context2 = rspiocbq;
  320. dd_data->type = TYPE_IOCB;
  321. dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
  322. dd_data->context_un.iocb.rspiocbq = rspiocbq;
  323. dd_data->context_un.iocb.set_job = job;
  324. dd_data->context_un.iocb.bmp = bmp;
  325. dd_data->context_un.iocb.ndlp = ndlp;
  326. if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
  327. creg_val = readl(phba->HCregaddr);
  328. creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
  329. writel(creg_val, phba->HCregaddr);
  330. readl(phba->HCregaddr); /* flush */
  331. }
  332. rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
  333. if (rc == IOCB_SUCCESS)
  334. return 0; /* done for now */
  335. /* iocb failed so cleanup */
  336. pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
  337. job->request_payload.sg_cnt, DMA_TO_DEVICE);
  338. pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
  339. job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
  340. lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
  341. free_rspiocbq:
  342. lpfc_sli_release_iocbq(phba, rspiocbq);
  343. free_cmdiocbq:
  344. lpfc_sli_release_iocbq(phba, cmdiocbq);
  345. free_bmp:
  346. kfree(bmp);
  347. free_ndlp:
  348. lpfc_nlp_put(ndlp);
  349. no_ndlp:
  350. kfree(dd_data);
  351. no_dd_data:
  352. /* make error code available to userspace */
  353. job->reply->result = rc;
  354. job->dd_data = NULL;
  355. return rc;
  356. }
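  /*
   * Note on the BPL built above: bmp->virt holds a flat array of ulp_bde64
   * entries, request scatterlist segments first (BUFF_TYPE_BDE_64, data out)
   * followed by reply segments (BUFF_TYPE_BDE_64I, data in). The
   * GEN_REQUEST64 BDL then points at bmp->phys with
   * bdeSize = (request_nseg + reply_nseg) * sizeof(struct ulp_bde64).
   */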
  357. /**
  358. * lpfc_bsg_rport_els_cmp - lpfc_bsg_rport_els's completion handler
  359. * @phba: Pointer to HBA context object.
  360. * @cmdiocbq: Pointer to command iocb.
  361. * @rspiocbq: Pointer to response iocb.
  362. *
  363. * This function is the completion handler for iocbs issued using
  364. * lpfc_bsg_rport_els function. This function is called by the
  365. * ring event handler function without any lock held. This function
  366. * can be called from both worker thread context and interrupt
  367. * context. This function also can be called from another thread which
  368. * cleans up the SLI layer objects.
  369. * This function copies the contents of the response iocb to the
  370. * response iocb memory object provided by the caller of
  371. * lpfc_sli_issue_iocb_wait and then wakes up the thread which
  372. * sleeps for the iocb completion.
  373. **/
  374. static void
  375. lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba,
  376. struct lpfc_iocbq *cmdiocbq,
  377. struct lpfc_iocbq *rspiocbq)
  378. {
  379. struct bsg_job_data *dd_data;
  380. struct fc_bsg_job *job;
  381. IOCB_t *rsp;
  382. struct lpfc_nodelist *ndlp;
  383. struct lpfc_dmabuf *pbuflist = NULL;
  384. struct fc_bsg_ctels_reply *els_reply;
  385. uint8_t *rjt_data;
  386. unsigned long flags;
  387. int rc = 0;
  388. spin_lock_irqsave(&phba->ct_ev_lock, flags);
  389. dd_data = cmdiocbq->context1;
  390. /* normal completion and timeout crossed paths, already done */
  391. if (!dd_data) {
  392. spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
  393. return;
  394. }
  395. cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
  396. if (cmdiocbq->context2 && rspiocbq)
  397. memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
  398. &rspiocbq->iocb, sizeof(IOCB_t));
  399. job = dd_data->context_un.iocb.set_job;
  400. cmdiocbq = dd_data->context_un.iocb.cmdiocbq;
  401. rspiocbq = dd_data->context_un.iocb.rspiocbq;
  402. rsp = &rspiocbq->iocb;
  403. ndlp = dd_data->context_un.iocb.ndlp;
  404. pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
  405. job->request_payload.sg_cnt, DMA_TO_DEVICE);
  406. pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
  407. job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
  408. if (job->reply->result == -EAGAIN)
  409. rc = -EAGAIN;
  410. else if (rsp->ulpStatus == IOSTAT_SUCCESS)
  411. job->reply->reply_payload_rcv_len =
  412. rsp->un.elsreq64.bdl.bdeSize;
  413. else if (rsp->ulpStatus == IOSTAT_LS_RJT) {
  414. job->reply->reply_payload_rcv_len =
  415. sizeof(struct fc_bsg_ctels_reply);
  416. /* LS_RJT data returned in word 4 */
  417. rjt_data = (uint8_t *)&rsp->un.ulpWord[4];
  418. els_reply = &job->reply->reply_data.ctels_reply;
  419. els_reply->status = FC_CTELS_STATUS_REJECT;
  420. els_reply->rjt_data.action = rjt_data[3];
  421. els_reply->rjt_data.reason_code = rjt_data[2];
  422. els_reply->rjt_data.reason_explanation = rjt_data[1];
  423. els_reply->rjt_data.vendor_unique = rjt_data[0];
  424. } else
  425. rc = -EIO;
  426. pbuflist = (struct lpfc_dmabuf *) cmdiocbq->context3;
  427. lpfc_mbuf_free(phba, pbuflist->virt, pbuflist->phys);
  428. lpfc_sli_release_iocbq(phba, rspiocbq);
  429. lpfc_sli_release_iocbq(phba, cmdiocbq);
  430. lpfc_nlp_put(ndlp);
  431. kfree(dd_data);
  432. /* make error code available to userspace */
  433. job->reply->result = rc;
  434. job->dd_data = NULL;
  435. /* complete the job back to userspace */
  436. job->job_done(job);
  437. spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
  438. return;
  439. }
  440. /**
  441. * lpfc_bsg_rport_els - send an ELS command from a bsg request
  442. * @job: fc_bsg_job to handle
  443. **/
  444. static int
  445. lpfc_bsg_rport_els(struct fc_bsg_job *job)
  446. {
  447. struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
  448. struct lpfc_hba *phba = vport->phba;
  449. struct lpfc_rport_data *rdata = job->rport->dd_data;
  450. struct lpfc_nodelist *ndlp = rdata->pnode;
  451. uint32_t elscmd;
  452. uint32_t cmdsize;
  453. uint32_t rspsize;
  454. struct lpfc_iocbq *rspiocbq;
  455. struct lpfc_iocbq *cmdiocbq;
  456. IOCB_t *rsp;
  457. uint16_t rpi = 0;
  458. struct lpfc_dmabuf *pcmd;
  459. struct lpfc_dmabuf *prsp;
  460. struct lpfc_dmabuf *pbuflist = NULL;
  461. struct ulp_bde64 *bpl;
  462. int request_nseg;
  463. int reply_nseg;
  464. struct scatterlist *sgel = NULL;
  465. int numbde;
  466. dma_addr_t busaddr;
  467. struct bsg_job_data *dd_data;
  468. uint32_t creg_val;
  469. int rc = 0;
  470. /* in case no data is transferred */
  471. job->reply->reply_payload_rcv_len = 0;
  472. /* allocate our bsg tracking structure */
  473. dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
  474. if (!dd_data) {
  475. lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
  476. "2735 Failed allocation of dd_data\n");
  477. rc = -ENOMEM;
  478. goto no_dd_data;
  479. }
  480. if (!lpfc_nlp_get(ndlp)) {
  481. rc = -ENODEV;
  482. goto free_dd_data;
  483. }
  484. elscmd = job->request->rqst_data.r_els.els_code;
  485. cmdsize = job->request_payload.payload_len;
  486. rspsize = job->reply_payload.payload_len;
  487. rspiocbq = lpfc_sli_get_iocbq(phba);
  488. if (!rspiocbq) {
  489. lpfc_nlp_put(ndlp);
  490. rc = -ENOMEM;
  491. goto free_dd_data;
  492. }
  493. rsp = &rspiocbq->iocb;
  494. rpi = ndlp->nlp_rpi;
  495. cmdiocbq = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp,
  496. ndlp->nlp_DID, elscmd);
  497. if (!cmdiocbq) {
  498. rc = -EIO;
  499. goto free_rspiocbq;
  500. }
  501. /* lpfc_prep_els_iocb() sets context1 to the ndlp, context2 to the command
  502. * dmabuf and context3 to the data (BPL) dmabuf
  503. */
  504. pcmd = (struct lpfc_dmabuf *) cmdiocbq->context2;
  505. prsp = (struct lpfc_dmabuf *) pcmd->list.next;
  506. lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
  507. kfree(pcmd);
  508. lpfc_mbuf_free(phba, prsp->virt, prsp->phys);
  509. kfree(prsp);
  510. cmdiocbq->context2 = NULL;
  511. pbuflist = (struct lpfc_dmabuf *) cmdiocbq->context3;
  512. bpl = (struct ulp_bde64 *) pbuflist->virt;
  513. request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
  514. job->request_payload.sg_cnt, DMA_TO_DEVICE);
  515. for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
  516. busaddr = sg_dma_address(sgel);
  517. bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
  518. bpl->tus.f.bdeSize = sg_dma_len(sgel);
  519. bpl->tus.w = cpu_to_le32(bpl->tus.w);
  520. bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
  521. bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
  522. bpl++;
  523. }
  524. reply_nseg = pci_map_sg(phba->pcidev, job->reply_payload.sg_list,
  525. job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
  526. for_each_sg(job->reply_payload.sg_list, sgel, reply_nseg, numbde) {
  527. busaddr = sg_dma_address(sgel);
  528. bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
  529. bpl->tus.f.bdeSize = sg_dma_len(sgel);
  530. bpl->tus.w = cpu_to_le32(bpl->tus.w);
  531. bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
  532. bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
  533. bpl++;
  534. }
  535. cmdiocbq->iocb.un.elsreq64.bdl.bdeSize =
  536. (request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
  537. cmdiocbq->iocb.ulpContext = rpi;
  538. cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
  539. cmdiocbq->context1 = NULL;
  540. cmdiocbq->context2 = NULL;
  541. cmdiocbq->iocb_cmpl = lpfc_bsg_rport_els_cmp;
  542. cmdiocbq->context1 = dd_data;
  543. cmdiocbq->context2 = rspiocbq;
  544. dd_data->type = TYPE_IOCB;
  545. dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
  546. dd_data->context_un.iocb.rspiocbq = rspiocbq;
  547. dd_data->context_un.iocb.set_job = job;
  548. dd_data->context_un.iocb.bmp = NULL;
  549. dd_data->context_un.iocb.ndlp = ndlp;
  550. if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
  551. creg_val = readl(phba->HCregaddr);
  552. creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
  553. writel(creg_val, phba->HCregaddr);
  554. readl(phba->HCregaddr); /* flush */
  555. }
  556. rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
  557. lpfc_nlp_put(ndlp);
  558. if (rc == IOCB_SUCCESS)
  559. return 0; /* done for now */
  560. pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
  561. job->request_payload.sg_cnt, DMA_TO_DEVICE);
  562. pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
  563. job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
  564. lpfc_mbuf_free(phba, pbuflist->virt, pbuflist->phys);
  565. lpfc_sli_release_iocbq(phba, cmdiocbq);
  566. free_rspiocbq:
  567. lpfc_sli_release_iocbq(phba, rspiocbq);
  568. free_dd_data:
  569. kfree(dd_data);
  570. no_dd_data:
  571. /* make error code available to userspace */
  572. job->reply->result = rc;
  573. job->dd_data = NULL;
  574. return rc;
  575. }
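  /*
   * Note: lpfc_bsg_rport_els() above reuses lpfc_prep_els_iocb() only to
   * build the iocb skeleton. The command/response dmabufs the prep routine
   * allocated (pcmd/prsp) are freed right away because the payload comes
   * from the bsg request's scatterlist instead; the BPL in context3 is then
   * rewritten with BDEs mapping the job's request and reply sg lists.
   */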
  576. /**
  577. * lpfc_bsg_event_free - frees an allocated event structure
  578. * @kref: Pointer to a kref.
  579. *
  580. * Called from kref_put. Back cast the kref into an event structure address.
  581. * Free any events to get, delete associated nodes, free any events to see,
  582. * free any data then free the event itself.
  583. **/
  584. static void
  585. lpfc_bsg_event_free(struct kref *kref)
  586. {
  587. struct lpfc_bsg_event *evt = container_of(kref, struct lpfc_bsg_event,
  588. kref);
  589. struct event_data *ed;
  590. list_del(&evt->node);
  591. while (!list_empty(&evt->events_to_get)) {
  592. ed = list_entry(evt->events_to_get.next, typeof(*ed), node);
  593. list_del(&ed->node);
  594. kfree(ed->data);
  595. kfree(ed);
  596. }
  597. while (!list_empty(&evt->events_to_see)) {
  598. ed = list_entry(evt->events_to_see.next, typeof(*ed), node);
  599. list_del(&ed->node);
  600. kfree(ed->data);
  601. kfree(ed);
  602. }
  603. kfree(evt);
  604. }
  605. /**
  606. * lpfc_bsg_event_ref - increments the kref for an event
  607. * @evt: Pointer to an event structure.
  608. **/
  609. static inline void
  610. lpfc_bsg_event_ref(struct lpfc_bsg_event *evt)
  611. {
  612. kref_get(&evt->kref);
  613. }
  614. /**
  615. * lpfc_bsg_event_unref - Uses kref_put to free an event structure
  616. * @evt: Pointer to an event structure.
  617. **/
  618. static inline void
  619. lpfc_bsg_event_unref(struct lpfc_bsg_event *evt)
  620. {
  621. kref_put(&evt->kref, lpfc_bsg_event_free);
  622. }
  623. /**
  624. * lpfc_bsg_event_new - allocate and initialize an event structure
  625. * @ev_mask: Mask of events.
  626. * @ev_reg_id: Event reg id.
  627. * @ev_req_id: Event request id.
  628. **/
  629. static struct lpfc_bsg_event *
  630. lpfc_bsg_event_new(uint32_t ev_mask, int ev_reg_id, uint32_t ev_req_id)
  631. {
  632. struct lpfc_bsg_event *evt = kzalloc(sizeof(*evt), GFP_KERNEL);
  633. if (!evt)
  634. return NULL;
  635. INIT_LIST_HEAD(&evt->events_to_get);
  636. INIT_LIST_HEAD(&evt->events_to_see);
  637. evt->type_mask = ev_mask;
  638. evt->req_id = ev_req_id;
  639. evt->reg_id = ev_reg_id;
  640. evt->wait_time_stamp = jiffies;
  641. init_waitqueue_head(&evt->wq);
  642. kref_init(&evt->kref);
  643. return evt;
  644. }
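  /*
   * Note: kref_init() leaves a new event at refcount 1, the reference held
   * once the event is queued on phba->ct_ev_waiters.
   * lpfc_bsg_event_ref()/lpfc_bsg_event_unref() wrap kref_get()/kref_put();
   * when the last reference drops, lpfc_bsg_event_free() unlinks the event
   * and releases any event_data still queued on either list.
   */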
  645. /**
  646. * diag_cmd_data_free - Frees an lpfc dma buffer extension
  647. * @phba: Pointer to HBA context object.
  648. * @mlist: Pointer to an lpfc dma buffer extension.
  649. **/
  650. static int
  651. diag_cmd_data_free(struct lpfc_hba *phba, struct lpfc_dmabufext *mlist)
  652. {
  653. struct lpfc_dmabufext *mlast;
  654. struct pci_dev *pcidev;
  655. struct list_head head, *curr, *next;
  656. if ((!mlist) || (!lpfc_is_link_up(phba) &&
  657. (phba->link_flag & LS_LOOPBACK_MODE))) {
  658. return 0;
  659. }
  660. pcidev = phba->pcidev;
  661. list_add_tail(&head, &mlist->dma.list);
  662. list_for_each_safe(curr, next, &head) {
  663. mlast = list_entry(curr, struct lpfc_dmabufext , dma.list);
  664. if (mlast->dma.virt)
  665. dma_free_coherent(&pcidev->dev,
  666. mlast->size,
  667. mlast->dma.virt,
  668. mlast->dma.phys);
  669. kfree(mlast);
  670. }
  671. return 0;
  672. }
  673. /**
  674. * lpfc_bsg_ct_unsol_event - process an unsolicited CT command
  675. * @phba: Pointer to HBA context object.
  676. * @pring: Pointer to the driver SLI ring object the command arrived on.
  677. * @piocbq: Pointer to the iocbq carrying the unsolicited CT command.
  678. *
  679. * This function is called when an unsolicited CT command is received. It
  680. * forwards the event to any processes registered to receive CT events.
  681. **/
  682. int
  683. lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
  684. struct lpfc_iocbq *piocbq)
  685. {
  686. uint32_t evt_req_id = 0;
  687. uint32_t cmd;
  688. uint32_t len;
  689. struct lpfc_dmabuf *dmabuf = NULL;
  690. struct lpfc_bsg_event *evt;
  691. struct event_data *evt_dat = NULL;
  692. struct lpfc_iocbq *iocbq;
  693. size_t offset = 0;
  694. struct list_head head;
  695. struct ulp_bde64 *bde;
  696. dma_addr_t dma_addr;
  697. int i;
  698. struct lpfc_dmabuf *bdeBuf1 = piocbq->context2;
  699. struct lpfc_dmabuf *bdeBuf2 = piocbq->context3;
  700. struct lpfc_hbq_entry *hbqe;
  701. struct lpfc_sli_ct_request *ct_req;
  702. struct fc_bsg_job *job = NULL;
  703. unsigned long flags;
  704. int size = 0;
  705. INIT_LIST_HEAD(&head);
  706. list_add_tail(&head, &piocbq->list);
  707. if (piocbq->iocb.ulpBdeCount == 0 ||
  708. piocbq->iocb.un.cont64[0].tus.f.bdeSize == 0)
  709. goto error_ct_unsol_exit;
  710. if (phba->link_state == LPFC_HBA_ERROR ||
  711. (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)))
  712. goto error_ct_unsol_exit;
  713. if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
  714. dmabuf = bdeBuf1;
  715. else {
  716. dma_addr = getPaddr(piocbq->iocb.un.cont64[0].addrHigh,
  717. piocbq->iocb.un.cont64[0].addrLow);
  718. dmabuf = lpfc_sli_ringpostbuf_get(phba, pring, dma_addr);
  719. }
  720. if (dmabuf == NULL)
  721. goto error_ct_unsol_exit;
  722. ct_req = (struct lpfc_sli_ct_request *)dmabuf->virt;
  723. evt_req_id = ct_req->FsType;
  724. cmd = ct_req->CommandResponse.bits.CmdRsp;
  725. len = ct_req->CommandResponse.bits.Size;
  726. if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
  727. lpfc_sli_ringpostbuf_put(phba, pring, dmabuf);
  728. spin_lock_irqsave(&phba->ct_ev_lock, flags);
  729. list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
  730. if (!(evt->type_mask & FC_REG_CT_EVENT) ||
  731. evt->req_id != evt_req_id)
  732. continue;
  733. lpfc_bsg_event_ref(evt);
  734. spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
  735. evt_dat = kzalloc(sizeof(*evt_dat), GFP_KERNEL);
  736. if (evt_dat == NULL) {
  737. spin_lock_irqsave(&phba->ct_ev_lock, flags);
  738. lpfc_bsg_event_unref(evt);
  739. lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
  740. "2614 Memory allocation failed for "
  741. "CT event\n");
  742. break;
  743. }
  744. if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
  745. /* take accumulated byte count from the last iocbq */
  746. iocbq = list_entry(head.prev, typeof(*iocbq), list);
  747. evt_dat->len = iocbq->iocb.unsli3.rcvsli3.acc_len;
  748. } else {
  749. list_for_each_entry(iocbq, &head, list) {
  750. for (i = 0; i < iocbq->iocb.ulpBdeCount; i++)
  751. evt_dat->len +=
  752. iocbq->iocb.un.cont64[i].tus.f.bdeSize;
  753. }
  754. }
  755. evt_dat->data = kzalloc(evt_dat->len, GFP_KERNEL);
  756. if (evt_dat->data == NULL) {
  757. lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
  758. "2615 Memory allocation failed for "
  759. "CT event data, size %d\n",
  760. evt_dat->len);
  761. kfree(evt_dat);
  762. spin_lock_irqsave(&phba->ct_ev_lock, flags);
  763. lpfc_bsg_event_unref(evt);
  764. spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
  765. goto error_ct_unsol_exit;
  766. }
  767. list_for_each_entry(iocbq, &head, list) {
  768. size = 0;
  769. if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
  770. bdeBuf1 = iocbq->context2;
  771. bdeBuf2 = iocbq->context3;
  772. }
  773. for (i = 0; i < iocbq->iocb.ulpBdeCount; i++) {
  774. if (phba->sli3_options &
  775. LPFC_SLI3_HBQ_ENABLED) {
  776. if (i == 0) {
  777. hbqe = (struct lpfc_hbq_entry *)
  778. &iocbq->iocb.un.ulpWord[0];
  779. size = hbqe->bde.tus.f.bdeSize;
  780. dmabuf = bdeBuf1;
  781. } else if (i == 1) {
  782. hbqe = (struct lpfc_hbq_entry *)
  783. &iocbq->iocb.unsli3.
  784. sli3Words[4];
  785. size = hbqe->bde.tus.f.bdeSize;
  786. dmabuf = bdeBuf2;
  787. }
  788. if ((offset + size) > evt_dat->len)
  789. size = evt_dat->len - offset;
  790. } else {
  791. size = iocbq->iocb.un.cont64[i].
  792. tus.f.bdeSize;
  793. bde = &iocbq->iocb.un.cont64[i];
  794. dma_addr = getPaddr(bde->addrHigh,
  795. bde->addrLow);
  796. dmabuf = lpfc_sli_ringpostbuf_get(phba,
  797. pring, dma_addr);
  798. }
  799. if (!dmabuf) {
  800. lpfc_printf_log(phba, KERN_ERR,
  801. LOG_LIBDFC, "2616 No dmabuf "
  802. "found for iocbq 0x%p\n",
  803. iocbq);
  804. kfree(evt_dat->data);
  805. kfree(evt_dat);
  806. spin_lock_irqsave(&phba->ct_ev_lock,
  807. flags);
  808. lpfc_bsg_event_unref(evt);
  809. spin_unlock_irqrestore(
  810. &phba->ct_ev_lock, flags);
  811. goto error_ct_unsol_exit;
  812. }
  813. memcpy((char *)(evt_dat->data) + offset,
  814. dmabuf->virt, size);
  815. offset += size;
  816. if (evt_req_id != SLI_CT_ELX_LOOPBACK &&
  817. !(phba->sli3_options &
  818. LPFC_SLI3_HBQ_ENABLED)) {
  819. lpfc_sli_ringpostbuf_put(phba, pring,
  820. dmabuf);
  821. } else {
  822. switch (cmd) {
  823. case ELX_LOOPBACK_DATA:
  824. diag_cmd_data_free(phba,
  825. (struct lpfc_dmabufext *)
  826. dmabuf);
  827. break;
  828. case ELX_LOOPBACK_XRI_SETUP:
  829. if ((phba->sli_rev ==
  830. LPFC_SLI_REV2) ||
  831. (phba->sli3_options &
  832. LPFC_SLI3_HBQ_ENABLED
  833. )) {
  834. lpfc_in_buf_free(phba,
  835. dmabuf);
  836. } else {
  837. lpfc_post_buffer(phba,
  838. pring,
  839. 1);
  840. }
  841. break;
  842. default:
  843. if (!(phba->sli3_options &
  844. LPFC_SLI3_HBQ_ENABLED))
  845. lpfc_post_buffer(phba,
  846. pring,
  847. 1);
  848. break;
  849. }
  850. }
  851. }
  852. }
  853. spin_lock_irqsave(&phba->ct_ev_lock, flags);
  854. if (phba->sli_rev == LPFC_SLI_REV4) {
  855. evt_dat->immed_dat = phba->ctx_idx;
  856. phba->ctx_idx = (phba->ctx_idx + 1) % 64;
  857. phba->ct_ctx[evt_dat->immed_dat].oxid =
  858. piocbq->iocb.ulpContext;
  859. phba->ct_ctx[evt_dat->immed_dat].SID =
  860. piocbq->iocb.un.rcvels.remoteID;
  861. } else
  862. evt_dat->immed_dat = piocbq->iocb.ulpContext;
  863. evt_dat->type = FC_REG_CT_EVENT;
  864. list_add(&evt_dat->node, &evt->events_to_see);
  865. if (evt_req_id == SLI_CT_ELX_LOOPBACK) {
  866. wake_up_interruptible(&evt->wq);
  867. lpfc_bsg_event_unref(evt);
  868. break;
  869. }
  870. list_move(evt->events_to_see.prev, &evt->events_to_get);
  871. lpfc_bsg_event_unref(evt);
  872. job = evt->set_job;
  873. evt->set_job = NULL;
  874. if (job) {
  875. job->reply->reply_payload_rcv_len = size;
  876. /* make error code available to userspace */
  877. job->reply->result = 0;
  878. job->dd_data = NULL;
  879. /* complete the job back to userspace */
  880. spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
  881. job->job_done(job);
  882. spin_lock_irqsave(&phba->ct_ev_lock, flags);
  883. }
  884. }
  885. spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
  886. error_ct_unsol_exit:
  887. if (!list_empty(&head))
  888. list_del(&head);
  889. if (evt_req_id == SLI_CT_ELX_LOOPBACK)
  890. return 0;
  891. return 1;
  892. }
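  /*
   * Note: on SLI4 the handler above stashes the unsolicited exchange's oxid
   * and source ID in phba->ct_ctx[] (64 entries, index wrapping) and reports
   * the index in evt_dat->immed_dat, so that lpfc_issue_ct_rsp() can later
   * transmit the CT response on the same exchange. On SLI3 immed_dat is
   * simply the iocb's ulpContext.
   */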
  893. /**
  894. * lpfc_bsg_hba_set_event - process a SET_EVENT bsg vendor command
  895. * @job: SET_EVENT fc_bsg_job
  896. **/
  897. static int
  898. lpfc_bsg_hba_set_event(struct fc_bsg_job *job)
  899. {
  900. struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
  901. struct lpfc_hba *phba = vport->phba;
  902. struct set_ct_event *event_req;
  903. struct lpfc_bsg_event *evt;
  904. int rc = 0;
  905. struct bsg_job_data *dd_data = NULL;
  906. uint32_t ev_mask;
  907. unsigned long flags;
  908. if (job->request_len <
  909. sizeof(struct fc_bsg_request) + sizeof(struct set_ct_event)) {
  910. lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
  911. "2612 Received SET_CT_EVENT below minimum "
  912. "size\n");
  913. rc = -EINVAL;
  914. goto job_error;
  915. }
  916. dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
  917. if (dd_data == NULL) {
  918. lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
  919. "2734 Failed allocation of dd_data\n");
  920. rc = -ENOMEM;
  921. goto job_error;
  922. }
  923. event_req = (struct set_ct_event *)
  924. job->request->rqst_data.h_vendor.vendor_cmd;
  925. ev_mask = ((uint32_t)(unsigned long)event_req->type_mask &
  926. FC_REG_EVENT_MASK);
  927. spin_lock_irqsave(&phba->ct_ev_lock, flags);
  928. list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
  929. if (evt->reg_id == event_req->ev_reg_id) {
  930. lpfc_bsg_event_ref(evt);
  931. evt->wait_time_stamp = jiffies;
  932. break;
  933. }
  934. }
  935. spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
  936. if (&evt->node == &phba->ct_ev_waiters) {
  937. /* no event waiting struct yet - first call */
  938. evt = lpfc_bsg_event_new(ev_mask, event_req->ev_reg_id,
  939. event_req->ev_req_id);
  940. if (!evt) {
  941. lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
  942. "2617 Failed allocation of event "
  943. "waiter\n");
  944. rc = -ENOMEM;
  945. goto job_error;
  946. }
  947. spin_lock_irqsave(&phba->ct_ev_lock, flags);
  948. list_add(&evt->node, &phba->ct_ev_waiters);
  949. lpfc_bsg_event_ref(evt);
  950. evt->wait_time_stamp = jiffies;
  951. spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
  952. }
  953. spin_lock_irqsave(&phba->ct_ev_lock, flags);
  954. evt->waiting = 1;
  955. dd_data->type = TYPE_EVT;
  956. dd_data->context_un.evt = evt;
  957. evt->set_job = job; /* for unsolicited command */
  958. job->dd_data = dd_data; /* for fc transport timeout callback*/
  959. spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
  960. return 0; /* call job done later */
  961. job_error:
  962. if (dd_data != NULL)
  963. kfree(dd_data);
  964. job->dd_data = NULL;
  965. return rc;
  966. }
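  /*
   * Note: SET_EVENT returns without calling job_done(); the job is completed
   * later, either from lpfc_bsg_ct_unsol_event() via evt->set_job when a
   * matching CT event arrives, or from the fc transport timeout callback
   * via job->dd_data.
   */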
  967. /**
  968. * lpfc_bsg_hba_get_event - process a GET_EVENT bsg vendor command
  969. * @job: GET_EVENT fc_bsg_job
  970. **/
  971. static int
  972. lpfc_bsg_hba_get_event(struct fc_bsg_job *job)
  973. {
  974. struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
  975. struct lpfc_hba *phba = vport->phba;
  976. struct get_ct_event *event_req;
  977. struct get_ct_event_reply *event_reply;
  978. struct lpfc_bsg_event *evt;
  979. struct event_data *evt_dat = NULL;
  980. unsigned long flags;
  981. uint32_t rc = 0;
  982. if (job->request_len <
  983. sizeof(struct fc_bsg_request) + sizeof(struct get_ct_event)) {
  984. lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
  985. "2613 Received GET_CT_EVENT request below "
  986. "minimum size\n");
  987. rc = -EINVAL;
  988. goto job_error;
  989. }
  990. event_req = (struct get_ct_event *)
  991. job->request->rqst_data.h_vendor.vendor_cmd;
  992. event_reply = (struct get_ct_event_reply *)
  993. job->reply->reply_data.vendor_reply.vendor_rsp;
  994. spin_lock_irqsave(&phba->ct_ev_lock, flags);
  995. list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
  996. if (evt->reg_id == event_req->ev_reg_id) {
  997. if (list_empty(&evt->events_to_get))
  998. break;
  999. lpfc_bsg_event_ref(evt);
  1000. evt->wait_time_stamp = jiffies;
  1001. evt_dat = list_entry(evt->events_to_get.prev,
  1002. struct event_data, node);
  1003. list_del(&evt_dat->node);
  1004. break;
  1005. }
  1006. }
  1007. spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
  1008. /* The app may continue to ask for event data until it gets
  1009. * an error indicating that there is no more
  1010. */
  1011. if (evt_dat == NULL) {
  1012. job->reply->reply_payload_rcv_len = 0;
  1013. rc = -ENOENT;
  1014. goto job_error;
  1015. }
  1016. if (evt_dat->len > job->request_payload.payload_len) {
  1017. evt_dat->len = job->request_payload.payload_len;
  1018. lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
  1019. "2618 Truncated event data at %d "
  1020. "bytes\n",
  1021. job->request_payload.payload_len);
  1022. }
  1023. event_reply->type = evt_dat->type;
  1024. event_reply->immed_data = evt_dat->immed_dat;
  1025. if (evt_dat->len > 0)
  1026. job->reply->reply_payload_rcv_len =
  1027. sg_copy_from_buffer(job->request_payload.sg_list,
  1028. job->request_payload.sg_cnt,
  1029. evt_dat->data, evt_dat->len);
  1030. else
  1031. job->reply->reply_payload_rcv_len = 0;
  1032. if (evt_dat) {
  1033. kfree(evt_dat->data);
  1034. kfree(evt_dat);
  1035. }
  1036. spin_lock_irqsave(&phba->ct_ev_lock, flags);
  1037. lpfc_bsg_event_unref(evt);
  1038. spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
  1039. job->dd_data = NULL;
  1040. job->reply->result = 0;
  1041. job->job_done(job);
  1042. return 0;
  1043. job_error:
  1044. job->dd_data = NULL;
  1045. job->reply->result = rc;
  1046. return rc;
  1047. }
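  /*
   * Note: GET_EVENT copies the queued event data back through the request
   * payload scatterlist (sg_copy_from_buffer on request_payload), i.e. the
   * application supplies its receive buffer in the request sg list; the bsg
   * reply itself only carries the event type, immediate data and byte count.
   */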
  1048. /**
  1049. * lpfc_issue_ct_rsp_cmp - lpfc_issue_ct_rsp's completion handler
  1050. * @phba: Pointer to HBA context object.
  1051. * @cmdiocbq: Pointer to command iocb.
  1052. * @rspiocbq: Pointer to response iocb.
  1053. *
  1054. * This function is the completion handler for iocbs issued using
  1055. * lpfc_issue_ct_rsp function. This function is called by the
  1056. * ring event handler function without any lock held. This function
  1057. * can be called from both worker thread context and interrupt
  1058. * context. This function also can be called from another thread which
  1059. * cleans up the SLI layer objects.
  1060. * This function copies the contents of the response iocb to the
  1061. * response iocb memory object provided by the caller of
  1062. * lpfc_sli_issue_iocb_wait and then wakes up the thread which
  1063. * sleeps for the iocb completion.
  1064. **/
  1065. static void
  1066. lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba,
  1067. struct lpfc_iocbq *cmdiocbq,
  1068. struct lpfc_iocbq *rspiocbq)
  1069. {
  1070. struct bsg_job_data *dd_data;
  1071. struct fc_bsg_job *job;
  1072. IOCB_t *rsp;
  1073. struct lpfc_dmabuf *bmp;
  1074. struct lpfc_nodelist *ndlp;
  1075. unsigned long flags;
  1076. int rc = 0;
  1077. spin_lock_irqsave(&phba->ct_ev_lock, flags);
  1078. dd_data = cmdiocbq->context1;
  1079. /* normal completion and timeout crossed paths, already done */
  1080. if (!dd_data) {
  1081. spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
  1082. return;
  1083. }
  1084. job = dd_data->context_un.iocb.set_job;
  1085. bmp = dd_data->context_un.iocb.bmp;
  1086. rsp = &rspiocbq->iocb;
  1087. ndlp = dd_data->context_un.iocb.ndlp;
  1088. pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
  1089. job->request_payload.sg_cnt, DMA_TO_DEVICE);
  1090. if (rsp->ulpStatus) {
  1091. if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
  1092. switch (rsp->un.ulpWord[4] & 0xff) {
  1093. case IOERR_SEQUENCE_TIMEOUT:
  1094. rc = -ETIMEDOUT;
  1095. break;
  1096. case IOERR_INVALID_RPI:
  1097. rc = -EFAULT;
  1098. break;
  1099. default:
  1100. rc = -EACCES;
  1101. break;
  1102. }
  1103. } else
  1104. rc = -EACCES;
  1105. } else
  1106. job->reply->reply_payload_rcv_len =
  1107. rsp->un.genreq64.bdl.bdeSize;
  1108. lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
  1109. lpfc_sli_release_iocbq(phba, cmdiocbq);
  1110. lpfc_nlp_put(ndlp);
  1111. kfree(bmp);
  1112. kfree(dd_data);
  1113. /* make error code available to userspace */
  1114. job->reply->result = rc;
  1115. job->dd_data = NULL;
  1116. /* complete the job back to userspace */
  1117. job->job_done(job);
  1118. spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
  1119. return;
  1120. }
  1121. /**
  1122. * lpfc_issue_ct_rsp - issue a ct response
  1123. * @phba: Pointer to HBA context object.
  1124. * @job: Pointer to the job object.
  1125. * @tag: tag index value into the ports context exchange array.
  1126. * @bmp: Pointer to a dma buffer descriptor.
  1127. * @num_entry: Number of entries in the bde.
  1128. **/
  1129. static int
  1130. lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
  1131. struct lpfc_dmabuf *bmp, int num_entry)
  1132. {
  1133. IOCB_t *icmd;
  1134. struct lpfc_iocbq *ctiocb = NULL;
  1135. int rc = 0;
  1136. struct lpfc_nodelist *ndlp = NULL;
  1137. struct bsg_job_data *dd_data;
  1138. uint32_t creg_val;
  1139. /* allocate our bsg tracking structure */
  1140. dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
  1141. if (!dd_data) {
  1142. lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
  1143. "2736 Failed allocation of dd_data\n");
  1144. rc = -ENOMEM;
  1145. goto no_dd_data;
  1146. }
  1147. /* Allocate buffer for command iocb */
  1148. ctiocb = lpfc_sli_get_iocbq(phba);
  1149. if (!ctiocb) {
  1150. rc = -ENOMEM;
  1151. goto no_ctiocb;
  1152. }
  1153. icmd = &ctiocb->iocb;
  1154. icmd->un.xseq64.bdl.ulpIoTag32 = 0;
  1155. icmd->un.xseq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
  1156. icmd->un.xseq64.bdl.addrLow = putPaddrLow(bmp->phys);
  1157. icmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
  1158. icmd->un.xseq64.bdl.bdeSize = (num_entry * sizeof(struct ulp_bde64));
  1159. icmd->un.xseq64.w5.hcsw.Fctl = (LS | LA);
  1160. icmd->un.xseq64.w5.hcsw.Dfctl = 0;
  1161. icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_SOL_CTL;
  1162. icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;
  1163. /* Fill in rest of iocb */
  1164. icmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX;
  1165. icmd->ulpBdeCount = 1;
  1166. icmd->ulpLe = 1;
  1167. icmd->ulpClass = CLASS3;
  1168. if (phba->sli_rev == LPFC_SLI_REV4) {
  1169. /* Do not issue unsol response if oxid not marked as valid */
  1170. if (!(phba->ct_ctx[tag].flags & UNSOL_VALID)) {
  1171. rc = IOCB_ERROR;
  1172. goto issue_ct_rsp_exit;
  1173. }
  1174. icmd->ulpContext = phba->ct_ctx[tag].oxid;
  1175. ndlp = lpfc_findnode_did(phba->pport, phba->ct_ctx[tag].SID);
  1176. if (!ndlp) {
  1177. lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
  1178. "2721 ndlp null for oxid %x SID %x\n",
  1179. icmd->ulpContext,
  1180. phba->ct_ctx[tag].SID);
  1181. rc = IOCB_ERROR;
  1182. goto issue_ct_rsp_exit;
  1183. }
  1184. icmd->un.ulpWord[3] = ndlp->nlp_rpi;
  1185. /* The exchange is done, mark the entry as invalid */
  1186. phba->ct_ctx[tag].flags &= ~UNSOL_VALID;
  1187. } else
  1188. icmd->ulpContext = (ushort) tag;
  1189. icmd->ulpTimeout = phba->fc_ratov * 2;
  1190. /* Xmit CT response on exchange <xid> */
  1191. lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
  1192. "2722 Xmit CT response on exchange x%x Data: x%x x%x\n",
  1193. icmd->ulpContext, icmd->ulpIoTag, phba->link_state);
  1194. ctiocb->iocb_cmpl = NULL;
  1195. ctiocb->iocb_flag |= LPFC_IO_LIBDFC;
  1196. ctiocb->vport = phba->pport;
  1197. ctiocb->context3 = bmp;
  1198. ctiocb->iocb_cmpl = lpfc_issue_ct_rsp_cmp;
  1199. ctiocb->context1 = dd_data;
  1200. ctiocb->context2 = NULL;
  1201. dd_data->type = TYPE_IOCB;
  1202. dd_data->context_un.iocb.cmdiocbq = ctiocb;
  1203. dd_data->context_un.iocb.rspiocbq = NULL;
  1204. dd_data->context_un.iocb.set_job = job;
  1205. dd_data->context_un.iocb.bmp = bmp;
  1206. dd_data->context_un.iocb.ndlp = ndlp;
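/* If polling mode has disabled FCP ring interrupts, re-enable them so
* the completion for this response is delivered.
*/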
  1207. if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
  1208. creg_val = readl(phba->HCregaddr);
  1209. creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
  1210. writel(creg_val, phba->HCregaddr);
  1211. readl(phba->HCregaddr); /* flush */
  1212. }
  1213. rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
  1214. if (rc == IOCB_SUCCESS)
  1215. return 0; /* done for now */
  1216. issue_ct_rsp_exit:
  1217. lpfc_sli_release_iocbq(phba, ctiocb);
  1218. no_ctiocb:
  1219. kfree(dd_data);
  1220. no_dd_data:
  1221. return rc;
  1222. }
  1223. /**
  1224. * lpfc_bsg_send_mgmt_rsp - process a SEND_MGMT_RESP bsg vendor command
  1225. * @job: SEND_MGMT_RESP fc_bsg_job
  1226. **/
  1227. static int
  1228. lpfc_bsg_send_mgmt_rsp(struct fc_bsg_job *job)
  1229. {
  1230. struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
  1231. struct lpfc_hba *phba = vport->phba;
  1232. struct send_mgmt_resp *mgmt_resp = (struct send_mgmt_resp *)
  1233. job->request->rqst_data.h_vendor.vendor_cmd;
  1234. struct ulp_bde64 *bpl;
  1235. struct lpfc_dmabuf *bmp = NULL;
  1236. struct scatterlist *sgel = NULL;
  1237. int request_nseg;
  1238. int numbde;
  1239. dma_addr_t busaddr;
  1240. uint32_t tag = mgmt_resp->tag;
  1241. unsigned long reqbfrcnt =
  1242. (unsigned long)job->request_payload.payload_len;
  1243. int rc = 0;
  1244. /* in case no data is transferred */
  1245. job->reply->reply_payload_rcv_len = 0;
  1246. if (!reqbfrcnt || (reqbfrcnt > (80 * BUF_SZ_4K))) {
  1247. rc = -ERANGE;
  1248. goto send_mgmt_rsp_exit;
  1249. }
  1250. bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
  1251. if (!bmp) {
  1252. rc = -ENOMEM;
  1253. goto send_mgmt_rsp_exit;
  1254. }
  1255. bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
  1256. if (!bmp->virt) {
  1257. rc = -ENOMEM;
  1258. goto send_mgmt_rsp_free_bmp;
  1259. }
  1260. INIT_LIST_HEAD(&bmp->list);
  1261. bpl = (struct ulp_bde64 *) bmp->virt;
  1262. request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
  1263. job->request_payload.sg_cnt, DMA_TO_DEVICE);
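/* Walk the DMA-mapped request scatterlist and build one 64-bit BDE per
* segment in the buffer pointer list.
*/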
  1264. for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
  1265. busaddr = sg_dma_address(sgel);
  1266. bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
  1267. bpl->tus.f.bdeSize = sg_dma_len(sgel);
  1268. bpl->tus.w = cpu_to_le32(bpl->tus.w);
  1269. bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
  1270. bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
  1271. bpl++;
  1272. }
  1273. rc = lpfc_issue_ct_rsp(phba, job, tag, bmp, request_nseg);
  1274. if (rc == IOCB_SUCCESS)
  1275. return 0; /* done for now */
  1276. /* TBD need to handle a timeout */
  1277. pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
  1278. job->request_payload.sg_cnt, DMA_TO_DEVICE);
  1279. rc = -EACCES;
  1280. lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
  1281. send_mgmt_rsp_free_bmp:
  1282. kfree(bmp);
  1283. send_mgmt_rsp_exit:
  1284. /* make error code available to userspace */
  1285. job->reply->result = rc;
  1286. job->dd_data = NULL;
  1287. return rc;
  1288. }
  1289. /**
  1290. * lpfc_bsg_diag_mode - process a LPFC_BSG_VENDOR_DIAG_MODE bsg vendor command
  1291. * @job: LPFC_BSG_VENDOR_DIAG_MODE
  1292. *
  1293. * This function is responsible for placing a port into diagnostic loopback
  1294. * mode in order to perform a diagnostic loopback test.
  1295. * All new scsi requests are blocked, a small delay is used to allow the
1296. * scsi requests to complete, then the link is brought down. If the link
1297. * is placed in loopback mode then scsi requests are again allowed
  1298. * so the scsi mid-layer doesn't give up on the port.
  1299. * All of this is done in-line.
  1300. */
  1301. static int
  1302. lpfc_bsg_diag_mode(struct fc_bsg_job *job)
  1303. {
  1304. struct Scsi_Host *shost = job->shost;
  1305. struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
  1306. struct lpfc_hba *phba = vport->phba;
  1307. struct diag_mode_set *loopback_mode;
  1308. struct lpfc_sli *psli = &phba->sli;
  1309. struct lpfc_sli_ring *pring = &psli->ring[LPFC_FCP_RING];
  1310. uint32_t link_flags;
  1311. uint32_t timeout;
  1312. struct lpfc_vport **vports;
  1313. LPFC_MBOXQ_t *pmboxq;
  1314. int mbxstatus;
  1315. int i = 0;
  1316. int rc = 0;
  1317. /* no data to return just the return code */
  1318. job->reply->reply_payload_rcv_len = 0;
  1319. if (job->request_len <
  1320. sizeof(struct fc_bsg_request) + sizeof(struct diag_mode_set)) {
  1321. lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
  1322. "2738 Received DIAG MODE request below minimum "
  1323. "size\n");
  1324. rc = -EINVAL;
  1325. goto job_error;
  1326. }
  1327. loopback_mode = (struct diag_mode_set *)
  1328. job->request->rqst_data.h_vendor.vendor_cmd;
  1329. link_flags = loopback_mode->type;
  1330. timeout = loopback_mode->timeout;
  1331. if ((phba->link_state == LPFC_HBA_ERROR) ||
  1332. (psli->sli_flag & LPFC_BLOCK_MGMT_IO) ||
  1333. (!(psli->sli_flag & LPFC_SLI_ACTIVE))) {
  1334. rc = -EACCES;
  1335. goto job_error;
  1336. }
  1337. pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
  1338. if (!pmboxq) {
  1339. rc = -ENOMEM;
  1340. goto job_error;
  1341. }
  1342. vports = lpfc_create_vport_work_array(phba);
  1343. if (vports) {
  1344. for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
  1345. shost = lpfc_shost_from_vport(vports[i]);
  1346. scsi_block_requests(shost);
  1347. }
  1348. lpfc_destroy_vport_work_array(phba, vports);
  1349. } else {
  1350. shost = lpfc_shost_from_vport(phba->pport);
  1351. scsi_block_requests(shost);
  1352. }
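/* Give outstanding FCP commands up to 5 seconds (500 x 10ms) to drain
* before bringing the link down.
*/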
  1353. while (pring->txcmplq_cnt) {
  1354. if (i++ > 500) /* wait up to 5 seconds */
  1355. break;
  1356. msleep(10);
  1357. }
  1358. memset((void *)pmboxq, 0, sizeof(LPFC_MBOXQ_t));
  1359. pmboxq->u.mb.mbxCommand = MBX_DOWN_LINK;
  1360. pmboxq->u.mb.mbxOwner = OWN_HOST;
  1361. mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
  1362. if ((mbxstatus == MBX_SUCCESS) && (pmboxq->u.mb.mbxStatus == 0)) {
  1363. /* wait for link down before proceeding */
  1364. i = 0;
  1365. while (phba->link_state != LPFC_LINK_DOWN) {
  1366. if (i++ > timeout) {
  1367. rc = -ETIMEDOUT;
  1368. goto loopback_mode_exit;
  1369. }
  1370. msleep(10);
  1371. }
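/* Re-initialize the link with the requested loopback flag: internal
* loopback uses FLAGS_LOCAL_LB, otherwise loop topology is requested.
*/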
  1372. memset((void *)pmboxq, 0, sizeof(LPFC_MBOXQ_t));
  1373. if (link_flags == INTERNAL_LOOP_BACK)
  1374. pmboxq->u.mb.un.varInitLnk.link_flags = FLAGS_LOCAL_LB;
  1375. else
  1376. pmboxq->u.mb.un.varInitLnk.link_flags =
  1377. FLAGS_TOPOLOGY_MODE_LOOP;
  1378. pmboxq->u.mb.mbxCommand = MBX_INIT_LINK;
  1379. pmboxq->u.mb.mbxOwner = OWN_HOST;
  1380. mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq,
  1381. LPFC_MBOX_TMO);
  1382. if ((mbxstatus != MBX_SUCCESS) || (pmboxq->u.mb.mbxStatus))
  1383. rc = -ENODEV;
  1384. else {
  1385. phba->link_flag |= LS_LOOPBACK_MODE;
  1386. /* wait for the link attention interrupt */
  1387. msleep(100);
  1388. i = 0;
  1389. while (phba->link_state != LPFC_HBA_READY) {
  1390. if (i++ > timeout) {
  1391. rc = -ETIMEDOUT;
  1392. break;
  1393. }
  1394. msleep(10);
  1395. }
  1396. }
  1397. } else
  1398. rc = -ENODEV;
  1399. loopback_mode_exit:
  1400. vports = lpfc_create_vport_work_array(phba);
  1401. if (vports) {
  1402. for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
  1403. shost = lpfc_shost_from_vport(vports[i]);
  1404. scsi_unblock_requests(shost);
  1405. }
  1406. lpfc_destroy_vport_work_array(phba, vports);
  1407. } else {
  1408. shost = lpfc_shost_from_vport(phba->pport);
  1409. scsi_unblock_requests(shost);
  1410. }
  1411. /*
  1412. * Let SLI layer release mboxq if mbox command completed after timeout.
  1413. */
  1414. if (mbxstatus != MBX_TIMEOUT)
  1415. mempool_free(pmboxq, phba->mbox_mem_pool);
  1416. job_error:
  1417. /* make error code available to userspace */
  1418. job->reply->result = rc;
  1419. /* complete the job back to userspace if no error */
  1420. if (rc == 0)
  1421. job->job_done(job);
  1422. return rc;
  1423. }
  1424. /**
  1425. * lpfcdiag_loop_self_reg - obtains a remote port login id
  1426. * @phba: Pointer to HBA context object
  1427. * @rpi: Pointer to a remote port login id
  1428. *
  1429. * This function obtains a remote port login id so the diag loopback test
  1430. * can send and receive its own unsolicited CT command.
  1431. **/
1432. static int lpfcdiag_loop_self_reg(struct lpfc_hba *phba, uint16_t *rpi)
  1433. {
  1434. LPFC_MBOXQ_t *mbox;
  1435. struct lpfc_dmabuf *dmabuff;
  1436. int status;
  1437. mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
  1438. if (!mbox)
  1439. return ENOMEM;
  1440. status = lpfc_reg_rpi(phba, 0, phba->pport->fc_myDID,
  1441. (uint8_t *)&phba->pport->fc_sparam, mbox, 0);
  1442. if (status) {
  1443. mempool_free(mbox, phba->mbox_mem_pool);
  1444. return ENOMEM;
  1445. }
  1446. dmabuff = (struct lpfc_dmabuf *) mbox->context1;
  1447. mbox->context1 = NULL;
  1448. status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
  1449. if ((status != MBX_SUCCESS) || (mbox->u.mb.mbxStatus)) {
  1450. lpfc_mbuf_free(phba, dmabuff->virt, dmabuff->phys);
  1451. kfree(dmabuff);
  1452. if (status != MBX_TIMEOUT)
  1453. mempool_free(mbox, phba->mbox_mem_pool);
  1454. return ENODEV;
  1455. }
  1456. *rpi = mbox->u.mb.un.varWords[0];
  1457. lpfc_mbuf_free(phba, dmabuff->virt, dmabuff->phys);
  1458. kfree(dmabuff);
  1459. mempool_free(mbox, phba->mbox_mem_pool);
  1460. return 0;
  1461. }
  1462. /**
  1463. * lpfcdiag_loop_self_unreg - unregs from the rpi
  1464. * @phba: Pointer to HBA context object
  1465. * @rpi: Remote port login id
  1466. *
  1467. * This function unregisters the rpi obtained in lpfcdiag_loop_self_reg
  1468. **/
  1469. static int lpfcdiag_loop_self_unreg(struct lpfc_hba *phba, uint16_t rpi)
  1470. {
  1471. LPFC_MBOXQ_t *mbox;
  1472. int status;
  1473. /* Allocate mboxq structure */
  1474. mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
  1475. if (mbox == NULL)
  1476. return ENOMEM;
  1477. lpfc_unreg_login(phba, 0, rpi, mbox);
  1478. status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
  1479. if ((status != MBX_SUCCESS) || (mbox->u.mb.mbxStatus)) {
  1480. if (status != MBX_TIMEOUT)
  1481. mempool_free(mbox, phba->mbox_mem_pool);
  1482. return EIO;
  1483. }
  1484. mempool_free(mbox, phba->mbox_mem_pool);
  1485. return 0;
  1486. }
  1487. /**
  1488. * lpfcdiag_loop_get_xri - obtains the transmit and receive ids
  1489. * @phba: Pointer to HBA context object
  1490. * @rpi: Remote port login id
  1491. * @txxri: Pointer to transmit exchange id
1492. * @rxxri: Pointer to receive exchange id
1493. *
1494. * This function obtains the transmit and receive ids required to send
1495. * an unsolicited ct command with a payload. A special lpfc FsType and CmdRsp
1496. * flags are used so the unsolicited response handler is able to process
1497. * the ct command sent on the same port.
  1498. **/
  1499. static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi,
1500. uint16_t *txxri, uint16_t *rxxri)
  1501. {
  1502. struct lpfc_bsg_event *evt;
  1503. struct lpfc_iocbq *cmdiocbq, *rspiocbq;
  1504. IOCB_t *cmd, *rsp;
  1505. struct lpfc_dmabuf *dmabuf;
  1506. struct ulp_bde64 *bpl = NULL;
  1507. struct lpfc_sli_ct_request *ctreq = NULL;
  1508. int ret_val = 0;
  1509. unsigned long flags;
  1510. *txxri = 0;
  1511. *rxxri = 0;
  1512. evt = lpfc_bsg_event_new(FC_REG_CT_EVENT, current->pid,
  1513. SLI_CT_ELX_LOOPBACK);
  1514. if (!evt)
  1515. return ENOMEM;
  1516. spin_lock_irqsave(&phba->ct_ev_lock, flags);
  1517. list_add(&evt->node, &phba->ct_ev_waiters);
  1518. lpfc_bsg_event_ref(evt);
  1519. spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
  1520. cmdiocbq = lpfc_sli_get_iocbq(phba);
  1521. rspiocbq = lpfc_sli_get_iocbq(phba);
  1522. dmabuf = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
  1523. if (dmabuf) {
  1524. dmabuf->virt = lpfc_mbuf_alloc(phba, 0, &dmabuf->phys);
  1525. if (dmabuf->virt) {
  1526. INIT_LIST_HEAD(&dmabuf->list);
  1527. bpl = (struct ulp_bde64 *) dmabuf->virt;
  1528. memset(bpl, 0, sizeof(*bpl));
  1529. ctreq = (struct lpfc_sli_ct_request *)(bpl + 1);
  1530. bpl->addrHigh =
  1531. le32_to_cpu(putPaddrHigh(dmabuf->phys +
  1532. sizeof(*bpl)));
  1533. bpl->addrLow =
  1534. le32_to_cpu(putPaddrLow(dmabuf->phys +
  1535. sizeof(*bpl)));
  1536. bpl->tus.f.bdeFlags = 0;
  1537. bpl->tus.f.bdeSize = ELX_LOOPBACK_HEADER_SZ;
  1538. bpl->tus.w = le32_to_cpu(bpl->tus.w);
  1539. }
  1540. }
  1541. if (cmdiocbq == NULL || rspiocbq == NULL ||
  1542. dmabuf == NULL || bpl == NULL || ctreq == NULL ||
  1543. dmabuf->virt == NULL) {
  1544. ret_val = ENOMEM;
  1545. goto err_get_xri_exit;
  1546. }
  1547. cmd = &cmdiocbq->iocb;
  1548. rsp = &rspiocbq->iocb;
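/* Build an ELX_LOOPBACK_XRI_SETUP CT request; the SLI_CT_ELX_LOOPBACK
* FsType lets the driver's unsolicited CT handler recognize it and
* queue it as a loopback event.
*/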
  1549. memset(ctreq, 0, ELX_LOOPBACK_HEADER_SZ);
  1550. ctreq->RevisionId.bits.Revision = SLI_CT_REVISION;
  1551. ctreq->RevisionId.bits.InId = 0;
  1552. ctreq->FsType = SLI_CT_ELX_LOOPBACK;
  1553. ctreq->FsSubType = 0;
  1554. ctreq->CommandResponse.bits.CmdRsp = ELX_LOOPBACK_XRI_SETUP;
  1555. ctreq->CommandResponse.bits.Size = 0;
  1556. cmd->un.xseq64.bdl.addrHigh = putPaddrHigh(dmabuf->phys);
  1557. cmd->un.xseq64.bdl.addrLow = putPaddrLow(dmabuf->phys);
  1558. cmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
  1559. cmd->un.xseq64.bdl.bdeSize = sizeof(*bpl);
  1560. cmd->un.xseq64.w5.hcsw.Fctl = LA;
  1561. cmd->un.xseq64.w5.hcsw.Dfctl = 0;
  1562. cmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
  1563. cmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;
  1564. cmd->ulpCommand = CMD_XMIT_SEQUENCE64_CR;
  1565. cmd->ulpBdeCount = 1;
  1566. cmd->ulpLe = 1;
  1567. cmd->ulpClass = CLASS3;
  1568. cmd->ulpContext = rpi;
  1569. cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
  1570. cmdiocbq->vport = phba->pport;
  1571. ret_val = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
  1572. rspiocbq,
  1573. (phba->fc_ratov * 2)
  1574. + LPFC_DRVR_TIMEOUT);
  1575. if (ret_val)
  1576. goto err_get_xri_exit;
  1577. *txxri = rsp->ulpContext;
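/* The transmit XRI comes back in the response iocb; now wait for the
* unsolicited loopback event that carries the receive XRI.
*/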
  1578. evt->waiting = 1;
  1579. evt->wait_time_stamp = jiffies;
  1580. ret_val = wait_event_interruptible_timeout(
  1581. evt->wq, !list_empty(&evt->events_to_see),
  1582. ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT) * HZ);
  1583. if (list_empty(&evt->events_to_see))
  1584. ret_val = (ret_val) ? EINTR : ETIMEDOUT;
  1585. else {
  1586. ret_val = IOCB_SUCCESS;
  1587. spin_lock_irqsave(&phba->ct_ev_lock, flags);
  1588. list_move(evt->events_to_see.prev, &evt->events_to_get);
  1589. spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
  1590. *rxxri = (list_entry(evt->events_to_get.prev,
  1591. typeof(struct event_data),
  1592. node))->immed_dat;
  1593. }
  1594. evt->waiting = 0;
  1595. err_get_xri_exit:
  1596. spin_lock_irqsave(&phba->ct_ev_lock, flags);
  1597. lpfc_bsg_event_unref(evt); /* release ref */
  1598. lpfc_bsg_event_unref(evt); /* delete */
  1599. spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
  1600. if (dmabuf) {
  1601. if (dmabuf->virt)
  1602. lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
  1603. kfree(dmabuf);
  1604. }
  1605. if (cmdiocbq && (ret_val != IOCB_TIMEDOUT))
  1606. lpfc_sli_release_iocbq(phba, cmdiocbq);
  1607. if (rspiocbq)
  1608. lpfc_sli_release_iocbq(phba, rspiocbq);
  1609. return ret_val;
  1610. }
  1611. /**
  1612. * diag_cmd_data_alloc - fills in a bde struct with dma buffers
  1613. * @phba: Pointer to HBA context object
  1614. * @bpl: Pointer to 64 bit bde structure
  1615. * @size: Number of bytes to process
1616. * @nocopydata: Flag indicating that user data will not be copied into the buffers
1617. *
1618. * This function allocates page size buffers and populates an lpfc_dmabufext.
1619. * Unless @nocopydata is set, each buffer is zeroed so user data can later be
1620. * copied into the kernel memory. The chained list of page size buffers is returned.
  1621. **/
  1622. static struct lpfc_dmabufext *
  1623. diag_cmd_data_alloc(struct lpfc_hba *phba,
  1624. struct ulp_bde64 *bpl, uint32_t size,
  1625. int nocopydata)
  1626. {
  1627. struct lpfc_dmabufext *mlist = NULL;
  1628. struct lpfc_dmabufext *dmp;
  1629. int cnt, offset = 0, i = 0;
  1630. struct pci_dev *pcidev;
  1631. pcidev = phba->pcidev;
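/* Carve the request into 4K chunks: each chunk gets its own coherent
* DMA buffer and one BDE entry in the caller's buffer pointer list.
*/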
  1632. while (size) {
  1633. /* We get chunks of 4K */
  1634. if (size > BUF_SZ_4K)
  1635. cnt = BUF_SZ_4K;
  1636. else
  1637. cnt = size;
  1638. /* allocate struct lpfc_dmabufext buffer header */
  1639. dmp = kmalloc(sizeof(struct lpfc_dmabufext), GFP_KERNEL);
  1640. if (!dmp)
  1641. goto out;
  1642. INIT_LIST_HEAD(&dmp->dma.list);
  1643. /* Queue it to a linked list */
  1644. if (mlist)
  1645. list_add_tail(&dmp->dma.list, &mlist->dma.list);
  1646. else
  1647. mlist = dmp;
  1648. /* allocate buffer */
  1649. dmp->dma.virt = dma_alloc_coherent(&pcidev->dev,
  1650. cnt,
  1651. &(dmp->dma.phys),
  1652. GFP_KERNEL);
  1653. if (!dmp->dma.virt)
  1654. goto out;
  1655. dmp->size = cnt;
  1656. if (nocopydata) {
  1657. bpl->tus.f.bdeFlags = 0;
  1658. pci_dma_sync_single_for_device(phba->pcidev,
  1659. dmp->dma.phys, LPFC_BPL_SIZE, PCI_DMA_TODEVICE);
  1660. } else {
  1661. memset((uint8_t *)dmp->dma.virt, 0, cnt);
  1662. bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
  1663. }
  1664. /* build buffer ptr list for IOCB */
  1665. bpl->addrLow = le32_to_cpu(putPaddrLow(dmp->dma.phys));
  1666. bpl->addrHigh = le32_to_cpu(putPaddrHigh(dmp->dma.phys));
  1667. bpl->tus.f.bdeSize = (ushort) cnt;
  1668. bpl->tus.w = le32_to_cpu(bpl->tus.w);
  1669. bpl++;
  1670. i++;
  1671. offset += cnt;
  1672. size -= cnt;
  1673. }
  1674. mlist->flag = i;
  1675. return mlist;
  1676. out:
  1677. diag_cmd_data_free(phba, mlist);
  1678. return NULL;
  1679. }
  1680. /**
  1681. * lpfcdiag_loop_post_rxbufs - post the receive buffers for an unsol CT cmd
  1682. * @phba: Pointer to HBA context object
  1683. * @rxxri: Receive exchange id
  1684. * @len: Number of data bytes
  1685. *
1686. * This function allocates and posts a data buffer of sufficient size to receive
1687. * an unsolicited CT command.
  1688. **/
  1689. static int lpfcdiag_loop_post_rxbufs(struct lpfc_hba *phba, uint16_t rxxri,
  1690. size_t len)
  1691. {
  1692. struct lpfc_sli *psli = &phba->sli;
  1693. struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
  1694. struct lpfc_iocbq *cmdiocbq;
  1695. IOCB_t *cmd = NULL;
  1696. struct list_head head, *curr, *next;
  1697. struct lpfc_dmabuf *rxbmp;
  1698. struct lpfc_dmabuf *dmp;
  1699. struct lpfc_dmabuf *mp[2] = {NULL, NULL};
  1700. struct ulp_bde64 *rxbpl = NULL;
  1701. uint32_t num_bde;
  1702. struct lpfc_dmabufext *rxbuffer = NULL;
  1703. int ret_val = 0;
  1704. int i = 0;
  1705. cmdiocbq = lpfc_sli_get_iocbq(phba);
  1706. rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
  1707. if (rxbmp != NULL) {
  1708. rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
  1709. if (rxbmp->virt) {
  1710. INIT_LIST_HEAD(&rxbmp->list);
  1711. rxbpl = (struct ulp_bde64 *) rxbmp->virt;
  1712. rxbuffer = diag_cmd_data_alloc(phba, rxbpl, len, 0);
  1713. }
  1714. }
  1715. if (!cmdiocbq || !rxbmp || !rxbpl || !rxbuffer) {
  1716. ret_val = ENOMEM;
  1717. goto err_post_rxbufs_exit;
  1718. }
  1719. /* Queue buffers for the receive exchange */
  1720. num_bde = (uint32_t)rxbuffer->flag;
  1721. dmp = &rxbuffer->dma;
  1722. cmd = &cmdiocbq->iocb;
  1723. i = 0;
  1724. INIT_LIST_HEAD(&head);
  1725. list_add_tail(&head, &dmp->list);
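/* Post the receive buffers on the ELS ring: one QUE_XRI64_CX per buffer
* when HBQs are enabled, otherwise up to two BDEs per QUE_XRI_BUF64_CX.
*/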
  1726. list_for_each_safe(curr, next, &head) {
  1727. mp[i] = list_entry(curr, struct lpfc_dmabuf, list);
  1728. list_del(curr);
  1729. if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
  1730. mp[i]->buffer_tag = lpfc_sli_get_buffer_tag(phba);
  1731. cmd->un.quexri64cx.buff.bde.addrHigh =
  1732. putPaddrHigh(mp[i]->phys);
  1733. cmd->un.quexri64cx.buff.bde.addrLow =
  1734. putPaddrLow(mp[i]->phys);
  1735. cmd->un.quexri64cx.buff.bde.tus.f.bdeSize =
  1736. ((struct lpfc_dmabufext *)mp[i])->size;
  1737. cmd->un.quexri64cx.buff.buffer_tag = mp[i]->buffer_tag;
  1738. cmd->ulpCommand = CMD_QUE_XRI64_CX;
  1739. cmd->ulpPU = 0;
  1740. cmd->ulpLe = 1;
  1741. cmd->ulpBdeCount = 1;
  1742. cmd->unsli3.que_xri64cx_ext_words.ebde_count = 0;
  1743. } else {
  1744. cmd->un.cont64[i].addrHigh = putPaddrHigh(mp[i]->phys);
  1745. cmd->un.cont64[i].addrLow = putPaddrLow(mp[i]->phys);
  1746. cmd->un.cont64[i].tus.f.bdeSize =
  1747. ((struct lpfc_dmabufext *)mp[i])->size;
  1748. cmd->ulpBdeCount = ++i;
  1749. if ((--num_bde > 0) && (i < 2))
  1750. continue;
  1751. cmd->ulpCommand = CMD_QUE_XRI_BUF64_CX;
  1752. cmd->ulpLe = 1;
  1753. }
  1754. cmd->ulpClass = CLASS3;
  1755. cmd->ulpContext = rxxri;
  1756. ret_val = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
  1757. if (ret_val == IOCB_ERROR) {
  1758. diag_cmd_data_free(phba,
  1759. (struct lpfc_dmabufext *)mp[0]);
  1760. if (mp[1])
  1761. diag_cmd_data_free(phba,
  1762. (struct lpfc_dmabufext *)mp[1]);
  1763. dmp = list_entry(next, struct lpfc_dmabuf, list);
  1764. ret_val = EIO;
  1765. goto err_post_rxbufs_exit;
  1766. }
  1767. lpfc_sli_ringpostbuf_put(phba, pring, mp[0]);
  1768. if (mp[1]) {
  1769. lpfc_sli_ringpostbuf_put(phba, pring, mp[1]);
  1770. mp[1] = NULL;
  1771. }
  1772. /* The iocb was freed by lpfc_sli_issue_iocb */
  1773. cmdiocbq = lpfc_sli_get_iocbq(phba);
  1774. if (!cmdiocbq) {
  1775. dmp = list_entry(next, struct lpfc_dmabuf, list);
  1776. ret_val = EIO;
  1777. goto err_post_rxbufs_exit;
  1778. }
  1779. cmd = &cmdiocbq->iocb;
  1780. i = 0;
  1781. }
  1782. list_del(&head);
  1783. err_post_rxbufs_exit:
  1784. if (rxbmp) {
  1785. if (rxbmp->virt)
  1786. lpfc_mbuf_free(phba, rxbmp->virt, rxbmp->phys);
  1787. kfree(rxbmp);
  1788. }
  1789. if (cmdiocbq)
  1790. lpfc_sli_release_iocbq(phba, cmdiocbq);
  1791. return ret_val;
  1792. }
  1793. /**
  1794. * lpfc_bsg_diag_test - with a port in loopback issues a Ct cmd to itself
  1795. * @job: LPFC_BSG_VENDOR_DIAG_TEST fc_bsg_job
  1796. *
  1797. * This function receives a user data buffer to be transmitted and received on
1798. * the same port; the link must be up and in loopback mode prior
  1799. * to being called.
  1800. * 1. A kernel buffer is allocated to copy the user data into.
  1801. * 2. The port registers with "itself".
  1802. * 3. The transmit and receive exchange ids are obtained.
  1803. * 4. The receive exchange id is posted.
  1804. * 5. A new els loopback event is created.
  1805. * 6. The command and response iocbs are allocated.
1806. * 7. The cmd iocb FsType is set to elx loopback and the CmdRsp to loopback.
  1807. *
  1808. * This function is meant to be called n times while the port is in loopback
1809. * so it is the app's responsibility to issue a reset to take the port out
  1810. * of loopback mode.
  1811. **/
  1812. static int
  1813. lpfc_bsg_diag_test(struct fc_bsg_job *job)
  1814. {
  1815. struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
  1816. struct lpfc_hba *phba = vport->phba;
  1817. struct diag_mode_test *diag_mode;
  1818. struct lpfc_bsg_event *evt;
  1819. struct event_data *evdat;
  1820. struct lpfc_sli *psli = &phba->sli;
  1821. uint32_t size;
  1822. uint32_t full_size;
  1823. size_t segment_len = 0, segment_offset = 0, current_offset = 0;
  1824. uint16_t rpi;
  1825. struct lpfc_iocbq *cmdiocbq, *rspiocbq;
  1826. IOCB_t *cmd, *rsp;
  1827. struct lpfc_sli_ct_request *ctreq;
  1828. struct lpfc_dmabuf *txbmp;
  1829. struct ulp_bde64 *txbpl = NULL;
  1830. struct lpfc_dmabufext *txbuffer = NULL;
  1831. struct list_head head;
  1832. struct lpfc_dmabuf *curr;
  1833. uint16_t txxri, rxxri;
  1834. uint32_t num_bde;
  1835. uint8_t *ptr = NULL, *rx_databuf = NULL;
  1836. int rc = 0;
  1837. unsigned long flags;
  1838. void *dataout = NULL;
  1839. uint32_t total_mem;
1840. /* in case no data is returned, return just the return code */
  1841. job->reply->reply_payload_rcv_len = 0;
  1842. if (job->request_len <
  1843. sizeof(struct fc_bsg_request) + sizeof(struct diag_mode_test)) {
  1844. lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
  1845. "2739 Received DIAG TEST request below minimum "
  1846. "size\n");
  1847. rc = -EINVAL;
  1848. goto loopback_test_exit;
  1849. }
  1850. if (job->request_payload.payload_len !=
  1851. job->reply_payload.payload_len) {
  1852. rc = -EINVAL;
  1853. goto loopback_test_exit;
  1854. }
  1855. diag_mode = (struct diag_mode_test *)
  1856. job->request->rqst_data.h_vendor.vendor_cmd;
  1857. if ((phba->link_state == LPFC_HBA_ERROR) ||
  1858. (psli->sli_flag & LPFC_BLOCK_MGMT_IO) ||
  1859. (!(psli->sli_flag & LPFC_SLI_ACTIVE))) {
  1860. rc = -EACCES;
  1861. goto loopback_test_exit;
  1862. }
  1863. if (!lpfc_is_link_up(phba) || !(phba->link_flag & LS_LOOPBACK_MODE)) {
  1864. rc = -EACCES;
  1865. goto loopback_test_exit;
  1866. }
  1867. size = job->request_payload.payload_len;
  1868. full_size = size + ELX_LOOPBACK_HEADER_SZ; /* plus the header */
  1869. if ((size == 0) || (size > 80 * BUF_SZ_4K)) {
  1870. rc = -ERANGE;
  1871. goto loopback_test_exit;
  1872. }
  1873. if (size >= BUF_SZ_4K) {
  1874. /*
  1875. * Allocate memory for ioctl data. If buffer is bigger than 64k,
  1876. * then we allocate 64k and re-use that buffer over and over to
  1877. * xfer the whole block. This is because Linux kernel has a
  1878. * problem allocating more than 120k of kernel space memory. Saw
  1879. * problem with GET_FCPTARGETMAPPING...
  1880. */
  1881. if (size <= (64 * 1024))
  1882. total_mem = size;
  1883. else
  1884. total_mem = 64 * 1024;
  1885. } else
  1886. /* Allocate memory for ioctl data */
  1887. total_mem = BUF_SZ_4K;
  1888. dataout = kmalloc(total_mem, GFP_KERNEL);
  1889. if (dataout == NULL) {
  1890. rc = -ENOMEM;
  1891. goto loopback_test_exit;
  1892. }
  1893. ptr = dataout;
  1894. ptr += ELX_LOOPBACK_HEADER_SZ;
  1895. sg_copy_to_buffer(job->request_payload.sg_list,
  1896. job->request_payload.sg_cnt,
  1897. ptr, size);
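/* The user payload is staged after room reserved for the ELX loopback
* header that is prepended to the transmitted sequence.
*/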
  1898. rc = lpfcdiag_loop_self_reg(phba, &rpi);
  1899. if (rc) {
  1900. rc = -ENOMEM;
  1901. goto loopback_test_exit;
  1902. }
  1903. rc = lpfcdiag_loop_get_xri(phba, rpi, &txxri, &rxxri);
  1904. if (rc) {
  1905. lpfcdiag_loop_self_unreg(phba, rpi);
  1906. rc = -ENOMEM;
  1907. goto loopback_test_exit;
  1908. }
  1909. rc = lpfcdiag_loop_post_rxbufs(phba, rxxri, full_size);
  1910. if (rc) {
  1911. lpfcdiag_loop_self_unreg(phba, rpi);
  1912. rc = -ENOMEM;
  1913. goto loopback_test_exit;
  1914. }
  1915. evt = lpfc_bsg_event_new(FC_REG_CT_EVENT, current->pid,
  1916. SLI_CT_ELX_LOOPBACK);
  1917. if (!evt) {
  1918. lpfcdiag_loop_self_unreg(phba, rpi);
  1919. rc = -ENOMEM;
  1920. goto loopback_test_exit;
  1921. }
  1922. spin_lock_irqsave(&phba->ct_ev_lock, flags);
  1923. list_add(&evt->node, &phba->ct_ev_waiters);
  1924. lpfc_bsg_event_ref(evt);
  1925. spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
  1926. cmdiocbq = lpfc_sli_get_iocbq(phba);
  1927. rspiocbq = lpfc_sli_get_iocbq(phba);
  1928. txbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
  1929. if (txbmp) {
  1930. txbmp->virt = lpfc_mbuf_alloc(phba, 0, &txbmp->phys);
  1931. if (txbmp->virt) {
  1932. INIT_LIST_HEAD(&txbmp->list);
  1933. txbpl = (struct ulp_bde64 *) txbmp->virt;
  1934. txbuffer = diag_cmd_data_alloc(phba,
  1935. txbpl, full_size, 0);
  1936. }
  1937. }
  1938. if (!cmdiocbq || !rspiocbq || !txbmp || !txbpl || !txbuffer ||
  1939. !txbmp->virt) {
  1940. rc = -ENOMEM;
  1941. goto err_loopback_test_exit;
  1942. }
  1943. cmd = &cmdiocbq->iocb;
  1944. rsp = &rspiocbq->iocb;
  1945. INIT_LIST_HEAD(&head);
  1946. list_add_tail(&head, &txbuffer->dma.list);
  1947. list_for_each_entry(curr, &head, list) {
  1948. segment_len = ((struct lpfc_dmabufext *)curr)->size;
  1949. if (current_offset == 0) {
  1950. ctreq = curr->virt;
  1951. memset(ctreq, 0, ELX_LOOPBACK_HEADER_SZ);
  1952. ctreq->RevisionId.bits.Revision = SLI_CT_REVISION;
  1953. ctreq->RevisionId.bits.InId = 0;
  1954. ctreq->FsType = SLI_CT_ELX_LOOPBACK;
  1955. ctreq->FsSubType = 0;
  1956. ctreq->CommandResponse.bits.CmdRsp = ELX_LOOPBACK_DATA;
  1957. ctreq->CommandResponse.bits.Size = size;
  1958. segment_offset = ELX_LOOPBACK_HEADER_SZ;
  1959. } else
  1960. segment_offset = 0;
  1961. BUG_ON(segment_offset >= segment_len);
  1962. memcpy(curr->virt + segment_offset,
  1963. ptr + current_offset,
  1964. segment_len - segment_offset);
  1965. current_offset += segment_len - segment_offset;
  1966. BUG_ON(current_offset > size);
  1967. }
  1968. list_del(&head);
  1969. /* Build the XMIT_SEQUENCE iocb */
  1970. num_bde = (uint32_t)txbuffer->flag;
  1971. cmd->un.xseq64.bdl.addrHigh = putPaddrHigh(txbmp->phys);
  1972. cmd->un.xseq64.bdl.addrLow = putPaddrLow(txbmp->phys);
  1973. cmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
  1974. cmd->un.xseq64.bdl.bdeSize = (num_bde * sizeof(struct ulp_bde64));
  1975. cmd->un.xseq64.w5.hcsw.Fctl = (LS | LA);
  1976. cmd->un.xseq64.w5.hcsw.Dfctl = 0;
  1977. cmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
  1978. cmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;
  1979. cmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX;
  1980. cmd->ulpBdeCount = 1;
  1981. cmd->ulpLe = 1;
  1982. cmd->ulpClass = CLASS3;
  1983. cmd->ulpContext = txxri;
  1984. cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
  1985. cmdiocbq->vport = phba->pport;
  1986. rc = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq, rspiocbq,
  1987. (phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT);
  1988. if ((rc != IOCB_SUCCESS) || (rsp->ulpStatus != IOCB_SUCCESS)) {
  1989. rc = -EIO;
  1990. goto err_loopback_test_exit;
  1991. }
  1992. evt->waiting = 1;
  1993. rc = wait_event_interruptible_timeout(
  1994. evt->wq, !list_empty(&evt->events_to_see),
  1995. ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT) * HZ);
  1996. evt->waiting = 0;
  1997. if (list_empty(&evt->events_to_see))
  1998. rc = (rc) ? -EINTR : -ETIMEDOUT;
  1999. else {
  2000. spin_lock_irqsave(&phba->ct_ev_lock, flags);
  2001. list_move(evt->events_to_see.prev, &evt->events_to_get);
  2002. evdat = list_entry(evt->events_to_get.prev,
  2003. typeof(*evdat), node);
  2004. spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
  2005. rx_databuf = evdat->data;
  2006. if (evdat->len != full_size) {
  2007. lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
  2008. "1603 Loopback test did not receive expected "
  2009. "data length. actual length 0x%x expected "
  2010. "length 0x%x\n",
  2011. evdat->len, full_size);
  2012. rc = -EIO;
  2013. } else if (rx_databuf == NULL)
  2014. rc = -EIO;
  2015. else {
  2016. rc = IOCB_SUCCESS;
  2017. /* skip over elx loopback header */
  2018. rx_databuf += ELX_LOOPBACK_HEADER_SZ;
  2019. job->reply->reply_payload_rcv_len =
  2020. sg_copy_from_buffer(job->reply_payload.sg_list,
  2021. job->reply_payload.sg_cnt,
  2022. rx_databuf, size);
  2023. job->reply->reply_payload_rcv_len = size;
  2024. }
  2025. }
  2026. err_loopback_test_exit:
  2027. lpfcdiag_loop_self_unreg(phba, rpi);
  2028. spin_lock_irqsave(&phba->ct_ev_lock, flags);
  2029. lpfc_bsg_event_unref(evt); /* release ref */
  2030. lpfc_bsg_event_unref(evt); /* delete */
  2031. spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
  2032. if (cmdiocbq != NULL)
  2033. lpfc_sli_release_iocbq(phba, cmdiocbq);
  2034. if (rspiocbq != NULL)
  2035. lpfc_sli_release_iocbq(phba, rspiocbq);
  2036. if (txbmp != NULL) {
  2037. if (txbpl != NULL) {
  2038. if (txbuffer != NULL)
  2039. diag_cmd_data_free(phba, txbuffer);
  2040. lpfc_mbuf_free(phba, txbmp->virt, txbmp->phys);
  2041. }
  2042. kfree(txbmp);
  2043. }
  2044. loopback_test_exit:
  2045. kfree(dataout);
  2046. /* make error code available to userspace */
  2047. job->reply->result = rc;
  2048. job->dd_data = NULL;
  2049. /* complete the job back to userspace if no error */
  2050. if (rc == 0)
  2051. job->job_done(job);
  2052. return rc;
  2053. }
  2054. /**
  2055. * lpfc_bsg_get_dfc_rev - process a GET_DFC_REV bsg vendor command
  2056. * @job: GET_DFC_REV fc_bsg_job
  2057. **/
  2058. static int
  2059. lpfc_bsg_get_dfc_rev(struct fc_bsg_job *job)
  2060. {
  2061. struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
  2062. struct lpfc_hba *phba = vport->phba;
  2063. struct get_mgmt_rev *event_req;
  2064. struct get_mgmt_rev_reply *event_reply;
  2065. int rc = 0;
  2066. if (job->request_len <
  2067. sizeof(struct fc_bsg_request) + sizeof(struct get_mgmt_rev)) {
  2068. lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
  2069. "2740 Received GET_DFC_REV request below "
  2070. "minimum size\n");
  2071. rc = -EINVAL;
  2072. goto job_error;
  2073. }
  2074. event_req = (struct get_mgmt_rev *)
  2075. job->request->rqst_data.h_vendor.vendor_cmd;
  2076. event_reply = (struct get_mgmt_rev_reply *)
  2077. job->reply->reply_data.vendor_reply.vendor_rsp;
  2078. if (job->reply_len <
  2079. sizeof(struct fc_bsg_request) + sizeof(struct get_mgmt_rev_reply)) {
  2080. lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
  2081. "2741 Received GET_DFC_REV reply below "
  2082. "minimum size\n");
  2083. rc = -EINVAL;
  2084. goto job_error;
  2085. }
  2086. event_reply->info.a_Major = MANAGEMENT_MAJOR_REV;
  2087. event_reply->info.a_Minor = MANAGEMENT_MINOR_REV;
  2088. job_error:
  2089. job->reply->result = rc;
  2090. if (rc == 0)
  2091. job->job_done(job);
  2092. return rc;
  2093. }
  2094. /**
  2095. * lpfc_bsg_wake_mbox_wait - lpfc_bsg_issue_mbox mbox completion handler
  2096. * @phba: Pointer to HBA context object.
  2097. * @pmboxq: Pointer to mailbox command.
  2098. *
2099. * This is the completion handler function for mailbox commands issued from
2100. * the lpfc_bsg_issue_mbox function. This function is called by the
2101. * mailbox event handler function with no lock held. This function
2102. * will wake up the thread waiting on the wait queue pointed to by context1
2103. * of the mailbox.
  2104. **/
  2105. void
  2106. lpfc_bsg_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
  2107. {
  2108. struct bsg_job_data *dd_data;
  2109. struct fc_bsg_job *job;
  2110. uint32_t size;
  2111. unsigned long flags;
  2112. uint8_t *to;
  2113. uint8_t *from;
  2114. spin_lock_irqsave(&phba->ct_ev_lock, flags);
  2115. dd_data = pmboxq->context1;
  2116. /* job already timed out? */
  2117. if (!dd_data) {
  2118. spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
  2119. return;
  2120. }
  2121. /* build the outgoing buffer to do an sg copy
  2122. * the format is the response mailbox followed by any extended
  2123. * mailbox data
  2124. */
  2125. from = (uint8_t *)&pmboxq->u.mb;
  2126. to = (uint8_t *)dd_data->context_un.mbox.mb;
  2127. memcpy(to, from, sizeof(MAILBOX_t));
  2128. if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS) {
  2129. /* copy the extended data if any, count is in words */
  2130. if (dd_data->context_un.mbox.outExtWLen) {
  2131. from = (uint8_t *)dd_data->context_un.mbox.ext;
  2132. to += sizeof(MAILBOX_t);
  2133. size = dd_data->context_un.mbox.outExtWLen *
  2134. sizeof(uint32_t);
  2135. memcpy(to, from, size);
  2136. } else if (pmboxq->u.mb.mbxCommand == MBX_RUN_BIU_DIAG64) {
  2137. from = (uint8_t *)dd_data->context_un.mbox.
  2138. dmp->dma.virt;
  2139. to += sizeof(MAILBOX_t);
  2140. size = dd_data->context_un.mbox.dmp->size;
  2141. memcpy(to, from, size);
  2142. } else if ((phba->sli_rev == LPFC_SLI_REV4) &&
  2143. (pmboxq->u.mb.mbxCommand == MBX_DUMP_MEMORY)) {
  2144. from = (uint8_t *)dd_data->context_un.mbox.dmp->dma.
  2145. virt;
  2146. to += sizeof(MAILBOX_t);
  2147. size = pmboxq->u.mb.un.varWords[5];
  2148. memcpy(to, from, size);
  2149. } else if (pmboxq->u.mb.mbxCommand == MBX_READ_EVENT_LOG) {
  2150. from = (uint8_t *)dd_data->context_un.
  2151. mbox.dmp->dma.virt;
  2152. to += sizeof(MAILBOX_t);
  2153. size = dd_data->context_un.mbox.dmp->size;
  2154. memcpy(to, from, size);
  2155. }
  2156. }
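/* Copy the completed mailbox, plus any extended data gathered above,
* back into the bsg job's reply payload.
*/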
  2157. from = (uint8_t *)dd_data->context_un.mbox.mb;
  2158. job = dd_data->context_un.mbox.set_job;
  2159. size = job->reply_payload.payload_len;
  2160. job->reply->reply_payload_rcv_len =
  2161. sg_copy_from_buffer(job->reply_payload.sg_list,
  2162. job->reply_payload.sg_cnt,
  2163. from, size);
  2164. job->reply->result = 0;
  2165. dd_data->context_un.mbox.set_job = NULL;
  2166. job->dd_data = NULL;
  2167. job->job_done(job);
  2168. /* need to hold the lock until we call job done to hold off
  2169. * the timeout handler returning to the midlayer while
2170. * we are still processing the job
  2171. */
  2172. spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
  2173. kfree(dd_data->context_un.mbox.mb);
  2174. mempool_free(dd_data->context_un.mbox.pmboxq, phba->mbox_mem_pool);
  2175. kfree(dd_data->context_un.mbox.ext);
  2176. if (dd_data->context_un.mbox.dmp) {
  2177. dma_free_coherent(&phba->pcidev->dev,
  2178. dd_data->context_un.mbox.dmp->size,
  2179. dd_data->context_un.mbox.dmp->dma.virt,
  2180. dd_data->context_un.mbox.dmp->dma.phys);
  2181. kfree(dd_data->context_un.mbox.dmp);
  2182. }
  2183. if (dd_data->context_un.mbox.rxbmp) {
  2184. lpfc_mbuf_free(phba, dd_data->context_un.mbox.rxbmp->virt,
  2185. dd_data->context_un.mbox.rxbmp->phys);
  2186. kfree(dd_data->context_un.mbox.rxbmp);
  2187. }
  2188. kfree(dd_data);
  2189. return;
  2190. }
  2191. /**
  2192. * lpfc_bsg_check_cmd_access - test for a supported mailbox command
  2193. * @phba: Pointer to HBA context object.
  2194. * @mb: Pointer to a mailbox object.
  2195. * @vport: Pointer to a vport object.
  2196. *
  2197. * Some commands require the port to be offline, some may not be called from
  2198. * the application.
  2199. **/
  2200. static int lpfc_bsg_check_cmd_access(struct lpfc_hba *phba,
  2201. MAILBOX_t *mb, struct lpfc_vport *vport)
  2202. {
  2203. /* return negative error values for bsg job */
  2204. switch (mb->mbxCommand) {
  2205. /* Offline only */
  2206. case MBX_INIT_LINK:
  2207. case MBX_DOWN_LINK:
  2208. case MBX_CONFIG_LINK:
  2209. case MBX_CONFIG_RING:
  2210. case MBX_RESET_RING:
  2211. case MBX_UNREG_LOGIN:
  2212. case MBX_CLEAR_LA:
  2213. case MBX_DUMP_CONTEXT:
  2214. case MBX_RUN_DIAGS:
  2215. case MBX_RESTART:
  2216. case MBX_SET_MASK:
  2217. if (!(vport->fc_flag & FC_OFFLINE_MODE)) {
  2218. lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
  2219. "2743 Command 0x%x is illegal in on-line "
  2220. "state\n",
  2221. mb->mbxCommand);
  2222. return -EPERM;
  2223. }
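/* Fall through: once the offline check passes, these commands are
* treated like the always-permitted commands below.
*/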
  2224. case MBX_WRITE_NV:
  2225. case MBX_WRITE_VPARMS:
  2226. case MBX_LOAD_SM:
  2227. case MBX_READ_NV:
  2228. case MBX_READ_CONFIG:
  2229. case MBX_READ_RCONFIG:
  2230. case MBX_READ_STATUS:
  2231. case MBX_READ_XRI:
  2232. case MBX_READ_REV:
  2233. case MBX_READ_LNK_STAT:
  2234. case MBX_DUMP_MEMORY:
  2235. case MBX_DOWN_LOAD:
  2236. case MBX_UPDATE_CFG:
  2237. case MBX_KILL_BOARD:
  2238. case MBX_LOAD_AREA:
  2239. case MBX_LOAD_EXP_ROM:
  2240. case MBX_BEACON:
  2241. case MBX_DEL_LD_ENTRY:
  2242. case MBX_SET_DEBUG:
  2243. case MBX_WRITE_WWN:
  2244. case MBX_SLI4_CONFIG:
  2245. case MBX_READ_EVENT_LOG:
  2246. case MBX_READ_EVENT_LOG_STATUS:
  2247. case MBX_WRITE_EVENT_LOG:
  2248. case MBX_PORT_CAPABILITIES:
  2249. case MBX_PORT_IOV_CONTROL:
  2250. case MBX_RUN_BIU_DIAG64:
  2251. break;
  2252. case MBX_SET_VARIABLE:
  2253. lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
  2254. "1226 mbox: set_variable 0x%x, 0x%x\n",
  2255. mb->un.varWords[0],
  2256. mb->un.varWords[1]);
  2257. if ((mb->un.varWords[0] == SETVAR_MLOMNT)
  2258. && (mb->un.varWords[1] == 1)) {
  2259. phba->wait_4_mlo_maint_flg = 1;
  2260. } else if (mb->un.varWords[0] == SETVAR_MLORST) {
  2261. phba->link_flag &= ~LS_LOOPBACK_MODE;
  2262. phba->fc_topology = TOPOLOGY_PT_PT;
  2263. }
  2264. break;
  2265. case MBX_READ_SPARM64:
  2266. case MBX_READ_LA:
  2267. case MBX_READ_LA64:
  2268. case MBX_REG_LOGIN:
  2269. case MBX_REG_LOGIN64:
  2270. case MBX_CONFIG_PORT:
  2271. case MBX_RUN_BIU_DIAG:
  2272. default:
  2273. lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
  2274. "2742 Unknown Command 0x%x\n",
  2275. mb->mbxCommand);
  2276. return -EPERM;
  2277. }
  2278. return 0; /* ok */
  2279. }
  2280. /**
  2281. * lpfc_bsg_issue_mbox - issues a mailbox command on behalf of an app
  2282. * @phba: Pointer to HBA context object.
2283. * @job: Pointer to the job object carrying the mailbox request.
  2284. * @vport: Pointer to a vport object.
  2285. *
  2286. * Allocate a tracking object, mailbox command memory, get a mailbox
  2287. * from the mailbox pool, copy the caller mailbox command.
  2288. *
2289. * If the port is offline or the sli is not active, we need to poll for the
2290. * command (port is being reset) and complete the job, otherwise issue the
2291. * mailbox command and let our completion handler finish the command.
  2292. **/
  2293. static uint32_t
  2294. lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
  2295. struct lpfc_vport *vport)
  2296. {
  2297. LPFC_MBOXQ_t *pmboxq = NULL; /* internal mailbox queue */
  2298. MAILBOX_t *pmb; /* shortcut to the pmboxq mailbox */
  2299. /* a 4k buffer to hold the mb and extended data from/to the bsg */
  2300. MAILBOX_t *mb = NULL;
  2301. struct bsg_job_data *dd_data = NULL; /* bsg data tracking structure */
  2302. uint32_t size;
  2303. struct lpfc_dmabuf *rxbmp = NULL; /* for biu diag */
  2304. struct lpfc_dmabufext *dmp = NULL; /* for biu diag */
  2305. struct ulp_bde64 *rxbpl = NULL;
  2306. struct dfc_mbox_req *mbox_req = (struct dfc_mbox_req *)
  2307. job->request->rqst_data.h_vendor.vendor_cmd;
  2308. uint8_t *ext = NULL;
  2309. int rc = 0;
  2310. uint8_t *from;
  2311. /* in case no data is transferred */
  2312. job->reply->reply_payload_rcv_len = 0;
  2313. /* check if requested extended data lengths are valid */
  2314. if ((mbox_req->inExtWLen > MAILBOX_EXT_SIZE) ||
  2315. (mbox_req->outExtWLen > MAILBOX_EXT_SIZE)) {
  2316. rc = -ERANGE;
  2317. goto job_done;
  2318. }
  2319. /* allocate our bsg tracking structure */
  2320. dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
  2321. if (!dd_data) {
  2322. lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
  2323. "2727 Failed allocation of dd_data\n");
  2324. rc = -ENOMEM;
  2325. goto job_done;
  2326. }
  2327. mb = kzalloc(BSG_MBOX_SIZE, GFP_KERNEL);
  2328. if (!mb) {
  2329. rc = -ENOMEM;
  2330. goto job_done;
  2331. }
  2332. pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
  2333. if (!pmboxq) {
  2334. rc = -ENOMEM;
  2335. goto job_done;
  2336. }
  2337. memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
  2338. size = job->request_payload.payload_len;
  2339. sg_copy_to_buffer(job->request_payload.sg_list,
  2340. job->request_payload.sg_cnt,
  2341. mb, size);
  2342. rc = lpfc_bsg_check_cmd_access(phba, mb, vport);
  2343. if (rc != 0)
  2344. goto job_done; /* must be negative */
  2345. pmb = &pmboxq->u.mb;
  2346. memcpy(pmb, mb, sizeof(*pmb));
  2347. pmb->mbxOwner = OWN_HOST;
  2348. pmboxq->vport = vport;
  2349. /* If HBA encountered an error attention, allow only DUMP
  2350. * or RESTART mailbox commands until the HBA is restarted.
  2351. */
  2352. if (phba->pport->stopped &&
  2353. pmb->mbxCommand != MBX_DUMP_MEMORY &&
  2354. pmb->mbxCommand != MBX_RESTART &&
  2355. pmb->mbxCommand != MBX_WRITE_VPARMS &&
  2356. pmb->mbxCommand != MBX_WRITE_WWN)
  2357. lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
  2358. "2797 mbox: Issued mailbox cmd "
  2359. "0x%x while in stopped state.\n",
  2360. pmb->mbxCommand);
  2361. /* Don't allow mailbox commands to be sent when blocked
  2362. * or when in the middle of discovery
  2363. */
  2364. if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) {
  2365. rc = -EAGAIN;
  2366. goto job_done;
  2367. }
  2368. /* extended mailbox commands will need an extended buffer */
  2369. if (mbox_req->inExtWLen || mbox_req->outExtWLen) {
  2370. ext = kzalloc(MAILBOX_EXT_SIZE, GFP_KERNEL);
  2371. if (!ext) {
  2372. rc = -ENOMEM;
  2373. goto job_done;
  2374. }
  2375. /* any data for the device? */
  2376. if (mbox_req->inExtWLen) {
  2377. from = (uint8_t *)mb;
  2378. from += sizeof(MAILBOX_t);
  2379. memcpy((uint8_t *)ext, from,
  2380. mbox_req->inExtWLen * sizeof(uint32_t));
  2381. }
  2391. pmboxq->context2 = ext;
  2392. pmboxq->in_ext_byte_len =
  2393. mbox_req->inExtWLen * sizeof(uint32_t);
  2394. pmboxq->out_ext_byte_len =
  2395. mbox_req->outExtWLen * sizeof(uint32_t);
  2396. pmboxq->mbox_offset_word = mbox_req->mbOffset;
  2397. }
2398. /* biu diag will need a kernel buffer to transfer the data;
2399. * allocate our own buffer and set up the mailbox command to
  2400. * use ours
  2401. */
  2402. if (pmb->mbxCommand == MBX_RUN_BIU_DIAG64) {
  2403. uint32_t transmit_length = pmb->un.varWords[1];
  2404. uint32_t receive_length = pmb->un.varWords[4];
  2405. /* transmit length cannot be greater than receive length or
  2406. * mailbox extension size
  2407. */
  2408. if ((transmit_length > receive_length) ||
  2409. (transmit_length > MAILBOX_EXT_SIZE)) {
  2410. rc = -ERANGE;
  2411. goto job_done;
  2412. }
  2413. rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
  2414. if (!rxbmp) {
  2415. rc = -ENOMEM;
  2416. goto job_done;
  2417. }
  2418. rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
  2419. if (!rxbmp->virt) {
  2420. rc = -ENOMEM;
  2421. goto job_done;
  2422. }
  2423. INIT_LIST_HEAD(&rxbmp->list);
  2424. rxbpl = (struct ulp_bde64 *) rxbmp->virt;
  2425. dmp = diag_cmd_data_alloc(phba, rxbpl, transmit_length, 0);
  2426. if (!dmp) {
  2427. rc = -ENOMEM;
  2428. goto job_done;
  2429. }
  2430. INIT_LIST_HEAD(&dmp->dma.list);
  2431. pmb->un.varBIUdiag.un.s2.xmit_bde64.addrHigh =
  2432. putPaddrHigh(dmp->dma.phys);
  2433. pmb->un.varBIUdiag.un.s2.xmit_bde64.addrLow =
  2434. putPaddrLow(dmp->dma.phys);
  2435. pmb->un.varBIUdiag.un.s2.rcv_bde64.addrHigh =
  2436. putPaddrHigh(dmp->dma.phys +
  2437. pmb->un.varBIUdiag.un.s2.
  2438. xmit_bde64.tus.f.bdeSize);
  2439. pmb->un.varBIUdiag.un.s2.rcv_bde64.addrLow =
  2440. putPaddrLow(dmp->dma.phys +
  2441. pmb->un.varBIUdiag.un.s2.
  2442. xmit_bde64.tus.f.bdeSize);
  2443. /* copy the transmit data found in the mailbox extension area */
  2444. from = (uint8_t *)mb;
  2445. from += sizeof(MAILBOX_t);
  2446. memcpy((uint8_t *)dmp->dma.virt, from, transmit_length);
  2447. } else if (pmb->mbxCommand == MBX_READ_EVENT_LOG) {
  2448. struct READ_EVENT_LOG_VAR *rdEventLog =
2449. &pmb->un.varRdEventLog;
  2450. uint32_t receive_length = rdEventLog->rcv_bde64.tus.f.bdeSize;
  2451. uint32_t mode = bf_get(lpfc_event_log, rdEventLog);
  2452. /* receive length cannot be greater than mailbox
  2453. * extension size
  2454. */
  2455. if (receive_length > MAILBOX_EXT_SIZE) {
  2456. rc = -ERANGE;
  2457. goto job_done;
  2458. }
  2459. /* mode zero uses a bde like biu diags command */
  2460. if (mode == 0) {
  2461. /* rebuild the command for sli4 using our own buffers
  2462. * like we do for biu diags
  2463. */
  2464. rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
  2465. if (!rxbmp) {
  2466. rc = -ENOMEM;
  2467. goto job_done;
  2468. }
  2469. rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
  2470. rxbpl = (struct ulp_bde64 *) rxbmp->virt;
  2471. if (rxbpl) {
  2472. INIT_LIST_HEAD(&rxbmp->list);
  2473. dmp = diag_cmd_data_alloc(phba, rxbpl,
  2474. receive_length, 0);
  2475. }
  2476. if (!dmp) {
  2477. rc = -ENOMEM;
  2478. goto job_done;
  2479. }
  2480. INIT_LIST_HEAD(&dmp->dma.list);
  2481. pmb->un.varWords[3] = putPaddrLow(dmp->dma.phys);
  2482. pmb->un.varWords[4] = putPaddrHigh(dmp->dma.phys);
  2483. }
  2484. } else if (phba->sli_rev == LPFC_SLI_REV4) {
  2485. if (pmb->mbxCommand == MBX_DUMP_MEMORY) {
  2486. /* rebuild the command for sli4 using our own buffers
  2487. * like we do for biu diags
  2488. */
  2489. uint32_t receive_length = pmb->un.varWords[2];
  2490. /* receive length cannot be greater than mailbox
  2491. * extension size
  2492. */
  2493. if ((receive_length == 0) ||
  2494. (receive_length > MAILBOX_EXT_SIZE)) {
  2495. rc = -ERANGE;
  2496. goto job_done;
  2497. }
  2498. rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
  2499. if (!rxbmp) {
  2500. rc = -ENOMEM;
  2501. goto job_done;
  2502. }
  2503. rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
  2504. if (!rxbmp->virt) {
  2505. rc = -ENOMEM;
  2506. goto job_done;
  2507. }
  2508. INIT_LIST_HEAD(&rxbmp->list);
  2509. rxbpl = (struct ulp_bde64 *) rxbmp->virt;
  2510. dmp = diag_cmd_data_alloc(phba, rxbpl, receive_length,
  2511. 0);
  2512. if (!dmp) {
  2513. rc = -ENOMEM;
  2514. goto job_done;
  2515. }
  2516. INIT_LIST_HEAD(&dmp->dma.list);
  2517. pmb->un.varWords[3] = putPaddrLow(dmp->dma.phys);
  2518. pmb->un.varWords[4] = putPaddrHigh(dmp->dma.phys);
  2519. } else if ((pmb->mbxCommand == MBX_UPDATE_CFG) &&
  2520. pmb->un.varUpdateCfg.co) {
  2521. struct ulp_bde64 *bde =
  2522. (struct ulp_bde64 *)&pmb->un.varWords[4];
  2523. /* bde size cannot be greater than mailbox ext size */
  2524. if (bde->tus.f.bdeSize > MAILBOX_EXT_SIZE) {
  2525. rc = -ERANGE;
  2526. goto job_done;
  2527. }
  2528. rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
  2529. if (!rxbmp) {
  2530. rc = -ENOMEM;
  2531. goto job_done;
  2532. }
  2533. rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
  2534. if (!rxbmp->virt) {
  2535. rc = -ENOMEM;
  2536. goto job_done;
  2537. }
  2538. INIT_LIST_HEAD(&rxbmp->list);
  2539. rxbpl = (struct ulp_bde64 *) rxbmp->virt;
  2540. dmp = diag_cmd_data_alloc(phba, rxbpl,
  2541. bde->tus.f.bdeSize, 0);
  2542. if (!dmp) {
  2543. rc = -ENOMEM;
  2544. goto job_done;
  2545. }
  2546. INIT_LIST_HEAD(&dmp->dma.list);
  2547. bde->addrHigh = putPaddrHigh(dmp->dma.phys);
  2548. bde->addrLow = putPaddrLow(dmp->dma.phys);
  2549. /* copy the transmit data found in the mailbox
  2550. * extension area
  2551. */
  2552. from = (uint8_t *)mb;
  2553. from += sizeof(MAILBOX_t);
  2554. memcpy((uint8_t *)dmp->dma.virt, from,
  2555. bde->tus.f.bdeSize);
  2556. }
  2557. }
  2558. dd_data->context_un.mbox.rxbmp = rxbmp;
  2559. dd_data->context_un.mbox.dmp = dmp;
2560. /* setup wake call as mailbox completion callback */
  2561. pmboxq->mbox_cmpl = lpfc_bsg_wake_mbox_wait;
2562. /* setup context field to pass the dd_data tracking structure to the wake function */
  2563. pmboxq->context1 = dd_data;
  2564. dd_data->type = TYPE_MBOX;
  2565. dd_data->context_un.mbox.pmboxq = pmboxq;
  2566. dd_data->context_un.mbox.mb = mb;
  2567. dd_data->context_un.mbox.set_job = job;
  2568. dd_data->context_un.mbox.ext = ext;
  2569. dd_data->context_un.mbox.mbOffset = mbox_req->mbOffset;
  2570. dd_data->context_un.mbox.inExtWLen = mbox_req->inExtWLen;
  2571. dd_data->context_un.mbox.outExtWLen = mbox_req->outExtWLen;
  2572. job->dd_data = dd_data;
  2573. if ((vport->fc_flag & FC_OFFLINE_MODE) ||
  2574. (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE))) {
  2575. rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
  2576. if (rc != MBX_SUCCESS) {
  2577. rc = (rc == MBX_TIMEOUT) ? -ETIME : -ENODEV;
  2578. goto job_done;
  2579. }
  2580. /* job finished, copy the data */
  2581. memcpy(mb, pmb, sizeof(*pmb));
  2582. job->reply->reply_payload_rcv_len =
  2583. sg_copy_from_buffer(job->reply_payload.sg_list,
  2584. job->reply_payload.sg_cnt,
  2585. mb, size);
2586. /* not waiting; mbox already done */
  2587. rc = 0;
  2588. goto job_done;
  2589. }
  2590. rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
  2591. if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY))
  2592. return 1; /* job started */
  2593. job_done:
  2594. /* common exit for error or job completed inline */
  2595. kfree(mb);
  2596. if (pmboxq)
  2597. mempool_free(pmboxq, phba->mbox_mem_pool);
  2598. kfree(ext);
  2599. if (dmp) {
  2600. dma_free_coherent(&phba->pcidev->dev,
  2601. dmp->size, dmp->dma.virt,
  2602. dmp->dma.phys);
  2603. kfree(dmp);
  2604. }
  2605. if (rxbmp) {
  2606. lpfc_mbuf_free(phba, rxbmp->virt, rxbmp->phys);
  2607. kfree(rxbmp);
  2608. }
  2609. kfree(dd_data);
  2610. return rc;
  2611. }
  2612. /**
  2613. * lpfc_bsg_mbox_cmd - process an fc bsg LPFC_BSG_VENDOR_MBOX command
  2614. * @job: MBOX fc_bsg_job for LPFC_BSG_VENDOR_MBOX.
  2615. **/
  2616. static int
  2617. lpfc_bsg_mbox_cmd(struct fc_bsg_job *job)
  2618. {
  2619. struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
  2620. struct lpfc_hba *phba = vport->phba;
  2621. int rc = 0;
  2622. /* in case no data is transferred */
  2623. job->reply->reply_payload_rcv_len = 0;
  2624. if (job->request_len <
  2625. sizeof(struct fc_bsg_request) + sizeof(struct dfc_mbox_req)) {
  2626. lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
  2627. "2737 Received MBOX_REQ request below "
  2628. "minimum size\n");
  2629. rc = -EINVAL;
  2630. goto job_error;
  2631. }
  2632. if (job->request_payload.payload_len != BSG_MBOX_SIZE) {
  2633. rc = -EINVAL;
  2634. goto job_error;
  2635. }
  2636. if (job->reply_payload.payload_len != BSG_MBOX_SIZE) {
  2637. rc = -EINVAL;
  2638. goto job_error;
  2639. }
  2640. if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) {
  2641. rc = -EAGAIN;
  2642. goto job_error;
  2643. }
  2644. rc = lpfc_bsg_issue_mbox(phba, job, vport);
  2645. job_error:
  2646. if (rc == 0) {
  2647. /* job done */
  2648. job->reply->result = 0;
  2649. job->dd_data = NULL;
  2650. job->job_done(job);
  2651. } else if (rc == 1)
2652. /* job submitted, will complete later */
  2653. rc = 0; /* return zero, no error */
  2654. else {
  2655. /* some error occurred */
  2656. job->reply->result = rc;
  2657. job->dd_data = NULL;
  2658. }
  2659. return rc;
  2660. }
  2661. /**
  2662. * lpfc_bsg_menlo_cmd_cmp - lpfc_menlo_cmd completion handler
  2663. * @phba: Pointer to HBA context object.
  2664. * @cmdiocbq: Pointer to command iocb.
  2665. * @rspiocbq: Pointer to response iocb.
  2666. *
  2667. * This function is the completion handler for iocbs issued using
  2668. * lpfc_menlo_cmd function. This function is called by the
  2669. * ring event handler function without any lock held. This function
  2670. * can be called from both worker thread context and interrupt
  2671. * context. This function also can be called from another thread which
  2672. * cleans up the SLI layer objects.
  2673. * This function copies the contents of the response iocb to the
  2674. * response iocb memory object provided by the caller of
  2675. * lpfc_sli_issue_iocb_wait and then wakes up the thread which
  2676. * sleeps for the iocb completion.
  2677. **/
  2678. static void
  2679. lpfc_bsg_menlo_cmd_cmp(struct lpfc_hba *phba,
  2680. struct lpfc_iocbq *cmdiocbq,
  2681. struct lpfc_iocbq *rspiocbq)
  2682. {
  2683. struct bsg_job_data *dd_data;
  2684. struct fc_bsg_job *job;
  2685. IOCB_t *rsp;
  2686. struct lpfc_dmabuf *bmp;
  2687. struct lpfc_bsg_menlo *menlo;
  2688. unsigned long flags;
  2689. struct menlo_response *menlo_resp;
  2690. int rc = 0;
  2691. spin_lock_irqsave(&phba->ct_ev_lock, flags);
  2692. dd_data = cmdiocbq->context1;
  2693. if (!dd_data) {
  2694. spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
  2695. return;
  2696. }
  2697. menlo = &dd_data->context_un.menlo;
  2698. job = menlo->set_job;
  2699. job->dd_data = NULL; /* so timeout handler does not reply */
  2700. spin_lock_irqsave(&phba->hbalock, flags);
  2701. cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
  2702. if (cmdiocbq->context2 && rspiocbq)
  2703. memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
  2704. &rspiocbq->iocb, sizeof(IOCB_t));
  2705. spin_unlock_irqrestore(&phba->hbalock, flags);
  2706. bmp = menlo->bmp;
  2707. rspiocbq = menlo->rspiocbq;
  2708. rsp = &rspiocbq->iocb;
  2709. pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
  2710. job->request_payload.sg_cnt, DMA_TO_DEVICE);
  2711. pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
  2712. job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
2713. /* always return the xri; this would be used in the case
  2714. * of a menlo download to allow the data to be sent as a continuation
  2715. * of the exchange.
  2716. */
  2717. menlo_resp = (struct menlo_response *)
  2718. job->reply->reply_data.vendor_reply.vendor_rsp;
  2719. menlo_resp->xri = rsp->ulpContext;
  2720. if (rsp->ulpStatus) {
  2721. if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
  2722. switch (rsp->un.ulpWord[4] & 0xff) {
  2723. case IOERR_SEQUENCE_TIMEOUT:
  2724. rc = -ETIMEDOUT;
  2725. break;
  2726. case IOERR_INVALID_RPI:
  2727. rc = -EFAULT;
  2728. break;
  2729. default:
  2730. rc = -EACCES;
  2731. break;
  2732. }
  2733. } else
  2734. rc = -EACCES;
  2735. } else
  2736. job->reply->reply_payload_rcv_len =
  2737. rsp->un.genreq64.bdl.bdeSize;
  2738. lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
  2739. lpfc_sli_release_iocbq(phba, rspiocbq);
  2740. lpfc_sli_release_iocbq(phba, cmdiocbq);
  2741. kfree(bmp);
  2742. kfree(dd_data);
  2743. /* make error code available to userspace */
  2744. job->reply->result = rc;
  2745. /* complete the job back to userspace */
  2746. job->job_done(job);
  2747. spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
  2748. return;
  2749. }
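
/*
 * Illustrative note (not in the original source): the xri that the completion
 * handler above always returns in menlo_resp->xri is what lets a management
 * application continue a firmware download on the same exchange.  A hedged
 * sketch of the expected userspace usage, assuming the menlo_command and
 * menlo_response layouts used in this file:
 *
 *	struct menlo_command mcmd = { .cmd = LPFC_BSG_VENDOR_MENLO_CMD };
 *	struct menlo_response mrsp;
 *	// ...issue the vendor bsg request, read mrsp back...
 *	struct menlo_command mdata = {
 *		.cmd = LPFC_BSG_VENDOR_MENLO_DATA,
 *		.xri = mrsp.xri,	// continue the exchange reported above
 *	};
 *	// lpfc_menlo_cmd() below turns this into a GEN_REQUEST64_CX on the
 *	// xri supplied here.
 */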

/**
 * lpfc_menlo_cmd - send an ioctl for menlo hardware
 * @job: fc_bsg_job to handle
 *
 * This function issues a gen request 64 CR ioctl for all menlo cmd requests;
 * all the command completions will return the xri for the command.
 * For menlo data requests a gen request 64 CX is used to continue the
 * exchange supplied in the menlo request header xri field.
 **/
static int
lpfc_menlo_cmd(struct fc_bsg_job *job)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *cmdiocbq, *rspiocbq;
	IOCB_t *cmd, *rsp;
	int rc = 0;
	struct menlo_command *menlo_cmd;
	struct menlo_response *menlo_resp;
	struct lpfc_dmabuf *bmp = NULL;
	int request_nseg;
	int reply_nseg;
	struct scatterlist *sgel = NULL;
	int numbde;
	dma_addr_t busaddr;
	struct bsg_job_data *dd_data;
	struct ulp_bde64 *bpl = NULL;

	/* in case no data is returned return just the return code */
	job->reply->reply_payload_rcv_len = 0;

	if (job->request_len <
	    sizeof(struct fc_bsg_request) +
	    sizeof(struct menlo_command)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2784 Received MENLO_CMD request below "
				"minimum size\n");
		rc = -ERANGE;
		goto no_dd_data;
	}

	if (job->reply_len <
	    sizeof(struct fc_bsg_request) + sizeof(struct menlo_response)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2785 Received MENLO_CMD reply below "
				"minimum size\n");
		rc = -ERANGE;
		goto no_dd_data;
	}

	if (!(phba->menlo_flag & HBA_MENLO_SUPPORT)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2786 Adapter does not support menlo "
				"commands\n");
		rc = -EPERM;
		goto no_dd_data;
	}

	menlo_cmd = (struct menlo_command *)
		job->request->rqst_data.h_vendor.vendor_cmd;

	menlo_resp = (struct menlo_response *)
		job->reply->reply_data.vendor_reply.vendor_rsp;

	/* allocate our bsg tracking structure */
	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
	if (!dd_data) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2787 Failed allocation of dd_data\n");
		rc = -ENOMEM;
		goto no_dd_data;
	}

	bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!bmp) {
		rc = -ENOMEM;
		goto free_dd;
	}

	cmdiocbq = lpfc_sli_get_iocbq(phba);
	if (!cmdiocbq) {
		rc = -ENOMEM;
		goto free_bmp;
	}

	rspiocbq = lpfc_sli_get_iocbq(phba);
	if (!rspiocbq) {
		rc = -ENOMEM;
		goto free_cmdiocbq;
	}

	rsp = &rspiocbq->iocb;

	bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
	if (!bmp->virt) {
		rc = -ENOMEM;
		goto free_rspiocbq;
	}

	INIT_LIST_HEAD(&bmp->list);
	bpl = (struct ulp_bde64 *) bmp->virt;
	request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
				  job->request_payload.sg_cnt, DMA_TO_DEVICE);
	for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
		busaddr = sg_dma_address(sgel);
		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		bpl->tus.f.bdeSize = sg_dma_len(sgel);
		bpl->tus.w = cpu_to_le32(bpl->tus.w);
		bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
		bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
		bpl++;
	}

	reply_nseg = pci_map_sg(phba->pcidev, job->reply_payload.sg_list,
				job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	for_each_sg(job->reply_payload.sg_list, sgel, reply_nseg, numbde) {
		busaddr = sg_dma_address(sgel);
		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
		bpl->tus.f.bdeSize = sg_dma_len(sgel);
		bpl->tus.w = cpu_to_le32(bpl->tus.w);
		bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
		bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
		bpl++;
	}
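
	/*
	 * Illustrative note (not in the original source): at this point the
	 * single mbuf page at bmp->virt holds the buffer pointer list (BPL):
	 * request_nseg BUFF_TYPE_BDE_64 entries for the data sent to the
	 * firmware, immediately followed by reply_nseg BUFF_TYPE_BDE_64I
	 * entries for the reply buffers.  The GEN_REQUEST64 iocb built below
	 * points at this page and carries the combined size in
	 * un.genreq64.bdl.bdeSize.
	 */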

	cmd = &cmdiocbq->iocb;
	cmd->un.genreq64.bdl.ulpIoTag32 = 0;
	cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
	cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
	cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
	cmd->un.genreq64.bdl.bdeSize =
		(request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
	cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
	cmd->un.genreq64.w5.hcsw.Dfctl = 0;
	cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CMD;
	cmd->un.genreq64.w5.hcsw.Type = MENLO_TRANSPORT_TYPE; /* 0xfe */
	cmd->ulpBdeCount = 1;
	cmd->ulpClass = CLASS3;
	cmd->ulpOwner = OWN_CHIP;
	cmd->ulpLe = 1; /* Limited Edition */
	cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
	cmdiocbq->vport = phba->pport;
	/* We want the firmware to timeout before we do */
	cmd->ulpTimeout = MENLO_TIMEOUT - 5;
	cmdiocbq->context3 = bmp;
	cmdiocbq->context2 = rspiocbq;
	cmdiocbq->iocb_cmpl = lpfc_bsg_menlo_cmd_cmp;
	cmdiocbq->context1 = dd_data;

	if (menlo_cmd->cmd == LPFC_BSG_VENDOR_MENLO_CMD) {
		cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
		cmd->ulpPU = MENLO_PU; /* 3 */
		cmd->un.ulpWord[4] = MENLO_DID; /* 0x0000FC0E */
		cmd->ulpContext = MENLO_CONTEXT; /* 0 */
	} else {
		cmd->ulpCommand = CMD_GEN_REQUEST64_CX;
		cmd->ulpPU = 1;
		cmd->un.ulpWord[4] = 0;
		cmd->ulpContext = menlo_cmd->xri;
	}

	dd_data->type = TYPE_MENLO;
	dd_data->context_un.menlo.cmdiocbq = cmdiocbq;
	dd_data->context_un.menlo.rspiocbq = rspiocbq;
	dd_data->context_un.menlo.set_job = job;
	dd_data->context_un.menlo.bmp = bmp;

	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq,
				 MENLO_TIMEOUT - 5);
	if (rc == IOCB_SUCCESS)
		return 0; /* done for now */

	/* iocb failed so cleanup */
	pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
		     job->request_payload.sg_cnt, DMA_TO_DEVICE);
	pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
		     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);

	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);

free_rspiocbq:
	lpfc_sli_release_iocbq(phba, rspiocbq);
free_cmdiocbq:
	lpfc_sli_release_iocbq(phba, cmdiocbq);
free_bmp:
	kfree(bmp);
free_dd:
	kfree(dd_data);
no_dd_data:
	/* make error code available to userspace */
	job->reply->result = rc;
	job->dd_data = NULL;
	return rc;
}

/**
 * lpfc_bsg_hst_vendor - process a vendor-specific fc_bsg_job
 * @job: fc_bsg_job to handle
 **/
static int
lpfc_bsg_hst_vendor(struct fc_bsg_job *job)
{
	int command = job->request->rqst_data.h_vendor.vendor_cmd[0];
	int rc;

	switch (command) {
	case LPFC_BSG_VENDOR_SET_CT_EVENT:
		rc = lpfc_bsg_hba_set_event(job);
		break;
	case LPFC_BSG_VENDOR_GET_CT_EVENT:
		rc = lpfc_bsg_hba_get_event(job);
		break;
	case LPFC_BSG_VENDOR_SEND_MGMT_RESP:
		rc = lpfc_bsg_send_mgmt_rsp(job);
		break;
	case LPFC_BSG_VENDOR_DIAG_MODE:
		rc = lpfc_bsg_diag_mode(job);
		break;
	case LPFC_BSG_VENDOR_DIAG_TEST:
		rc = lpfc_bsg_diag_test(job);
		break;
	case LPFC_BSG_VENDOR_GET_MGMT_REV:
		rc = lpfc_bsg_get_dfc_rev(job);
		break;
	case LPFC_BSG_VENDOR_MBOX:
		rc = lpfc_bsg_mbox_cmd(job);
		break;
	case LPFC_BSG_VENDOR_MENLO_CMD:
	case LPFC_BSG_VENDOR_MENLO_DATA:
		rc = lpfc_menlo_cmd(job);
		break;
	default:
		rc = -EINVAL;
		job->reply->reply_payload_rcv_len = 0;
		/* make error code available to userspace */
		job->reply->result = rc;
		break;
	}

	return rc;
}

/**
 * lpfc_bsg_request - handle a bsg request from the FC transport
 * @job: fc_bsg_job to handle
 **/
int
lpfc_bsg_request(struct fc_bsg_job *job)
{
	uint32_t msgcode;
	int rc;

	msgcode = job->request->msgcode;
	switch (msgcode) {
	case FC_BSG_HST_VENDOR:
		rc = lpfc_bsg_hst_vendor(job);
		break;
	case FC_BSG_RPT_ELS:
		rc = lpfc_bsg_rport_els(job);
		break;
	case FC_BSG_RPT_CT:
		rc = lpfc_bsg_send_mgmt_cmd(job);
		break;
	default:
		rc = -EINVAL;
		job->reply->reply_payload_rcv_len = 0;
		/* make error code available to userspace */
		job->reply->result = rc;
		break;
	}

	return rc;
}
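
/*
 * Illustrative userspace sketch (not part of the original driver source):
 * how a vendor-unique request reaches lpfc_bsg_request()/lpfc_bsg_hst_vendor()
 * above.  This is a minimal, hedged example that assumes the generic sg_io_v4
 * BSG interface (<linux/bsg.h>, SG_IO from <scsi/sg.h>) and the fc_bsg_request
 * layout from <scsi/scsi_bsg_fc.h>; the device path, payload sizes and the
 * choice of LPFC_BSG_VENDOR_GET_MGMT_REV are placeholders, not a statement of
 * what the lpfc management tools actually do.
 *
 *	int fd = open("/dev/bsg/fc_host0", O_RDWR);	// path is illustrative
 *	unsigned char req[sizeof(struct fc_bsg_request) + sizeof(uint32_t)];
 *	struct fc_bsg_request *bsg_req = (struct fc_bsg_request *)req;
 *	struct fc_bsg_reply bsg_rep;
 *	unsigned char din[512];			// becomes job->reply_payload
 *	struct sg_io_v4 io;
 *
 *	memset(req, 0, sizeof(req));
 *	memset(&bsg_rep, 0, sizeof(bsg_rep));
 *	bsg_req->msgcode = FC_BSG_HST_VENDOR;
 *	bsg_req->rqst_data.h_vendor.vendor_cmd[0] = LPFC_BSG_VENDOR_GET_MGMT_REV;
 *
 *	memset(&io, 0, sizeof(io));
 *	io.guard = 'Q';
 *	io.protocol = BSG_PROTOCOL_SCSI;
 *	io.subprotocol = BSG_SUB_PROTOCOL_SCSI_TRANSPORT;
 *	io.request = (uintptr_t)req;
 *	io.request_len = sizeof(req);
 *	io.response = (uintptr_t)&bsg_rep;
 *	io.max_response_len = sizeof(bsg_rep);
 *	io.din_xferp = (uintptr_t)din;
 *	io.din_xfer_len = sizeof(din);
 *	io.timeout = 30 * 1000;			// milliseconds
 *
 *	if (ioctl(fd, SG_IO, &io) == 0)
 *		// bsg_rep.result carries the rc set by the handlers above
 *		;
 */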

/**
 * lpfc_bsg_timeout - handle timeout of a bsg request from the FC transport
 * @job: fc_bsg_job that has timed out
 *
 * This function just aborts the job's IOCB.  The aborted IOCB will return to
 * the waiting function which will handle passing the error back to userspace.
 * Event and mailbox requests, which have no outstanding IOCB, are detached
 * from their tracking structures and completed here with -EAGAIN.
 **/
int
lpfc_bsg_timeout(struct fc_bsg_job *job)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *cmdiocb;
	struct lpfc_bsg_event *evt;
	struct lpfc_bsg_iocb *iocb;
	struct lpfc_bsg_mbox *mbox;
	struct lpfc_bsg_menlo *menlo;
	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
	struct bsg_job_data *dd_data;
	unsigned long flags;

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	dd_data = (struct bsg_job_data *)job->dd_data;
	/* timeout and completion crossed paths if no dd_data */
	if (!dd_data) {
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		return 0;
	}

	switch (dd_data->type) {
	case TYPE_IOCB:
		iocb = &dd_data->context_un.iocb;
		cmdiocb = iocb->cmdiocbq;
		/* hint to completion handler that the job timed out */
		job->reply->result = -EAGAIN;
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		/* this will call our completion handler */
		spin_lock_irq(&phba->hbalock);
		lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
		spin_unlock_irq(&phba->hbalock);
		break;
	case TYPE_EVT:
		evt = dd_data->context_un.evt;
		/* this event has no job anymore */
		evt->set_job = NULL;
		job->dd_data = NULL;
		job->reply->reply_payload_rcv_len = 0;
		/* Return -EAGAIN which is our way of signalling the
		 * app to retry.
		 */
		job->reply->result = -EAGAIN;
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		job->job_done(job);
		break;
	case TYPE_MBOX:
		mbox = &dd_data->context_un.mbox;
		/* this mbox has no job anymore */
		mbox->set_job = NULL;
		job->dd_data = NULL;
		job->reply->reply_payload_rcv_len = 0;
		job->reply->result = -EAGAIN;
		/* the mbox completion handler can now be run */
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		job->job_done(job);
		break;
	case TYPE_MENLO:
		menlo = &dd_data->context_un.menlo;
		cmdiocb = menlo->cmdiocbq;
		/* hint to completion handler that the job timed out */
		job->reply->result = -EAGAIN;
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		/* this will call our completion handler */
		spin_lock_irq(&phba->hbalock);
		lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
		spin_unlock_irq(&phba->hbalock);
		break;
	default:
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		break;
	}

	/* scsi transport fc fc_bsg_job_timeout expects a zero return code,
	 * otherwise an error message will be displayed on the console
	 * so always return success (zero)
	 */
	return 0;
}
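
/*
 * Illustrative note (not in the original source): the timeout path above and
 * the completion handlers earlier in this file (e.g. lpfc_bsg_menlo_cmd_cmp)
 * serialize on phba->ct_ev_lock and use job->dd_data as the tiebreaker.  A
 * completion handler clears job->dd_data under the lock before completing the
 * job, so a timeout that arrives afterwards finds dd_data == NULL and simply
 * returns 0.  Conversely, when the timeout fires first it either aborts the
 * outstanding iocb (TYPE_IOCB/TYPE_MENLO) so the completion handler finishes
 * the job, or detaches the job (TYPE_EVT/TYPE_MBOX) and completes it itself
 * with -EAGAIN, ensuring the job is completed to userspace exactly once.
 */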