lpfc_bsg.c 95 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702703704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511
661167116811691170117111721173117411751176117711781179118011811182118311841185118611871188118911901191119211931194119511961197119811991200120112021203120412051206120712081209121012111212121312141215121612171218121912201221122212231224122512261227122812291230123112321233123412351236123712381239124012411242124312441245124612471248124912501251125212531254125512561257125812591260126112621263126412651266126712681269127012711272127312741275127612771278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908190919101911191219131914191519161917191819191920192119221923192419251926192719281929193019311932193319341935193619371938193919401941194219431944194519461947194819491950195119521953195419551956195719581959196019611962196319641965196619671968196919701971197219731974197519761977197819791980198119821983198419851986198719881989199019911992199319941995199619971998199920002001200220032004200520062007200820092010201120122013201420152016201720182019202020212022202320242025202620272028202920302031203220332034203520362037203820392040204120422043204420452046204720482049205020512052205320542
055205620572058205920602061206220632064206520662067206820692070207120722073207420752076207720782079208020812082208320842085208620872088208920902091209220932094209520962097209820992100210121022103210421052106210721082109211021112112211321142115211621172118211921202121212221232124212521262127212821292130213121322133213421352136213721382139214021412142214321442145214621472148214921502151215221532154215521562157215821592160216121622163216421652166216721682169217021712172217321742175217621772178217921802181218221832184218521862187218821892190219121922193219421952196219721982199220022012202220322042205220622072208220922102211221222132214221522162217221822192220222122222223222422252226222722282229223022312232223322342235223622372238223922402241224222432244224522462247224822492250225122522253225422552256225722582259226022612262226322642265226622672268226922702271227222732274227522762277227822792280228122822283228422852286228722882289229022912292229322942295229622972298229923002301230223032304230523062307230823092310231123122313231423152316231723182319232023212322232323242325232623272328232923302331233223332334233523362337233823392340234123422343234423452346234723482349235023512352235323542355235623572358235923602361236223632364236523662367236823692370237123722373237423752376237723782379238023812382238323842385238623872388238923902391239223932394239523962397239823992400240124022403240424052406240724082409241024112412241324142415241624172418241924202421242224232424242524262427242824292430243124322433243424352436243724382439244024412442244324442445244624472448244924502451245224532454245524562457245824592460246124622463246424652466246724682469247024712472247324742475247624772478247924802481248224832484248524862487248824892490249124922493249424952496249724982499250025012502250325042505250625072508250925102511251225132514251525162517251825192520252125222523252425252526252725282529253025312532253325342535253625372538253925402541254225432544254525462547254825492550255125522553255425552556255725582559256025612562256325642565256625672568256925702571257225732574257525762577257825792580258125822583258425852586258725882589259025912592259325942595259625972598259926002601260226032604260526062607260826092610261126122613261426152616261726182619262026212622262326242625262626272628262926302631263226332634263526362637263826392640264126422643264426452646264726482649265026512652265326542655265626572658265926602661266226632664266526662667266826692670267126722673267426752676267726782679268026812682268326842685268626872688268926902691269226932694269526962697269826992700270127022703270427052706270727082709271027112712271327142715271627172718271927202721272227232724272527262727272827292730273127322733273427352736273727382739274027412742274327442745274627472748274927502751275227532754275527562757275827592760276127622763276427652766276727682769277027712772277327742775277627772778277927802781278227832784278527862787278827892790279127922793279427952796279727982799280028012802280328042805280628072808280928102811281228132814281528162817281828192820282128222823282428252826282728282829283028312832283328342835283628372838283928402841284228432844284528462847284828492850285128522853285428552856285728582859286028612862286328642865286628672868286928702871287228732874287528762877287828792880288128822883288428852886288728882889289028912892289328942895289628972898289929002901290229032904290529062907290829092910291129122913291429152916291729182919292029212922292329242925292629272928292929302931293229332934293529362937293829392940294129422943
294429452946294729482949295029512952295329542955295629572958295929602961296229632964296529662967296829692970297129722973297429752976297729782979298029812982298329842985298629872988298929902991299229932994299529962997299829993000300130023003300430053006300730083009301030113012301330143015301630173018301930203021302230233024302530263027302830293030303130323033303430353036303730383039304030413042304330443045304630473048304930503051305230533054305530563057305830593060306130623063306430653066306730683069307030713072307330743075307630773078307930803081308230833084308530863087308830893090309130923093309430953096309730983099310031013102310331043105310631073108310931103111311231133114311531163117311831193120312131223123312431253126312731283129313031313132313331343135313631373138313931403141314231433144314531463147314831493150315131523153315431553156315731583159316031613162316331643165316631673168316931703171317231733174317531763177317831793180318131823183318431853186318731883189319031913192319331943195319631973198319932003201320232033204320532063207320832093210321132123213321432153216321732183219322032213222322332243225322632273228322932303231323232333234323532363237323832393240324132423243324432453246324732483249325032513252325332543255325632573258325932603261326232633264326532663267326832693270327132723273327432753276327732783279328032813282328332843285328632873288328932903291329232933294329532963297329832993300330133023303330433053306330733083309331033113312331333143315331633173318331933203321332233233324332533263327332833293330333133323333333433353336333733383339334033413342334333443345334633473348334933503351335233533354335533563357335833593360336133623363336433653366336733683369337033713372337333743375337633773378337933803381338233833384338533863387338833893390339133923393339433953396339733983399340034013402340334043405340634073408340934103411341234133414341534163417341834193420342134223423342434253426342734283429343034313432343334343435343634373438343934403441344234433444344534463447344834493450345134523453345434553456345734583459346034613462346334643465346634673468346934703471347234733474347534763477347834793480348134823483348434853486348734883489349034913492349334943495349634973498349935003501
  1. /*******************************************************************
  2. * This file is part of the Emulex Linux Device Driver for *
  3. * Fibre Channel Host Bus Adapters. *
  4. * Copyright (C) 2009-2010 Emulex. All rights reserved. *
  5. * EMULEX and SLI are trademarks of Emulex. *
  6. * www.emulex.com *
  7. * *
  8. * This program is free software; you can redistribute it and/or *
  9. * modify it under the terms of version 2 of the GNU General *
  10. * Public License as published by the Free Software Foundation. *
  11. * This program is distributed in the hope that it will be useful. *
  12. * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
  13. * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
  14. * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
  15. * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
  16. * TO BE LEGALLY INVALID. See the GNU General Public License for *
  17. * more details, a copy of which can be found in the file COPYING *
  18. * included with this package. *
  19. *******************************************************************/
  20. #include <linux/interrupt.h>
  21. #include <linux/mempool.h>
  22. #include <linux/pci.h>
  23. #include <linux/slab.h>
  24. #include <linux/delay.h>
  25. #include <scsi/scsi.h>
  26. #include <scsi/scsi_host.h>
  27. #include <scsi/scsi_transport_fc.h>
  28. #include <scsi/scsi_bsg_fc.h>
  29. #include <scsi/fc/fc_fs.h>
  30. #include "lpfc_hw4.h"
  31. #include "lpfc_hw.h"
  32. #include "lpfc_sli.h"
  33. #include "lpfc_sli4.h"
  34. #include "lpfc_nl.h"
  35. #include "lpfc_bsg.h"
  36. #include "lpfc_disc.h"
  37. #include "lpfc_scsi.h"
  38. #include "lpfc.h"
  39. #include "lpfc_logmsg.h"
  40. #include "lpfc_crtn.h"
  41. #include "lpfc_vport.h"
  42. #include "lpfc_version.h"
  43. struct lpfc_bsg_event {
  44. struct list_head node;
  45. struct kref kref;
  46. wait_queue_head_t wq;
  47. /* Event type and waiter identifiers */
  48. uint32_t type_mask;
  49. uint32_t req_id;
  50. uint32_t reg_id;
  51. /* next two flags are here for the auto-delete logic */
  52. unsigned long wait_time_stamp;
  53. int waiting;
  54. /* seen and not seen events */
  55. struct list_head events_to_get;
  56. struct list_head events_to_see;
  57. /* job waiting for this event to finish */
  58. struct fc_bsg_job *set_job;
  59. };
  60. struct lpfc_bsg_iocb {
  61. struct lpfc_iocbq *cmdiocbq;
  62. struct lpfc_iocbq *rspiocbq;
  63. struct lpfc_dmabuf *bmp;
  64. struct lpfc_nodelist *ndlp;
  65. /* job waiting for this iocb to finish */
  66. struct fc_bsg_job *set_job;
  67. };
  68. struct lpfc_bsg_mbox {
  69. LPFC_MBOXQ_t *pmboxq;
  70. MAILBOX_t *mb;
  71. struct lpfc_dmabuf *rxbmp; /* for BIU diags */
  72. struct lpfc_dmabufext *dmp; /* for BIU diags */
  73. uint8_t *ext; /* extended mailbox data */
  74. uint32_t mbOffset; /* from app */
  75. uint32_t inExtWLen; /* from app */
  76. uint32_t outExtWLen; /* from app */
  77. /* job waiting for this mbox command to finish */
  78. struct fc_bsg_job *set_job;
  79. };
  80. #define MENLO_DID 0x0000FC0E
  81. struct lpfc_bsg_menlo {
  82. struct lpfc_iocbq *cmdiocbq;
  83. struct lpfc_iocbq *rspiocbq;
  84. struct lpfc_dmabuf *bmp;
  85. /* job waiting for this iocb to finish */
  86. struct fc_bsg_job *set_job;
  87. };
  88. #define TYPE_EVT 1
  89. #define TYPE_IOCB 2
  90. #define TYPE_MBOX 3
  91. #define TYPE_MENLO 4
  92. struct bsg_job_data {
  93. uint32_t type;
  94. union {
  95. struct lpfc_bsg_event *evt;
  96. struct lpfc_bsg_iocb iocb;
  97. struct lpfc_bsg_mbox mbox;
  98. struct lpfc_bsg_menlo menlo;
  99. } context_un;
  100. };
  101. struct event_data {
  102. struct list_head node;
  103. uint32_t type;
  104. uint32_t immed_dat;
  105. void *data;
  106. uint32_t len;
  107. };
  108. #define BUF_SZ_4K 4096
  109. #define SLI_CT_ELX_LOOPBACK 0x10
  110. enum ELX_LOOPBACK_CMD {
  111. ELX_LOOPBACK_XRI_SETUP,
  112. ELX_LOOPBACK_DATA,
  113. };
  114. #define ELX_LOOPBACK_HEADER_SZ \
  115. (size_t)(&((struct lpfc_sli_ct_request *)NULL)->un)
  116. struct lpfc_dmabufext {
  117. struct lpfc_dmabuf dma;
  118. uint32_t size;
  119. uint32_t flag;
  120. };
  121. /**
  122. * lpfc_bsg_send_mgmt_cmd_cmp - lpfc_bsg_send_mgmt_cmd's completion handler
  123. * @phba: Pointer to HBA context object.
  124. * @cmdiocbq: Pointer to command iocb.
  125. * @rspiocbq: Pointer to response iocb.
  126. *
  127. * This function is the completion handler for iocbs issued using
  128. * lpfc_bsg_send_mgmt_cmd function. This function is called by the
  129. * ring event handler function without any lock held. This function
  130. * can be called from both worker thread context and interrupt
  131. * context. This function also can be called from another thread which
  132. * cleans up the SLI layer objects.
  133. * This function copies the contents of the response iocb to the
  134. * response iocb memory object provided by the caller of
  135. * lpfc_sli_issue_iocb_wait and then wakes up the thread which
  136. * sleeps for the iocb completion.
  137. **/
  138. static void
  139. lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
  140. struct lpfc_iocbq *cmdiocbq,
  141. struct lpfc_iocbq *rspiocbq)
  142. {
  143. unsigned long iflags;
  144. struct bsg_job_data *dd_data;
  145. struct fc_bsg_job *job;
  146. IOCB_t *rsp;
  147. struct lpfc_dmabuf *bmp;
  148. struct lpfc_nodelist *ndlp;
  149. struct lpfc_bsg_iocb *iocb;
  150. unsigned long flags;
  151. int rc = 0;
  152. spin_lock_irqsave(&phba->ct_ev_lock, flags);
  153. dd_data = cmdiocbq->context1;
  154. if (!dd_data) {
  155. spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
  156. return;
  157. }
  158. iocb = &dd_data->context_un.iocb;
  159. job = iocb->set_job;
  160. job->dd_data = NULL; /* so timeout handler does not reply */
  161. spin_lock_irqsave(&phba->hbalock, iflags);
  162. cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
  163. if (cmdiocbq->context2 && rspiocbq)
  164. memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
  165. &rspiocbq->iocb, sizeof(IOCB_t));
  166. spin_unlock_irqrestore(&phba->hbalock, iflags);
  167. bmp = iocb->bmp;
  168. rspiocbq = iocb->rspiocbq;
  169. rsp = &rspiocbq->iocb;
  170. ndlp = iocb->ndlp;
  171. pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
  172. job->request_payload.sg_cnt, DMA_TO_DEVICE);
  173. pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
  174. job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
  175. if (rsp->ulpStatus) {
  176. if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
  177. switch (rsp->un.ulpWord[4] & 0xff) {
  178. case IOERR_SEQUENCE_TIMEOUT:
  179. rc = -ETIMEDOUT;
  180. break;
  181. case IOERR_INVALID_RPI:
  182. rc = -EFAULT;
  183. break;
  184. default:
  185. rc = -EACCES;
  186. break;
  187. }
  188. } else
  189. rc = -EACCES;
  190. } else
  191. job->reply->reply_payload_rcv_len =
  192. rsp->un.genreq64.bdl.bdeSize;
  193. lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
  194. lpfc_sli_release_iocbq(phba, rspiocbq);
  195. lpfc_sli_release_iocbq(phba, cmdiocbq);
  196. lpfc_nlp_put(ndlp);
  197. kfree(bmp);
  198. kfree(dd_data);
  199. /* make error code available to userspace */
  200. job->reply->result = rc;
  201. /* complete the job back to userspace */
  202. job->job_done(job);
  203. spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
  204. return;
  205. }
  206. /**
  207. * lpfc_bsg_send_mgmt_cmd - send a CT command from a bsg request
  208. * @job: fc_bsg_job to handle
  209. **/
  210. static int
  211. lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job)
  212. {
  213. struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
  214. struct lpfc_hba *phba = vport->phba;
  215. struct lpfc_rport_data *rdata = job->rport->dd_data;
  216. struct lpfc_nodelist *ndlp = rdata->pnode;
  217. struct ulp_bde64 *bpl = NULL;
  218. uint32_t timeout;
  219. struct lpfc_iocbq *cmdiocbq = NULL;
  220. struct lpfc_iocbq *rspiocbq = NULL;
  221. IOCB_t *cmd;
  222. IOCB_t *rsp;
  223. struct lpfc_dmabuf *bmp = NULL;
  224. int request_nseg;
  225. int reply_nseg;
  226. struct scatterlist *sgel = NULL;
  227. int numbde;
  228. dma_addr_t busaddr;
  229. struct bsg_job_data *dd_data;
  230. uint32_t creg_val;
  231. int rc = 0;
  232. /* in case no data is transferred */
  233. job->reply->reply_payload_rcv_len = 0;
  234. /* allocate our bsg tracking structure */
  235. dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
  236. if (!dd_data) {
  237. lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
  238. "2733 Failed allocation of dd_data\n");
  239. rc = -ENOMEM;
  240. goto no_dd_data;
  241. }
  242. if (!lpfc_nlp_get(ndlp)) {
  243. rc = -ENODEV;
  244. goto no_ndlp;
  245. }
  246. bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
  247. if (!bmp) {
  248. rc = -ENOMEM;
  249. goto free_ndlp;
  250. }
  251. if (ndlp->nlp_flag & NLP_ELS_SND_MASK) {
  252. rc = -ENODEV;
  253. goto free_bmp;
  254. }
  255. cmdiocbq = lpfc_sli_get_iocbq(phba);
  256. if (!cmdiocbq) {
  257. rc = -ENOMEM;
  258. goto free_bmp;
  259. }
  260. cmd = &cmdiocbq->iocb;
  261. rspiocbq = lpfc_sli_get_iocbq(phba);
  262. if (!rspiocbq) {
  263. rc = -ENOMEM;
  264. goto free_cmdiocbq;
  265. }
  266. rsp = &rspiocbq->iocb;
  267. bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
  268. if (!bmp->virt) {
  269. rc = -ENOMEM;
  270. goto free_rspiocbq;
  271. }
  272. INIT_LIST_HEAD(&bmp->list);
  273. bpl = (struct ulp_bde64 *) bmp->virt;
  274. request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
  275. job->request_payload.sg_cnt, DMA_TO_DEVICE);
  276. for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
  277. busaddr = sg_dma_address(sgel);
  278. bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
  279. bpl->tus.f.bdeSize = sg_dma_len(sgel);
  280. bpl->tus.w = cpu_to_le32(bpl->tus.w);
  281. bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
  282. bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
  283. bpl++;
  284. }
  285. reply_nseg = pci_map_sg(phba->pcidev, job->reply_payload.sg_list,
  286. job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
  287. for_each_sg(job->reply_payload.sg_list, sgel, reply_nseg, numbde) {
  288. busaddr = sg_dma_address(sgel);
  289. bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
  290. bpl->tus.f.bdeSize = sg_dma_len(sgel);
  291. bpl->tus.w = cpu_to_le32(bpl->tus.w);
  292. bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
  293. bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
  294. bpl++;
  295. }
  296. cmd->un.genreq64.bdl.ulpIoTag32 = 0;
  297. cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
  298. cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
  299. cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
  300. cmd->un.genreq64.bdl.bdeSize =
  301. (request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
  302. cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
  303. cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
  304. cmd->un.genreq64.w5.hcsw.Dfctl = 0;
  305. cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
  306. cmd->un.genreq64.w5.hcsw.Type = FC_TYPE_CT;
  307. cmd->ulpBdeCount = 1;
  308. cmd->ulpLe = 1;
  309. cmd->ulpClass = CLASS3;
  310. cmd->ulpContext = ndlp->nlp_rpi;
  311. cmd->ulpOwner = OWN_CHIP;
  312. cmdiocbq->vport = phba->pport;
  313. cmdiocbq->context3 = bmp;
  314. cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
  315. timeout = phba->fc_ratov * 2;
  316. cmd->ulpTimeout = timeout;
  317. cmdiocbq->iocb_cmpl = lpfc_bsg_send_mgmt_cmd_cmp;
  318. cmdiocbq->context1 = dd_data;
  319. cmdiocbq->context2 = rspiocbq;
  320. dd_data->type = TYPE_IOCB;
  321. dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
  322. dd_data->context_un.iocb.rspiocbq = rspiocbq;
  323. dd_data->context_un.iocb.set_job = job;
  324. dd_data->context_un.iocb.bmp = bmp;
  325. dd_data->context_un.iocb.ndlp = ndlp;
  326. if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
  327. creg_val = readl(phba->HCregaddr);
  328. creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
  329. writel(creg_val, phba->HCregaddr);
  330. readl(phba->HCregaddr); /* flush */
  331. }
  332. rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
  333. if (rc == IOCB_SUCCESS)
  334. return 0; /* done for now */
  335. else if (rc == IOCB_BUSY)
  336. rc = EAGAIN;
  337. else
  338. rc = EIO;
  339. /* iocb failed so cleanup */
  340. pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
  341. job->request_payload.sg_cnt, DMA_TO_DEVICE);
  342. pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
  343. job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
  344. lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
  345. free_rspiocbq:
  346. lpfc_sli_release_iocbq(phba, rspiocbq);
  347. free_cmdiocbq:
  348. lpfc_sli_release_iocbq(phba, cmdiocbq);
  349. free_bmp:
  350. kfree(bmp);
  351. free_ndlp:
  352. lpfc_nlp_put(ndlp);
  353. no_ndlp:
  354. kfree(dd_data);
  355. no_dd_data:
  356. /* make error code available to userspace */
  357. job->reply->result = rc;
  358. job->dd_data = NULL;
  359. return rc;
  360. }
  361. /**
  362. * lpfc_bsg_rport_els_cmp - lpfc_bsg_rport_els's completion handler
  363. * @phba: Pointer to HBA context object.
  364. * @cmdiocbq: Pointer to command iocb.
  365. * @rspiocbq: Pointer to response iocb.
  366. *
  367. * This function is the completion handler for iocbs issued using
  368. * lpfc_bsg_rport_els_cmp function. This function is called by the
  369. * ring event handler function without any lock held. This function
  370. * can be called from both worker thread context and interrupt
  371. * context. This function also can be called from other thread which
  372. * cleans up the SLI layer objects.
  373. * This function copies the contents of the response iocb to the
  374. * response iocb memory object provided by the caller of
  375. * lpfc_sli_issue_iocb_wait and then wakes up the thread which
  376. * sleeps for the iocb completion.
  377. **/
  378. static void
  379. lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba,
  380. struct lpfc_iocbq *cmdiocbq,
  381. struct lpfc_iocbq *rspiocbq)
  382. {
  383. struct bsg_job_data *dd_data;
  384. struct fc_bsg_job *job;
  385. IOCB_t *rsp;
  386. struct lpfc_nodelist *ndlp;
  387. struct lpfc_dmabuf *pbuflist = NULL;
  388. struct fc_bsg_ctels_reply *els_reply;
  389. uint8_t *rjt_data;
  390. unsigned long flags;
  391. int rc = 0;
  392. spin_lock_irqsave(&phba->ct_ev_lock, flags);
  393. dd_data = cmdiocbq->context1;
  394. /* normal completion and timeout crossed paths, already done */
  395. if (!dd_data) {
  396. spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
  397. return;
  398. }
  399. cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
  400. if (cmdiocbq->context2 && rspiocbq)
  401. memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
  402. &rspiocbq->iocb, sizeof(IOCB_t));
  403. job = dd_data->context_un.iocb.set_job;
  404. cmdiocbq = dd_data->context_un.iocb.cmdiocbq;
  405. rspiocbq = dd_data->context_un.iocb.rspiocbq;
  406. rsp = &rspiocbq->iocb;
  407. ndlp = dd_data->context_un.iocb.ndlp;
  408. pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
  409. job->request_payload.sg_cnt, DMA_TO_DEVICE);
  410. pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
  411. job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
  412. if (job->reply->result == -EAGAIN)
  413. rc = -EAGAIN;
  414. else if (rsp->ulpStatus == IOSTAT_SUCCESS)
  415. job->reply->reply_payload_rcv_len =
  416. rsp->un.elsreq64.bdl.bdeSize;
  417. else if (rsp->ulpStatus == IOSTAT_LS_RJT) {
  418. job->reply->reply_payload_rcv_len =
  419. sizeof(struct fc_bsg_ctels_reply);
  420. /* LS_RJT data returned in word 4 */
  421. rjt_data = (uint8_t *)&rsp->un.ulpWord[4];
  422. els_reply = &job->reply->reply_data.ctels_reply;
  423. els_reply->status = FC_CTELS_STATUS_REJECT;
  424. els_reply->rjt_data.action = rjt_data[3];
  425. els_reply->rjt_data.reason_code = rjt_data[2];
  426. els_reply->rjt_data.reason_explanation = rjt_data[1];
  427. els_reply->rjt_data.vendor_unique = rjt_data[0];
  428. } else
  429. rc = -EIO;
  430. pbuflist = (struct lpfc_dmabuf *) cmdiocbq->context3;
  431. lpfc_mbuf_free(phba, pbuflist->virt, pbuflist->phys);
  432. lpfc_sli_release_iocbq(phba, rspiocbq);
  433. lpfc_sli_release_iocbq(phba, cmdiocbq);
  434. lpfc_nlp_put(ndlp);
  435. kfree(dd_data);
  436. /* make error code available to userspace */
  437. job->reply->result = rc;
  438. job->dd_data = NULL;
  439. /* complete the job back to userspace */
  440. job->job_done(job);
  441. spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
  442. return;
  443. }
  444. /**
  445. * lpfc_bsg_rport_els - send an ELS command from a bsg request
  446. * @job: fc_bsg_job to handle
  447. **/
  448. static int
  449. lpfc_bsg_rport_els(struct fc_bsg_job *job)
  450. {
  451. struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
  452. struct lpfc_hba *phba = vport->phba;
  453. struct lpfc_rport_data *rdata = job->rport->dd_data;
  454. struct lpfc_nodelist *ndlp = rdata->pnode;
  455. uint32_t elscmd;
  456. uint32_t cmdsize;
  457. uint32_t rspsize;
  458. struct lpfc_iocbq *rspiocbq;
  459. struct lpfc_iocbq *cmdiocbq;
  460. IOCB_t *rsp;
  461. uint16_t rpi = 0;
  462. struct lpfc_dmabuf *pcmd;
  463. struct lpfc_dmabuf *prsp;
  464. struct lpfc_dmabuf *pbuflist = NULL;
  465. struct ulp_bde64 *bpl;
  466. int request_nseg;
  467. int reply_nseg;
  468. struct scatterlist *sgel = NULL;
  469. int numbde;
  470. dma_addr_t busaddr;
  471. struct bsg_job_data *dd_data;
  472. uint32_t creg_val;
  473. int rc = 0;
  474. /* in case no data is transferred */
  475. job->reply->reply_payload_rcv_len = 0;
  476. /* allocate our bsg tracking structure */
  477. dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
  478. if (!dd_data) {
  479. lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
  480. "2735 Failed allocation of dd_data\n");
  481. rc = -ENOMEM;
  482. goto no_dd_data;
  483. }
  484. if (!lpfc_nlp_get(ndlp)) {
  485. rc = -ENODEV;
  486. goto free_dd_data;
  487. }
  488. elscmd = job->request->rqst_data.r_els.els_code;
  489. cmdsize = job->request_payload.payload_len;
  490. rspsize = job->reply_payload.payload_len;
  491. rspiocbq = lpfc_sli_get_iocbq(phba);
  492. if (!rspiocbq) {
  493. lpfc_nlp_put(ndlp);
  494. rc = -ENOMEM;
  495. goto free_dd_data;
  496. }
  497. rsp = &rspiocbq->iocb;
  498. rpi = ndlp->nlp_rpi;
  499. cmdiocbq = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp,
  500. ndlp->nlp_DID, elscmd);
  501. if (!cmdiocbq) {
  502. rc = -EIO;
  503. goto free_rspiocbq;
  504. }
  505. /* prep els iocb set context1 to the ndlp, context2 to the command
  506. * dmabuf, context3 holds the data dmabuf
  507. */
  508. pcmd = (struct lpfc_dmabuf *) cmdiocbq->context2;
  509. prsp = (struct lpfc_dmabuf *) pcmd->list.next;
  510. lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
  511. kfree(pcmd);
  512. lpfc_mbuf_free(phba, prsp->virt, prsp->phys);
  513. kfree(prsp);
  514. cmdiocbq->context2 = NULL;
  515. pbuflist = (struct lpfc_dmabuf *) cmdiocbq->context3;
  516. bpl = (struct ulp_bde64 *) pbuflist->virt;
  517. request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
  518. job->request_payload.sg_cnt, DMA_TO_DEVICE);
  519. for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
  520. busaddr = sg_dma_address(sgel);
  521. bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
  522. bpl->tus.f.bdeSize = sg_dma_len(sgel);
  523. bpl->tus.w = cpu_to_le32(bpl->tus.w);
  524. bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
  525. bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
  526. bpl++;
  527. }
  528. reply_nseg = pci_map_sg(phba->pcidev, job->reply_payload.sg_list,
  529. job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
  530. for_each_sg(job->reply_payload.sg_list, sgel, reply_nseg, numbde) {
  531. busaddr = sg_dma_address(sgel);
  532. bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
  533. bpl->tus.f.bdeSize = sg_dma_len(sgel);
  534. bpl->tus.w = cpu_to_le32(bpl->tus.w);
  535. bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
  536. bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
  537. bpl++;
  538. }
  539. cmdiocbq->iocb.un.elsreq64.bdl.bdeSize =
  540. (request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
  541. cmdiocbq->iocb.ulpContext = rpi;
  542. cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
  543. cmdiocbq->context1 = NULL;
  544. cmdiocbq->context2 = NULL;
  545. cmdiocbq->iocb_cmpl = lpfc_bsg_rport_els_cmp;
  546. cmdiocbq->context1 = dd_data;
  547. cmdiocbq->context2 = rspiocbq;
  548. dd_data->type = TYPE_IOCB;
  549. dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
  550. dd_data->context_un.iocb.rspiocbq = rspiocbq;
  551. dd_data->context_un.iocb.set_job = job;
  552. dd_data->context_un.iocb.bmp = NULL;;
  553. dd_data->context_un.iocb.ndlp = ndlp;
  554. if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
  555. creg_val = readl(phba->HCregaddr);
  556. creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
  557. writel(creg_val, phba->HCregaddr);
  558. readl(phba->HCregaddr); /* flush */
  559. }
  560. rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
  561. lpfc_nlp_put(ndlp);
  562. if (rc == IOCB_SUCCESS)
  563. return 0; /* done for now */
  564. else if (rc == IOCB_BUSY)
  565. rc = EAGAIN;
  566. else
  567. rc = EIO;
  568. pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
  569. job->request_payload.sg_cnt, DMA_TO_DEVICE);
  570. pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
  571. job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
  572. lpfc_mbuf_free(phba, pbuflist->virt, pbuflist->phys);
  573. lpfc_sli_release_iocbq(phba, cmdiocbq);
  574. free_rspiocbq:
  575. lpfc_sli_release_iocbq(phba, rspiocbq);
  576. free_dd_data:
  577. kfree(dd_data);
  578. no_dd_data:
  579. /* make error code available to userspace */
  580. job->reply->result = rc;
  581. job->dd_data = NULL;
  582. return rc;
  583. }
  584. /**
  585. * lpfc_bsg_event_free - frees an allocated event structure
  586. * @kref: Pointer to a kref.
  587. *
  588. * Called from kref_put. Back cast the kref into an event structure address.
  589. * Free any events to get, delete associated nodes, free any events to see,
  590. * free any data then free the event itself.
  591. **/
  592. static void
  593. lpfc_bsg_event_free(struct kref *kref)
  594. {
  595. struct lpfc_bsg_event *evt = container_of(kref, struct lpfc_bsg_event,
  596. kref);
  597. struct event_data *ed;
  598. list_del(&evt->node);
  599. while (!list_empty(&evt->events_to_get)) {
  600. ed = list_entry(evt->events_to_get.next, typeof(*ed), node);
  601. list_del(&ed->node);
  602. kfree(ed->data);
  603. kfree(ed);
  604. }
  605. while (!list_empty(&evt->events_to_see)) {
  606. ed = list_entry(evt->events_to_see.next, typeof(*ed), node);
  607. list_del(&ed->node);
  608. kfree(ed->data);
  609. kfree(ed);
  610. }
  611. kfree(evt);
  612. }
  613. /**
  614. * lpfc_bsg_event_ref - increments the kref for an event
  615. * @evt: Pointer to an event structure.
  616. **/
  617. static inline void
  618. lpfc_bsg_event_ref(struct lpfc_bsg_event *evt)
  619. {
  620. kref_get(&evt->kref);
  621. }
  622. /**
  623. * lpfc_bsg_event_unref - Uses kref_put to free an event structure
  624. * @evt: Pointer to an event structure.
  625. **/
  626. static inline void
  627. lpfc_bsg_event_unref(struct lpfc_bsg_event *evt)
  628. {
  629. kref_put(&evt->kref, lpfc_bsg_event_free);
  630. }
  631. /**
  632. * lpfc_bsg_event_new - allocate and initialize a event structure
  633. * @ev_mask: Mask of events.
  634. * @ev_reg_id: Event reg id.
  635. * @ev_req_id: Event request id.
  636. **/
  637. static struct lpfc_bsg_event *
  638. lpfc_bsg_event_new(uint32_t ev_mask, int ev_reg_id, uint32_t ev_req_id)
  639. {
  640. struct lpfc_bsg_event *evt = kzalloc(sizeof(*evt), GFP_KERNEL);
  641. if (!evt)
  642. return NULL;
  643. INIT_LIST_HEAD(&evt->events_to_get);
  644. INIT_LIST_HEAD(&evt->events_to_see);
  645. evt->type_mask = ev_mask;
  646. evt->req_id = ev_req_id;
  647. evt->reg_id = ev_reg_id;
  648. evt->wait_time_stamp = jiffies;
  649. init_waitqueue_head(&evt->wq);
  650. kref_init(&evt->kref);
  651. return evt;
  652. }
  653. /**
  654. * diag_cmd_data_free - Frees an lpfc dma buffer extension
  655. * @phba: Pointer to HBA context object.
  656. * @mlist: Pointer to an lpfc dma buffer extension.
  657. **/
  658. static int
  659. diag_cmd_data_free(struct lpfc_hba *phba, struct lpfc_dmabufext *mlist)
  660. {
  661. struct lpfc_dmabufext *mlast;
  662. struct pci_dev *pcidev;
  663. struct list_head head, *curr, *next;
  664. if ((!mlist) || (!lpfc_is_link_up(phba) &&
  665. (phba->link_flag & LS_LOOPBACK_MODE))) {
  666. return 0;
  667. }
  668. pcidev = phba->pcidev;
  669. list_add_tail(&head, &mlist->dma.list);
  670. list_for_each_safe(curr, next, &head) {
  671. mlast = list_entry(curr, struct lpfc_dmabufext , dma.list);
  672. if (mlast->dma.virt)
  673. dma_free_coherent(&pcidev->dev,
  674. mlast->size,
  675. mlast->dma.virt,
  676. mlast->dma.phys);
  677. kfree(mlast);
  678. }
  679. return 0;
  680. }
  681. /**
  682. * lpfc_bsg_ct_unsol_event - process an unsolicited CT command
  683. * @phba:
  684. * @pring:
  685. * @piocbq:
  686. *
  687. * This function is called when an unsolicited CT command is received. It
  688. * forwards the event to any processes registered to receive CT events.
  689. **/
  690. int
  691. lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
  692. struct lpfc_iocbq *piocbq)
  693. {
  694. uint32_t evt_req_id = 0;
  695. uint32_t cmd;
  696. uint32_t len;
  697. struct lpfc_dmabuf *dmabuf = NULL;
  698. struct lpfc_bsg_event *evt;
  699. struct event_data *evt_dat = NULL;
  700. struct lpfc_iocbq *iocbq;
  701. size_t offset = 0;
  702. struct list_head head;
  703. struct ulp_bde64 *bde;
  704. dma_addr_t dma_addr;
  705. int i;
  706. struct lpfc_dmabuf *bdeBuf1 = piocbq->context2;
  707. struct lpfc_dmabuf *bdeBuf2 = piocbq->context3;
  708. struct lpfc_hbq_entry *hbqe;
  709. struct lpfc_sli_ct_request *ct_req;
  710. struct fc_bsg_job *job = NULL;
  711. unsigned long flags;
  712. int size = 0;
  713. INIT_LIST_HEAD(&head);
  714. list_add_tail(&head, &piocbq->list);
  715. if (piocbq->iocb.ulpBdeCount == 0 ||
  716. piocbq->iocb.un.cont64[0].tus.f.bdeSize == 0)
  717. goto error_ct_unsol_exit;
  718. if (phba->link_state == LPFC_HBA_ERROR ||
  719. (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)))
  720. goto error_ct_unsol_exit;
  721. if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
  722. dmabuf = bdeBuf1;
  723. else {
  724. dma_addr = getPaddr(piocbq->iocb.un.cont64[0].addrHigh,
  725. piocbq->iocb.un.cont64[0].addrLow);
  726. dmabuf = lpfc_sli_ringpostbuf_get(phba, pring, dma_addr);
  727. }
  728. if (dmabuf == NULL)
  729. goto error_ct_unsol_exit;
  730. ct_req = (struct lpfc_sli_ct_request *)dmabuf->virt;
  731. evt_req_id = ct_req->FsType;
  732. cmd = ct_req->CommandResponse.bits.CmdRsp;
  733. len = ct_req->CommandResponse.bits.Size;
  734. if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
  735. lpfc_sli_ringpostbuf_put(phba, pring, dmabuf);
  736. spin_lock_irqsave(&phba->ct_ev_lock, flags);
  737. list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
  738. if (!(evt->type_mask & FC_REG_CT_EVENT) ||
  739. evt->req_id != evt_req_id)
  740. continue;
  741. lpfc_bsg_event_ref(evt);
  742. spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
  743. evt_dat = kzalloc(sizeof(*evt_dat), GFP_KERNEL);
  744. if (evt_dat == NULL) {
  745. spin_lock_irqsave(&phba->ct_ev_lock, flags);
  746. lpfc_bsg_event_unref(evt);
  747. lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
  748. "2614 Memory allocation failed for "
  749. "CT event\n");
  750. break;
  751. }
  752. if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
  753. /* take accumulated byte count from the last iocbq */
  754. iocbq = list_entry(head.prev, typeof(*iocbq), list);
  755. evt_dat->len = iocbq->iocb.unsli3.rcvsli3.acc_len;
  756. } else {
  757. list_for_each_entry(iocbq, &head, list) {
  758. for (i = 0; i < iocbq->iocb.ulpBdeCount; i++)
  759. evt_dat->len +=
  760. iocbq->iocb.un.cont64[i].tus.f.bdeSize;
  761. }
  762. }
  763. evt_dat->data = kzalloc(evt_dat->len, GFP_KERNEL);
  764. if (evt_dat->data == NULL) {
  765. lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
  766. "2615 Memory allocation failed for "
  767. "CT event data, size %d\n",
  768. evt_dat->len);
  769. kfree(evt_dat);
  770. spin_lock_irqsave(&phba->ct_ev_lock, flags);
  771. lpfc_bsg_event_unref(evt);
  772. spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
  773. goto error_ct_unsol_exit;
  774. }
  775. list_for_each_entry(iocbq, &head, list) {
  776. size = 0;
  777. if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
  778. bdeBuf1 = iocbq->context2;
  779. bdeBuf2 = iocbq->context3;
  780. }
  781. for (i = 0; i < iocbq->iocb.ulpBdeCount; i++) {
  782. if (phba->sli3_options &
  783. LPFC_SLI3_HBQ_ENABLED) {
  784. if (i == 0) {
  785. hbqe = (struct lpfc_hbq_entry *)
  786. &iocbq->iocb.un.ulpWord[0];
  787. size = hbqe->bde.tus.f.bdeSize;
  788. dmabuf = bdeBuf1;
  789. } else if (i == 1) {
  790. hbqe = (struct lpfc_hbq_entry *)
  791. &iocbq->iocb.unsli3.
  792. sli3Words[4];
  793. size = hbqe->bde.tus.f.bdeSize;
  794. dmabuf = bdeBuf2;
  795. }
  796. if ((offset + size) > evt_dat->len)
  797. size = evt_dat->len - offset;
  798. } else {
  799. size = iocbq->iocb.un.cont64[i].
  800. tus.f.bdeSize;
  801. bde = &iocbq->iocb.un.cont64[i];
  802. dma_addr = getPaddr(bde->addrHigh,
  803. bde->addrLow);
  804. dmabuf = lpfc_sli_ringpostbuf_get(phba,
  805. pring, dma_addr);
  806. }
  807. if (!dmabuf) {
  808. lpfc_printf_log(phba, KERN_ERR,
  809. LOG_LIBDFC, "2616 No dmabuf "
  810. "found for iocbq 0x%p\n",
  811. iocbq);
  812. kfree(evt_dat->data);
  813. kfree(evt_dat);
  814. spin_lock_irqsave(&phba->ct_ev_lock,
  815. flags);
  816. lpfc_bsg_event_unref(evt);
  817. spin_unlock_irqrestore(
  818. &phba->ct_ev_lock, flags);
  819. goto error_ct_unsol_exit;
  820. }
  821. memcpy((char *)(evt_dat->data) + offset,
  822. dmabuf->virt, size);
  823. offset += size;
  824. if (evt_req_id != SLI_CT_ELX_LOOPBACK &&
  825. !(phba->sli3_options &
  826. LPFC_SLI3_HBQ_ENABLED)) {
  827. lpfc_sli_ringpostbuf_put(phba, pring,
  828. dmabuf);
  829. } else {
  830. switch (cmd) {
  831. case ELX_LOOPBACK_DATA:
  832. diag_cmd_data_free(phba,
  833. (struct lpfc_dmabufext *)
  834. dmabuf);
  835. break;
  836. case ELX_LOOPBACK_XRI_SETUP:
  837. if ((phba->sli_rev ==
  838. LPFC_SLI_REV2) ||
  839. (phba->sli3_options &
  840. LPFC_SLI3_HBQ_ENABLED
  841. )) {
  842. lpfc_in_buf_free(phba,
  843. dmabuf);
  844. } else {
  845. lpfc_post_buffer(phba,
  846. pring,
  847. 1);
  848. }
  849. break;
  850. default:
  851. if (!(phba->sli3_options &
  852. LPFC_SLI3_HBQ_ENABLED))
  853. lpfc_post_buffer(phba,
  854. pring,
  855. 1);
  856. break;
  857. }
  858. }
  859. }
  860. }
  861. spin_lock_irqsave(&phba->ct_ev_lock, flags);
  862. if (phba->sli_rev == LPFC_SLI_REV4) {
  863. evt_dat->immed_dat = phba->ctx_idx;
  864. phba->ctx_idx = (phba->ctx_idx + 1) % 64;
  865. /* Provide warning for over-run of the ct_ctx array */
  866. if (phba->ct_ctx[evt_dat->immed_dat].flags &
  867. UNSOL_VALID)
  868. lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
  869. "2717 CT context array entry "
  870. "[%d] over-run: oxid:x%x, "
  871. "sid:x%x\n", phba->ctx_idx,
  872. phba->ct_ctx[
  873. evt_dat->immed_dat].oxid,
  874. phba->ct_ctx[
  875. evt_dat->immed_dat].SID);
  876. phba->ct_ctx[evt_dat->immed_dat].oxid =
  877. piocbq->iocb.ulpContext;
  878. phba->ct_ctx[evt_dat->immed_dat].SID =
  879. piocbq->iocb.un.rcvels.remoteID;
  880. phba->ct_ctx[evt_dat->immed_dat].flags = UNSOL_VALID;
  881. } else
  882. evt_dat->immed_dat = piocbq->iocb.ulpContext;
  883. evt_dat->type = FC_REG_CT_EVENT;
  884. list_add(&evt_dat->node, &evt->events_to_see);
  885. if (evt_req_id == SLI_CT_ELX_LOOPBACK) {
  886. wake_up_interruptible(&evt->wq);
  887. lpfc_bsg_event_unref(evt);
  888. break;
  889. }
  890. list_move(evt->events_to_see.prev, &evt->events_to_get);
  891. lpfc_bsg_event_unref(evt);
  892. job = evt->set_job;
  893. evt->set_job = NULL;
  894. if (job) {
  895. job->reply->reply_payload_rcv_len = size;
  896. /* make error code available to userspace */
  897. job->reply->result = 0;
  898. job->dd_data = NULL;
  899. /* complete the job back to userspace */
  900. spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
  901. job->job_done(job);
  902. spin_lock_irqsave(&phba->ct_ev_lock, flags);
  903. }
  904. }
  905. spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
  906. error_ct_unsol_exit:
  907. if (!list_empty(&head))
  908. list_del(&head);
  909. if (evt_req_id == SLI_CT_ELX_LOOPBACK)
  910. return 0;
  911. return 1;
  912. }
  913. /**
  914. * lpfc_bsg_hba_set_event - process a SET_EVENT bsg vendor command
  915. * @job: SET_EVENT fc_bsg_job
  916. **/
  917. static int
  918. lpfc_bsg_hba_set_event(struct fc_bsg_job *job)
  919. {
  920. struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
  921. struct lpfc_hba *phba = vport->phba;
  922. struct set_ct_event *event_req;
  923. struct lpfc_bsg_event *evt;
  924. int rc = 0;
  925. struct bsg_job_data *dd_data = NULL;
  926. uint32_t ev_mask;
  927. unsigned long flags;
  928. if (job->request_len <
  929. sizeof(struct fc_bsg_request) + sizeof(struct set_ct_event)) {
  930. lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
  931. "2612 Received SET_CT_EVENT below minimum "
  932. "size\n");
  933. rc = -EINVAL;
  934. goto job_error;
  935. }
  936. dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
  937. if (dd_data == NULL) {
  938. lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
  939. "2734 Failed allocation of dd_data\n");
  940. rc = -ENOMEM;
  941. goto job_error;
  942. }
  943. event_req = (struct set_ct_event *)
  944. job->request->rqst_data.h_vendor.vendor_cmd;
  945. ev_mask = ((uint32_t)(unsigned long)event_req->type_mask &
  946. FC_REG_EVENT_MASK);
  947. spin_lock_irqsave(&phba->ct_ev_lock, flags);
  948. list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
  949. if (evt->reg_id == event_req->ev_reg_id) {
  950. lpfc_bsg_event_ref(evt);
  951. evt->wait_time_stamp = jiffies;
  952. break;
  953. }
  954. }
  955. spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
  956. if (&evt->node == &phba->ct_ev_waiters) {
  957. /* no event waiting struct yet - first call */
  958. evt = lpfc_bsg_event_new(ev_mask, event_req->ev_reg_id,
  959. event_req->ev_req_id);
  960. if (!evt) {
  961. lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
  962. "2617 Failed allocation of event "
  963. "waiter\n");
  964. rc = -ENOMEM;
  965. goto job_error;
  966. }
  967. spin_lock_irqsave(&phba->ct_ev_lock, flags);
  968. list_add(&evt->node, &phba->ct_ev_waiters);
  969. lpfc_bsg_event_ref(evt);
  970. evt->wait_time_stamp = jiffies;
  971. spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
  972. }
  973. spin_lock_irqsave(&phba->ct_ev_lock, flags);
  974. evt->waiting = 1;
  975. dd_data->type = TYPE_EVT;
  976. dd_data->context_un.evt = evt;
  977. evt->set_job = job; /* for unsolicited command */
  978. job->dd_data = dd_data; /* for fc transport timeout callback*/
  979. spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
  980. return 0; /* call job done later */
  981. job_error:
  982. if (dd_data != NULL)
  983. kfree(dd_data);
  984. job->dd_data = NULL;
  985. return rc;
  986. }
  987. /**
  988. * lpfc_bsg_hba_get_event - process a GET_EVENT bsg vendor command
  989. * @job: GET_EVENT fc_bsg_job
  990. **/
  991. static int
  992. lpfc_bsg_hba_get_event(struct fc_bsg_job *job)
  993. {
  994. struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
  995. struct lpfc_hba *phba = vport->phba;
  996. struct get_ct_event *event_req;
  997. struct get_ct_event_reply *event_reply;
  998. struct lpfc_bsg_event *evt;
  999. struct event_data *evt_dat = NULL;
  1000. unsigned long flags;
  1001. uint32_t rc = 0;
  1002. if (job->request_len <
  1003. sizeof(struct fc_bsg_request) + sizeof(struct get_ct_event)) {
  1004. lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
  1005. "2613 Received GET_CT_EVENT request below "
  1006. "minimum size\n");
  1007. rc = -EINVAL;
  1008. goto job_error;
  1009. }
  1010. event_req = (struct get_ct_event *)
  1011. job->request->rqst_data.h_vendor.vendor_cmd;
  1012. event_reply = (struct get_ct_event_reply *)
  1013. job->reply->reply_data.vendor_reply.vendor_rsp;
  1014. spin_lock_irqsave(&phba->ct_ev_lock, flags);
  1015. list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
  1016. if (evt->reg_id == event_req->ev_reg_id) {
  1017. if (list_empty(&evt->events_to_get))
  1018. break;
  1019. lpfc_bsg_event_ref(evt);
  1020. evt->wait_time_stamp = jiffies;
  1021. evt_dat = list_entry(evt->events_to_get.prev,
  1022. struct event_data, node);
  1023. list_del(&evt_dat->node);
  1024. break;
  1025. }
  1026. }
  1027. spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
  1028. /* The app may continue to ask for event data until it gets
  1029. * an error indicating that there isn't anymore
  1030. */
  1031. if (evt_dat == NULL) {
  1032. job->reply->reply_payload_rcv_len = 0;
  1033. rc = -ENOENT;
  1034. goto job_error;
  1035. }
  1036. if (evt_dat->len > job->request_payload.payload_len) {
  1037. evt_dat->len = job->request_payload.payload_len;
  1038. lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
  1039. "2618 Truncated event data at %d "
  1040. "bytes\n",
  1041. job->request_payload.payload_len);
  1042. }
  1043. event_reply->type = evt_dat->type;
  1044. event_reply->immed_data = evt_dat->immed_dat;
  1045. if (evt_dat->len > 0)
  1046. job->reply->reply_payload_rcv_len =
  1047. sg_copy_from_buffer(job->request_payload.sg_list,
  1048. job->request_payload.sg_cnt,
  1049. evt_dat->data, evt_dat->len);
  1050. else
  1051. job->reply->reply_payload_rcv_len = 0;
  1052. if (evt_dat) {
  1053. kfree(evt_dat->data);
  1054. kfree(evt_dat);
  1055. }
  1056. spin_lock_irqsave(&phba->ct_ev_lock, flags);
  1057. lpfc_bsg_event_unref(evt);
  1058. spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
  1059. job->dd_data = NULL;
  1060. job->reply->result = 0;
  1061. job->job_done(job);
  1062. return 0;
  1063. job_error:
  1064. job->dd_data = NULL;
  1065. job->reply->result = rc;
  1066. return rc;
  1067. }
  1068. /**
  1069. * lpfc_issue_ct_rsp_cmp - lpfc_issue_ct_rsp's completion handler
  1070. * @phba: Pointer to HBA context object.
  1071. * @cmdiocbq: Pointer to command iocb.
  1072. * @rspiocbq: Pointer to response iocb.
  1073. *
  1074. * This function is the completion handler for iocbs issued using
  1075. * lpfc_issue_ct_rsp_cmp function. This function is called by the
  1076. * ring event handler function without any lock held. This function
  1077. * can be called from both worker thread context and interrupt
  1078. * context. This function also can be called from other thread which
  1079. * cleans up the SLI layer objects.
  1080. * This function copy the contents of the response iocb to the
  1081. * response iocb memory object provided by the caller of
  1082. * lpfc_sli_issue_iocb_wait and then wakes up the thread which
  1083. * sleeps for the iocb completion.
  1084. **/
  1085. static void
  1086. lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba,
  1087. struct lpfc_iocbq *cmdiocbq,
  1088. struct lpfc_iocbq *rspiocbq)
  1089. {
  1090. struct bsg_job_data *dd_data;
  1091. struct fc_bsg_job *job;
  1092. IOCB_t *rsp;
  1093. struct lpfc_dmabuf *bmp;
  1094. struct lpfc_nodelist *ndlp;
  1095. unsigned long flags;
  1096. int rc = 0;
  1097. spin_lock_irqsave(&phba->ct_ev_lock, flags);
  1098. dd_data = cmdiocbq->context1;
  1099. /* normal completion and timeout crossed paths, already done */
  1100. if (!dd_data) {
  1101. spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
  1102. return;
  1103. }
  1104. job = dd_data->context_un.iocb.set_job;
  1105. bmp = dd_data->context_un.iocb.bmp;
  1106. rsp = &rspiocbq->iocb;
  1107. ndlp = dd_data->context_un.iocb.ndlp;
  1108. pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
  1109. job->request_payload.sg_cnt, DMA_TO_DEVICE);
  1110. if (rsp->ulpStatus) {
  1111. if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
  1112. switch (rsp->un.ulpWord[4] & 0xff) {
  1113. case IOERR_SEQUENCE_TIMEOUT:
  1114. rc = -ETIMEDOUT;
  1115. break;
  1116. case IOERR_INVALID_RPI:
  1117. rc = -EFAULT;
  1118. break;
  1119. default:
  1120. rc = -EACCES;
  1121. break;
  1122. }
  1123. } else
  1124. rc = -EACCES;
  1125. } else
  1126. job->reply->reply_payload_rcv_len =
  1127. rsp->un.genreq64.bdl.bdeSize;
  1128. lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
  1129. lpfc_sli_release_iocbq(phba, cmdiocbq);
  1130. lpfc_nlp_put(ndlp);
  1131. kfree(bmp);
  1132. kfree(dd_data);
  1133. /* make error code available to userspace */
  1134. job->reply->result = rc;
  1135. job->dd_data = NULL;
  1136. /* complete the job back to userspace */
  1137. job->job_done(job);
  1138. spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
  1139. return;
  1140. }
  1141. /**
  1142. * lpfc_issue_ct_rsp - issue a ct response
  1143. * @phba: Pointer to HBA context object.
  1144. * @job: Pointer to the job object.
  1145. * @tag: tag index value into the ports context exchange array.
  1146. * @bmp: Pointer to a dma buffer descriptor.
  1147. * @num_entry: Number of enties in the bde.
  1148. **/
  1149. static int
  1150. lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
  1151. struct lpfc_dmabuf *bmp, int num_entry)
  1152. {
  1153. IOCB_t *icmd;
  1154. struct lpfc_iocbq *ctiocb = NULL;
  1155. int rc = 0;
  1156. struct lpfc_nodelist *ndlp = NULL;
  1157. struct bsg_job_data *dd_data;
  1158. uint32_t creg_val;
  1159. /* allocate our bsg tracking structure */
  1160. dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
  1161. if (!dd_data) {
  1162. lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
  1163. "2736 Failed allocation of dd_data\n");
  1164. rc = -ENOMEM;
  1165. goto no_dd_data;
  1166. }
  1167. /* Allocate buffer for command iocb */
  1168. ctiocb = lpfc_sli_get_iocbq(phba);
  1169. if (!ctiocb) {
  1170. rc = ENOMEM;
  1171. goto no_ctiocb;
  1172. }
  1173. icmd = &ctiocb->iocb;
  1174. icmd->un.xseq64.bdl.ulpIoTag32 = 0;
  1175. icmd->un.xseq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
  1176. icmd->un.xseq64.bdl.addrLow = putPaddrLow(bmp->phys);
  1177. icmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
  1178. icmd->un.xseq64.bdl.bdeSize = (num_entry * sizeof(struct ulp_bde64));
  1179. icmd->un.xseq64.w5.hcsw.Fctl = (LS | LA);
  1180. icmd->un.xseq64.w5.hcsw.Dfctl = 0;
  1181. icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_SOL_CTL;
  1182. icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;
  1183. /* Fill in rest of iocb */
  1184. icmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX;
  1185. icmd->ulpBdeCount = 1;
  1186. icmd->ulpLe = 1;
  1187. icmd->ulpClass = CLASS3;
  1188. if (phba->sli_rev == LPFC_SLI_REV4) {
  1189. /* Do not issue unsol response if oxid not marked as valid */
  1190. if (!(phba->ct_ctx[tag].flags & UNSOL_VALID)) {
  1191. rc = IOCB_ERROR;
  1192. goto issue_ct_rsp_exit;
  1193. }
  1194. icmd->ulpContext = phba->ct_ctx[tag].oxid;
  1195. ndlp = lpfc_findnode_did(phba->pport, phba->ct_ctx[tag].SID);
  1196. if (!ndlp) {
  1197. lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
  1198. "2721 ndlp null for oxid %x SID %x\n",
  1199. icmd->ulpContext,
  1200. phba->ct_ctx[tag].SID);
  1201. rc = IOCB_ERROR;
  1202. goto issue_ct_rsp_exit;
  1203. }
  1204. /* Check if the ndlp is active */
  1205. if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
  1206. rc = -IOCB_ERROR;
  1207. goto issue_ct_rsp_exit;
  1208. }
1209. /* get a reference count so the ndlp doesn't go away while
  1210. * we respond
  1211. */
  1212. if (!lpfc_nlp_get(ndlp)) {
  1213. rc = -IOCB_ERROR;
  1214. goto issue_ct_rsp_exit;
  1215. }
  1216. icmd->un.ulpWord[3] = ndlp->nlp_rpi;
  1217. /* The exchange is done, mark the entry as invalid */
  1218. phba->ct_ctx[tag].flags &= ~UNSOL_VALID;
  1219. } else
  1220. icmd->ulpContext = (ushort) tag;
  1221. icmd->ulpTimeout = phba->fc_ratov * 2;
  1222. /* Xmit CT response on exchange <xid> */
  1223. lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
  1224. "2722 Xmit CT response on exchange x%x Data: x%x x%x\n",
  1225. icmd->ulpContext, icmd->ulpIoTag, phba->link_state);
  1226. ctiocb->iocb_cmpl = NULL;
  1227. ctiocb->iocb_flag |= LPFC_IO_LIBDFC;
  1228. ctiocb->vport = phba->pport;
  1229. ctiocb->context3 = bmp;
  1230. ctiocb->iocb_cmpl = lpfc_issue_ct_rsp_cmp;
  1231. ctiocb->context1 = dd_data;
  1232. ctiocb->context2 = NULL;
  1233. dd_data->type = TYPE_IOCB;
  1234. dd_data->context_un.iocb.cmdiocbq = ctiocb;
  1235. dd_data->context_un.iocb.rspiocbq = NULL;
  1236. dd_data->context_un.iocb.set_job = job;
  1237. dd_data->context_un.iocb.bmp = bmp;
  1238. dd_data->context_un.iocb.ndlp = ndlp;
  1239. if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
  1240. creg_val = readl(phba->HCregaddr);
  1241. creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
  1242. writel(creg_val, phba->HCregaddr);
  1243. readl(phba->HCregaddr); /* flush */
  1244. }
  1245. rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
  1246. if (rc == IOCB_SUCCESS)
  1247. return 0; /* done for now */
  1248. issue_ct_rsp_exit:
  1249. lpfc_sli_release_iocbq(phba, ctiocb);
  1250. no_ctiocb:
  1251. kfree(dd_data);
  1252. no_dd_data:
  1253. return rc;
  1254. }
  1255. /**
  1256. * lpfc_bsg_send_mgmt_rsp - process a SEND_MGMT_RESP bsg vendor command
  1257. * @job: SEND_MGMT_RESP fc_bsg_job
  1258. **/
  1259. static int
  1260. lpfc_bsg_send_mgmt_rsp(struct fc_bsg_job *job)
  1261. {
  1262. struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
  1263. struct lpfc_hba *phba = vport->phba;
  1264. struct send_mgmt_resp *mgmt_resp = (struct send_mgmt_resp *)
  1265. job->request->rqst_data.h_vendor.vendor_cmd;
  1266. struct ulp_bde64 *bpl;
  1267. struct lpfc_dmabuf *bmp = NULL;
  1268. struct scatterlist *sgel = NULL;
  1269. int request_nseg;
  1270. int numbde;
  1271. dma_addr_t busaddr;
  1272. uint32_t tag = mgmt_resp->tag;
  1273. unsigned long reqbfrcnt =
  1274. (unsigned long)job->request_payload.payload_len;
  1275. int rc = 0;
  1276. /* in case no data is transferred */
  1277. job->reply->reply_payload_rcv_len = 0;
  1278. if (!reqbfrcnt || (reqbfrcnt > (80 * BUF_SZ_4K))) {
  1279. rc = -ERANGE;
  1280. goto send_mgmt_rsp_exit;
  1281. }
  1282. bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
  1283. if (!bmp) {
  1284. rc = -ENOMEM;
  1285. goto send_mgmt_rsp_exit;
  1286. }
  1287. bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
  1288. if (!bmp->virt) {
  1289. rc = -ENOMEM;
  1290. goto send_mgmt_rsp_free_bmp;
  1291. }
  1292. INIT_LIST_HEAD(&bmp->list);
  1293. bpl = (struct ulp_bde64 *) bmp->virt;
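/* Map the request payload and build one 64-bit BDE per scatter/gather
 * segment in the BPL that the CT response iocb will point at.
 */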
  1294. request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
  1295. job->request_payload.sg_cnt, DMA_TO_DEVICE);
  1296. for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
  1297. busaddr = sg_dma_address(sgel);
  1298. bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
  1299. bpl->tus.f.bdeSize = sg_dma_len(sgel);
  1300. bpl->tus.w = cpu_to_le32(bpl->tus.w);
  1301. bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
  1302. bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
  1303. bpl++;
  1304. }
  1305. rc = lpfc_issue_ct_rsp(phba, job, tag, bmp, request_nseg);
  1306. if (rc == IOCB_SUCCESS)
  1307. return 0; /* done for now */
  1308. /* TBD need to handle a timeout */
  1309. pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
  1310. job->request_payload.sg_cnt, DMA_TO_DEVICE);
  1311. rc = -EACCES;
  1312. lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
  1313. send_mgmt_rsp_free_bmp:
  1314. kfree(bmp);
  1315. send_mgmt_rsp_exit:
  1316. /* make error code available to userspace */
  1317. job->reply->result = rc;
  1318. job->dd_data = NULL;
  1319. return rc;
  1320. }
  1321. /**
  1322. * lpfc_bsg_diag_mode - process a LPFC_BSG_VENDOR_DIAG_MODE bsg vendor command
  1323. * @job: LPFC_BSG_VENDOR_DIAG_MODE
  1324. *
  1325. * This function is responsible for placing a port into diagnostic loopback
  1326. * mode in order to perform a diagnostic loopback test.
  1327. * All new scsi requests are blocked, a small delay is used to allow the
1328. * scsi requests to complete, then the link is brought down. Once the link
1329. * is placed in loopback mode, scsi requests are again allowed
  1330. * so the scsi mid-layer doesn't give up on the port.
  1331. * All of this is done in-line.
  1332. */
  1333. static int
  1334. lpfc_bsg_diag_mode(struct fc_bsg_job *job)
  1335. {
  1336. struct Scsi_Host *shost = job->shost;
  1337. struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
  1338. struct lpfc_hba *phba = vport->phba;
  1339. struct diag_mode_set *loopback_mode;
  1340. struct lpfc_sli *psli = &phba->sli;
  1341. struct lpfc_sli_ring *pring = &psli->ring[LPFC_FCP_RING];
  1342. uint32_t link_flags;
  1343. uint32_t timeout;
  1344. struct lpfc_vport **vports;
  1345. LPFC_MBOXQ_t *pmboxq;
  1346. int mbxstatus;
  1347. int i = 0;
  1348. int rc = 0;
  1349. /* no data to return just the return code */
  1350. job->reply->reply_payload_rcv_len = 0;
  1351. if (job->request_len <
  1352. sizeof(struct fc_bsg_request) + sizeof(struct diag_mode_set)) {
  1353. lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
  1354. "2738 Received DIAG MODE request below minimum "
  1355. "size\n");
  1356. rc = -EINVAL;
  1357. goto job_error;
  1358. }
  1359. loopback_mode = (struct diag_mode_set *)
  1360. job->request->rqst_data.h_vendor.vendor_cmd;
  1361. link_flags = loopback_mode->type;
  1362. timeout = loopback_mode->timeout;
  1363. if ((phba->link_state == LPFC_HBA_ERROR) ||
  1364. (psli->sli_flag & LPFC_BLOCK_MGMT_IO) ||
  1365. (!(psli->sli_flag & LPFC_SLI_ACTIVE))) {
  1366. rc = -EACCES;
  1367. goto job_error;
  1368. }
  1369. pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
  1370. if (!pmboxq) {
  1371. rc = -ENOMEM;
  1372. goto job_error;
  1373. }
  1374. vports = lpfc_create_vport_work_array(phba);
  1375. if (vports) {
  1376. for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
  1377. shost = lpfc_shost_from_vport(vports[i]);
  1378. scsi_block_requests(shost);
  1379. }
  1380. lpfc_destroy_vport_work_array(phba, vports);
  1381. } else {
  1382. shost = lpfc_shost_from_vport(phba->pport);
  1383. scsi_block_requests(shost);
  1384. }
  1385. while (pring->txcmplq_cnt) {
  1386. if (i++ > 500) /* wait up to 5 seconds */
  1387. break;
  1388. msleep(10);
  1389. }
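/* Take the link down first; loopback is only entered by the following
 * MBX_INIT_LINK once the link state has reached LPFC_LINK_DOWN.
 */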
  1390. memset((void *)pmboxq, 0, sizeof(LPFC_MBOXQ_t));
  1391. pmboxq->u.mb.mbxCommand = MBX_DOWN_LINK;
  1392. pmboxq->u.mb.mbxOwner = OWN_HOST;
  1393. mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
  1394. if ((mbxstatus == MBX_SUCCESS) && (pmboxq->u.mb.mbxStatus == 0)) {
  1395. /* wait for link down before proceeding */
  1396. i = 0;
  1397. while (phba->link_state != LPFC_LINK_DOWN) {
  1398. if (i++ > timeout) {
  1399. rc = -ETIMEDOUT;
  1400. goto loopback_mode_exit;
  1401. }
  1402. msleep(10);
  1403. }
  1404. memset((void *)pmboxq, 0, sizeof(LPFC_MBOXQ_t));
  1405. if (link_flags == INTERNAL_LOOP_BACK)
  1406. pmboxq->u.mb.un.varInitLnk.link_flags = FLAGS_LOCAL_LB;
  1407. else
  1408. pmboxq->u.mb.un.varInitLnk.link_flags =
  1409. FLAGS_TOPOLOGY_MODE_LOOP;
  1410. pmboxq->u.mb.mbxCommand = MBX_INIT_LINK;
  1411. pmboxq->u.mb.mbxOwner = OWN_HOST;
  1412. mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq,
  1413. LPFC_MBOX_TMO);
  1414. if ((mbxstatus != MBX_SUCCESS) || (pmboxq->u.mb.mbxStatus))
  1415. rc = -ENODEV;
  1416. else {
  1417. phba->link_flag |= LS_LOOPBACK_MODE;
  1418. /* wait for the link attention interrupt */
  1419. msleep(100);
  1420. i = 0;
  1421. while (phba->link_state != LPFC_HBA_READY) {
  1422. if (i++ > timeout) {
  1423. rc = -ETIMEDOUT;
  1424. break;
  1425. }
  1426. msleep(10);
  1427. }
  1428. }
  1429. } else
  1430. rc = -ENODEV;
  1431. loopback_mode_exit:
  1432. vports = lpfc_create_vport_work_array(phba);
  1433. if (vports) {
  1434. for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
  1435. shost = lpfc_shost_from_vport(vports[i]);
  1436. scsi_unblock_requests(shost);
  1437. }
  1438. lpfc_destroy_vport_work_array(phba, vports);
  1439. } else {
  1440. shost = lpfc_shost_from_vport(phba->pport);
  1441. scsi_unblock_requests(shost);
  1442. }
  1443. /*
  1444. * Let SLI layer release mboxq if mbox command completed after timeout.
  1445. */
  1446. if (mbxstatus != MBX_TIMEOUT)
  1447. mempool_free(pmboxq, phba->mbox_mem_pool);
  1448. job_error:
  1449. /* make error code available to userspace */
  1450. job->reply->result = rc;
  1451. /* complete the job back to userspace if no error */
  1452. if (rc == 0)
  1453. job->job_done(job);
  1454. return rc;
  1455. }
  1456. /**
  1457. * lpfcdiag_loop_self_reg - obtains a remote port login id
  1458. * @phba: Pointer to HBA context object
  1459. * @rpi: Pointer to a remote port login id
  1460. *
  1461. * This function obtains a remote port login id so the diag loopback test
  1462. * can send and receive its own unsolicited CT command.
  1463. **/
  1464. static int lpfcdiag_loop_self_reg(struct lpfc_hba *phba, uint16_t * rpi)
  1465. {
  1466. LPFC_MBOXQ_t *mbox;
  1467. struct lpfc_dmabuf *dmabuff;
  1468. int status;
  1469. mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
  1470. if (!mbox)
  1471. return ENOMEM;
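/* Register the port's own DID so loopback frames can be addressed to this
 * port; the assigned rpi is returned in the mailbox completion.
 */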
  1472. status = lpfc_reg_rpi(phba, 0, phba->pport->fc_myDID,
  1473. (uint8_t *)&phba->pport->fc_sparam, mbox, 0);
  1474. if (status) {
  1475. mempool_free(mbox, phba->mbox_mem_pool);
  1476. return ENOMEM;
  1477. }
  1478. dmabuff = (struct lpfc_dmabuf *) mbox->context1;
  1479. mbox->context1 = NULL;
  1480. status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
  1481. if ((status != MBX_SUCCESS) || (mbox->u.mb.mbxStatus)) {
  1482. lpfc_mbuf_free(phba, dmabuff->virt, dmabuff->phys);
  1483. kfree(dmabuff);
  1484. if (status != MBX_TIMEOUT)
  1485. mempool_free(mbox, phba->mbox_mem_pool);
  1486. return ENODEV;
  1487. }
  1488. *rpi = mbox->u.mb.un.varWords[0];
  1489. lpfc_mbuf_free(phba, dmabuff->virt, dmabuff->phys);
  1490. kfree(dmabuff);
  1491. mempool_free(mbox, phba->mbox_mem_pool);
  1492. return 0;
  1493. }
  1494. /**
  1495. * lpfcdiag_loop_self_unreg - unregs from the rpi
  1496. * @phba: Pointer to HBA context object
  1497. * @rpi: Remote port login id
  1498. *
  1499. * This function unregisters the rpi obtained in lpfcdiag_loop_self_reg
  1500. **/
  1501. static int lpfcdiag_loop_self_unreg(struct lpfc_hba *phba, uint16_t rpi)
  1502. {
  1503. LPFC_MBOXQ_t *mbox;
  1504. int status;
  1505. /* Allocate mboxq structure */
  1506. mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
  1507. if (mbox == NULL)
  1508. return ENOMEM;
  1509. lpfc_unreg_login(phba, 0, rpi, mbox);
  1510. status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
  1511. if ((status != MBX_SUCCESS) || (mbox->u.mb.mbxStatus)) {
  1512. if (status != MBX_TIMEOUT)
  1513. mempool_free(mbox, phba->mbox_mem_pool);
  1514. return EIO;
  1515. }
  1516. mempool_free(mbox, phba->mbox_mem_pool);
  1517. return 0;
  1518. }
  1519. /**
  1520. * lpfcdiag_loop_get_xri - obtains the transmit and receive ids
  1521. * @phba: Pointer to HBA context object
  1522. * @rpi: Remote port login id
  1523. * @txxri: Pointer to transmit exchange id
1524. * @rxxri: Pointer to receive exchange id
  1525. *
  1526. * This function obtains the transmit and receive ids required to send
  1527. * an unsolicited ct command with a payload. A special lpfc FsType and CmdRsp
1528. * flags are used so the unsolicited response handler is able to process
  1529. * the ct command sent on the same port.
  1530. **/
  1531. static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi,
  1532. uint16_t *txxri, uint16_t * rxxri)
  1533. {
  1534. struct lpfc_bsg_event *evt;
  1535. struct lpfc_iocbq *cmdiocbq, *rspiocbq;
  1536. IOCB_t *cmd, *rsp;
  1537. struct lpfc_dmabuf *dmabuf;
  1538. struct ulp_bde64 *bpl = NULL;
  1539. struct lpfc_sli_ct_request *ctreq = NULL;
  1540. int ret_val = 0;
  1541. unsigned long flags;
  1542. *txxri = 0;
  1543. *rxxri = 0;
  1544. evt = lpfc_bsg_event_new(FC_REG_CT_EVENT, current->pid,
  1545. SLI_CT_ELX_LOOPBACK);
  1546. if (!evt)
  1547. return ENOMEM;
  1548. spin_lock_irqsave(&phba->ct_ev_lock, flags);
  1549. list_add(&evt->node, &phba->ct_ev_waiters);
  1550. lpfc_bsg_event_ref(evt);
  1551. spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
  1552. cmdiocbq = lpfc_sli_get_iocbq(phba);
  1553. rspiocbq = lpfc_sli_get_iocbq(phba);
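/* A single mbuf holds the BPL immediately followed by the loopback
 * XRI setup CT request that the BPL describes.
 */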
  1554. dmabuf = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
  1555. if (dmabuf) {
  1556. dmabuf->virt = lpfc_mbuf_alloc(phba, 0, &dmabuf->phys);
  1557. if (dmabuf->virt) {
  1558. INIT_LIST_HEAD(&dmabuf->list);
  1559. bpl = (struct ulp_bde64 *) dmabuf->virt;
  1560. memset(bpl, 0, sizeof(*bpl));
  1561. ctreq = (struct lpfc_sli_ct_request *)(bpl + 1);
  1562. bpl->addrHigh =
  1563. le32_to_cpu(putPaddrHigh(dmabuf->phys +
  1564. sizeof(*bpl)));
  1565. bpl->addrLow =
  1566. le32_to_cpu(putPaddrLow(dmabuf->phys +
  1567. sizeof(*bpl)));
  1568. bpl->tus.f.bdeFlags = 0;
  1569. bpl->tus.f.bdeSize = ELX_LOOPBACK_HEADER_SZ;
  1570. bpl->tus.w = le32_to_cpu(bpl->tus.w);
  1571. }
  1572. }
  1573. if (cmdiocbq == NULL || rspiocbq == NULL ||
  1574. dmabuf == NULL || bpl == NULL || ctreq == NULL ||
  1575. dmabuf->virt == NULL) {
  1576. ret_val = ENOMEM;
  1577. goto err_get_xri_exit;
  1578. }
  1579. cmd = &cmdiocbq->iocb;
  1580. rsp = &rspiocbq->iocb;
  1581. memset(ctreq, 0, ELX_LOOPBACK_HEADER_SZ);
  1582. ctreq->RevisionId.bits.Revision = SLI_CT_REVISION;
  1583. ctreq->RevisionId.bits.InId = 0;
  1584. ctreq->FsType = SLI_CT_ELX_LOOPBACK;
  1585. ctreq->FsSubType = 0;
  1586. ctreq->CommandResponse.bits.CmdRsp = ELX_LOOPBACK_XRI_SETUP;
  1587. ctreq->CommandResponse.bits.Size = 0;
  1588. cmd->un.xseq64.bdl.addrHigh = putPaddrHigh(dmabuf->phys);
  1589. cmd->un.xseq64.bdl.addrLow = putPaddrLow(dmabuf->phys);
  1590. cmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
  1591. cmd->un.xseq64.bdl.bdeSize = sizeof(*bpl);
  1592. cmd->un.xseq64.w5.hcsw.Fctl = LA;
  1593. cmd->un.xseq64.w5.hcsw.Dfctl = 0;
  1594. cmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
  1595. cmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;
  1596. cmd->ulpCommand = CMD_XMIT_SEQUENCE64_CR;
  1597. cmd->ulpBdeCount = 1;
  1598. cmd->ulpLe = 1;
  1599. cmd->ulpClass = CLASS3;
  1600. cmd->ulpContext = rpi;
  1601. cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
  1602. cmdiocbq->vport = phba->pport;
  1603. ret_val = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
  1604. rspiocbq,
  1605. (phba->fc_ratov * 2)
  1606. + LPFC_DRVR_TIMEOUT);
  1607. if (ret_val)
  1608. goto err_get_xri_exit;
  1609. *txxri = rsp->ulpContext;
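/* The transmit XRI comes from the solicited response; the receive XRI
 * arrives in the ELX loopback unsolicited event handled below.
 */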
  1610. evt->waiting = 1;
  1611. evt->wait_time_stamp = jiffies;
  1612. ret_val = wait_event_interruptible_timeout(
  1613. evt->wq, !list_empty(&evt->events_to_see),
  1614. ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT) * HZ);
  1615. if (list_empty(&evt->events_to_see))
  1616. ret_val = (ret_val) ? EINTR : ETIMEDOUT;
  1617. else {
  1618. ret_val = IOCB_SUCCESS;
  1619. spin_lock_irqsave(&phba->ct_ev_lock, flags);
  1620. list_move(evt->events_to_see.prev, &evt->events_to_get);
  1621. spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
  1622. *rxxri = (list_entry(evt->events_to_get.prev,
  1623. typeof(struct event_data),
  1624. node))->immed_dat;
  1625. }
  1626. evt->waiting = 0;
  1627. err_get_xri_exit:
  1628. spin_lock_irqsave(&phba->ct_ev_lock, flags);
  1629. lpfc_bsg_event_unref(evt); /* release ref */
  1630. lpfc_bsg_event_unref(evt); /* delete */
  1631. spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
  1632. if (dmabuf) {
  1633. if (dmabuf->virt)
  1634. lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
  1635. kfree(dmabuf);
  1636. }
  1637. if (cmdiocbq && (ret_val != IOCB_TIMEDOUT))
  1638. lpfc_sli_release_iocbq(phba, cmdiocbq);
  1639. if (rspiocbq)
  1640. lpfc_sli_release_iocbq(phba, rspiocbq);
  1641. return ret_val;
  1642. }
  1643. /**
  1644. * diag_cmd_data_alloc - fills in a bde struct with dma buffers
  1645. * @phba: Pointer to HBA context object
  1646. * @bpl: Pointer to 64 bit bde structure
  1647. * @size: Number of bytes to process
1648. * @nocopydata: Flag indicating the buffers need not be prepared for copy-in of user data
1649. *
1650. * This function allocates page size buffers and populates an lpfc_dmabufext.
1651. * Unless @nocopydata is set, each buffer is zeroed so user data can later be
1652. * copied into the kernel memory. The chained list of page size buffers is returned.
  1653. **/
  1654. static struct lpfc_dmabufext *
  1655. diag_cmd_data_alloc(struct lpfc_hba *phba,
  1656. struct ulp_bde64 *bpl, uint32_t size,
  1657. int nocopydata)
  1658. {
  1659. struct lpfc_dmabufext *mlist = NULL;
  1660. struct lpfc_dmabufext *dmp;
  1661. int cnt, offset = 0, i = 0;
  1662. struct pci_dev *pcidev;
  1663. pcidev = phba->pcidev;
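/* Carve the requested size into 4K chunks: allocate a DMA-coherent buffer
 * per chunk, chain it on mlist and describe it with one BDE entry.
 */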
  1664. while (size) {
  1665. /* We get chunks of 4K */
  1666. if (size > BUF_SZ_4K)
  1667. cnt = BUF_SZ_4K;
  1668. else
  1669. cnt = size;
  1670. /* allocate struct lpfc_dmabufext buffer header */
  1671. dmp = kmalloc(sizeof(struct lpfc_dmabufext), GFP_KERNEL);
  1672. if (!dmp)
  1673. goto out;
  1674. INIT_LIST_HEAD(&dmp->dma.list);
  1675. /* Queue it to a linked list */
  1676. if (mlist)
  1677. list_add_tail(&dmp->dma.list, &mlist->dma.list);
  1678. else
  1679. mlist = dmp;
  1680. /* allocate buffer */
  1681. dmp->dma.virt = dma_alloc_coherent(&pcidev->dev,
  1682. cnt,
  1683. &(dmp->dma.phys),
  1684. GFP_KERNEL);
  1685. if (!dmp->dma.virt)
  1686. goto out;
  1687. dmp->size = cnt;
  1688. if (nocopydata) {
  1689. bpl->tus.f.bdeFlags = 0;
  1690. pci_dma_sync_single_for_device(phba->pcidev,
  1691. dmp->dma.phys, LPFC_BPL_SIZE, PCI_DMA_TODEVICE);
  1692. } else {
  1693. memset((uint8_t *)dmp->dma.virt, 0, cnt);
  1694. bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
  1695. }
  1696. /* build buffer ptr list for IOCB */
  1697. bpl->addrLow = le32_to_cpu(putPaddrLow(dmp->dma.phys));
  1698. bpl->addrHigh = le32_to_cpu(putPaddrHigh(dmp->dma.phys));
  1699. bpl->tus.f.bdeSize = (ushort) cnt;
  1700. bpl->tus.w = le32_to_cpu(bpl->tus.w);
  1701. bpl++;
  1702. i++;
  1703. offset += cnt;
  1704. size -= cnt;
  1705. }
  1706. mlist->flag = i;
  1707. return mlist;
  1708. out:
  1709. diag_cmd_data_free(phba, mlist);
  1710. return NULL;
  1711. }
  1712. /**
  1713. * lpfcdiag_loop_post_rxbufs - post the receive buffers for an unsol CT cmd
  1714. * @phba: Pointer to HBA context object
  1715. * @rxxri: Receive exchange id
  1716. * @len: Number of data bytes
  1717. *
1718. * This function allocates and posts a data buffer of sufficient size to receive
1719. * an unsolicited CT command.
  1720. **/
  1721. static int lpfcdiag_loop_post_rxbufs(struct lpfc_hba *phba, uint16_t rxxri,
  1722. size_t len)
  1723. {
  1724. struct lpfc_sli *psli = &phba->sli;
  1725. struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
  1726. struct lpfc_iocbq *cmdiocbq;
  1727. IOCB_t *cmd = NULL;
  1728. struct list_head head, *curr, *next;
  1729. struct lpfc_dmabuf *rxbmp;
  1730. struct lpfc_dmabuf *dmp;
  1731. struct lpfc_dmabuf *mp[2] = {NULL, NULL};
  1732. struct ulp_bde64 *rxbpl = NULL;
  1733. uint32_t num_bde;
  1734. struct lpfc_dmabufext *rxbuffer = NULL;
  1735. int ret_val = 0;
  1736. int i = 0;
  1737. cmdiocbq = lpfc_sli_get_iocbq(phba);
  1738. rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
  1739. if (rxbmp != NULL) {
  1740. rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
  1741. if (rxbmp->virt) {
  1742. INIT_LIST_HEAD(&rxbmp->list);
  1743. rxbpl = (struct ulp_bde64 *) rxbmp->virt;
  1744. rxbuffer = diag_cmd_data_alloc(phba, rxbpl, len, 0);
  1745. }
  1746. }
  1747. if (!cmdiocbq || !rxbmp || !rxbpl || !rxbuffer) {
  1748. ret_val = ENOMEM;
  1749. goto err_post_rxbufs_exit;
  1750. }
  1751. /* Queue buffers for the receive exchange */
  1752. num_bde = (uint32_t)rxbuffer->flag;
  1753. dmp = &rxbuffer->dma;
  1754. cmd = &cmdiocbq->iocb;
  1755. i = 0;
  1756. INIT_LIST_HEAD(&head);
  1757. list_add_tail(&head, &dmp->list);
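/* Post the receive buffers on the ELS ring: one QUE_XRI64_CX iocb per
 * buffer when HBQs are enabled, otherwise up to two buffers per
 * QUE_XRI_BUF64_CX iocb.
 */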
  1758. list_for_each_safe(curr, next, &head) {
  1759. mp[i] = list_entry(curr, struct lpfc_dmabuf, list);
  1760. list_del(curr);
  1761. if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
  1762. mp[i]->buffer_tag = lpfc_sli_get_buffer_tag(phba);
  1763. cmd->un.quexri64cx.buff.bde.addrHigh =
  1764. putPaddrHigh(mp[i]->phys);
  1765. cmd->un.quexri64cx.buff.bde.addrLow =
  1766. putPaddrLow(mp[i]->phys);
  1767. cmd->un.quexri64cx.buff.bde.tus.f.bdeSize =
  1768. ((struct lpfc_dmabufext *)mp[i])->size;
  1769. cmd->un.quexri64cx.buff.buffer_tag = mp[i]->buffer_tag;
  1770. cmd->ulpCommand = CMD_QUE_XRI64_CX;
  1771. cmd->ulpPU = 0;
  1772. cmd->ulpLe = 1;
  1773. cmd->ulpBdeCount = 1;
  1774. cmd->unsli3.que_xri64cx_ext_words.ebde_count = 0;
  1775. } else {
  1776. cmd->un.cont64[i].addrHigh = putPaddrHigh(mp[i]->phys);
  1777. cmd->un.cont64[i].addrLow = putPaddrLow(mp[i]->phys);
  1778. cmd->un.cont64[i].tus.f.bdeSize =
  1779. ((struct lpfc_dmabufext *)mp[i])->size;
  1780. cmd->ulpBdeCount = ++i;
  1781. if ((--num_bde > 0) && (i < 2))
  1782. continue;
  1783. cmd->ulpCommand = CMD_QUE_XRI_BUF64_CX;
  1784. cmd->ulpLe = 1;
  1785. }
  1786. cmd->ulpClass = CLASS3;
  1787. cmd->ulpContext = rxxri;
  1788. ret_val = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
  1789. if (ret_val == IOCB_ERROR) {
  1790. diag_cmd_data_free(phba,
  1791. (struct lpfc_dmabufext *)mp[0]);
  1792. if (mp[1])
  1793. diag_cmd_data_free(phba,
  1794. (struct lpfc_dmabufext *)mp[1]);
  1795. dmp = list_entry(next, struct lpfc_dmabuf, list);
  1796. ret_val = EIO;
  1797. goto err_post_rxbufs_exit;
  1798. }
  1799. lpfc_sli_ringpostbuf_put(phba, pring, mp[0]);
  1800. if (mp[1]) {
  1801. lpfc_sli_ringpostbuf_put(phba, pring, mp[1]);
  1802. mp[1] = NULL;
  1803. }
  1804. /* The iocb was freed by lpfc_sli_issue_iocb */
  1805. cmdiocbq = lpfc_sli_get_iocbq(phba);
  1806. if (!cmdiocbq) {
  1807. dmp = list_entry(next, struct lpfc_dmabuf, list);
  1808. ret_val = EIO;
  1809. goto err_post_rxbufs_exit;
  1810. }
  1811. cmd = &cmdiocbq->iocb;
  1812. i = 0;
  1813. }
  1814. list_del(&head);
  1815. err_post_rxbufs_exit:
  1816. if (rxbmp) {
  1817. if (rxbmp->virt)
  1818. lpfc_mbuf_free(phba, rxbmp->virt, rxbmp->phys);
  1819. kfree(rxbmp);
  1820. }
  1821. if (cmdiocbq)
  1822. lpfc_sli_release_iocbq(phba, cmdiocbq);
  1823. return ret_val;
  1824. }
  1825. /**
1826. * lpfc_bsg_diag_test - with a port in loopback issues a CT cmd to itself
  1827. * @job: LPFC_BSG_VENDOR_DIAG_TEST fc_bsg_job
  1828. *
  1829. * This function receives a user data buffer to be transmitted and received on
  1830. * the same port, the link must be up and in loopback mode prior
  1831. * to being called.
  1832. * 1. A kernel buffer is allocated to copy the user data into.
  1833. * 2. The port registers with "itself".
  1834. * 3. The transmit and receive exchange ids are obtained.
  1835. * 4. The receive exchange id is posted.
  1836. * 5. A new els loopback event is created.
  1837. * 6. The command and response iocbs are allocated.
1838. * 7. The cmd iocb FsType is set to elx loopback and the CmdRsp to loopback.
  1839. *
  1840. * This function is meant to be called n times while the port is in loopback
1841. * so it is the app's responsibility to issue a reset to take the port out
  1842. * of loopback mode.
  1843. **/
  1844. static int
  1845. lpfc_bsg_diag_test(struct fc_bsg_job *job)
  1846. {
  1847. struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
  1848. struct lpfc_hba *phba = vport->phba;
  1849. struct diag_mode_test *diag_mode;
  1850. struct lpfc_bsg_event *evt;
  1851. struct event_data *evdat;
  1852. struct lpfc_sli *psli = &phba->sli;
  1853. uint32_t size;
  1854. uint32_t full_size;
  1855. size_t segment_len = 0, segment_offset = 0, current_offset = 0;
  1856. uint16_t rpi;
  1857. struct lpfc_iocbq *cmdiocbq, *rspiocbq;
  1858. IOCB_t *cmd, *rsp;
  1859. struct lpfc_sli_ct_request *ctreq;
  1860. struct lpfc_dmabuf *txbmp;
  1861. struct ulp_bde64 *txbpl = NULL;
  1862. struct lpfc_dmabufext *txbuffer = NULL;
  1863. struct list_head head;
  1864. struct lpfc_dmabuf *curr;
  1865. uint16_t txxri, rxxri;
  1866. uint32_t num_bde;
  1867. uint8_t *ptr = NULL, *rx_databuf = NULL;
  1868. int rc = 0;
  1869. unsigned long flags;
  1870. void *dataout = NULL;
  1871. uint32_t total_mem;
  1872. /* in case no data is returned return just the return code */
  1873. job->reply->reply_payload_rcv_len = 0;
  1874. if (job->request_len <
  1875. sizeof(struct fc_bsg_request) + sizeof(struct diag_mode_test)) {
  1876. lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
  1877. "2739 Received DIAG TEST request below minimum "
  1878. "size\n");
  1879. rc = -EINVAL;
  1880. goto loopback_test_exit;
  1881. }
  1882. if (job->request_payload.payload_len !=
  1883. job->reply_payload.payload_len) {
  1884. rc = -EINVAL;
  1885. goto loopback_test_exit;
  1886. }
  1887. diag_mode = (struct diag_mode_test *)
  1888. job->request->rqst_data.h_vendor.vendor_cmd;
  1889. if ((phba->link_state == LPFC_HBA_ERROR) ||
  1890. (psli->sli_flag & LPFC_BLOCK_MGMT_IO) ||
  1891. (!(psli->sli_flag & LPFC_SLI_ACTIVE))) {
  1892. rc = -EACCES;
  1893. goto loopback_test_exit;
  1894. }
  1895. if (!lpfc_is_link_up(phba) || !(phba->link_flag & LS_LOOPBACK_MODE)) {
  1896. rc = -EACCES;
  1897. goto loopback_test_exit;
  1898. }
  1899. size = job->request_payload.payload_len;
  1900. full_size = size + ELX_LOOPBACK_HEADER_SZ; /* plus the header */
  1901. if ((size == 0) || (size > 80 * BUF_SZ_4K)) {
  1902. rc = -ERANGE;
  1903. goto loopback_test_exit;
  1904. }
  1905. if (size >= BUF_SZ_4K) {
  1906. /*
  1907. * Allocate memory for ioctl data. If buffer is bigger than 64k,
  1908. * then we allocate 64k and re-use that buffer over and over to
1909. * xfer the whole block. This is because the Linux kernel has a
  1910. * problem allocating more than 120k of kernel space memory. Saw
  1911. * problem with GET_FCPTARGETMAPPING...
  1912. */
  1913. if (size <= (64 * 1024))
  1914. total_mem = size;
  1915. else
  1916. total_mem = 64 * 1024;
  1917. } else
  1918. /* Allocate memory for ioctl data */
  1919. total_mem = BUF_SZ_4K;
  1920. dataout = kmalloc(total_mem, GFP_KERNEL);
  1921. if (dataout == NULL) {
  1922. rc = -ENOMEM;
  1923. goto loopback_test_exit;
  1924. }
  1925. ptr = dataout;
  1926. ptr += ELX_LOOPBACK_HEADER_SZ;
  1927. sg_copy_to_buffer(job->request_payload.sg_list,
  1928. job->request_payload.sg_cnt,
  1929. ptr, size);
  1930. rc = lpfcdiag_loop_self_reg(phba, &rpi);
  1931. if (rc) {
  1932. rc = -ENOMEM;
  1933. goto loopback_test_exit;
  1934. }
  1935. rc = lpfcdiag_loop_get_xri(phba, rpi, &txxri, &rxxri);
  1936. if (rc) {
  1937. lpfcdiag_loop_self_unreg(phba, rpi);
  1938. rc = -ENOMEM;
  1939. goto loopback_test_exit;
  1940. }
  1941. rc = lpfcdiag_loop_post_rxbufs(phba, rxxri, full_size);
  1942. if (rc) {
  1943. lpfcdiag_loop_self_unreg(phba, rpi);
  1944. rc = -ENOMEM;
  1945. goto loopback_test_exit;
  1946. }
  1947. evt = lpfc_bsg_event_new(FC_REG_CT_EVENT, current->pid,
  1948. SLI_CT_ELX_LOOPBACK);
  1949. if (!evt) {
  1950. lpfcdiag_loop_self_unreg(phba, rpi);
  1951. rc = -ENOMEM;
  1952. goto loopback_test_exit;
  1953. }
  1954. spin_lock_irqsave(&phba->ct_ev_lock, flags);
  1955. list_add(&evt->node, &phba->ct_ev_waiters);
  1956. lpfc_bsg_event_ref(evt);
  1957. spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
  1958. cmdiocbq = lpfc_sli_get_iocbq(phba);
  1959. rspiocbq = lpfc_sli_get_iocbq(phba);
  1960. txbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
  1961. if (txbmp) {
  1962. txbmp->virt = lpfc_mbuf_alloc(phba, 0, &txbmp->phys);
  1963. if (txbmp->virt) {
  1964. INIT_LIST_HEAD(&txbmp->list);
  1965. txbpl = (struct ulp_bde64 *) txbmp->virt;
  1966. txbuffer = diag_cmd_data_alloc(phba,
  1967. txbpl, full_size, 0);
  1968. }
  1969. }
  1970. if (!cmdiocbq || !rspiocbq || !txbmp || !txbpl || !txbuffer ||
  1971. !txbmp->virt) {
  1972. rc = -ENOMEM;
  1973. goto err_loopback_test_exit;
  1974. }
  1975. cmd = &cmdiocbq->iocb;
  1976. rsp = &rspiocbq->iocb;
  1977. INIT_LIST_HEAD(&head);
  1978. list_add_tail(&head, &txbuffer->dma.list);
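/* Walk the chained transmit buffers: the first carries the ELX loopback
 * CT header followed by user data, the remaining buffers carry data only.
 */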
  1979. list_for_each_entry(curr, &head, list) {
  1980. segment_len = ((struct lpfc_dmabufext *)curr)->size;
  1981. if (current_offset == 0) {
  1982. ctreq = curr->virt;
  1983. memset(ctreq, 0, ELX_LOOPBACK_HEADER_SZ);
  1984. ctreq->RevisionId.bits.Revision = SLI_CT_REVISION;
  1985. ctreq->RevisionId.bits.InId = 0;
  1986. ctreq->FsType = SLI_CT_ELX_LOOPBACK;
  1987. ctreq->FsSubType = 0;
  1988. ctreq->CommandResponse.bits.CmdRsp = ELX_LOOPBACK_DATA;
  1989. ctreq->CommandResponse.bits.Size = size;
  1990. segment_offset = ELX_LOOPBACK_HEADER_SZ;
  1991. } else
  1992. segment_offset = 0;
  1993. BUG_ON(segment_offset >= segment_len);
  1994. memcpy(curr->virt + segment_offset,
  1995. ptr + current_offset,
  1996. segment_len - segment_offset);
  1997. current_offset += segment_len - segment_offset;
  1998. BUG_ON(current_offset > size);
  1999. }
  2000. list_del(&head);
  2001. /* Build the XMIT_SEQUENCE iocb */
  2002. num_bde = (uint32_t)txbuffer->flag;
  2003. cmd->un.xseq64.bdl.addrHigh = putPaddrHigh(txbmp->phys);
  2004. cmd->un.xseq64.bdl.addrLow = putPaddrLow(txbmp->phys);
  2005. cmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
  2006. cmd->un.xseq64.bdl.bdeSize = (num_bde * sizeof(struct ulp_bde64));
  2007. cmd->un.xseq64.w5.hcsw.Fctl = (LS | LA);
  2008. cmd->un.xseq64.w5.hcsw.Dfctl = 0;
  2009. cmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
  2010. cmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;
  2011. cmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX;
  2012. cmd->ulpBdeCount = 1;
  2013. cmd->ulpLe = 1;
  2014. cmd->ulpClass = CLASS3;
  2015. cmd->ulpContext = txxri;
  2016. cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
  2017. cmdiocbq->vport = phba->pport;
  2018. rc = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq, rspiocbq,
  2019. (phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT);
  2020. if ((rc != IOCB_SUCCESS) || (rsp->ulpStatus != IOCB_SUCCESS)) {
  2021. rc = -EIO;
  2022. goto err_loopback_test_exit;
  2023. }
  2024. evt->waiting = 1;
  2025. rc = wait_event_interruptible_timeout(
  2026. evt->wq, !list_empty(&evt->events_to_see),
  2027. ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT) * HZ);
  2028. evt->waiting = 0;
  2029. if (list_empty(&evt->events_to_see))
  2030. rc = (rc) ? -EINTR : -ETIMEDOUT;
  2031. else {
  2032. spin_lock_irqsave(&phba->ct_ev_lock, flags);
  2033. list_move(evt->events_to_see.prev, &evt->events_to_get);
  2034. evdat = list_entry(evt->events_to_get.prev,
  2035. typeof(*evdat), node);
  2036. spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
  2037. rx_databuf = evdat->data;
  2038. if (evdat->len != full_size) {
  2039. lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
  2040. "1603 Loopback test did not receive expected "
  2041. "data length. actual length 0x%x expected "
  2042. "length 0x%x\n",
  2043. evdat->len, full_size);
  2044. rc = -EIO;
  2045. } else if (rx_databuf == NULL)
  2046. rc = -EIO;
  2047. else {
  2048. rc = IOCB_SUCCESS;
  2049. /* skip over elx loopback header */
  2050. rx_databuf += ELX_LOOPBACK_HEADER_SZ;
  2051. job->reply->reply_payload_rcv_len =
  2052. sg_copy_from_buffer(job->reply_payload.sg_list,
  2053. job->reply_payload.sg_cnt,
  2054. rx_databuf, size);
  2055. job->reply->reply_payload_rcv_len = size;
  2056. }
  2057. }
  2058. err_loopback_test_exit:
  2059. lpfcdiag_loop_self_unreg(phba, rpi);
  2060. spin_lock_irqsave(&phba->ct_ev_lock, flags);
  2061. lpfc_bsg_event_unref(evt); /* release ref */
  2062. lpfc_bsg_event_unref(evt); /* delete */
  2063. spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
  2064. if (cmdiocbq != NULL)
  2065. lpfc_sli_release_iocbq(phba, cmdiocbq);
  2066. if (rspiocbq != NULL)
  2067. lpfc_sli_release_iocbq(phba, rspiocbq);
  2068. if (txbmp != NULL) {
  2069. if (txbpl != NULL) {
  2070. if (txbuffer != NULL)
  2071. diag_cmd_data_free(phba, txbuffer);
  2072. lpfc_mbuf_free(phba, txbmp->virt, txbmp->phys);
  2073. }
  2074. kfree(txbmp);
  2075. }
  2076. loopback_test_exit:
  2077. kfree(dataout);
  2078. /* make error code available to userspace */
  2079. job->reply->result = rc;
  2080. job->dd_data = NULL;
  2081. /* complete the job back to userspace if no error */
  2082. if (rc == 0)
  2083. job->job_done(job);
  2084. return rc;
  2085. }
  2086. /**
  2087. * lpfc_bsg_get_dfc_rev - process a GET_DFC_REV bsg vendor command
  2088. * @job: GET_DFC_REV fc_bsg_job
  2089. **/
  2090. static int
  2091. lpfc_bsg_get_dfc_rev(struct fc_bsg_job *job)
  2092. {
  2093. struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
  2094. struct lpfc_hba *phba = vport->phba;
  2095. struct get_mgmt_rev *event_req;
  2096. struct get_mgmt_rev_reply *event_reply;
  2097. int rc = 0;
  2098. if (job->request_len <
  2099. sizeof(struct fc_bsg_request) + sizeof(struct get_mgmt_rev)) {
  2100. lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
  2101. "2740 Received GET_DFC_REV request below "
  2102. "minimum size\n");
  2103. rc = -EINVAL;
  2104. goto job_error;
  2105. }
  2106. event_req = (struct get_mgmt_rev *)
  2107. job->request->rqst_data.h_vendor.vendor_cmd;
  2108. event_reply = (struct get_mgmt_rev_reply *)
  2109. job->reply->reply_data.vendor_reply.vendor_rsp;
  2110. if (job->reply_len <
  2111. sizeof(struct fc_bsg_request) + sizeof(struct get_mgmt_rev_reply)) {
  2112. lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
  2113. "2741 Received GET_DFC_REV reply below "
  2114. "minimum size\n");
  2115. rc = -EINVAL;
  2116. goto job_error;
  2117. }
  2118. event_reply->info.a_Major = MANAGEMENT_MAJOR_REV;
  2119. event_reply->info.a_Minor = MANAGEMENT_MINOR_REV;
  2120. job_error:
  2121. job->reply->result = rc;
  2122. if (rc == 0)
  2123. job->job_done(job);
  2124. return rc;
  2125. }
  2126. /**
  2127. * lpfc_bsg_wake_mbox_wait - lpfc_bsg_issue_mbox mbox completion handler
  2128. * @phba: Pointer to HBA context object.
  2129. * @pmboxq: Pointer to mailbox command.
  2130. *
2131. * This is the completion handler for mailbox commands issued from the
2132. * lpfc_bsg_issue_mbox function. It is called by the mailbox event handler
2133. * function with no lock held. It copies the mailbox response, plus any
2134. * extended or DMA data, into the bsg reply payload and then completes the
2135. * job back to userspace.
  2136. **/
  2137. void
  2138. lpfc_bsg_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
  2139. {
  2140. struct bsg_job_data *dd_data;
  2141. struct fc_bsg_job *job;
  2142. uint32_t size;
  2143. unsigned long flags;
  2144. uint8_t *to;
  2145. uint8_t *from;
  2146. spin_lock_irqsave(&phba->ct_ev_lock, flags);
  2147. dd_data = pmboxq->context1;
  2148. /* job already timed out? */
  2149. if (!dd_data) {
  2150. spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
  2151. return;
  2152. }
  2153. /* build the outgoing buffer to do an sg copy
  2154. * the format is the response mailbox followed by any extended
  2155. * mailbox data
  2156. */
  2157. from = (uint8_t *)&pmboxq->u.mb;
  2158. to = (uint8_t *)dd_data->context_un.mbox.mb;
  2159. memcpy(to, from, sizeof(MAILBOX_t));
  2160. if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS) {
  2161. /* copy the extended data if any, count is in words */
  2162. if (dd_data->context_un.mbox.outExtWLen) {
  2163. from = (uint8_t *)dd_data->context_un.mbox.ext;
  2164. to += sizeof(MAILBOX_t);
  2165. size = dd_data->context_un.mbox.outExtWLen *
  2166. sizeof(uint32_t);
  2167. memcpy(to, from, size);
  2168. } else if (pmboxq->u.mb.mbxCommand == MBX_RUN_BIU_DIAG64) {
  2169. from = (uint8_t *)dd_data->context_un.mbox.
  2170. dmp->dma.virt;
  2171. to += sizeof(MAILBOX_t);
  2172. size = dd_data->context_un.mbox.dmp->size;
  2173. memcpy(to, from, size);
  2174. } else if ((phba->sli_rev == LPFC_SLI_REV4) &&
  2175. (pmboxq->u.mb.mbxCommand == MBX_DUMP_MEMORY)) {
  2176. from = (uint8_t *)dd_data->context_un.mbox.dmp->dma.
  2177. virt;
  2178. to += sizeof(MAILBOX_t);
  2179. size = pmboxq->u.mb.un.varWords[5];
  2180. memcpy(to, from, size);
  2181. } else if (pmboxq->u.mb.mbxCommand == MBX_READ_EVENT_LOG) {
  2182. from = (uint8_t *)dd_data->context_un.
  2183. mbox.dmp->dma.virt;
  2184. to += sizeof(MAILBOX_t);
  2185. size = dd_data->context_un.mbox.dmp->size;
  2186. memcpy(to, from, size);
  2187. }
  2188. }
  2189. from = (uint8_t *)dd_data->context_un.mbox.mb;
  2190. job = dd_data->context_un.mbox.set_job;
  2191. size = job->reply_payload.payload_len;
  2192. job->reply->reply_payload_rcv_len =
  2193. sg_copy_from_buffer(job->reply_payload.sg_list,
  2194. job->reply_payload.sg_cnt,
  2195. from, size);
  2196. job->reply->result = 0;
  2197. dd_data->context_un.mbox.set_job = NULL;
  2198. job->dd_data = NULL;
  2199. job->job_done(job);
  2200. /* need to hold the lock until we call job done to hold off
  2201. * the timeout handler returning to the midlayer while
2202. * we are still processing the job
  2203. */
  2204. spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
  2205. kfree(dd_data->context_un.mbox.mb);
  2206. mempool_free(dd_data->context_un.mbox.pmboxq, phba->mbox_mem_pool);
  2207. kfree(dd_data->context_un.mbox.ext);
  2208. if (dd_data->context_un.mbox.dmp) {
  2209. dma_free_coherent(&phba->pcidev->dev,
  2210. dd_data->context_un.mbox.dmp->size,
  2211. dd_data->context_un.mbox.dmp->dma.virt,
  2212. dd_data->context_un.mbox.dmp->dma.phys);
  2213. kfree(dd_data->context_un.mbox.dmp);
  2214. }
  2215. if (dd_data->context_un.mbox.rxbmp) {
  2216. lpfc_mbuf_free(phba, dd_data->context_un.mbox.rxbmp->virt,
  2217. dd_data->context_un.mbox.rxbmp->phys);
  2218. kfree(dd_data->context_un.mbox.rxbmp);
  2219. }
  2220. kfree(dd_data);
  2221. return;
  2222. }
  2223. /**
  2224. * lpfc_bsg_check_cmd_access - test for a supported mailbox command
  2225. * @phba: Pointer to HBA context object.
  2226. * @mb: Pointer to a mailbox object.
  2227. * @vport: Pointer to a vport object.
  2228. *
  2229. * Some commands require the port to be offline, some may not be called from
  2230. * the application.
  2231. **/
  2232. static int lpfc_bsg_check_cmd_access(struct lpfc_hba *phba,
  2233. MAILBOX_t *mb, struct lpfc_vport *vport)
  2234. {
  2235. /* return negative error values for bsg job */
  2236. switch (mb->mbxCommand) {
  2237. /* Offline only */
  2238. case MBX_INIT_LINK:
  2239. case MBX_DOWN_LINK:
  2240. case MBX_CONFIG_LINK:
  2241. case MBX_CONFIG_RING:
  2242. case MBX_RESET_RING:
  2243. case MBX_UNREG_LOGIN:
  2244. case MBX_CLEAR_LA:
  2245. case MBX_DUMP_CONTEXT:
  2246. case MBX_RUN_DIAGS:
  2247. case MBX_RESTART:
  2248. case MBX_SET_MASK:
  2249. if (!(vport->fc_flag & FC_OFFLINE_MODE)) {
  2250. lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
  2251. "2743 Command 0x%x is illegal in on-line "
  2252. "state\n",
  2253. mb->mbxCommand);
  2254. return -EPERM;
  2255. }
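/* No break here: once the offline check passes, these commands fall
 * through to the always-permitted list below.
 */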
  2256. case MBX_WRITE_NV:
  2257. case MBX_WRITE_VPARMS:
  2258. case MBX_LOAD_SM:
  2259. case MBX_READ_NV:
  2260. case MBX_READ_CONFIG:
  2261. case MBX_READ_RCONFIG:
  2262. case MBX_READ_STATUS:
  2263. case MBX_READ_XRI:
  2264. case MBX_READ_REV:
  2265. case MBX_READ_LNK_STAT:
  2266. case MBX_DUMP_MEMORY:
  2267. case MBX_DOWN_LOAD:
  2268. case MBX_UPDATE_CFG:
  2269. case MBX_KILL_BOARD:
  2270. case MBX_LOAD_AREA:
  2271. case MBX_LOAD_EXP_ROM:
  2272. case MBX_BEACON:
  2273. case MBX_DEL_LD_ENTRY:
  2274. case MBX_SET_DEBUG:
  2275. case MBX_WRITE_WWN:
  2276. case MBX_SLI4_CONFIG:
  2277. case MBX_READ_EVENT_LOG:
  2278. case MBX_READ_EVENT_LOG_STATUS:
  2279. case MBX_WRITE_EVENT_LOG:
  2280. case MBX_PORT_CAPABILITIES:
  2281. case MBX_PORT_IOV_CONTROL:
  2282. case MBX_RUN_BIU_DIAG64:
  2283. break;
  2284. case MBX_SET_VARIABLE:
  2285. lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
  2286. "1226 mbox: set_variable 0x%x, 0x%x\n",
  2287. mb->un.varWords[0],
  2288. mb->un.varWords[1]);
  2289. if ((mb->un.varWords[0] == SETVAR_MLOMNT)
  2290. && (mb->un.varWords[1] == 1)) {
  2291. phba->wait_4_mlo_maint_flg = 1;
  2292. } else if (mb->un.varWords[0] == SETVAR_MLORST) {
  2293. phba->link_flag &= ~LS_LOOPBACK_MODE;
  2294. phba->fc_topology = TOPOLOGY_PT_PT;
  2295. }
  2296. break;
  2297. case MBX_READ_SPARM64:
  2298. case MBX_READ_LA:
  2299. case MBX_READ_LA64:
  2300. case MBX_REG_LOGIN:
  2301. case MBX_REG_LOGIN64:
  2302. case MBX_CONFIG_PORT:
  2303. case MBX_RUN_BIU_DIAG:
  2304. default:
  2305. lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
  2306. "2742 Unknown Command 0x%x\n",
  2307. mb->mbxCommand);
  2308. return -EPERM;
  2309. }
  2310. return 0; /* ok */
  2311. }
  2312. /**
  2313. * lpfc_bsg_issue_mbox - issues a mailbox command on behalf of an app
  2314. * @phba: Pointer to HBA context object.
2315. * @job: Pointer to the fc_bsg_job carrying the mailbox request.
  2316. * @vport: Pointer to a vport object.
  2317. *
  2318. * Allocate a tracking object, mailbox command memory, get a mailbox
  2319. * from the mailbox pool, copy the caller mailbox command.
  2320. *
  2321. * If offline and the sli is active we need to poll for the command (port is
2322. * being reset) and complete the job, otherwise issue the mailbox command and
  2323. * let our completion handler finish the command.
  2324. **/
2325. static int
  2326. lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
  2327. struct lpfc_vport *vport)
  2328. {
  2329. LPFC_MBOXQ_t *pmboxq = NULL; /* internal mailbox queue */
  2330. MAILBOX_t *pmb; /* shortcut to the pmboxq mailbox */
  2331. /* a 4k buffer to hold the mb and extended data from/to the bsg */
  2332. MAILBOX_t *mb = NULL;
  2333. struct bsg_job_data *dd_data = NULL; /* bsg data tracking structure */
  2334. uint32_t size;
  2335. struct lpfc_dmabuf *rxbmp = NULL; /* for biu diag */
  2336. struct lpfc_dmabufext *dmp = NULL; /* for biu diag */
  2337. struct ulp_bde64 *rxbpl = NULL;
  2338. struct dfc_mbox_req *mbox_req = (struct dfc_mbox_req *)
  2339. job->request->rqst_data.h_vendor.vendor_cmd;
  2340. uint8_t *ext = NULL;
  2341. int rc = 0;
  2342. uint8_t *from;
  2343. /* in case no data is transferred */
  2344. job->reply->reply_payload_rcv_len = 0;
  2345. /* check if requested extended data lengths are valid */
  2346. if ((mbox_req->inExtWLen > MAILBOX_EXT_SIZE) ||
  2347. (mbox_req->outExtWLen > MAILBOX_EXT_SIZE)) {
  2348. rc = -ERANGE;
  2349. goto job_done;
  2350. }
  2351. /* allocate our bsg tracking structure */
  2352. dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
  2353. if (!dd_data) {
  2354. lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
  2355. "2727 Failed allocation of dd_data\n");
  2356. rc = -ENOMEM;
  2357. goto job_done;
  2358. }
  2359. mb = kzalloc(BSG_MBOX_SIZE, GFP_KERNEL);
  2360. if (!mb) {
  2361. rc = -ENOMEM;
  2362. goto job_done;
  2363. }
  2364. pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
  2365. if (!pmboxq) {
  2366. rc = -ENOMEM;
  2367. goto job_done;
  2368. }
  2369. memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
  2370. size = job->request_payload.payload_len;
  2371. sg_copy_to_buffer(job->request_payload.sg_list,
  2372. job->request_payload.sg_cnt,
  2373. mb, size);
  2374. rc = lpfc_bsg_check_cmd_access(phba, mb, vport);
  2375. if (rc != 0)
  2376. goto job_done; /* must be negative */
  2377. pmb = &pmboxq->u.mb;
  2378. memcpy(pmb, mb, sizeof(*pmb));
  2379. pmb->mbxOwner = OWN_HOST;
  2380. pmboxq->vport = vport;
  2381. /* If HBA encountered an error attention, allow only DUMP
  2382. * or RESTART mailbox commands until the HBA is restarted.
  2383. */
  2384. if (phba->pport->stopped &&
  2385. pmb->mbxCommand != MBX_DUMP_MEMORY &&
  2386. pmb->mbxCommand != MBX_RESTART &&
  2387. pmb->mbxCommand != MBX_WRITE_VPARMS &&
  2388. pmb->mbxCommand != MBX_WRITE_WWN)
  2389. lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
  2390. "2797 mbox: Issued mailbox cmd "
  2391. "0x%x while in stopped state.\n",
  2392. pmb->mbxCommand);
  2393. /* Don't allow mailbox commands to be sent when blocked
  2394. * or when in the middle of discovery
  2395. */
  2396. if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) {
  2397. rc = -EAGAIN;
  2398. goto job_done;
  2399. }
  2400. /* extended mailbox commands will need an extended buffer */
  2401. if (mbox_req->inExtWLen || mbox_req->outExtWLen) {
  2402. ext = kzalloc(MAILBOX_EXT_SIZE, GFP_KERNEL);
  2403. if (!ext) {
  2404. rc = -ENOMEM;
  2405. goto job_done;
  2406. }
  2407. /* any data for the device? */
  2408. if (mbox_req->inExtWLen) {
  2409. from = (uint8_t *)mb;
  2410. from += sizeof(MAILBOX_t);
  2411. memcpy((uint8_t *)ext, from,
  2412. mbox_req->inExtWLen * sizeof(uint32_t));
  2413. }
2414. pmboxq->context2 = ext;
2415. pmboxq->in_ext_byte_len =
2416. mbox_req->inExtWLen * sizeof(uint32_t);
2417. pmboxq->out_ext_byte_len =
2418. mbox_req->outExtWLen * sizeof(uint32_t);
2419. pmboxq->mbox_offset_word = mbox_req->mbOffset;
  2429. }
  2430. /* biu diag will need a kernel buffer to transfer the data
  2431. * allocate our own buffer and setup the mailbox command to
  2432. * use ours
  2433. */
  2434. if (pmb->mbxCommand == MBX_RUN_BIU_DIAG64) {
  2435. uint32_t transmit_length = pmb->un.varWords[1];
  2436. uint32_t receive_length = pmb->un.varWords[4];
  2437. /* transmit length cannot be greater than receive length or
  2438. * mailbox extension size
  2439. */
  2440. if ((transmit_length > receive_length) ||
  2441. (transmit_length > MAILBOX_EXT_SIZE)) {
  2442. rc = -ERANGE;
  2443. goto job_done;
  2444. }
  2445. rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
  2446. if (!rxbmp) {
  2447. rc = -ENOMEM;
  2448. goto job_done;
  2449. }
  2450. rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
  2451. if (!rxbmp->virt) {
  2452. rc = -ENOMEM;
  2453. goto job_done;
  2454. }
  2455. INIT_LIST_HEAD(&rxbmp->list);
  2456. rxbpl = (struct ulp_bde64 *) rxbmp->virt;
  2457. dmp = diag_cmd_data_alloc(phba, rxbpl, transmit_length, 0);
  2458. if (!dmp) {
  2459. rc = -ENOMEM;
  2460. goto job_done;
  2461. }
  2462. INIT_LIST_HEAD(&dmp->dma.list);
  2463. pmb->un.varBIUdiag.un.s2.xmit_bde64.addrHigh =
  2464. putPaddrHigh(dmp->dma.phys);
  2465. pmb->un.varBIUdiag.un.s2.xmit_bde64.addrLow =
  2466. putPaddrLow(dmp->dma.phys);
  2467. pmb->un.varBIUdiag.un.s2.rcv_bde64.addrHigh =
  2468. putPaddrHigh(dmp->dma.phys +
  2469. pmb->un.varBIUdiag.un.s2.
  2470. xmit_bde64.tus.f.bdeSize);
  2471. pmb->un.varBIUdiag.un.s2.rcv_bde64.addrLow =
  2472. putPaddrLow(dmp->dma.phys +
  2473. pmb->un.varBIUdiag.un.s2.
  2474. xmit_bde64.tus.f.bdeSize);
  2475. /* copy the transmit data found in the mailbox extension area */
  2476. from = (uint8_t *)mb;
  2477. from += sizeof(MAILBOX_t);
  2478. memcpy((uint8_t *)dmp->dma.virt, from, transmit_length);
  2479. } else if (pmb->mbxCommand == MBX_READ_EVENT_LOG) {
  2480. struct READ_EVENT_LOG_VAR *rdEventLog =
2481. &pmb->un.varRdEventLog;
  2482. uint32_t receive_length = rdEventLog->rcv_bde64.tus.f.bdeSize;
  2483. uint32_t mode = bf_get(lpfc_event_log, rdEventLog);
  2484. /* receive length cannot be greater than mailbox
  2485. * extension size
  2486. */
  2487. if (receive_length > MAILBOX_EXT_SIZE) {
  2488. rc = -ERANGE;
  2489. goto job_done;
  2490. }
  2491. /* mode zero uses a bde like biu diags command */
  2492. if (mode == 0) {
  2493. /* rebuild the command for sli4 using our own buffers
  2494. * like we do for biu diags
  2495. */
  2496. rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
  2497. if (!rxbmp) {
  2498. rc = -ENOMEM;
  2499. goto job_done;
  2500. }
  2501. rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
  2502. rxbpl = (struct ulp_bde64 *) rxbmp->virt;
  2503. if (rxbpl) {
  2504. INIT_LIST_HEAD(&rxbmp->list);
  2505. dmp = diag_cmd_data_alloc(phba, rxbpl,
  2506. receive_length, 0);
  2507. }
  2508. if (!dmp) {
  2509. rc = -ENOMEM;
  2510. goto job_done;
  2511. }
  2512. INIT_LIST_HEAD(&dmp->dma.list);
  2513. pmb->un.varWords[3] = putPaddrLow(dmp->dma.phys);
  2514. pmb->un.varWords[4] = putPaddrHigh(dmp->dma.phys);
  2515. }
  2516. } else if (phba->sli_rev == LPFC_SLI_REV4) {
  2517. if (pmb->mbxCommand == MBX_DUMP_MEMORY) {
  2518. /* rebuild the command for sli4 using our own buffers
  2519. * like we do for biu diags
  2520. */
  2521. uint32_t receive_length = pmb->un.varWords[2];
  2522. /* receive length cannot be greater than mailbox
  2523. * extension size
  2524. */
  2525. if ((receive_length == 0) ||
  2526. (receive_length > MAILBOX_EXT_SIZE)) {
  2527. rc = -ERANGE;
  2528. goto job_done;
  2529. }
  2530. rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
  2531. if (!rxbmp) {
  2532. rc = -ENOMEM;
  2533. goto job_done;
  2534. }
  2535. rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
  2536. if (!rxbmp->virt) {
  2537. rc = -ENOMEM;
  2538. goto job_done;
  2539. }
  2540. INIT_LIST_HEAD(&rxbmp->list);
  2541. rxbpl = (struct ulp_bde64 *) rxbmp->virt;
  2542. dmp = diag_cmd_data_alloc(phba, rxbpl, receive_length,
  2543. 0);
  2544. if (!dmp) {
  2545. rc = -ENOMEM;
  2546. goto job_done;
  2547. }
  2548. INIT_LIST_HEAD(&dmp->dma.list);
  2549. pmb->un.varWords[3] = putPaddrLow(dmp->dma.phys);
  2550. pmb->un.varWords[4] = putPaddrHigh(dmp->dma.phys);
  2551. } else if ((pmb->mbxCommand == MBX_UPDATE_CFG) &&
  2552. pmb->un.varUpdateCfg.co) {
  2553. struct ulp_bde64 *bde =
  2554. (struct ulp_bde64 *)&pmb->un.varWords[4];
  2555. /* bde size cannot be greater than mailbox ext size */
  2556. if (bde->tus.f.bdeSize > MAILBOX_EXT_SIZE) {
  2557. rc = -ERANGE;
  2558. goto job_done;
  2559. }
  2560. rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
  2561. if (!rxbmp) {
  2562. rc = -ENOMEM;
  2563. goto job_done;
  2564. }
  2565. rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
  2566. if (!rxbmp->virt) {
  2567. rc = -ENOMEM;
  2568. goto job_done;
  2569. }
  2570. INIT_LIST_HEAD(&rxbmp->list);
  2571. rxbpl = (struct ulp_bde64 *) rxbmp->virt;
  2572. dmp = diag_cmd_data_alloc(phba, rxbpl,
  2573. bde->tus.f.bdeSize, 0);
  2574. if (!dmp) {
  2575. rc = -ENOMEM;
  2576. goto job_done;
  2577. }
  2578. INIT_LIST_HEAD(&dmp->dma.list);
  2579. bde->addrHigh = putPaddrHigh(dmp->dma.phys);
  2580. bde->addrLow = putPaddrLow(dmp->dma.phys);
  2581. /* copy the transmit data found in the mailbox
  2582. * extension area
  2583. */
  2584. from = (uint8_t *)mb;
  2585. from += sizeof(MAILBOX_t);
  2586. memcpy((uint8_t *)dmp->dma.virt, from,
  2587. bde->tus.f.bdeSize);
  2588. }
  2589. }
  2590. dd_data->context_un.mbox.rxbmp = rxbmp;
  2591. dd_data->context_un.mbox.dmp = dmp;
  2592. /* setup wake call as IOCB callback */
  2593. pmboxq->mbox_cmpl = lpfc_bsg_wake_mbox_wait;
  2594. /* setup context field to pass wait_queue pointer to wake function */
  2595. pmboxq->context1 = dd_data;
  2596. dd_data->type = TYPE_MBOX;
  2597. dd_data->context_un.mbox.pmboxq = pmboxq;
  2598. dd_data->context_un.mbox.mb = mb;
  2599. dd_data->context_un.mbox.set_job = job;
  2600. dd_data->context_un.mbox.ext = ext;
  2601. dd_data->context_un.mbox.mbOffset = mbox_req->mbOffset;
  2602. dd_data->context_un.mbox.inExtWLen = mbox_req->inExtWLen;
  2603. dd_data->context_un.mbox.outExtWLen = mbox_req->outExtWLen;
  2604. job->dd_data = dd_data;
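/* If the port is offline or SLI is not active, poll for the mailbox
 * completion inline and finish the job here; otherwise issue it
 * asynchronously and let lpfc_bsg_wake_mbox_wait complete the job.
 */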
  2605. if ((vport->fc_flag & FC_OFFLINE_MODE) ||
  2606. (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE))) {
  2607. rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
  2608. if (rc != MBX_SUCCESS) {
  2609. rc = (rc == MBX_TIMEOUT) ? -ETIME : -ENODEV;
  2610. goto job_done;
  2611. }
  2612. /* job finished, copy the data */
  2613. memcpy(mb, pmb, sizeof(*pmb));
  2614. job->reply->reply_payload_rcv_len =
  2615. sg_copy_from_buffer(job->reply_payload.sg_list,
  2616. job->reply_payload.sg_cnt,
  2617. mb, size);
  2618. /* not waiting mbox already done */
  2619. rc = 0;
  2620. goto job_done;
  2621. }
  2622. rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
  2623. if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY))
  2624. return 1; /* job started */
  2625. job_done:
  2626. /* common exit for error or job completed inline */
  2627. kfree(mb);
  2628. if (pmboxq)
  2629. mempool_free(pmboxq, phba->mbox_mem_pool);
  2630. kfree(ext);
  2631. if (dmp) {
  2632. dma_free_coherent(&phba->pcidev->dev,
  2633. dmp->size, dmp->dma.virt,
  2634. dmp->dma.phys);
  2635. kfree(dmp);
  2636. }
  2637. if (rxbmp) {
  2638. lpfc_mbuf_free(phba, rxbmp->virt, rxbmp->phys);
  2639. kfree(rxbmp);
  2640. }
  2641. kfree(dd_data);
  2642. return rc;
  2643. }
  2644. /**
  2645. * lpfc_bsg_mbox_cmd - process an fc bsg LPFC_BSG_VENDOR_MBOX command
  2646. * @job: MBOX fc_bsg_job for LPFC_BSG_VENDOR_MBOX.
  2647. **/
  2648. static int
  2649. lpfc_bsg_mbox_cmd(struct fc_bsg_job *job)
  2650. {
  2651. struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
  2652. struct lpfc_hba *phba = vport->phba;
  2653. int rc = 0;
  2654. /* in case no data is transferred */
  2655. job->reply->reply_payload_rcv_len = 0;
  2656. if (job->request_len <
  2657. sizeof(struct fc_bsg_request) + sizeof(struct dfc_mbox_req)) {
  2658. lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
  2659. "2737 Received MBOX_REQ request below "
  2660. "minimum size\n");
  2661. rc = -EINVAL;
  2662. goto job_error;
  2663. }
  2664. if (job->request_payload.payload_len != BSG_MBOX_SIZE) {
  2665. rc = -EINVAL;
  2666. goto job_error;
  2667. }
  2668. if (job->reply_payload.payload_len != BSG_MBOX_SIZE) {
  2669. rc = -EINVAL;
  2670. goto job_error;
  2671. }
  2672. if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) {
  2673. rc = -EAGAIN;
  2674. goto job_error;
  2675. }
  2676. rc = lpfc_bsg_issue_mbox(phba, job, vport);
  2677. job_error:
  2678. if (rc == 0) {
  2679. /* job done */
  2680. job->reply->result = 0;
  2681. job->dd_data = NULL;
  2682. job->job_done(job);
  2683. } else if (rc == 1)
  2684. /* job submitted, will complete later*/
  2685. rc = 0; /* return zero, no error */
  2686. else {
  2687. /* some error occurred */
  2688. job->reply->result = rc;
  2689. job->dd_data = NULL;
  2690. }
  2691. return rc;
  2692. }
  2693. /**
  2694. * lpfc_bsg_menlo_cmd_cmp - lpfc_menlo_cmd completion handler
  2695. * @phba: Pointer to HBA context object.
  2696. * @cmdiocbq: Pointer to command iocb.
  2697. * @rspiocbq: Pointer to response iocb.
  2698. *
  2699. * This function is the completion handler for iocbs issued using
  2700. * lpfc_menlo_cmd function. This function is called by the
  2701. * ring event handler function without any lock held. This function
  2702. * can be called from both worker thread context and interrupt
  2703. * context. This function also can be called from another thread which
  2704. * cleans up the SLI layer objects.
2705. * This function copies the contents of the response iocb into the
2706. * response iocb memory object referenced by the command iocb, unmaps the
2707. * request and reply payloads, and then completes the bsg job back to
2708. * userspace with the menlo exchange id and status.
  2709. **/
static void
lpfc_bsg_menlo_cmd_cmp(struct lpfc_hba *phba,
		       struct lpfc_iocbq *cmdiocbq,
		       struct lpfc_iocbq *rspiocbq)
{
	struct bsg_job_data *dd_data;
	struct fc_bsg_job *job;
	IOCB_t *rsp;
	struct lpfc_dmabuf *bmp;
	struct lpfc_bsg_menlo *menlo;
	unsigned long flags;
	struct menlo_response *menlo_resp;
	int rc = 0;

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	dd_data = cmdiocbq->context1;
	if (!dd_data) {
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		return;
	}

	menlo = &dd_data->context_un.menlo;
	job = menlo->set_job;
	job->dd_data = NULL; /* so timeout handler does not reply */
	/* Interrupts are already disabled by the ct_ev_lock acquisition
	 * above, so take hbalock without a second irqsave; saving into the
	 * same 'flags' again would clobber the state restored when
	 * ct_ev_lock is released.
	 */
	spin_lock(&phba->hbalock);
	cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
	if (cmdiocbq->context2 && rspiocbq)
		memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
		       &rspiocbq->iocb, sizeof(IOCB_t));
	spin_unlock(&phba->hbalock);
	bmp = menlo->bmp;
	rspiocbq = menlo->rspiocbq;
	rsp = &rspiocbq->iocb;

	pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
		     job->request_payload.sg_cnt, DMA_TO_DEVICE);
	pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
		     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);

	/* always return the xri, this would be used in the case
	 * of a menlo download to allow the data to be sent as a continuation
	 * of the exchange.
	 */
	menlo_resp = (struct menlo_response *)
		job->reply->reply_data.vendor_reply.vendor_rsp;
	menlo_resp->xri = rsp->ulpContext;
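	/* Map the IOCB completion status to an error code for userspace:
	 * known local-reject reasons get specific errors, any other failure
	 * is reported as -EACCES; on success the received length is
	 * returned instead.
	 */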
	if (rsp->ulpStatus) {
		if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
			switch (rsp->un.ulpWord[4] & 0xff) {
			case IOERR_SEQUENCE_TIMEOUT:
				rc = -ETIMEDOUT;
				break;
			case IOERR_INVALID_RPI:
				rc = -EFAULT;
				break;
			default:
				rc = -EACCES;
				break;
			}
		} else
			rc = -EACCES;
	} else
		job->reply->reply_payload_rcv_len =
			rsp->un.genreq64.bdl.bdeSize;

	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
	lpfc_sli_release_iocbq(phba, rspiocbq);
	lpfc_sli_release_iocbq(phba, cmdiocbq);
	kfree(bmp);
	kfree(dd_data);

	/* make error code available to userspace */
	job->reply->result = rc;
	/* complete the job back to userspace */
	job->job_done(job);
	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
	return;
}
/**
 * lpfc_menlo_cmd - send an ioctl for menlo hardware
 * @job: fc_bsg_job to handle
 *
 * This function issues a gen request 64 CR ioctl for all menlo cmd requests;
 * all the command completions will return the xri for the command.
 * For menlo data requests a gen request 64 CX is used to continue the
 * exchange supplied in the menlo request header xri field.
 **/
static int
lpfc_menlo_cmd(struct fc_bsg_job *job)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *cmdiocbq, *rspiocbq;
	IOCB_t *cmd, *rsp;
	int rc = 0;
	struct menlo_command *menlo_cmd;
	struct menlo_response *menlo_resp;
	struct lpfc_dmabuf *bmp = NULL;
	int request_nseg;
	int reply_nseg;
	struct scatterlist *sgel = NULL;
	int numbde;
	dma_addr_t busaddr;
	struct bsg_job_data *dd_data;
	struct ulp_bde64 *bpl = NULL;

	/* in case no data is returned return just the return code */
	job->reply->reply_payload_rcv_len = 0;

	if (job->request_len <
	    sizeof(struct fc_bsg_request) +
	    sizeof(struct menlo_command)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2784 Received MENLO_CMD request below "
				"minimum size\n");
		rc = -ERANGE;
		goto no_dd_data;
	}

	if (job->reply_len <
	    sizeof(struct fc_bsg_request) + sizeof(struct menlo_response)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2785 Received MENLO_CMD reply below "
				"minimum size\n");
		rc = -ERANGE;
		goto no_dd_data;
	}

	if (!(phba->menlo_flag & HBA_MENLO_SUPPORT)) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2786 Adapter does not support menlo "
				"commands\n");
		rc = -EPERM;
		goto no_dd_data;
	}

	menlo_cmd = (struct menlo_command *)
		job->request->rqst_data.h_vendor.vendor_cmd;

	menlo_resp = (struct menlo_response *)
		job->reply->reply_data.vendor_reply.vendor_rsp;

	/* allocate our bsg tracking structure */
	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
	if (!dd_data) {
		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
				"2787 Failed allocation of dd_data\n");
		rc = -ENOMEM;
		goto no_dd_data;
	}
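	/* From here on, partial failures unwind through the goto labels at
	 * the end of the function, releasing resources in the reverse order
	 * of allocation.
	 */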
	bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!bmp) {
		rc = -ENOMEM;
		goto free_dd;
	}

	cmdiocbq = lpfc_sli_get_iocbq(phba);
	if (!cmdiocbq) {
		rc = -ENOMEM;
		goto free_bmp;
	}

	rspiocbq = lpfc_sli_get_iocbq(phba);
	if (!rspiocbq) {
		rc = -ENOMEM;
		goto free_cmdiocbq;
	}

	rsp = &rspiocbq->iocb;

	bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
	if (!bmp->virt) {
		rc = -ENOMEM;
		goto free_rspiocbq;
	}

	INIT_LIST_HEAD(&bmp->list);
	bpl = (struct ulp_bde64 *) bmp->virt;
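	/* DMA-map the request and reply scatterlists and build a single
	 * contiguous buffer pointer list in the bmp buffer: request BDEs
	 * first, immediately followed by the reply BDEs.
	 */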
	request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
				  job->request_payload.sg_cnt, DMA_TO_DEVICE);
	for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
		busaddr = sg_dma_address(sgel);
		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
		bpl->tus.f.bdeSize = sg_dma_len(sgel);
		bpl->tus.w = cpu_to_le32(bpl->tus.w);
		bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
		bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
		bpl++;
	}

	reply_nseg = pci_map_sg(phba->pcidev, job->reply_payload.sg_list,
				job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	for_each_sg(job->reply_payload.sg_list, sgel, reply_nseg, numbde) {
		busaddr = sg_dma_address(sgel);
		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
		bpl->tus.f.bdeSize = sg_dma_len(sgel);
		bpl->tus.w = cpu_to_le32(bpl->tus.w);
		bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
		bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
		bpl++;
	}
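	/* Build the GEN_REQUEST64 IOCB: point its BDL at the buffer pointer
	 * list just built and size it to cover both the request and reply
	 * BDEs.
	 */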
	cmd = &cmdiocbq->iocb;
	cmd->un.genreq64.bdl.ulpIoTag32 = 0;
	cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
	cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
	cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
	cmd->un.genreq64.bdl.bdeSize =
		(request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
	cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
	cmd->un.genreq64.w5.hcsw.Dfctl = 0;
	cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CMD;
	cmd->un.genreq64.w5.hcsw.Type = MENLO_TRANSPORT_TYPE; /* 0xfe */
	cmd->ulpBdeCount = 1;
	cmd->ulpClass = CLASS3;
	cmd->ulpOwner = OWN_CHIP;
	cmd->ulpLe = 1; /* Limited Edition */
	cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
	cmdiocbq->vport = phba->pport;
	/* We want the firmware to timeout before we do */
	cmd->ulpTimeout = MENLO_TIMEOUT - 5;
	cmdiocbq->context3 = bmp;
	cmdiocbq->iocb_cmpl = lpfc_bsg_menlo_cmd_cmp;
	cmdiocbq->context1 = dd_data;
	cmdiocbq->context2 = rspiocbq;
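	/* LPFC_BSG_VENDOR_MENLO_CMD starts a new exchange with a
	 * GEN_REQUEST64_CR addressed to the menlo DID; a menlo data request
	 * continues an existing exchange with a GEN_REQUEST64_CX using the
	 * xri supplied in the request.
	 */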
	if (menlo_cmd->cmd == LPFC_BSG_VENDOR_MENLO_CMD) {
		cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
		cmd->ulpPU = MENLO_PU; /* 3 */
		cmd->un.ulpWord[4] = MENLO_DID; /* 0x0000FC0E */
		cmd->ulpContext = MENLO_CONTEXT; /* 0 */
	} else {
		cmd->ulpCommand = CMD_GEN_REQUEST64_CX;
		cmd->ulpPU = 1;
		cmd->un.ulpWord[4] = 0;
		cmd->ulpContext = menlo_cmd->xri;
	}

	dd_data->type = TYPE_MENLO;
	dd_data->context_un.menlo.cmdiocbq = cmdiocbq;
	dd_data->context_un.menlo.rspiocbq = rspiocbq;
	dd_data->context_un.menlo.set_job = job;
	dd_data->context_un.menlo.bmp = bmp;

	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq,
				 MENLO_TIMEOUT - 5);
	if (rc == IOCB_SUCCESS)
		return 0; /* done for now */

	/* iocb failed so cleanup */
	pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
		     job->request_payload.sg_cnt, DMA_TO_DEVICE);
	pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
		     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);

	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);

free_rspiocbq:
	lpfc_sli_release_iocbq(phba, rspiocbq);
free_cmdiocbq:
	lpfc_sli_release_iocbq(phba, cmdiocbq);
free_bmp:
	kfree(bmp);
free_dd:
	kfree(dd_data);
no_dd_data:
	/* make error code available to userspace */
	job->reply->result = rc;
	job->dd_data = NULL;
	return rc;
}
/**
 * lpfc_bsg_hst_vendor - process a vendor-specific fc_bsg_job
 * @job: fc_bsg_job to handle
 **/
static int
lpfc_bsg_hst_vendor(struct fc_bsg_job *job)
{
	int command = job->request->rqst_data.h_vendor.vendor_cmd[0];
	int rc;

	switch (command) {
	case LPFC_BSG_VENDOR_SET_CT_EVENT:
		rc = lpfc_bsg_hba_set_event(job);
		break;
	case LPFC_BSG_VENDOR_GET_CT_EVENT:
		rc = lpfc_bsg_hba_get_event(job);
		break;
	case LPFC_BSG_VENDOR_SEND_MGMT_RESP:
		rc = lpfc_bsg_send_mgmt_rsp(job);
		break;
	case LPFC_BSG_VENDOR_DIAG_MODE:
		rc = lpfc_bsg_diag_mode(job);
		break;
	case LPFC_BSG_VENDOR_DIAG_TEST:
		rc = lpfc_bsg_diag_test(job);
		break;
	case LPFC_BSG_VENDOR_GET_MGMT_REV:
		rc = lpfc_bsg_get_dfc_rev(job);
		break;
	case LPFC_BSG_VENDOR_MBOX:
		rc = lpfc_bsg_mbox_cmd(job);
		break;
	case LPFC_BSG_VENDOR_MENLO_CMD:
	case LPFC_BSG_VENDOR_MENLO_DATA:
		rc = lpfc_menlo_cmd(job);
		break;
	default:
		rc = -EINVAL;
		job->reply->reply_payload_rcv_len = 0;
		/* make error code available to userspace */
		job->reply->result = rc;
		break;
	}

	return rc;
}
/**
 * lpfc_bsg_request - handle a bsg request from the FC transport
 * @job: fc_bsg_job to handle
 **/
int
lpfc_bsg_request(struct fc_bsg_job *job)
{
	uint32_t msgcode;
	int rc;

	msgcode = job->request->msgcode;
	switch (msgcode) {
	case FC_BSG_HST_VENDOR:
		rc = lpfc_bsg_hst_vendor(job);
		break;
	case FC_BSG_RPT_ELS:
		rc = lpfc_bsg_rport_els(job);
		break;
	case FC_BSG_RPT_CT:
		rc = lpfc_bsg_send_mgmt_cmd(job);
		break;
	default:
		rc = -EINVAL;
		job->reply->reply_payload_rcv_len = 0;
		/* make error code available to userspace */
		job->reply->result = rc;
		break;
	}

	return rc;
}
/**
 * lpfc_bsg_timeout - handle timeout of a bsg request from the FC transport
 * @job: fc_bsg_job that has timed out
 *
 * This function aborts the job's IOCB when it has one; the abort drives the
 * normal completion handler, which passes the error back to userspace.
 * Event and mailbox jobs are detached and completed here with -EAGAIN.
 **/
int
lpfc_bsg_timeout(struct fc_bsg_job *job)
{
	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *cmdiocb;
	struct lpfc_bsg_event *evt;
	struct lpfc_bsg_iocb *iocb;
	struct lpfc_bsg_mbox *mbox;
	struct lpfc_bsg_menlo *menlo;
	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
	struct bsg_job_data *dd_data;
	unsigned long flags;

	spin_lock_irqsave(&phba->ct_ev_lock, flags);
	dd_data = (struct bsg_job_data *)job->dd_data;
	/* timeout and completion crossed paths if no dd_data */
	if (!dd_data) {
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		return 0;
	}
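	/* For iocb and menlo jobs the abort below drives the normal
	 * completion handler, which finishes the job; event and mailbox
	 * jobs are detached from the job here and completed with -EAGAIN
	 * so the outstanding event or mailbox can finish on its own later.
	 */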
	switch (dd_data->type) {
	case TYPE_IOCB:
		iocb = &dd_data->context_un.iocb;
		cmdiocb = iocb->cmdiocbq;
		/* hint to completion handler that the job timed out */
		job->reply->result = -EAGAIN;
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		/* this will call our completion handler */
		spin_lock_irq(&phba->hbalock);
		lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
		spin_unlock_irq(&phba->hbalock);
		break;
	case TYPE_EVT:
		evt = dd_data->context_un.evt;
		/* this event has no job anymore */
		evt->set_job = NULL;
		job->dd_data = NULL;
		job->reply->reply_payload_rcv_len = 0;
		/* Return -EAGAIN which is our way of signalling the
		 * app to retry.
		 */
		job->reply->result = -EAGAIN;
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		job->job_done(job);
		break;
	case TYPE_MBOX:
		mbox = &dd_data->context_un.mbox;
		/* this mbox has no job anymore */
		mbox->set_job = NULL;
		job->dd_data = NULL;
		job->reply->reply_payload_rcv_len = 0;
		job->reply->result = -EAGAIN;
		/* the mbox completion handler can now be run */
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		job->job_done(job);
		break;
	case TYPE_MENLO:
		menlo = &dd_data->context_un.menlo;
		cmdiocb = menlo->cmdiocbq;
		/* hint to completion handler that the job timed out */
		job->reply->result = -EAGAIN;
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		/* this will call our completion handler */
		spin_lock_irq(&phba->hbalock);
		lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
		spin_unlock_irq(&phba->hbalock);
		break;
	default:
		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
		break;
	}
	/* scsi transport fc fc_bsg_job_timeout expects a zero return code,
	 * otherwise an error message will be displayed on the console
	 * so always return success (zero)
	 */
	return 0;
}