bfad_bsg.c

/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 *
 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */

#include <linux/uaccess.h>
#include "bfad_drv.h"
#include "bfad_im.h"
#include "bfad_bsg.h"

BFA_TRC_FILE(LDRV, BSG);

int
bfad_iocmd_ioc_enable(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	int rc = 0;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	/* If IOC is not in disabled state - return */
	if (!bfa_ioc_is_disabled(&bfad->bfa.ioc)) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_OK;
		return rc;
	}

	init_completion(&bfad->enable_comp);
	bfa_iocfc_enable(&bfad->bfa);
	iocmd->status = BFA_STATUS_OK;
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	wait_for_completion(&bfad->enable_comp);

	return rc;
}

int
bfad_iocmd_ioc_disable(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	int rc = 0;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	if (bfa_ioc_is_disabled(&bfad->bfa.ioc)) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_OK;
		return rc;
	}

	if (bfad->disable_active) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		return -EBUSY;
	}

	bfad->disable_active = BFA_TRUE;
	init_completion(&bfad->disable_comp);
	bfa_iocfc_disable(&bfad->bfa);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	wait_for_completion(&bfad->disable_comp);
	bfad->disable_active = BFA_FALSE;
	iocmd->status = BFA_STATUS_OK;

	return rc;
}

static int
bfad_iocmd_ioc_get_info(struct bfad_s *bfad, void *cmd)
{
	int i;
	struct bfa_bsg_ioc_info_s *iocmd = (struct bfa_bsg_ioc_info_s *)cmd;
	struct bfad_im_port_s *im_port;
	struct bfa_port_attr_s pattr;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	bfa_fcport_get_attr(&bfad->bfa, &pattr);
	iocmd->nwwn = pattr.nwwn;
	iocmd->pwwn = pattr.pwwn;
	iocmd->ioc_type = bfa_get_type(&bfad->bfa);
	iocmd->mac = bfa_get_mac(&bfad->bfa);
	iocmd->factory_mac = bfa_get_mfg_mac(&bfad->bfa);
	bfa_get_adapter_serial_num(&bfad->bfa, iocmd->serialnum);
	iocmd->factorynwwn = pattr.factorynwwn;
	iocmd->factorypwwn = pattr.factorypwwn;
	iocmd->bfad_num = bfad->inst_no;
	im_port = bfad->pport.im_port;
	iocmd->host = im_port->shost->host_no;
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	strcpy(iocmd->name, bfad->adapter_name);
	strcpy(iocmd->port_name, bfad->port_name);
	strcpy(iocmd->hwpath, bfad->pci_name);
	/* set adapter hw path; check the index bound before dereferencing
	 * so a pci_name without the expected ':' separators cannot index
	 * past the end of adapter_hwpath */
	strcpy(iocmd->adapter_hwpath, bfad->pci_name);
	for (i = 0; i < BFA_STRING_32 - 1 &&
	     iocmd->adapter_hwpath[i] != ':'; i++)
		;
	for (; i < BFA_STRING_32 - 1 &&
	     iocmd->adapter_hwpath[++i] != ':'; )
		;
	iocmd->adapter_hwpath[i] = '\0';
	iocmd->status = BFA_STATUS_OK;
	return 0;
}

static int
bfad_iocmd_ioc_get_attr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_ioc_attr_s *iocmd = (struct bfa_bsg_ioc_attr_s *)cmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	bfa_ioc_get_attr(&bfad->bfa.ioc, &iocmd->ioc_attr);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	/* fill in driver attr info */
	strcpy(iocmd->ioc_attr.driver_attr.driver, BFAD_DRIVER_NAME);
	strncpy(iocmd->ioc_attr.driver_attr.driver_ver,
		BFAD_DRIVER_VERSION, BFA_VERSION_LEN);
	strcpy(iocmd->ioc_attr.driver_attr.fw_ver,
		iocmd->ioc_attr.adapter_attr.fw_ver);
	strcpy(iocmd->ioc_attr.driver_attr.bios_ver,
		iocmd->ioc_attr.adapter_attr.optrom_ver);

	/* copy chip rev info first otherwise it will be overwritten */
	memcpy(bfad->pci_attr.chip_rev, iocmd->ioc_attr.pci_attr.chip_rev,
		sizeof(bfad->pci_attr.chip_rev));
	memcpy(&iocmd->ioc_attr.pci_attr, &bfad->pci_attr,
		sizeof(struct bfa_ioc_pci_attr_s));

	iocmd->status = BFA_STATUS_OK;
	return 0;
}

int
bfad_iocmd_ioc_get_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_ioc_stats_s *iocmd = (struct bfa_bsg_ioc_stats_s *)cmd;

	bfa_ioc_get_stats(&bfad->bfa, &iocmd->ioc_stats);
	iocmd->status = BFA_STATUS_OK;
	return 0;
}

int
bfad_iocmd_ioc_get_fwstats(struct bfad_s *bfad, void *cmd,
			unsigned int payload_len)
{
	struct bfa_bsg_ioc_fwstats_s *iocmd =
			(struct bfa_bsg_ioc_fwstats_s *)cmd;
	void *iocmd_bufptr;
	unsigned long flags;

	if (bfad_chk_iocmd_sz(payload_len,
			sizeof(struct bfa_bsg_ioc_fwstats_s),
			sizeof(struct bfa_fw_stats_s)) != BFA_STATUS_OK) {
		iocmd->status = BFA_STATUS_VERSION_FAIL;
		goto out;
	}

	iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_ioc_fwstats_s);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_ioc_fw_stats_get(&bfad->bfa.ioc, iocmd_bufptr);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	if (iocmd->status != BFA_STATUS_OK) {
		bfa_trc(bfad, iocmd->status);
		goto out;
	}
out:
	bfa_trc(bfad, 0x6666);
	return 0;
}

int
bfad_iocmd_ioc_reset_stats(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	unsigned long flags;

	if (v_cmd == IOCMD_IOC_RESET_STATS) {
		bfa_ioc_clear_stats(&bfad->bfa);
		iocmd->status = BFA_STATUS_OK;
	} else if (v_cmd == IOCMD_IOC_RESET_FWSTATS) {
		spin_lock_irqsave(&bfad->bfad_lock, flags);
		iocmd->status = bfa_ioc_fw_stats_clear(&bfad->bfa.ioc);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	}

	return 0;
}

int
bfad_iocmd_ioc_set_name(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
{
	struct bfa_bsg_ioc_name_s *iocmd = (struct bfa_bsg_ioc_name_s *) cmd;

	if (v_cmd == IOCMD_IOC_SET_ADAPTER_NAME)
		strcpy(bfad->adapter_name, iocmd->name);
	else if (v_cmd == IOCMD_IOC_SET_PORT_NAME)
		strcpy(bfad->port_name, iocmd->name);

	iocmd->status = BFA_STATUS_OK;
	return 0;
}

int
bfad_iocmd_iocfc_get_attr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_iocfc_attr_s *iocmd = (struct bfa_bsg_iocfc_attr_s *)cmd;

	iocmd->status = BFA_STATUS_OK;
	bfa_iocfc_get_attr(&bfad->bfa, &iocmd->iocfc_attr);

	return 0;
}

int
bfad_iocmd_iocfc_set_intr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_iocfc_intr_s *iocmd = (struct bfa_bsg_iocfc_intr_s *)cmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_iocfc_israttr_set(&bfad->bfa, &iocmd->attr);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;
}

int
bfad_iocmd_port_enable(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_port_enable(&bfad->bfa.modules.port,
					bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK) {
		bfa_trc(bfad, iocmd->status);
		return 0;
	}
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
	return 0;
}

int
bfad_iocmd_port_disable(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_port_disable(&bfad->bfa.modules.port,
					bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	if (iocmd->status != BFA_STATUS_OK) {
		bfa_trc(bfad, iocmd->status);
		return 0;
	}
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
	return 0;
}

static int
bfad_iocmd_port_get_attr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_port_attr_s *iocmd = (struct bfa_bsg_port_attr_s *)cmd;
	struct bfa_lport_attr_s port_attr;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	bfa_fcport_get_attr(&bfad->bfa, &iocmd->attr);
	bfa_fcs_lport_get_attr(&bfad->bfa_fcs.fabric.bport, &port_attr);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	if (iocmd->attr.topology != BFA_PORT_TOPOLOGY_NONE)
		iocmd->attr.pid = port_attr.pid;
	else
		iocmd->attr.pid = 0;

	iocmd->attr.port_type = port_attr.port_type;
	iocmd->attr.loopback = port_attr.loopback;
	iocmd->attr.authfail = port_attr.authfail;
	strncpy(iocmd->attr.port_symname.symname,
		port_attr.port_cfg.sym_name.symname,
		sizeof(port_attr.port_cfg.sym_name.symname));

	iocmd->status = BFA_STATUS_OK;
	return 0;
}

int
bfad_iocmd_port_get_stats(struct bfad_s *bfad, void *cmd,
			unsigned int payload_len)
{
	struct bfa_bsg_port_stats_s *iocmd = (struct bfa_bsg_port_stats_s *)cmd;
	struct bfad_hal_comp fcomp;
	void *iocmd_bufptr;
	unsigned long flags;

	if (bfad_chk_iocmd_sz(payload_len,
			sizeof(struct bfa_bsg_port_stats_s),
			sizeof(union bfa_port_stats_u)) != BFA_STATUS_OK) {
		iocmd->status = BFA_STATUS_VERSION_FAIL;
		return 0;
	}

	iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_port_stats_s);

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_port_get_stats(&bfad->bfa.modules.port,
				iocmd_bufptr, bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK) {
		bfa_trc(bfad, iocmd->status);
		goto out;
	}
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_port_reset_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_port_clear_stats(&bfad->bfa.modules.port,
					bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK) {
		bfa_trc(bfad, iocmd->status);
		return 0;
	}
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
	return 0;
}

int
bfad_iocmd_set_port_cfg(struct bfad_s *bfad, void *iocmd, unsigned int v_cmd)
{
	struct bfa_bsg_port_cfg_s *cmd = (struct bfa_bsg_port_cfg_s *)iocmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	if (v_cmd == IOCMD_PORT_CFG_TOPO)
		cmd->status = bfa_fcport_cfg_topology(&bfad->bfa, cmd->param);
	else if (v_cmd == IOCMD_PORT_CFG_SPEED)
		cmd->status = bfa_fcport_cfg_speed(&bfad->bfa, cmd->param);
	else if (v_cmd == IOCMD_PORT_CFG_ALPA)
		cmd->status = bfa_fcport_cfg_hardalpa(&bfad->bfa, cmd->param);
	else if (v_cmd == IOCMD_PORT_CLR_ALPA)
		cmd->status = bfa_fcport_clr_hardalpa(&bfad->bfa);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;
}

int
bfad_iocmd_port_cfg_maxfrsize(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_port_cfg_maxfrsize_s *iocmd =
				(struct bfa_bsg_port_cfg_maxfrsize_s *)cmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_fcport_cfg_maxfrsize(&bfad->bfa, iocmd->maxfrsize);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;
}

int
bfad_iocmd_port_cfg_bbcr(struct bfad_s *bfad, unsigned int cmd, void *pcmd)
{
	struct bfa_bsg_bbcr_enable_s *iocmd =
			(struct bfa_bsg_bbcr_enable_s *)pcmd;
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	if (cmd == IOCMD_PORT_BBCR_ENABLE)
		rc = bfa_fcport_cfg_bbcr(&bfad->bfa, BFA_TRUE, iocmd->bb_scn);
	else if (cmd == IOCMD_PORT_BBCR_DISABLE)
		rc = bfa_fcport_cfg_bbcr(&bfad->bfa, BFA_FALSE, 0);
	else {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		return -EINVAL;
	}
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	iocmd->status = rc;
	return 0;
}

int
bfad_iocmd_port_get_bbcr_attr(struct bfad_s *bfad, void *pcmd)
{
	struct bfa_bsg_bbcr_attr_s *iocmd = (struct bfa_bsg_bbcr_attr_s *) pcmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status =
		bfa_fcport_get_bbcr_attr(&bfad->bfa, &iocmd->attr);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;
}

static int
bfad_iocmd_lport_get_attr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_bsg_lport_attr_s *iocmd = (struct bfa_bsg_lport_attr_s *)cmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->pwwn);
	if (fcs_port == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
		goto out;
	}

	bfa_fcs_lport_get_attr(fcs_port, &iocmd->port_attr);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}

int
bfad_iocmd_lport_get_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_bsg_lport_stats_s *iocmd =
			(struct bfa_bsg_lport_stats_s *)cmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->pwwn);
	if (fcs_port == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
		goto out;
	}

	bfa_fcs_lport_get_stats(fcs_port, &iocmd->port_stats);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}

int
bfad_iocmd_lport_reset_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_bsg_reset_stats_s *iocmd =
			(struct bfa_bsg_reset_stats_s *)cmd;
	struct bfa_fcpim_s *fcpim = BFA_FCPIM(&bfad->bfa);
	struct list_head *qe, *qen;
	struct bfa_itnim_s *itnim;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->vpwwn);
	if (fcs_port == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
		goto out;
	}

	bfa_fcs_lport_clear_stats(fcs_port);
	/* clear IO stats from all active itnims */
	list_for_each_safe(qe, qen, &fcpim->itnim_q) {
		itnim = (struct bfa_itnim_s *) qe;
		if (itnim->rport->rport_info.lp_tag != fcs_port->lp_tag)
			continue;
		bfa_itnim_clear_stats(itnim);
	}
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}

int
bfad_iocmd_lport_get_iostats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_bsg_lport_iostats_s *iocmd =
			(struct bfa_bsg_lport_iostats_s *)cmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->pwwn);
	if (fcs_port == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
		goto out;
	}

	bfa_fcpim_port_iostats(&bfad->bfa, &iocmd->iostats,
			fcs_port->lp_tag);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}

int
bfad_iocmd_lport_get_rports(struct bfad_s *bfad, void *cmd,
			unsigned int payload_len)
{
	struct bfa_bsg_lport_get_rports_s *iocmd =
			(struct bfa_bsg_lport_get_rports_s *)cmd;
	struct bfa_fcs_lport_s *fcs_port;
	unsigned long flags;
	void *iocmd_bufptr;

	if (iocmd->nrports == 0)
		return -EINVAL;

	if (bfad_chk_iocmd_sz(payload_len,
			sizeof(struct bfa_bsg_lport_get_rports_s),
			sizeof(struct bfa_rport_qualifier_s) * iocmd->nrports)
			!= BFA_STATUS_OK) {
		iocmd->status = BFA_STATUS_VERSION_FAIL;
		return 0;
	}

	iocmd_bufptr = (char *)iocmd +
			sizeof(struct bfa_bsg_lport_get_rports_s);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->pwwn);
	if (fcs_port == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		bfa_trc(bfad, 0);
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
		goto out;
	}

	bfa_fcs_lport_get_rport_quals(fcs_port,
			(struct bfa_rport_qualifier_s *)iocmd_bufptr,
			&iocmd->nrports);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}

int
bfad_iocmd_rport_get_attr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_rport_attr_s *iocmd = (struct bfa_bsg_rport_attr_s *)cmd;
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_fcs_rport_s *fcs_rport;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->pwwn);
	if (fcs_port == NULL) {
		bfa_trc(bfad, 0);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
		goto out;
	}

	if (iocmd->pid)
		fcs_rport = bfa_fcs_lport_get_rport_by_qualifier(fcs_port,
						iocmd->rpwwn, iocmd->pid);
	else
		fcs_rport = bfa_fcs_rport_lookup(fcs_port, iocmd->rpwwn);
	if (fcs_rport == NULL) {
		bfa_trc(bfad, 0);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
		goto out;
	}

	bfa_fcs_rport_get_attr(fcs_rport, &iocmd->attr);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}

static int
bfad_iocmd_rport_get_addr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_rport_scsi_addr_s *iocmd =
			(struct bfa_bsg_rport_scsi_addr_s *)cmd;
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_fcs_itnim_s *fcs_itnim;
	struct bfad_itnim_s *drv_itnim;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->pwwn);
	if (fcs_port == NULL) {
		bfa_trc(bfad, 0);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
		goto out;
	}

	fcs_itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn);
	if (fcs_itnim == NULL) {
		bfa_trc(bfad, 0);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
		goto out;
	}

	drv_itnim = fcs_itnim->itnim_drv;

	if (drv_itnim && drv_itnim->im_port)
		iocmd->host = drv_itnim->im_port->shost->host_no;
	else {
		bfa_trc(bfad, 0);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
		goto out;
	}

	iocmd->target = drv_itnim->scsi_tgt_id;
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	iocmd->bus = 0;
	iocmd->lun = 0;
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}

int
bfad_iocmd_rport_get_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_rport_stats_s *iocmd =
			(struct bfa_bsg_rport_stats_s *)cmd;
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_fcs_rport_s *fcs_rport;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->pwwn);
	if (fcs_port == NULL) {
		bfa_trc(bfad, 0);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
		goto out;
	}

	fcs_rport = bfa_fcs_rport_lookup(fcs_port, iocmd->rpwwn);
	if (fcs_rport == NULL) {
		bfa_trc(bfad, 0);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
		goto out;
	}

	memcpy((void *)&iocmd->stats, (void *)&fcs_rport->stats,
		sizeof(struct bfa_rport_stats_s));
	if (bfa_fcs_rport_get_halrport(fcs_rport)) {
		memcpy((void *)&iocmd->stats.hal_stats,
		       (void *)&(bfa_fcs_rport_get_halrport(fcs_rport)->stats),
		       sizeof(struct bfa_rport_hal_stats_s));
	}

	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}

int
bfad_iocmd_rport_clr_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_rport_reset_stats_s *iocmd =
			(struct bfa_bsg_rport_reset_stats_s *)cmd;
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_fcs_rport_s *fcs_rport;
	struct bfa_rport_s *rport;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->pwwn);
	if (fcs_port == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
		goto out;
	}

	fcs_rport = bfa_fcs_rport_lookup(fcs_port, iocmd->rpwwn);
	if (fcs_rport == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
		goto out;
	}

	memset((char *)&fcs_rport->stats, 0, sizeof(struct bfa_rport_stats_s));
	rport = bfa_fcs_rport_get_halrport(fcs_rport);
	if (rport)
		memset(&rport->stats, 0, sizeof(rport->stats));
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}

int
bfad_iocmd_rport_set_speed(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_rport_set_speed_s *iocmd =
			(struct bfa_bsg_rport_set_speed_s *)cmd;
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_fcs_rport_s *fcs_rport;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->pwwn);
	if (fcs_port == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
		goto out;
	}

	fcs_rport = bfa_fcs_rport_lookup(fcs_port, iocmd->rpwwn);
	if (fcs_rport == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
		goto out;
	}

	fcs_rport->rpf.assigned_speed = iocmd->speed;
	/* Set this speed in f/w only if the RPSC speed is not available */
	if (fcs_rport->rpf.rpsc_speed == BFA_PORT_SPEED_UNKNOWN)
		if (fcs_rport->bfa_rport)
			bfa_rport_speed(fcs_rport->bfa_rport, iocmd->speed);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}

int
bfad_iocmd_vport_get_attr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_fcs_vport_s *fcs_vport;
	struct bfa_bsg_vport_attr_s *iocmd = (struct bfa_bsg_vport_attr_s *)cmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->vpwwn);
	if (fcs_vport == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_VWWN;
		goto out;
	}

	bfa_fcs_vport_get_attr(fcs_vport, &iocmd->vport_attr);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}

int
bfad_iocmd_vport_get_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_fcs_vport_s *fcs_vport;
	struct bfa_bsg_vport_stats_s *iocmd =
			(struct bfa_bsg_vport_stats_s *)cmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->vpwwn);
	if (fcs_vport == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_VWWN;
		goto out;
	}

	memcpy((void *)&iocmd->vport_stats, (void *)&fcs_vport->vport_stats,
		sizeof(struct bfa_vport_stats_s));
	memcpy((void *)&iocmd->vport_stats.port_stats,
	       (void *)&fcs_vport->lport.stats,
	       sizeof(struct bfa_lport_stats_s));
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}

int
bfad_iocmd_vport_clr_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_fcs_vport_s *fcs_vport;
	struct bfa_bsg_reset_stats_s *iocmd =
			(struct bfa_bsg_reset_stats_s *)cmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->vpwwn);
	if (fcs_vport == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_VWWN;
		goto out;
	}

	memset(&fcs_vport->vport_stats, 0, sizeof(struct bfa_vport_stats_s));
	memset(&fcs_vport->lport.stats, 0, sizeof(struct bfa_lport_stats_s));
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}

static int
bfad_iocmd_fabric_get_lports(struct bfad_s *bfad, void *cmd,
			unsigned int payload_len)
{
	struct bfa_bsg_fabric_get_lports_s *iocmd =
			(struct bfa_bsg_fabric_get_lports_s *)cmd;
	bfa_fcs_vf_t *fcs_vf;
	uint32_t nports = iocmd->nports;
	unsigned long flags;
	void *iocmd_bufptr;

	if (nports == 0) {
		iocmd->status = BFA_STATUS_EINVAL;
		goto out;
	}

	if (bfad_chk_iocmd_sz(payload_len,
			sizeof(struct bfa_bsg_fabric_get_lports_s),
			sizeof(wwn_t[iocmd->nports])) != BFA_STATUS_OK) {
		iocmd->status = BFA_STATUS_VERSION_FAIL;
		goto out;
	}

	iocmd_bufptr = (char *)iocmd +
			sizeof(struct bfa_bsg_fabric_get_lports_s);

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_vf = bfa_fcs_vf_lookup(&bfad->bfa_fcs, iocmd->vf_id);
	if (fcs_vf == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_VFID;
		goto out;
	}
	bfa_fcs_vf_get_ports(fcs_vf, (wwn_t *)iocmd_bufptr, &nports);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	iocmd->nports = nports;
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}

int
bfad_iocmd_qos_set_bw(struct bfad_s *bfad, void *pcmd)
{
	struct bfa_bsg_qos_bw_s *iocmd = (struct bfa_bsg_qos_bw_s *)pcmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_fcport_set_qos_bw(&bfad->bfa, &iocmd->qos_bw);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;
}

int
bfad_iocmd_ratelim(struct bfad_s *bfad, unsigned int cmd, void *pcmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd;
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);

	if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
	    (fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
		iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
	else {
		if (cmd == IOCMD_RATELIM_ENABLE)
			fcport->cfg.ratelimit = BFA_TRUE;
		else if (cmd == IOCMD_RATELIM_DISABLE)
			fcport->cfg.ratelimit = BFA_FALSE;

		if (fcport->cfg.trl_def_speed == BFA_PORT_SPEED_UNKNOWN)
			fcport->cfg.trl_def_speed = BFA_PORT_SPEED_1GBPS;

		iocmd->status = BFA_STATUS_OK;
	}

	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;
}

int
bfad_iocmd_ratelim_speed(struct bfad_s *bfad, unsigned int cmd, void *pcmd)
{
	struct bfa_bsg_trl_speed_s *iocmd = (struct bfa_bsg_trl_speed_s *)pcmd;
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);

	/* Auto and speeds greater than the supported speed, are invalid */
	if ((iocmd->speed == BFA_PORT_SPEED_AUTO) ||
	    (iocmd->speed > fcport->speed_sup)) {
		iocmd->status = BFA_STATUS_UNSUPP_SPEED;
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		return 0;
	}

	if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
	    (fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
		iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
	else {
		fcport->cfg.trl_def_speed = iocmd->speed;
		iocmd->status = BFA_STATUS_OK;
	}

	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;
}

int
bfad_iocmd_cfg_fcpim(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_fcpim_s *iocmd = (struct bfa_bsg_fcpim_s *)cmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	bfa_fcpim_path_tov_set(&bfad->bfa, iocmd->param);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
	return 0;
}

int
bfad_iocmd_fcpim_get_modstats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_fcpim_modstats_s *iocmd =
			(struct bfa_bsg_fcpim_modstats_s *)cmd;
	struct bfa_fcpim_s *fcpim = BFA_FCPIM(&bfad->bfa);
	struct list_head *qe, *qen;
	struct bfa_itnim_s *itnim;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);

	/* accumulate IO stats from itnim */
	memset((void *)&iocmd->modstats, 0, sizeof(struct bfa_itnim_iostats_s));
	list_for_each_safe(qe, qen, &fcpim->itnim_q) {
		itnim = (struct bfa_itnim_s *) qe;
		bfa_fcpim_add_stats(&iocmd->modstats, &(itnim->stats));
	}
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
	return 0;
}

int
bfad_iocmd_fcpim_clr_modstats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_fcpim_modstatsclr_s *iocmd =
			(struct bfa_bsg_fcpim_modstatsclr_s *)cmd;
	struct bfa_fcpim_s *fcpim = BFA_FCPIM(&bfad->bfa);
	struct list_head *qe, *qen;
	struct bfa_itnim_s *itnim;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);

	list_for_each_safe(qe, qen, &fcpim->itnim_q) {
		itnim = (struct bfa_itnim_s *) qe;
		bfa_itnim_clear_stats(itnim);
	}
	memset(&fcpim->del_itn_stats, 0,
		sizeof(struct bfa_fcpim_del_itn_stats_s));
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
	return 0;
}

int
bfad_iocmd_fcpim_get_del_itn_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_fcpim_del_itn_stats_s *iocmd =
			(struct bfa_bsg_fcpim_del_itn_stats_s *)cmd;
	struct bfa_fcpim_s *fcpim = BFA_FCPIM(&bfad->bfa);
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	memcpy((void *)&iocmd->modstats, (void *)&fcpim->del_itn_stats,
		sizeof(struct bfa_fcpim_del_itn_stats_s));
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	iocmd->status = BFA_STATUS_OK;
	return 0;
}

static int
bfad_iocmd_itnim_get_attr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_itnim_attr_s *iocmd = (struct bfa_bsg_itnim_attr_s *)cmd;
	struct bfa_fcs_lport_s *fcs_port;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->lpwwn);
	if (!fcs_port)
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
	else
		iocmd->status = bfa_fcs_itnim_attr_get(fcs_port,
					iocmd->rpwwn, &iocmd->attr);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	return 0;
}

static int
bfad_iocmd_itnim_get_iostats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_itnim_iostats_s *iocmd =
			(struct bfa_bsg_itnim_iostats_s *)cmd;
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_fcs_itnim_s *itnim;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->lpwwn);
	if (!fcs_port) {
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
		bfa_trc(bfad, 0);
	} else {
		itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn);
		if (itnim == NULL)
			iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
		else {
			iocmd->status = BFA_STATUS_OK;
			if (bfa_fcs_itnim_get_halitn(itnim))
				memcpy((void *)&iocmd->iostats, (void *)
				&(bfa_fcs_itnim_get_halitn(itnim)->stats),
				       sizeof(struct bfa_itnim_iostats_s));
		}
	}
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	return 0;
}

static int
bfad_iocmd_itnim_reset_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_rport_reset_stats_s *iocmd =
			(struct bfa_bsg_rport_reset_stats_s *)cmd;
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_fcs_itnim_s *itnim;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->pwwn);

	if (!fcs_port)
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
	else {
		itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn);
		if (itnim == NULL)
			iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
		else {
			iocmd->status = BFA_STATUS_OK;
			bfa_fcs_itnim_stats_clear(fcs_port, iocmd->rpwwn);
			bfa_itnim_clear_stats(bfa_fcs_itnim_get_halitn(itnim));
		}
	}
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	return 0;
}

static int
bfad_iocmd_itnim_get_itnstats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_itnim_itnstats_s *iocmd =
			(struct bfa_bsg_itnim_itnstats_s *)cmd;
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_fcs_itnim_s *itnim;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->lpwwn);
	if (!fcs_port) {
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
		bfa_trc(bfad, 0);
	} else {
		itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn);
		if (itnim == NULL)
			iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
		else {
			iocmd->status = BFA_STATUS_OK;
			bfa_fcs_itnim_stats_get(fcs_port, iocmd->rpwwn,
					&iocmd->itnstats);
		}
	}
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	return 0;
}

int
bfad_iocmd_fcport_enable(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_fcport_enable(&bfad->bfa);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;
}

int
bfad_iocmd_fcport_disable(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_fcport_disable(&bfad->bfa);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;
}

int
bfad_iocmd_ioc_get_pcifn_cfg(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_pcifn_cfg_s *iocmd = (struct bfa_bsg_pcifn_cfg_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_ablk_query(&bfad->bfa.modules.ablk,
				&iocmd->pcifn_cfg,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;

	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_pcifn_create(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_pcifn_s *iocmd = (struct bfa_bsg_pcifn_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_ablk_pf_create(&bfad->bfa.modules.ablk,
				&iocmd->pcifn_id, iocmd->port,
				iocmd->pcifn_class, iocmd->bw_min,
				iocmd->bw_max, bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;

	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_pcifn_delete(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_pcifn_s *iocmd = (struct bfa_bsg_pcifn_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_ablk_pf_delete(&bfad->bfa.modules.ablk,
				iocmd->pcifn_id,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;

	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_pcifn_bw(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_pcifn_s *iocmd = (struct bfa_bsg_pcifn_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_ablk_pf_update(&bfad->bfa.modules.ablk,
				iocmd->pcifn_id, iocmd->bw_min,
				iocmd->bw_max, bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	bfa_trc(bfad, iocmd->status);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;

	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
	bfa_trc(bfad, iocmd->status);
out:
	return 0;
}

int
bfad_iocmd_adapter_cfg_mode(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_adapter_cfg_mode_s *iocmd =
			(struct bfa_bsg_adapter_cfg_mode_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags = 0;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_ablk_adapter_config(&bfad->bfa.modules.ablk,
				iocmd->cfg.mode, iocmd->cfg.max_pf,
				iocmd->cfg.max_vf, bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;

	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_port_cfg_mode(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_port_cfg_mode_s *iocmd =
			(struct bfa_bsg_port_cfg_mode_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags = 0;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_ablk_port_config(&bfad->bfa.modules.ablk,
				iocmd->instance, iocmd->cfg.mode,
				iocmd->cfg.max_pf, iocmd->cfg.max_vf,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;

	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_ablk_optrom(struct bfad_s *bfad, unsigned int cmd, void *pcmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	if (cmd == IOCMD_FLASH_ENABLE_OPTROM)
		iocmd->status = bfa_ablk_optrom_en(&bfad->bfa.modules.ablk,
					bfad_hcb_comp, &fcomp);
	else
		iocmd->status = bfa_ablk_optrom_dis(&bfad->bfa.modules.ablk,
					bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	if (iocmd->status != BFA_STATUS_OK)
		goto out;

	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_faa_query(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_faa_attr_s *iocmd = (struct bfa_bsg_faa_attr_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	iocmd->status = BFA_STATUS_OK;
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_faa_query(&bfad->bfa, &iocmd->faa_attr,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	if (iocmd->status != BFA_STATUS_OK)
		goto out;

	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_cee_attr(struct bfad_s *bfad, void *cmd, unsigned int payload_len)
{
	struct bfa_bsg_cee_attr_s *iocmd =
				(struct bfa_bsg_cee_attr_s *)cmd;
	void *iocmd_bufptr;
	struct bfad_hal_comp cee_comp;
	unsigned long flags;

	if (bfad_chk_iocmd_sz(payload_len,
			sizeof(struct bfa_bsg_cee_attr_s),
			sizeof(struct bfa_cee_attr_s)) != BFA_STATUS_OK) {
		iocmd->status = BFA_STATUS_VERSION_FAIL;
		return 0;
	}

	iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_cee_attr_s);
	cee_comp.status = 0;
	init_completion(&cee_comp.comp);
	mutex_lock(&bfad_mutex);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_cee_get_attr(&bfad->bfa.modules.cee, iocmd_bufptr,
					 bfad_hcb_comp, &cee_comp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK) {
		mutex_unlock(&bfad_mutex);
		bfa_trc(bfad, 0x5555);
		goto out;
	}
	wait_for_completion(&cee_comp.comp);
	mutex_unlock(&bfad_mutex);
out:
	return 0;
}

int
bfad_iocmd_cee_get_stats(struct bfad_s *bfad, void *cmd,
			unsigned int payload_len)
{
	struct bfa_bsg_cee_stats_s *iocmd =
				(struct bfa_bsg_cee_stats_s *)cmd;
	void *iocmd_bufptr;
	struct bfad_hal_comp cee_comp;
	unsigned long flags;

	if (bfad_chk_iocmd_sz(payload_len,
			sizeof(struct bfa_bsg_cee_stats_s),
			sizeof(struct bfa_cee_stats_s)) != BFA_STATUS_OK) {
		iocmd->status = BFA_STATUS_VERSION_FAIL;
		return 0;
	}

	iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_cee_stats_s);
	cee_comp.status = 0;
	init_completion(&cee_comp.comp);
	mutex_lock(&bfad_mutex);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_cee_get_stats(&bfad->bfa.modules.cee, iocmd_bufptr,
					bfad_hcb_comp, &cee_comp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK) {
		mutex_unlock(&bfad_mutex);
		bfa_trc(bfad, 0x5555);
		goto out;
	}
	wait_for_completion(&cee_comp.comp);
	mutex_unlock(&bfad_mutex);
out:
	return 0;
}

int
bfad_iocmd_cee_reset_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_cee_reset_stats(&bfad->bfa.modules.cee, NULL, NULL);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		bfa_trc(bfad, 0x5555);
	return 0;
}

int
bfad_iocmd_sfp_media(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_sfp_media_s *iocmd = (struct bfa_bsg_sfp_media_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_sfp_media(BFA_SFP_MOD(&bfad->bfa), &iocmd->media,
  1268. bfad_hcb_comp, &fcomp);
  1269. spin_unlock_irqrestore(&bfad->bfad_lock, flags);
  1270. bfa_trc(bfad, iocmd->status);
  1271. if (iocmd->status != BFA_STATUS_SFP_NOT_READY)
  1272. goto out;
  1273. wait_for_completion(&fcomp.comp);
  1274. iocmd->status = fcomp.status;
  1275. out:
  1276. return 0;
  1277. }
  1278. int
  1279. bfad_iocmd_sfp_speed(struct bfad_s *bfad, void *cmd)
  1280. {
  1281. struct bfa_bsg_sfp_speed_s *iocmd = (struct bfa_bsg_sfp_speed_s *)cmd;
  1282. struct bfad_hal_comp fcomp;
  1283. unsigned long flags;
  1284. init_completion(&fcomp.comp);
  1285. spin_lock_irqsave(&bfad->bfad_lock, flags);
  1286. iocmd->status = bfa_sfp_speed(BFA_SFP_MOD(&bfad->bfa), iocmd->speed,
  1287. bfad_hcb_comp, &fcomp);
  1288. spin_unlock_irqrestore(&bfad->bfad_lock, flags);
  1289. bfa_trc(bfad, iocmd->status);
  1290. if (iocmd->status != BFA_STATUS_SFP_NOT_READY)
  1291. goto out;
  1292. wait_for_completion(&fcomp.comp);
  1293. iocmd->status = fcomp.status;
  1294. out:
  1295. return 0;
  1296. }
  1297. int
  1298. bfad_iocmd_flash_get_attr(struct bfad_s *bfad, void *cmd)
  1299. {
  1300. struct bfa_bsg_flash_attr_s *iocmd =
  1301. (struct bfa_bsg_flash_attr_s *)cmd;
  1302. struct bfad_hal_comp fcomp;
  1303. unsigned long flags;
  1304. init_completion(&fcomp.comp);
  1305. spin_lock_irqsave(&bfad->bfad_lock, flags);
  1306. iocmd->status = bfa_flash_get_attr(BFA_FLASH(&bfad->bfa), &iocmd->attr,
  1307. bfad_hcb_comp, &fcomp);
  1308. spin_unlock_irqrestore(&bfad->bfad_lock, flags);
  1309. if (iocmd->status != BFA_STATUS_OK)
  1310. goto out;
  1311. wait_for_completion(&fcomp.comp);
  1312. iocmd->status = fcomp.status;
  1313. out:
  1314. return 0;
  1315. }
  1316. int
  1317. bfad_iocmd_flash_erase_part(struct bfad_s *bfad, void *cmd)
  1318. {
  1319. struct bfa_bsg_flash_s *iocmd = (struct bfa_bsg_flash_s *)cmd;
  1320. struct bfad_hal_comp fcomp;
  1321. unsigned long flags;
  1322. init_completion(&fcomp.comp);
  1323. spin_lock_irqsave(&bfad->bfad_lock, flags);
  1324. iocmd->status = bfa_flash_erase_part(BFA_FLASH(&bfad->bfa), iocmd->type,
  1325. iocmd->instance, bfad_hcb_comp, &fcomp);
  1326. spin_unlock_irqrestore(&bfad->bfad_lock, flags);
  1327. if (iocmd->status != BFA_STATUS_OK)
  1328. goto out;
  1329. wait_for_completion(&fcomp.comp);
  1330. iocmd->status = fcomp.status;
  1331. out:
  1332. return 0;
  1333. }
  1334. int
  1335. bfad_iocmd_flash_update_part(struct bfad_s *bfad, void *cmd,
  1336. unsigned int payload_len)
  1337. {
  1338. struct bfa_bsg_flash_s *iocmd = (struct bfa_bsg_flash_s *)cmd;
  1339. void *iocmd_bufptr;
  1340. struct bfad_hal_comp fcomp;
  1341. unsigned long flags;
  1342. if (bfad_chk_iocmd_sz(payload_len,
  1343. sizeof(struct bfa_bsg_flash_s),
  1344. iocmd->bufsz) != BFA_STATUS_OK) {
  1345. iocmd->status = BFA_STATUS_VERSION_FAIL;
  1346. return 0;
  1347. }
  1348. iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_flash_s);
  1349. init_completion(&fcomp.comp);
  1350. spin_lock_irqsave(&bfad->bfad_lock, flags);
  1351. iocmd->status = bfa_flash_update_part(BFA_FLASH(&bfad->bfa),
  1352. iocmd->type, iocmd->instance, iocmd_bufptr,
  1353. iocmd->bufsz, 0, bfad_hcb_comp, &fcomp);
  1354. spin_unlock_irqrestore(&bfad->bfad_lock, flags);
  1355. if (iocmd->status != BFA_STATUS_OK)
  1356. goto out;
  1357. wait_for_completion(&fcomp.comp);
  1358. iocmd->status = fcomp.status;
  1359. out:
  1360. return 0;
  1361. }
  1362. int
  1363. bfad_iocmd_flash_read_part(struct bfad_s *bfad, void *cmd,
  1364. unsigned int payload_len)
  1365. {
  1366. struct bfa_bsg_flash_s *iocmd = (struct bfa_bsg_flash_s *)cmd;
  1367. struct bfad_hal_comp fcomp;
  1368. void *iocmd_bufptr;
  1369. unsigned long flags;
  1370. if (bfad_chk_iocmd_sz(payload_len,
  1371. sizeof(struct bfa_bsg_flash_s),
  1372. iocmd->bufsz) != BFA_STATUS_OK) {
  1373. iocmd->status = BFA_STATUS_VERSION_FAIL;
  1374. return 0;
  1375. }
  1376. iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_flash_s);
  1377. init_completion(&fcomp.comp);
  1378. spin_lock_irqsave(&bfad->bfad_lock, flags);
  1379. iocmd->status = bfa_flash_read_part(BFA_FLASH(&bfad->bfa), iocmd->type,
  1380. iocmd->instance, iocmd_bufptr, iocmd->bufsz, 0,
  1381. bfad_hcb_comp, &fcomp);
  1382. spin_unlock_irqrestore(&bfad->bfad_lock, flags);
  1383. if (iocmd->status != BFA_STATUS_OK)
  1384. goto out;
  1385. wait_for_completion(&fcomp.comp);
  1386. iocmd->status = fcomp.status;
  1387. out:
  1388. return 0;
  1389. }
  1390. int
  1391. bfad_iocmd_diag_temp(struct bfad_s *bfad, void *cmd)
  1392. {
  1393. struct bfa_bsg_diag_get_temp_s *iocmd =
  1394. (struct bfa_bsg_diag_get_temp_s *)cmd;
  1395. struct bfad_hal_comp fcomp;
  1396. unsigned long flags;
  1397. init_completion(&fcomp.comp);
  1398. spin_lock_irqsave(&bfad->bfad_lock, flags);
  1399. iocmd->status = bfa_diag_tsensor_query(BFA_DIAG_MOD(&bfad->bfa),
  1400. &iocmd->result, bfad_hcb_comp, &fcomp);
  1401. spin_unlock_irqrestore(&bfad->bfad_lock, flags);
  1402. bfa_trc(bfad, iocmd->status);
  1403. if (iocmd->status != BFA_STATUS_OK)
  1404. goto out;
  1405. wait_for_completion(&fcomp.comp);
  1406. iocmd->status = fcomp.status;
  1407. out:
  1408. return 0;
  1409. }
  1410. int
  1411. bfad_iocmd_diag_memtest(struct bfad_s *bfad, void *cmd)
  1412. {
  1413. struct bfa_bsg_diag_memtest_s *iocmd =
  1414. (struct bfa_bsg_diag_memtest_s *)cmd;
  1415. struct bfad_hal_comp fcomp;
  1416. unsigned long flags;
  1417. init_completion(&fcomp.comp);
  1418. spin_lock_irqsave(&bfad->bfad_lock, flags);
  1419. iocmd->status = bfa_diag_memtest(BFA_DIAG_MOD(&bfad->bfa),
  1420. &iocmd->memtest, iocmd->pat,
  1421. &iocmd->result, bfad_hcb_comp, &fcomp);
  1422. spin_unlock_irqrestore(&bfad->bfad_lock, flags);
  1423. bfa_trc(bfad, iocmd->status);
  1424. if (iocmd->status != BFA_STATUS_OK)
  1425. goto out;
  1426. wait_for_completion(&fcomp.comp);
  1427. iocmd->status = fcomp.status;
  1428. out:
  1429. return 0;
  1430. }
  1431. int
  1432. bfad_iocmd_diag_loopback(struct bfad_s *bfad, void *cmd)
  1433. {
  1434. struct bfa_bsg_diag_loopback_s *iocmd =
  1435. (struct bfa_bsg_diag_loopback_s *)cmd;
  1436. struct bfad_hal_comp fcomp;
  1437. unsigned long flags;
  1438. init_completion(&fcomp.comp);
  1439. spin_lock_irqsave(&bfad->bfad_lock, flags);
  1440. iocmd->status = bfa_fcdiag_loopback(&bfad->bfa, iocmd->opmode,
  1441. iocmd->speed, iocmd->lpcnt, iocmd->pat,
  1442. &iocmd->result, bfad_hcb_comp, &fcomp);
  1443. spin_unlock_irqrestore(&bfad->bfad_lock, flags);
  1444. bfa_trc(bfad, iocmd->status);
  1445. if (iocmd->status != BFA_STATUS_OK)
  1446. goto out;
  1447. wait_for_completion(&fcomp.comp);
  1448. iocmd->status = fcomp.status;
  1449. out:
  1450. return 0;
  1451. }
  1452. int
  1453. bfad_iocmd_diag_fwping(struct bfad_s *bfad, void *cmd)
  1454. {
  1455. struct bfa_bsg_diag_fwping_s *iocmd =
  1456. (struct bfa_bsg_diag_fwping_s *)cmd;
  1457. struct bfad_hal_comp fcomp;
  1458. unsigned long flags;
  1459. init_completion(&fcomp.comp);
  1460. spin_lock_irqsave(&bfad->bfad_lock, flags);
  1461. iocmd->status = bfa_diag_fwping(BFA_DIAG_MOD(&bfad->bfa), iocmd->cnt,
  1462. iocmd->pattern, &iocmd->result,
  1463. bfad_hcb_comp, &fcomp);
  1464. spin_unlock_irqrestore(&bfad->bfad_lock, flags);
  1465. bfa_trc(bfad, iocmd->status);
  1466. if (iocmd->status != BFA_STATUS_OK)
  1467. goto out;
  1468. bfa_trc(bfad, 0x77771);
  1469. wait_for_completion(&fcomp.comp);
  1470. iocmd->status = fcomp.status;
  1471. out:
  1472. return 0;
  1473. }
  1474. int
  1475. bfad_iocmd_diag_queuetest(struct bfad_s *bfad, void *cmd)
  1476. {
  1477. struct bfa_bsg_diag_qtest_s *iocmd = (struct bfa_bsg_diag_qtest_s *)cmd;
  1478. struct bfad_hal_comp fcomp;
  1479. unsigned long flags;
  1480. init_completion(&fcomp.comp);
  1481. spin_lock_irqsave(&bfad->bfad_lock, flags);
  1482. iocmd->status = bfa_fcdiag_queuetest(&bfad->bfa, iocmd->force,
  1483. iocmd->queue, &iocmd->result,
  1484. bfad_hcb_comp, &fcomp);
  1485. spin_unlock_irqrestore(&bfad->bfad_lock, flags);
  1486. if (iocmd->status != BFA_STATUS_OK)
  1487. goto out;
  1488. wait_for_completion(&fcomp.comp);
  1489. iocmd->status = fcomp.status;
  1490. out:
  1491. return 0;
  1492. }
  1493. int
  1494. bfad_iocmd_diag_sfp(struct bfad_s *bfad, void *cmd)
  1495. {
  1496. struct bfa_bsg_sfp_show_s *iocmd =
  1497. (struct bfa_bsg_sfp_show_s *)cmd;
  1498. struct bfad_hal_comp fcomp;
  1499. unsigned long flags;
  1500. init_completion(&fcomp.comp);
  1501. spin_lock_irqsave(&bfad->bfad_lock, flags);
  1502. iocmd->status = bfa_sfp_show(BFA_SFP_MOD(&bfad->bfa), &iocmd->sfp,
  1503. bfad_hcb_comp, &fcomp);
  1504. spin_unlock_irqrestore(&bfad->bfad_lock, flags);
  1505. bfa_trc(bfad, iocmd->status);
  1506. if (iocmd->status != BFA_STATUS_OK)
  1507. goto out;
  1508. wait_for_completion(&fcomp.comp);
  1509. iocmd->status = fcomp.status;
  1510. bfa_trc(bfad, iocmd->status);
  1511. out:
  1512. return 0;
  1513. }
  1514. int
  1515. bfad_iocmd_diag_led(struct bfad_s *bfad, void *cmd)
  1516. {
  1517. struct bfa_bsg_diag_led_s *iocmd = (struct bfa_bsg_diag_led_s *)cmd;
  1518. unsigned long flags;
  1519. spin_lock_irqsave(&bfad->bfad_lock, flags);
  1520. iocmd->status = bfa_diag_ledtest(BFA_DIAG_MOD(&bfad->bfa),
  1521. &iocmd->ledtest);
  1522. spin_unlock_irqrestore(&bfad->bfad_lock, flags);
  1523. return 0;
  1524. }
  1525. int
  1526. bfad_iocmd_diag_beacon_lport(struct bfad_s *bfad, void *cmd)
  1527. {
  1528. struct bfa_bsg_diag_beacon_s *iocmd =
  1529. (struct bfa_bsg_diag_beacon_s *)cmd;
  1530. unsigned long flags;
  1531. spin_lock_irqsave(&bfad->bfad_lock, flags);
  1532. iocmd->status = bfa_diag_beacon_port(BFA_DIAG_MOD(&bfad->bfa),
  1533. iocmd->beacon, iocmd->link_e2e_beacon,
  1534. iocmd->second);
  1535. spin_unlock_irqrestore(&bfad->bfad_lock, flags);
  1536. return 0;
  1537. }
  1538. int
  1539. bfad_iocmd_diag_lb_stat(struct bfad_s *bfad, void *cmd)
  1540. {
  1541. struct bfa_bsg_diag_lb_stat_s *iocmd =
  1542. (struct bfa_bsg_diag_lb_stat_s *)cmd;
  1543. unsigned long flags;
  1544. spin_lock_irqsave(&bfad->bfad_lock, flags);
  1545. iocmd->status = bfa_fcdiag_lb_is_running(&bfad->bfa);
  1546. spin_unlock_irqrestore(&bfad->bfad_lock, flags);
  1547. bfa_trc(bfad, iocmd->status);
  1548. return 0;
  1549. }
  1550. int
  1551. bfad_iocmd_diag_dport_enable(struct bfad_s *bfad, void *pcmd)
  1552. {
  1553. struct bfa_bsg_dport_enable_s *iocmd =
  1554. (struct bfa_bsg_dport_enable_s *)pcmd;
  1555. unsigned long flags;
  1556. struct bfad_hal_comp fcomp;
  1557. init_completion(&fcomp.comp);
  1558. spin_lock_irqsave(&bfad->bfad_lock, flags);
  1559. iocmd->status = bfa_dport_enable(&bfad->bfa, iocmd->lpcnt,
  1560. iocmd->pat, bfad_hcb_comp, &fcomp);
  1561. spin_unlock_irqrestore(&bfad->bfad_lock, flags);
  1562. if (iocmd->status != BFA_STATUS_OK)
  1563. bfa_trc(bfad, iocmd->status);
  1564. else {
  1565. wait_for_completion(&fcomp.comp);
  1566. iocmd->status = fcomp.status;
  1567. }
  1568. return 0;
  1569. }
  1570. int
  1571. bfad_iocmd_diag_dport_disable(struct bfad_s *bfad, void *pcmd)
  1572. {
  1573. struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd;
  1574. unsigned long flags;
  1575. struct bfad_hal_comp fcomp;
  1576. init_completion(&fcomp.comp);
  1577. spin_lock_irqsave(&bfad->bfad_lock, flags);
  1578. iocmd->status = bfa_dport_disable(&bfad->bfa, bfad_hcb_comp, &fcomp);
  1579. spin_unlock_irqrestore(&bfad->bfad_lock, flags);
  1580. if (iocmd->status != BFA_STATUS_OK)
  1581. bfa_trc(bfad, iocmd->status);
  1582. else {
  1583. wait_for_completion(&fcomp.comp);
  1584. iocmd->status = fcomp.status;
  1585. }
  1586. return 0;
  1587. }
  1588. int
  1589. bfad_iocmd_diag_dport_start(struct bfad_s *bfad, void *pcmd)
  1590. {
  1591. struct bfa_bsg_dport_enable_s *iocmd =
  1592. (struct bfa_bsg_dport_enable_s *)pcmd;
  1593. unsigned long flags;
  1594. struct bfad_hal_comp fcomp;
  1595. init_completion(&fcomp.comp);
  1596. spin_lock_irqsave(&bfad->bfad_lock, flags);
  1597. iocmd->status = bfa_dport_start(&bfad->bfa, iocmd->lpcnt,
  1598. iocmd->pat, bfad_hcb_comp,
  1599. &fcomp);
  1600. spin_unlock_irqrestore(&bfad->bfad_lock, flags);
  1601. if (iocmd->status != BFA_STATUS_OK) {
  1602. bfa_trc(bfad, iocmd->status);
  1603. } else {
  1604. wait_for_completion(&fcomp.comp);
  1605. iocmd->status = fcomp.status;
  1606. }
  1607. return 0;
  1608. }
  1609. int
  1610. bfad_iocmd_diag_dport_show(struct bfad_s *bfad, void *pcmd)
  1611. {
  1612. struct bfa_bsg_diag_dport_show_s *iocmd =
  1613. (struct bfa_bsg_diag_dport_show_s *)pcmd;
  1614. unsigned long flags;
  1615. spin_lock_irqsave(&bfad->bfad_lock, flags);
  1616. iocmd->status = bfa_dport_show(&bfad->bfa, &iocmd->result);
  1617. spin_unlock_irqrestore(&bfad->bfad_lock, flags);
  1618. return 0;
  1619. }
  1620. int
  1621. bfad_iocmd_phy_get_attr(struct bfad_s *bfad, void *cmd)
  1622. {
  1623. struct bfa_bsg_phy_attr_s *iocmd =
  1624. (struct bfa_bsg_phy_attr_s *)cmd;
  1625. struct bfad_hal_comp fcomp;
  1626. unsigned long flags;
  1627. init_completion(&fcomp.comp);
  1628. spin_lock_irqsave(&bfad->bfad_lock, flags);
  1629. iocmd->status = bfa_phy_get_attr(BFA_PHY(&bfad->bfa), iocmd->instance,
  1630. &iocmd->attr, bfad_hcb_comp, &fcomp);
  1631. spin_unlock_irqrestore(&bfad->bfad_lock, flags);
  1632. if (iocmd->status != BFA_STATUS_OK)
  1633. goto out;
  1634. wait_for_completion(&fcomp.comp);
  1635. iocmd->status = fcomp.status;
  1636. out:
  1637. return 0;
  1638. }
  1639. int
  1640. bfad_iocmd_phy_get_stats(struct bfad_s *bfad, void *cmd)
  1641. {
  1642. struct bfa_bsg_phy_stats_s *iocmd =
  1643. (struct bfa_bsg_phy_stats_s *)cmd;
  1644. struct bfad_hal_comp fcomp;
  1645. unsigned long flags;
  1646. init_completion(&fcomp.comp);
  1647. spin_lock_irqsave(&bfad->bfad_lock, flags);
  1648. iocmd->status = bfa_phy_get_stats(BFA_PHY(&bfad->bfa), iocmd->instance,
  1649. &iocmd->stats, bfad_hcb_comp, &fcomp);
  1650. spin_unlock_irqrestore(&bfad->bfad_lock, flags);
  1651. if (iocmd->status != BFA_STATUS_OK)
  1652. goto out;
  1653. wait_for_completion(&fcomp.comp);
  1654. iocmd->status = fcomp.status;
  1655. out:
  1656. return 0;
  1657. }
  1658. int
  1659. bfad_iocmd_phy_read(struct bfad_s *bfad, void *cmd, unsigned int payload_len)
  1660. {
  1661. struct bfa_bsg_phy_s *iocmd = (struct bfa_bsg_phy_s *)cmd;
  1662. struct bfad_hal_comp fcomp;
  1663. void *iocmd_bufptr;
  1664. unsigned long flags;
  1665. if (bfad_chk_iocmd_sz(payload_len,
  1666. sizeof(struct bfa_bsg_phy_s),
  1667. iocmd->bufsz) != BFA_STATUS_OK) {
  1668. iocmd->status = BFA_STATUS_VERSION_FAIL;
  1669. return 0;
  1670. }
  1671. iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_phy_s);
  1672. init_completion(&fcomp.comp);
  1673. spin_lock_irqsave(&bfad->bfad_lock, flags);
  1674. iocmd->status = bfa_phy_read(BFA_PHY(&bfad->bfa),
  1675. iocmd->instance, iocmd_bufptr, iocmd->bufsz,
  1676. 0, bfad_hcb_comp, &fcomp);
  1677. spin_unlock_irqrestore(&bfad->bfad_lock, flags);
  1678. if (iocmd->status != BFA_STATUS_OK)
  1679. goto out;
  1680. wait_for_completion(&fcomp.comp);
  1681. iocmd->status = fcomp.status;
  1682. if (iocmd->status != BFA_STATUS_OK)
  1683. goto out;
  1684. out:
  1685. return 0;
  1686. }
  1687. int
  1688. bfad_iocmd_vhba_query(struct bfad_s *bfad, void *cmd)
  1689. {
  1690. struct bfa_bsg_vhba_attr_s *iocmd =
  1691. (struct bfa_bsg_vhba_attr_s *)cmd;
  1692. struct bfa_vhba_attr_s *attr = &iocmd->attr;
  1693. unsigned long flags;
  1694. spin_lock_irqsave(&bfad->bfad_lock, flags);
  1695. attr->pwwn = bfad->bfa.ioc.attr->pwwn;
  1696. attr->nwwn = bfad->bfa.ioc.attr->nwwn;
  1697. attr->plog_enabled = (bfa_boolean_t)bfad->bfa.plog->plog_enabled;
  1698. attr->io_profile = bfa_fcpim_get_io_profile(&bfad->bfa);
  1699. attr->path_tov = bfa_fcpim_path_tov_get(&bfad->bfa);
  1700. iocmd->status = BFA_STATUS_OK;
  1701. spin_unlock_irqrestore(&bfad->bfad_lock, flags);
  1702. return 0;
  1703. }
  1704. int
  1705. bfad_iocmd_phy_update(struct bfad_s *bfad, void *cmd, unsigned int payload_len)
  1706. {
  1707. struct bfa_bsg_phy_s *iocmd = (struct bfa_bsg_phy_s *)cmd;
  1708. void *iocmd_bufptr;
  1709. struct bfad_hal_comp fcomp;
  1710. unsigned long flags;
  1711. if (bfad_chk_iocmd_sz(payload_len,
  1712. sizeof(struct bfa_bsg_phy_s),
  1713. iocmd->bufsz) != BFA_STATUS_OK) {
  1714. iocmd->status = BFA_STATUS_VERSION_FAIL;
  1715. return 0;
  1716. }
  1717. iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_phy_s);
  1718. init_completion(&fcomp.comp);
  1719. spin_lock_irqsave(&bfad->bfad_lock, flags);
  1720. iocmd->status = bfa_phy_update(BFA_PHY(&bfad->bfa),
  1721. iocmd->instance, iocmd_bufptr, iocmd->bufsz,
  1722. 0, bfad_hcb_comp, &fcomp);
  1723. spin_unlock_irqrestore(&bfad->bfad_lock, flags);
  1724. if (iocmd->status != BFA_STATUS_OK)
  1725. goto out;
  1726. wait_for_completion(&fcomp.comp);
  1727. iocmd->status = fcomp.status;
  1728. out:
  1729. return 0;
  1730. }
  1731. int
  1732. bfad_iocmd_porglog_get(struct bfad_s *bfad, void *cmd)
  1733. {
  1734. struct bfa_bsg_debug_s *iocmd = (struct bfa_bsg_debug_s *)cmd;
  1735. void *iocmd_bufptr;
  1736. if (iocmd->bufsz < sizeof(struct bfa_plog_s)) {
  1737. bfa_trc(bfad, sizeof(struct bfa_plog_s));
  1738. iocmd->status = BFA_STATUS_EINVAL;
  1739. goto out;
  1740. }
  1741. iocmd->status = BFA_STATUS_OK;
  1742. iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_debug_s);
  1743. memcpy(iocmd_bufptr, (u8 *) &bfad->plog_buf, sizeof(struct bfa_plog_s));
  1744. out:
  1745. return 0;
  1746. }
  1747. #define BFA_DEBUG_FW_CORE_CHUNK_SZ 0x4000U /* 16K chunks for FW dump */
  1748. int
  1749. bfad_iocmd_debug_fw_core(struct bfad_s *bfad, void *cmd,
  1750. unsigned int payload_len)
  1751. {
  1752. struct bfa_bsg_debug_s *iocmd = (struct bfa_bsg_debug_s *)cmd;
  1753. void *iocmd_bufptr;
  1754. unsigned long flags;
  1755. u32 offset;
  1756. if (bfad_chk_iocmd_sz(payload_len, sizeof(struct bfa_bsg_debug_s),
  1757. BFA_DEBUG_FW_CORE_CHUNK_SZ) != BFA_STATUS_OK) {
  1758. iocmd->status = BFA_STATUS_VERSION_FAIL;
  1759. return 0;
  1760. }
  1761. if (iocmd->bufsz < BFA_DEBUG_FW_CORE_CHUNK_SZ ||
  1762. !IS_ALIGNED(iocmd->bufsz, sizeof(u16)) ||
  1763. !IS_ALIGNED(iocmd->offset, sizeof(u32))) {
  1764. bfa_trc(bfad, BFA_DEBUG_FW_CORE_CHUNK_SZ);
  1765. iocmd->status = BFA_STATUS_EINVAL;
  1766. goto out;
  1767. }
  1768. iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_debug_s);
  1769. spin_lock_irqsave(&bfad->bfad_lock, flags);
  1770. offset = iocmd->offset;
  1771. iocmd->status = bfa_ioc_debug_fwcore(&bfad->bfa.ioc, iocmd_bufptr,
  1772. &offset, &iocmd->bufsz);
  1773. iocmd->offset = offset;
  1774. spin_unlock_irqrestore(&bfad->bfad_lock, flags);
  1775. out:
  1776. return 0;
  1777. }
  1778. int
  1779. bfad_iocmd_debug_ctl(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
  1780. {
  1781. struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
  1782. unsigned long flags;
  1783. if (v_cmd == IOCMD_DEBUG_FW_STATE_CLR) {
  1784. spin_lock_irqsave(&bfad->bfad_lock, flags);
  1785. bfad->bfa.ioc.dbg_fwsave_once = BFA_TRUE;
  1786. spin_unlock_irqrestore(&bfad->bfad_lock, flags);
  1787. } else if (v_cmd == IOCMD_DEBUG_PORTLOG_CLR)
  1788. bfad->plog_buf.head = bfad->plog_buf.tail = 0;
  1789. else if (v_cmd == IOCMD_DEBUG_START_DTRC)
  1790. bfa_trc_init(bfad->trcmod);
  1791. else if (v_cmd == IOCMD_DEBUG_STOP_DTRC)
  1792. bfa_trc_stop(bfad->trcmod);
  1793. iocmd->status = BFA_STATUS_OK;
  1794. return 0;
  1795. }
  1796. int
  1797. bfad_iocmd_porglog_ctl(struct bfad_s *bfad, void *cmd)
  1798. {
  1799. struct bfa_bsg_portlogctl_s *iocmd = (struct bfa_bsg_portlogctl_s *)cmd;
  1800. if (iocmd->ctl == BFA_TRUE)
  1801. bfad->plog_buf.plog_enabled = 1;
  1802. else
  1803. bfad->plog_buf.plog_enabled = 0;
  1804. iocmd->status = BFA_STATUS_OK;
  1805. return 0;
  1806. }
  1807. int
  1808. bfad_iocmd_fcpim_cfg_profile(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
  1809. {
  1810. struct bfa_bsg_fcpim_profile_s *iocmd =
  1811. (struct bfa_bsg_fcpim_profile_s *)cmd;
  1812. struct timeval tv;
  1813. unsigned long flags;
  1814. do_gettimeofday(&tv);
  1815. spin_lock_irqsave(&bfad->bfad_lock, flags);
  1816. if (v_cmd == IOCMD_FCPIM_PROFILE_ON)
  1817. iocmd->status = bfa_fcpim_profile_on(&bfad->bfa, tv.tv_sec);
  1818. else if (v_cmd == IOCMD_FCPIM_PROFILE_OFF)
  1819. iocmd->status = bfa_fcpim_profile_off(&bfad->bfa);
  1820. spin_unlock_irqrestore(&bfad->bfad_lock, flags);
  1821. return 0;
  1822. }
  1823. static int
  1824. bfad_iocmd_itnim_get_ioprofile(struct bfad_s *bfad, void *cmd)
  1825. {
  1826. struct bfa_bsg_itnim_ioprofile_s *iocmd =
  1827. (struct bfa_bsg_itnim_ioprofile_s *)cmd;
  1828. struct bfa_fcs_lport_s *fcs_port;
  1829. struct bfa_fcs_itnim_s *itnim;
  1830. unsigned long flags;
  1831. spin_lock_irqsave(&bfad->bfad_lock, flags);
  1832. fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
  1833. iocmd->vf_id, iocmd->lpwwn);
  1834. if (!fcs_port)
  1835. iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
  1836. else {
  1837. itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn);
  1838. if (itnim == NULL)
  1839. iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
  1840. else
  1841. iocmd->status = bfa_itnim_get_ioprofile(
  1842. bfa_fcs_itnim_get_halitn(itnim),
  1843. &iocmd->ioprofile);
  1844. }
  1845. spin_unlock_irqrestore(&bfad->bfad_lock, flags);
  1846. return 0;
  1847. }
  1848. int
  1849. bfad_iocmd_fcport_get_stats(struct bfad_s *bfad, void *cmd)
  1850. {
  1851. struct bfa_bsg_fcport_stats_s *iocmd =
  1852. (struct bfa_bsg_fcport_stats_s *)cmd;
  1853. struct bfad_hal_comp fcomp;
  1854. unsigned long flags;
  1855. struct bfa_cb_pending_q_s cb_qe;
  1856. init_completion(&fcomp.comp);
  1857. bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp,
  1858. &fcomp, &iocmd->stats);
  1859. spin_lock_irqsave(&bfad->bfad_lock, flags);
  1860. iocmd->status = bfa_fcport_get_stats(&bfad->bfa, &cb_qe);
  1861. spin_unlock_irqrestore(&bfad->bfad_lock, flags);
  1862. if (iocmd->status != BFA_STATUS_OK) {
  1863. bfa_trc(bfad, iocmd->status);
  1864. goto out;
  1865. }
  1866. wait_for_completion(&fcomp.comp);
  1867. iocmd->status = fcomp.status;
  1868. out:
  1869. return 0;
  1870. }
  1871. int
  1872. bfad_iocmd_fcport_reset_stats(struct bfad_s *bfad, void *cmd)
  1873. {
  1874. struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
  1875. struct bfad_hal_comp fcomp;
  1876. unsigned long flags;
  1877. struct bfa_cb_pending_q_s cb_qe;
  1878. init_completion(&fcomp.comp);
  1879. bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp, &fcomp, NULL);
  1880. spin_lock_irqsave(&bfad->bfad_lock, flags);
  1881. iocmd->status = bfa_fcport_clear_stats(&bfad->bfa, &cb_qe);
  1882. spin_unlock_irqrestore(&bfad->bfad_lock, flags);
  1883. if (iocmd->status != BFA_STATUS_OK) {
  1884. bfa_trc(bfad, iocmd->status);
  1885. goto out;
  1886. }
  1887. wait_for_completion(&fcomp.comp);
  1888. iocmd->status = fcomp.status;
  1889. out:
  1890. return 0;
  1891. }
  1892. int
  1893. bfad_iocmd_boot_cfg(struct bfad_s *bfad, void *cmd)
  1894. {
  1895. struct bfa_bsg_boot_s *iocmd = (struct bfa_bsg_boot_s *)cmd;
  1896. struct bfad_hal_comp fcomp;
  1897. unsigned long flags;
  1898. init_completion(&fcomp.comp);
  1899. spin_lock_irqsave(&bfad->bfad_lock, flags);
  1900. iocmd->status = bfa_flash_update_part(BFA_FLASH(&bfad->bfa),
  1901. BFA_FLASH_PART_BOOT, bfad->bfa.ioc.port_id,
  1902. &iocmd->cfg, sizeof(struct bfa_boot_cfg_s), 0,
  1903. bfad_hcb_comp, &fcomp);
  1904. spin_unlock_irqrestore(&bfad->bfad_lock, flags);
  1905. if (iocmd->status != BFA_STATUS_OK)
  1906. goto out;
  1907. wait_for_completion(&fcomp.comp);
  1908. iocmd->status = fcomp.status;
  1909. out:
  1910. return 0;
  1911. }
  1912. int
  1913. bfad_iocmd_boot_query(struct bfad_s *bfad, void *cmd)
  1914. {
  1915. struct bfa_bsg_boot_s *iocmd = (struct bfa_bsg_boot_s *)cmd;
  1916. struct bfad_hal_comp fcomp;
  1917. unsigned long flags;
  1918. init_completion(&fcomp.comp);
  1919. spin_lock_irqsave(&bfad->bfad_lock, flags);
  1920. iocmd->status = bfa_flash_read_part(BFA_FLASH(&bfad->bfa),
  1921. BFA_FLASH_PART_BOOT, bfad->bfa.ioc.port_id,
  1922. &iocmd->cfg, sizeof(struct bfa_boot_cfg_s), 0,
  1923. bfad_hcb_comp, &fcomp);
  1924. spin_unlock_irqrestore(&bfad->bfad_lock, flags);
  1925. if (iocmd->status != BFA_STATUS_OK)
  1926. goto out;
  1927. wait_for_completion(&fcomp.comp);
  1928. iocmd->status = fcomp.status;
  1929. out:
  1930. return 0;
  1931. }
  1932. int
  1933. bfad_iocmd_preboot_query(struct bfad_s *bfad, void *cmd)
  1934. {
  1935. struct bfa_bsg_preboot_s *iocmd = (struct bfa_bsg_preboot_s *)cmd;
  1936. struct bfi_iocfc_cfgrsp_s *cfgrsp = bfad->bfa.iocfc.cfgrsp;
  1937. struct bfa_boot_pbc_s *pbcfg = &iocmd->cfg;
  1938. unsigned long flags;
  1939. spin_lock_irqsave(&bfad->bfad_lock, flags);
  1940. pbcfg->enable = cfgrsp->pbc_cfg.boot_enabled;
  1941. pbcfg->nbluns = cfgrsp->pbc_cfg.nbluns;
  1942. pbcfg->speed = cfgrsp->pbc_cfg.port_speed;
  1943. memcpy(pbcfg->pblun, cfgrsp->pbc_cfg.blun, sizeof(pbcfg->pblun));
  1944. iocmd->status = BFA_STATUS_OK;
  1945. spin_unlock_irqrestore(&bfad->bfad_lock, flags);
  1946. return 0;
  1947. }
  1948. int
  1949. bfad_iocmd_ethboot_cfg(struct bfad_s *bfad, void *cmd)
  1950. {
  1951. struct bfa_bsg_ethboot_s *iocmd = (struct bfa_bsg_ethboot_s *)cmd;
  1952. struct bfad_hal_comp fcomp;
  1953. unsigned long flags;
  1954. init_completion(&fcomp.comp);
  1955. spin_lock_irqsave(&bfad->bfad_lock, flags);
  1956. iocmd->status = bfa_flash_update_part(BFA_FLASH(&bfad->bfa),
  1957. BFA_FLASH_PART_PXECFG,
  1958. bfad->bfa.ioc.port_id, &iocmd->cfg,
  1959. sizeof(struct bfa_ethboot_cfg_s), 0,
  1960. bfad_hcb_comp, &fcomp);
  1961. spin_unlock_irqrestore(&bfad->bfad_lock, flags);
  1962. if (iocmd->status != BFA_STATUS_OK)
  1963. goto out;
  1964. wait_for_completion(&fcomp.comp);
  1965. iocmd->status = fcomp.status;
  1966. out:
  1967. return 0;
  1968. }
  1969. int
  1970. bfad_iocmd_ethboot_query(struct bfad_s *bfad, void *cmd)
  1971. {
  1972. struct bfa_bsg_ethboot_s *iocmd = (struct bfa_bsg_ethboot_s *)cmd;
  1973. struct bfad_hal_comp fcomp;
  1974. unsigned long flags;
  1975. init_completion(&fcomp.comp);
  1976. spin_lock_irqsave(&bfad->bfad_lock, flags);
  1977. iocmd->status = bfa_flash_read_part(BFA_FLASH(&bfad->bfa),
  1978. BFA_FLASH_PART_PXECFG,
  1979. bfad->bfa.ioc.port_id, &iocmd->cfg,
  1980. sizeof(struct bfa_ethboot_cfg_s), 0,
  1981. bfad_hcb_comp, &fcomp);
  1982. spin_unlock_irqrestore(&bfad->bfad_lock, flags);
  1983. if (iocmd->status != BFA_STATUS_OK)
  1984. goto out;
  1985. wait_for_completion(&fcomp.comp);
  1986. iocmd->status = fcomp.status;
  1987. out:
  1988. return 0;
  1989. }
  1990. int
  1991. bfad_iocmd_cfg_trunk(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
  1992. {
  1993. struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
  1994. struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
  1995. struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
  1996. unsigned long flags;
  1997. spin_lock_irqsave(&bfad->bfad_lock, flags);
  1998. if (bfa_fcport_is_dport(&bfad->bfa))
  1999. return BFA_STATUS_DPORT_ERR;
  2000. if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) ||
  2001. (fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
  2002. iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
  2003. else {
  2004. if (v_cmd == IOCMD_TRUNK_ENABLE) {
  2005. trunk->attr.state = BFA_TRUNK_OFFLINE;
  2006. bfa_fcport_disable(&bfad->bfa);
  2007. fcport->cfg.trunked = BFA_TRUE;
  2008. } else if (v_cmd == IOCMD_TRUNK_DISABLE) {
  2009. trunk->attr.state = BFA_TRUNK_DISABLED;
  2010. bfa_fcport_disable(&bfad->bfa);
  2011. fcport->cfg.trunked = BFA_FALSE;
  2012. }
  2013. if (!bfa_fcport_is_disabled(&bfad->bfa))
  2014. bfa_fcport_enable(&bfad->bfa);
  2015. iocmd->status = BFA_STATUS_OK;
  2016. }
  2017. spin_unlock_irqrestore(&bfad->bfad_lock, flags);
  2018. return 0;
  2019. }
  2020. int
  2021. bfad_iocmd_trunk_get_attr(struct bfad_s *bfad, void *cmd)
  2022. {
  2023. struct bfa_bsg_trunk_attr_s *iocmd = (struct bfa_bsg_trunk_attr_s *)cmd;
  2024. struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
  2025. struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
  2026. unsigned long flags;
  2027. spin_lock_irqsave(&bfad->bfad_lock, flags);
  2028. if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) ||
  2029. (fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
  2030. iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
  2031. else {
  2032. memcpy((void *)&iocmd->attr, (void *)&trunk->attr,
  2033. sizeof(struct bfa_trunk_attr_s));
  2034. iocmd->attr.port_id = bfa_lps_get_base_pid(&bfad->bfa);
  2035. iocmd->status = BFA_STATUS_OK;
  2036. }
  2037. spin_unlock_irqrestore(&bfad->bfad_lock, flags);
  2038. return 0;
  2039. }
  2040. int
  2041. bfad_iocmd_qos(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
  2042. {
  2043. struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
  2044. struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
  2045. unsigned long flags;
  2046. spin_lock_irqsave(&bfad->bfad_lock, flags);
  2047. if (bfa_ioc_get_type(&bfad->bfa.ioc) == BFA_IOC_TYPE_FC) {
  2048. if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
  2049. (fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
  2050. iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
  2051. else {
  2052. if (v_cmd == IOCMD_QOS_ENABLE)
  2053. fcport->cfg.qos_enabled = BFA_TRUE;
  2054. else if (v_cmd == IOCMD_QOS_DISABLE) {
  2055. fcport->cfg.qos_enabled = BFA_FALSE;
  2056. fcport->cfg.qos_bw.high = BFA_QOS_BW_HIGH;
  2057. fcport->cfg.qos_bw.med = BFA_QOS_BW_MED;
  2058. fcport->cfg.qos_bw.low = BFA_QOS_BW_LOW;
  2059. }
  2060. }
  2061. }
  2062. spin_unlock_irqrestore(&bfad->bfad_lock, flags);
  2063. return 0;
  2064. }
  2065. int
  2066. bfad_iocmd_qos_get_attr(struct bfad_s *bfad, void *cmd)
  2067. {
  2068. struct bfa_bsg_qos_attr_s *iocmd = (struct bfa_bsg_qos_attr_s *)cmd;
  2069. struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
  2070. unsigned long flags;
  2071. spin_lock_irqsave(&bfad->bfad_lock, flags);
  2072. if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
  2073. (fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
  2074. iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
  2075. else {
  2076. iocmd->attr.state = fcport->qos_attr.state;
  2077. iocmd->attr.total_bb_cr =
  2078. be32_to_cpu(fcport->qos_attr.total_bb_cr);
  2079. iocmd->attr.qos_bw.high = fcport->cfg.qos_bw.high;
  2080. iocmd->attr.qos_bw.med = fcport->cfg.qos_bw.med;
  2081. iocmd->attr.qos_bw.low = fcport->cfg.qos_bw.low;
  2082. iocmd->attr.qos_bw_op = fcport->qos_attr.qos_bw_op;
  2083. iocmd->status = BFA_STATUS_OK;
  2084. }
  2085. spin_unlock_irqrestore(&bfad->bfad_lock, flags);
  2086. return 0;
  2087. }
  2088. int
  2089. bfad_iocmd_qos_get_vc_attr(struct bfad_s *bfad, void *cmd)
  2090. {
  2091. struct bfa_bsg_qos_vc_attr_s *iocmd =
  2092. (struct bfa_bsg_qos_vc_attr_s *)cmd;
  2093. struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
  2094. struct bfa_qos_vc_attr_s *bfa_vc_attr = &fcport->qos_vc_attr;
  2095. unsigned long flags;
  2096. u32 i = 0;
  2097. spin_lock_irqsave(&bfad->bfad_lock, flags);
  2098. iocmd->attr.total_vc_count = be16_to_cpu(bfa_vc_attr->total_vc_count);
  2099. iocmd->attr.shared_credit = be16_to_cpu(bfa_vc_attr->shared_credit);
  2100. iocmd->attr.elp_opmode_flags =
  2101. be32_to_cpu(bfa_vc_attr->elp_opmode_flags);
  2102. /* Individual VC info */
  2103. while (i < iocmd->attr.total_vc_count) {
  2104. iocmd->attr.vc_info[i].vc_credit =
  2105. bfa_vc_attr->vc_info[i].vc_credit;
  2106. iocmd->attr.vc_info[i].borrow_credit =
  2107. bfa_vc_attr->vc_info[i].borrow_credit;
  2108. iocmd->attr.vc_info[i].priority =
  2109. bfa_vc_attr->vc_info[i].priority;
  2110. i++;
  2111. }
  2112. spin_unlock_irqrestore(&bfad->bfad_lock, flags);
  2113. iocmd->status = BFA_STATUS_OK;
  2114. return 0;
  2115. }
  2116. int
  2117. bfad_iocmd_qos_get_stats(struct bfad_s *bfad, void *cmd)
  2118. {
  2119. struct bfa_bsg_fcport_stats_s *iocmd =
  2120. (struct bfa_bsg_fcport_stats_s *)cmd;
  2121. struct bfad_hal_comp fcomp;
  2122. unsigned long flags;
  2123. struct bfa_cb_pending_q_s cb_qe;
  2124. struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
  2125. init_completion(&fcomp.comp);
  2126. bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp,
  2127. &fcomp, &iocmd->stats);
  2128. spin_lock_irqsave(&bfad->bfad_lock, flags);
  2129. WARN_ON(!bfa_ioc_get_fcmode(&bfad->bfa.ioc));
  2130. if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
  2131. (fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
  2132. iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
  2133. else
  2134. iocmd->status = bfa_fcport_get_stats(&bfad->bfa, &cb_qe);
  2135. spin_unlock_irqrestore(&bfad->bfad_lock, flags);
  2136. if (iocmd->status != BFA_STATUS_OK) {
  2137. bfa_trc(bfad, iocmd->status);
  2138. goto out;
  2139. }
  2140. wait_for_completion(&fcomp.comp);
  2141. iocmd->status = fcomp.status;
  2142. out:
  2143. return 0;
  2144. }
  2145. int
  2146. bfad_iocmd_qos_reset_stats(struct bfad_s *bfad, void *cmd)
  2147. {
  2148. struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
  2149. struct bfad_hal_comp fcomp;
  2150. unsigned long flags;
  2151. struct bfa_cb_pending_q_s cb_qe;
  2152. struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
  2153. init_completion(&fcomp.comp);
  2154. bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp,
  2155. &fcomp, NULL);
  2156. spin_lock_irqsave(&bfad->bfad_lock, flags);
  2157. WARN_ON(!bfa_ioc_get_fcmode(&bfad->bfa.ioc));
  2158. if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
  2159. (fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
  2160. iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
  2161. else
  2162. iocmd->status = bfa_fcport_clear_stats(&bfad->bfa, &cb_qe);
  2163. spin_unlock_irqrestore(&bfad->bfad_lock, flags);
  2164. if (iocmd->status != BFA_STATUS_OK) {
  2165. bfa_trc(bfad, iocmd->status);
  2166. goto out;
  2167. }
  2168. wait_for_completion(&fcomp.comp);
  2169. iocmd->status = fcomp.status;
  2170. out:
  2171. return 0;
  2172. }
  2173. int
  2174. bfad_iocmd_vf_get_stats(struct bfad_s *bfad, void *cmd)
  2175. {
  2176. struct bfa_bsg_vf_stats_s *iocmd =
  2177. (struct bfa_bsg_vf_stats_s *)cmd;
  2178. struct bfa_fcs_fabric_s *fcs_vf;
  2179. unsigned long flags;
  2180. spin_lock_irqsave(&bfad->bfad_lock, flags);
  2181. fcs_vf = bfa_fcs_vf_lookup(&bfad->bfa_fcs, iocmd->vf_id);
  2182. if (fcs_vf == NULL) {
  2183. spin_unlock_irqrestore(&bfad->bfad_lock, flags);
  2184. iocmd->status = BFA_STATUS_UNKNOWN_VFID;
  2185. goto out;
  2186. }
  2187. memcpy((void *)&iocmd->stats, (void *)&fcs_vf->stats,
  2188. sizeof(struct bfa_vf_stats_s));
  2189. spin_unlock_irqrestore(&bfad->bfad_lock, flags);
  2190. iocmd->status = BFA_STATUS_OK;
  2191. out:
  2192. return 0;
  2193. }
  2194. int
  2195. bfad_iocmd_vf_clr_stats(struct bfad_s *bfad, void *cmd)
  2196. {
  2197. struct bfa_bsg_vf_reset_stats_s *iocmd =
  2198. (struct bfa_bsg_vf_reset_stats_s *)cmd;
  2199. struct bfa_fcs_fabric_s *fcs_vf;
  2200. unsigned long flags;
  2201. spin_lock_irqsave(&bfad->bfad_lock, flags);
  2202. fcs_vf = bfa_fcs_vf_lookup(&bfad->bfa_fcs, iocmd->vf_id);
  2203. if (fcs_vf == NULL) {
  2204. spin_unlock_irqrestore(&bfad->bfad_lock, flags);
  2205. iocmd->status = BFA_STATUS_UNKNOWN_VFID;
  2206. goto out;
  2207. }
  2208. memset((void *)&fcs_vf->stats, 0, sizeof(struct bfa_vf_stats_s));
  2209. spin_unlock_irqrestore(&bfad->bfad_lock, flags);
  2210. iocmd->status = BFA_STATUS_OK;
  2211. out:
  2212. return 0;
  2213. }
  2214. /* Function to reset the LUN SCAN mode */
  2215. static void
  2216. bfad_iocmd_lunmask_reset_lunscan_mode(struct bfad_s *bfad, int lunmask_cfg)
  2217. {
  2218. struct bfad_im_port_s *pport_im = bfad->pport.im_port;
  2219. struct bfad_vport_s *vport = NULL;
  2220. /* Set the scsi device LUN SCAN flags for base port */
  2221. bfad_reset_sdev_bflags(pport_im, lunmask_cfg);
  2222. /* Set the scsi device LUN SCAN flags for the vports */
  2223. list_for_each_entry(vport, &bfad->vport_list, list_entry)
  2224. bfad_reset_sdev_bflags(vport->drv_port.im_port, lunmask_cfg);
  2225. }
  2226. int
  2227. bfad_iocmd_lunmask(struct bfad_s *bfad, void *pcmd, unsigned int v_cmd)
  2228. {
  2229. struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd;
  2230. unsigned long flags;
  2231. spin_lock_irqsave(&bfad->bfad_lock, flags);
  2232. if (v_cmd == IOCMD_FCPIM_LUNMASK_ENABLE) {
  2233. iocmd->status = bfa_fcpim_lunmask_update(&bfad->bfa, BFA_TRUE);
  2234. /* Set the LUN Scanning mode to be Sequential scan */
  2235. if (iocmd->status == BFA_STATUS_OK)
  2236. bfad_iocmd_lunmask_reset_lunscan_mode(bfad, BFA_TRUE);
  2237. } else if (v_cmd == IOCMD_FCPIM_LUNMASK_DISABLE) {
  2238. iocmd->status = bfa_fcpim_lunmask_update(&bfad->bfa, BFA_FALSE);
  2239. /* Set the LUN Scanning mode to default REPORT_LUNS scan */
  2240. if (iocmd->status == BFA_STATUS_OK)
  2241. bfad_iocmd_lunmask_reset_lunscan_mode(bfad, BFA_FALSE);
  2242. } else if (v_cmd == IOCMD_FCPIM_LUNMASK_CLEAR)
  2243. iocmd->status = bfa_fcpim_lunmask_clear(&bfad->bfa);
  2244. spin_unlock_irqrestore(&bfad->bfad_lock, flags);
  2245. return 0;
  2246. }
  2247. int
  2248. bfad_iocmd_fcpim_lunmask_query(struct bfad_s *bfad, void *cmd)
  2249. {
  2250. struct bfa_bsg_fcpim_lunmask_query_s *iocmd =
  2251. (struct bfa_bsg_fcpim_lunmask_query_s *)cmd;
  2252. struct bfa_lunmask_cfg_s *lun_mask = &iocmd->lun_mask;
  2253. unsigned long flags;
  2254. spin_lock_irqsave(&bfad->bfad_lock, flags);
  2255. iocmd->status = bfa_fcpim_lunmask_query(&bfad->bfa, lun_mask);
  2256. spin_unlock_irqrestore(&bfad->bfad_lock, flags);
  2257. return 0;
  2258. }
  2259. int
  2260. bfad_iocmd_fcpim_cfg_lunmask(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
  2261. {
  2262. struct bfa_bsg_fcpim_lunmask_s *iocmd =
  2263. (struct bfa_bsg_fcpim_lunmask_s *)cmd;
  2264. unsigned long flags;
  2265. spin_lock_irqsave(&bfad->bfad_lock, flags);
  2266. if (v_cmd == IOCMD_FCPIM_LUNMASK_ADD)
  2267. iocmd->status = bfa_fcpim_lunmask_add(&bfad->bfa, iocmd->vf_id,
  2268. &iocmd->pwwn, iocmd->rpwwn, iocmd->lun);
  2269. else if (v_cmd == IOCMD_FCPIM_LUNMASK_DELETE)
  2270. iocmd->status = bfa_fcpim_lunmask_delete(&bfad->bfa,
  2271. iocmd->vf_id, &iocmd->pwwn,
  2272. iocmd->rpwwn, iocmd->lun);
  2273. spin_unlock_irqrestore(&bfad->bfad_lock, flags);
  2274. return 0;
  2275. }
  2276. int
  2277. bfad_iocmd_fcpim_throttle_query(struct bfad_s *bfad, void *cmd)
  2278. {
  2279. struct bfa_bsg_fcpim_throttle_s *iocmd =
  2280. (struct bfa_bsg_fcpim_throttle_s *)cmd;
  2281. unsigned long flags;
  2282. spin_lock_irqsave(&bfad->bfad_lock, flags);
  2283. iocmd->status = bfa_fcpim_throttle_get(&bfad->bfa,
  2284. (void *)&iocmd->throttle);
  2285. spin_unlock_irqrestore(&bfad->bfad_lock, flags);
  2286. return 0;
  2287. }
  2288. int
  2289. bfad_iocmd_fcpim_throttle_set(struct bfad_s *bfad, void *cmd)
  2290. {
  2291. struct bfa_bsg_fcpim_throttle_s *iocmd =
  2292. (struct bfa_bsg_fcpim_throttle_s *)cmd;
  2293. unsigned long flags;
  2294. spin_lock_irqsave(&bfad->bfad_lock, flags);
  2295. iocmd->status = bfa_fcpim_throttle_set(&bfad->bfa,
  2296. iocmd->throttle.cfg_value);
  2297. spin_unlock_irqrestore(&bfad->bfad_lock, flags);
  2298. return 0;
  2299. }
  2300. int
  2301. bfad_iocmd_tfru_read(struct bfad_s *bfad, void *cmd)
  2302. {
  2303. struct bfa_bsg_tfru_s *iocmd =
  2304. (struct bfa_bsg_tfru_s *)cmd;
  2305. struct bfad_hal_comp fcomp;
  2306. unsigned long flags = 0;
  2307. init_completion(&fcomp.comp);
  2308. spin_lock_irqsave(&bfad->bfad_lock, flags);
  2309. iocmd->status = bfa_tfru_read(BFA_FRU(&bfad->bfa),
  2310. &iocmd->data, iocmd->len, iocmd->offset,
  2311. bfad_hcb_comp, &fcomp);
  2312. spin_unlock_irqrestore(&bfad->bfad_lock, flags);
  2313. if (iocmd->status == BFA_STATUS_OK) {
  2314. wait_for_completion(&fcomp.comp);
  2315. iocmd->status = fcomp.status;
  2316. }
  2317. return 0;
  2318. }
  2319. int
  2320. bfad_iocmd_tfru_write(struct bfad_s *bfad, void *cmd)
  2321. {
  2322. struct bfa_bsg_tfru_s *iocmd =
  2323. (struct bfa_bsg_tfru_s *)cmd;
  2324. struct bfad_hal_comp fcomp;
  2325. unsigned long flags = 0;
  2326. init_completion(&fcomp.comp);
  2327. spin_lock_irqsave(&bfad->bfad_lock, flags);
  2328. iocmd->status = bfa_tfru_write(BFA_FRU(&bfad->bfa),
  2329. &iocmd->data, iocmd->len, iocmd->offset,
  2330. bfad_hcb_comp, &fcomp);
  2331. spin_unlock_irqrestore(&bfad->bfad_lock, flags);
  2332. if (iocmd->status == BFA_STATUS_OK) {
  2333. wait_for_completion(&fcomp.comp);
  2334. iocmd->status = fcomp.status;
  2335. }
  2336. return 0;
  2337. }
  2338. int
  2339. bfad_iocmd_fruvpd_read(struct bfad_s *bfad, void *cmd)
  2340. {
  2341. struct bfa_bsg_fruvpd_s *iocmd =
  2342. (struct bfa_bsg_fruvpd_s *)cmd;
  2343. struct bfad_hal_comp fcomp;
  2344. unsigned long flags = 0;
  2345. init_completion(&fcomp.comp);
  2346. spin_lock_irqsave(&bfad->bfad_lock, flags);
  2347. iocmd->status = bfa_fruvpd_read(BFA_FRU(&bfad->bfa),
  2348. &iocmd->data, iocmd->len, iocmd->offset,
  2349. bfad_hcb_comp, &fcomp);
  2350. spin_unlock_irqrestore(&bfad->bfad_lock, flags);
  2351. if (iocmd->status == BFA_STATUS_OK) {
  2352. wait_for_completion(&fcomp.comp);
  2353. iocmd->status = fcomp.status;
  2354. }
  2355. return 0;
  2356. }
  2357. int
  2358. bfad_iocmd_fruvpd_update(struct bfad_s *bfad, void *cmd)
  2359. {
  2360. struct bfa_bsg_fruvpd_s *iocmd =
  2361. (struct bfa_bsg_fruvpd_s *)cmd;
  2362. struct bfad_hal_comp fcomp;
  2363. unsigned long flags = 0;
  2364. init_completion(&fcomp.comp);
  2365. spin_lock_irqsave(&bfad->bfad_lock, flags);
  2366. iocmd->status = bfa_fruvpd_update(BFA_FRU(&bfad->bfa),
  2367. &iocmd->data, iocmd->len, iocmd->offset,
  2368. bfad_hcb_comp, &fcomp, iocmd->trfr_cmpl);
  2369. spin_unlock_irqrestore(&bfad->bfad_lock, flags);
  2370. if (iocmd->status == BFA_STATUS_OK) {
  2371. wait_for_completion(&fcomp.comp);
  2372. iocmd->status = fcomp.status;
  2373. }
  2374. return 0;
  2375. }
  2376. int
  2377. bfad_iocmd_fruvpd_get_max_size(struct bfad_s *bfad, void *cmd)
  2378. {
  2379. struct bfa_bsg_fruvpd_max_size_s *iocmd =
  2380. (struct bfa_bsg_fruvpd_max_size_s *)cmd;
  2381. unsigned long flags = 0;
  2382. spin_lock_irqsave(&bfad->bfad_lock, flags);
  2383. iocmd->status = bfa_fruvpd_get_max_size(BFA_FRU(&bfad->bfa),
  2384. &iocmd->max_size);
  2385. spin_unlock_irqrestore(&bfad->bfad_lock, flags);
  2386. return 0;
  2387. }
  2388. static int
  2389. bfad_iocmd_handler(struct bfad_s *bfad, unsigned int cmd, void *iocmd,
  2390. unsigned int payload_len)
  2391. {
  2392. int rc = -EINVAL;
  2393. switch (cmd) {
  2394. case IOCMD_IOC_ENABLE:
  2395. rc = bfad_iocmd_ioc_enable(bfad, iocmd);
  2396. break;
  2397. case IOCMD_IOC_DISABLE:
  2398. rc = bfad_iocmd_ioc_disable(bfad, iocmd);
  2399. break;
  2400. case IOCMD_IOC_GET_INFO:
  2401. rc = bfad_iocmd_ioc_get_info(bfad, iocmd);
  2402. break;
  2403. case IOCMD_IOC_GET_ATTR:
  2404. rc = bfad_iocmd_ioc_get_attr(bfad, iocmd);
  2405. break;
  2406. case IOCMD_IOC_GET_STATS:
  2407. rc = bfad_iocmd_ioc_get_stats(bfad, iocmd);
  2408. break;
  2409. case IOCMD_IOC_GET_FWSTATS:
  2410. rc = bfad_iocmd_ioc_get_fwstats(bfad, iocmd, payload_len);
  2411. break;
  2412. case IOCMD_IOC_RESET_STATS:
  2413. case IOCMD_IOC_RESET_FWSTATS:
  2414. rc = bfad_iocmd_ioc_reset_stats(bfad, iocmd, cmd);
  2415. break;
  2416. case IOCMD_IOC_SET_ADAPTER_NAME:
  2417. case IOCMD_IOC_SET_PORT_NAME:
  2418. rc = bfad_iocmd_ioc_set_name(bfad, iocmd, cmd);
  2419. break;
  2420. case IOCMD_IOCFC_GET_ATTR:
  2421. rc = bfad_iocmd_iocfc_get_attr(bfad, iocmd);
  2422. break;
  2423. case IOCMD_IOCFC_SET_INTR:
  2424. rc = bfad_iocmd_iocfc_set_intr(bfad, iocmd);
  2425. break;
  2426. case IOCMD_PORT_ENABLE:
  2427. rc = bfad_iocmd_port_enable(bfad, iocmd);
  2428. break;
  2429. case IOCMD_PORT_DISABLE:
  2430. rc = bfad_iocmd_port_disable(bfad, iocmd);
  2431. break;
  2432. case IOCMD_PORT_GET_ATTR:
  2433. rc = bfad_iocmd_port_get_attr(bfad, iocmd);
  2434. break;
  2435. case IOCMD_PORT_GET_STATS:
  2436. rc = bfad_iocmd_port_get_stats(bfad, iocmd, payload_len);
  2437. break;
  2438. case IOCMD_PORT_RESET_STATS:
  2439. rc = bfad_iocmd_port_reset_stats(bfad, iocmd);
  2440. break;
  2441. case IOCMD_PORT_CFG_TOPO:
  2442. case IOCMD_PORT_CFG_SPEED:
  2443. case IOCMD_PORT_CFG_ALPA:
  2444. case IOCMD_PORT_CLR_ALPA:
  2445. rc = bfad_iocmd_set_port_cfg(bfad, iocmd, cmd);
  2446. break;
  2447. case IOCMD_PORT_CFG_MAXFRSZ:
  2448. rc = bfad_iocmd_port_cfg_maxfrsize(bfad, iocmd);
  2449. break;
  2450. case IOCMD_PORT_BBCR_ENABLE:
  2451. case IOCMD_PORT_BBCR_DISABLE:
  2452. rc = bfad_iocmd_port_cfg_bbcr(bfad, cmd, iocmd);
  2453. break;
  2454. case IOCMD_PORT_BBCR_GET_ATTR:
  2455. rc = bfad_iocmd_port_get_bbcr_attr(bfad, iocmd);
  2456. break;
  2457. case IOCMD_LPORT_GET_ATTR:
  2458. rc = bfad_iocmd_lport_get_attr(bfad, iocmd);
  2459. break;
  2460. case IOCMD_LPORT_GET_STATS:
  2461. rc = bfad_iocmd_lport_get_stats(bfad, iocmd);
  2462. break;
  2463. case IOCMD_LPORT_RESET_STATS:
  2464. rc = bfad_iocmd_lport_reset_stats(bfad, iocmd);
  2465. break;
  2466. case IOCMD_LPORT_GET_IOSTATS:
  2467. rc = bfad_iocmd_lport_get_iostats(bfad, iocmd);
  2468. break;
  2469. case IOCMD_LPORT_GET_RPORTS:
  2470. rc = bfad_iocmd_lport_get_rports(bfad, iocmd, payload_len);
  2471. break;
  2472. case IOCMD_RPORT_GET_ATTR:
  2473. rc = bfad_iocmd_rport_get_attr(bfad, iocmd);
  2474. break;
  2475. case IOCMD_RPORT_GET_ADDR:
  2476. rc = bfad_iocmd_rport_get_addr(bfad, iocmd);
  2477. break;
  2478. case IOCMD_RPORT_GET_STATS:
  2479. rc = bfad_iocmd_rport_get_stats(bfad, iocmd);
  2480. break;
  2481. case IOCMD_RPORT_RESET_STATS:
  2482. rc = bfad_iocmd_rport_clr_stats(bfad, iocmd);
  2483. break;
  2484. case IOCMD_RPORT_SET_SPEED:
  2485. rc = bfad_iocmd_rport_set_speed(bfad, iocmd);
  2486. break;
  2487. case IOCMD_VPORT_GET_ATTR:
  2488. rc = bfad_iocmd_vport_get_attr(bfad, iocmd);
  2489. break;
  2490. case IOCMD_VPORT_GET_STATS:
  2491. rc = bfad_iocmd_vport_get_stats(bfad, iocmd);
  2492. break;
  2493. case IOCMD_VPORT_RESET_STATS:
  2494. rc = bfad_iocmd_vport_clr_stats(bfad, iocmd);
  2495. break;
  2496. case IOCMD_FABRIC_GET_LPORTS:
  2497. rc = bfad_iocmd_fabric_get_lports(bfad, iocmd, payload_len);
  2498. break;
  2499. case IOCMD_RATELIM_ENABLE:
  2500. case IOCMD_RATELIM_DISABLE:
  2501. rc = bfad_iocmd_ratelim(bfad, cmd, iocmd);
  2502. break;
  2503. case IOCMD_RATELIM_DEF_SPEED:
  2504. rc = bfad_iocmd_ratelim_speed(bfad, cmd, iocmd);
  2505. break;
  2506. case IOCMD_FCPIM_FAILOVER:
  2507. rc = bfad_iocmd_cfg_fcpim(bfad, iocmd);
  2508. break;
  2509. case IOCMD_FCPIM_MODSTATS:
  2510. rc = bfad_iocmd_fcpim_get_modstats(bfad, iocmd);
  2511. break;
  2512. case IOCMD_FCPIM_MODSTATSCLR:
  2513. rc = bfad_iocmd_fcpim_clr_modstats(bfad, iocmd);
  2514. break;
  2515. case IOCMD_FCPIM_DEL_ITN_STATS:
  2516. rc = bfad_iocmd_fcpim_get_del_itn_stats(bfad, iocmd);
  2517. break;
  2518. case IOCMD_ITNIM_GET_ATTR:
  2519. rc = bfad_iocmd_itnim_get_attr(bfad, iocmd);
  2520. break;
  2521. case IOCMD_ITNIM_GET_IOSTATS:
  2522. rc = bfad_iocmd_itnim_get_iostats(bfad, iocmd);
  2523. break;
  2524. case IOCMD_ITNIM_RESET_STATS:
  2525. rc = bfad_iocmd_itnim_reset_stats(bfad, iocmd);
  2526. break;
  2527. case IOCMD_ITNIM_GET_ITNSTATS:
  2528. rc = bfad_iocmd_itnim_get_itnstats(bfad, iocmd);
  2529. break;
  2530. case IOCMD_FCPORT_ENABLE:
  2531. rc = bfad_iocmd_fcport_enable(bfad, iocmd);
  2532. break;
  2533. case IOCMD_FCPORT_DISABLE:
  2534. rc = bfad_iocmd_fcport_disable(bfad, iocmd);
  2535. break;
  2536. case IOCMD_IOC_PCIFN_CFG:
  2537. rc = bfad_iocmd_ioc_get_pcifn_cfg(bfad, iocmd);
  2538. break;
  2539. case IOCMD_PCIFN_CREATE:
  2540. rc = bfad_iocmd_pcifn_create(bfad, iocmd);
  2541. break;
  2542. case IOCMD_PCIFN_DELETE:
  2543. rc = bfad_iocmd_pcifn_delete(bfad, iocmd);
  2544. break;
  2545. case IOCMD_PCIFN_BW:
  2546. rc = bfad_iocmd_pcifn_bw(bfad, iocmd);
  2547. break;
  2548. case IOCMD_ADAPTER_CFG_MODE:
  2549. rc = bfad_iocmd_adapter_cfg_mode(bfad, iocmd);
  2550. break;
  2551. case IOCMD_PORT_CFG_MODE:
  2552. rc = bfad_iocmd_port_cfg_mode(bfad, iocmd);
  2553. break;
  2554. case IOCMD_FLASH_ENABLE_OPTROM:
  2555. case IOCMD_FLASH_DISABLE_OPTROM:
  2556. rc = bfad_iocmd_ablk_optrom(bfad, cmd, iocmd);
  2557. break;
  2558. case IOCMD_FAA_QUERY:
  2559. rc = bfad_iocmd_faa_query(bfad, iocmd);
  2560. break;
  2561. case IOCMD_CEE_GET_ATTR:
  2562. rc = bfad_iocmd_cee_attr(bfad, iocmd, payload_len);
  2563. break;
  2564. case IOCMD_CEE_GET_STATS:
  2565. rc = bfad_iocmd_cee_get_stats(bfad, iocmd, payload_len);
  2566. break;
  2567. case IOCMD_CEE_RESET_STATS:
  2568. rc = bfad_iocmd_cee_reset_stats(bfad, iocmd);
  2569. break;
  2570. case IOCMD_SFP_MEDIA:
  2571. rc = bfad_iocmd_sfp_media(bfad, iocmd);
  2572. break;
  2573. case IOCMD_SFP_SPEED:
  2574. rc = bfad_iocmd_sfp_speed(bfad, iocmd);
  2575. break;
  2576. case IOCMD_FLASH_GET_ATTR:
  2577. rc = bfad_iocmd_flash_get_attr(bfad, iocmd);
  2578. break;
  2579. case IOCMD_FLASH_ERASE_PART:
  2580. rc = bfad_iocmd_flash_erase_part(bfad, iocmd);
		break;
	case IOCMD_FLASH_UPDATE_PART:
		rc = bfad_iocmd_flash_update_part(bfad, iocmd, payload_len);
		break;
	case IOCMD_FLASH_READ_PART:
		rc = bfad_iocmd_flash_read_part(bfad, iocmd, payload_len);
		break;
	case IOCMD_DIAG_TEMP:
		rc = bfad_iocmd_diag_temp(bfad, iocmd);
		break;
	case IOCMD_DIAG_MEMTEST:
		rc = bfad_iocmd_diag_memtest(bfad, iocmd);
		break;
	case IOCMD_DIAG_LOOPBACK:
		rc = bfad_iocmd_diag_loopback(bfad, iocmd);
		break;
	case IOCMD_DIAG_FWPING:
		rc = bfad_iocmd_diag_fwping(bfad, iocmd);
		break;
	case IOCMD_DIAG_QUEUETEST:
		rc = bfad_iocmd_diag_queuetest(bfad, iocmd);
		break;
	case IOCMD_DIAG_SFP:
		rc = bfad_iocmd_diag_sfp(bfad, iocmd);
		break;
	case IOCMD_DIAG_LED:
		rc = bfad_iocmd_diag_led(bfad, iocmd);
		break;
	case IOCMD_DIAG_BEACON_LPORT:
		rc = bfad_iocmd_diag_beacon_lport(bfad, iocmd);
		break;
	case IOCMD_DIAG_LB_STAT:
		rc = bfad_iocmd_diag_lb_stat(bfad, iocmd);
		break;
	case IOCMD_DIAG_DPORT_ENABLE:
		rc = bfad_iocmd_diag_dport_enable(bfad, iocmd);
		break;
	case IOCMD_DIAG_DPORT_DISABLE:
		rc = bfad_iocmd_diag_dport_disable(bfad, iocmd);
		break;
	case IOCMD_DIAG_DPORT_SHOW:
		rc = bfad_iocmd_diag_dport_show(bfad, iocmd);
		break;
	case IOCMD_DIAG_DPORT_START:
		rc = bfad_iocmd_diag_dport_start(bfad, iocmd);
		break;
	case IOCMD_PHY_GET_ATTR:
		rc = bfad_iocmd_phy_get_attr(bfad, iocmd);
		break;
	case IOCMD_PHY_GET_STATS:
		rc = bfad_iocmd_phy_get_stats(bfad, iocmd);
		break;
	case IOCMD_PHY_UPDATE_FW:
		rc = bfad_iocmd_phy_update(bfad, iocmd, payload_len);
		break;
	case IOCMD_PHY_READ_FW:
		rc = bfad_iocmd_phy_read(bfad, iocmd, payload_len);
		break;
	case IOCMD_VHBA_QUERY:
		rc = bfad_iocmd_vhba_query(bfad, iocmd);
		break;
	case IOCMD_DEBUG_PORTLOG:
		rc = bfad_iocmd_porglog_get(bfad, iocmd);
		break;
	case IOCMD_DEBUG_FW_CORE:
		rc = bfad_iocmd_debug_fw_core(bfad, iocmd, payload_len);
		break;
	case IOCMD_DEBUG_FW_STATE_CLR:
	case IOCMD_DEBUG_PORTLOG_CLR:
	case IOCMD_DEBUG_START_DTRC:
	case IOCMD_DEBUG_STOP_DTRC:
		rc = bfad_iocmd_debug_ctl(bfad, iocmd, cmd);
		break;
	case IOCMD_DEBUG_PORTLOG_CTL:
		rc = bfad_iocmd_porglog_ctl(bfad, iocmd);
		break;
	case IOCMD_FCPIM_PROFILE_ON:
	case IOCMD_FCPIM_PROFILE_OFF:
		rc = bfad_iocmd_fcpim_cfg_profile(bfad, iocmd, cmd);
		break;
	case IOCMD_ITNIM_GET_IOPROFILE:
		rc = bfad_iocmd_itnim_get_ioprofile(bfad, iocmd);
		break;
	case IOCMD_FCPORT_GET_STATS:
		rc = bfad_iocmd_fcport_get_stats(bfad, iocmd);
		break;
	case IOCMD_FCPORT_RESET_STATS:
		rc = bfad_iocmd_fcport_reset_stats(bfad, iocmd);
		break;
	case IOCMD_BOOT_CFG:
		rc = bfad_iocmd_boot_cfg(bfad, iocmd);
		break;
	case IOCMD_BOOT_QUERY:
		rc = bfad_iocmd_boot_query(bfad, iocmd);
		break;
	case IOCMD_PREBOOT_QUERY:
		rc = bfad_iocmd_preboot_query(bfad, iocmd);
		break;
	case IOCMD_ETHBOOT_CFG:
		rc = bfad_iocmd_ethboot_cfg(bfad, iocmd);
		break;
	case IOCMD_ETHBOOT_QUERY:
		rc = bfad_iocmd_ethboot_query(bfad, iocmd);
		break;
	case IOCMD_TRUNK_ENABLE:
	case IOCMD_TRUNK_DISABLE:
		rc = bfad_iocmd_cfg_trunk(bfad, iocmd, cmd);
		break;
	case IOCMD_TRUNK_GET_ATTR:
		rc = bfad_iocmd_trunk_get_attr(bfad, iocmd);
		break;
	case IOCMD_QOS_ENABLE:
	case IOCMD_QOS_DISABLE:
		rc = bfad_iocmd_qos(bfad, iocmd, cmd);
		break;
	case IOCMD_QOS_GET_ATTR:
		rc = bfad_iocmd_qos_get_attr(bfad, iocmd);
		break;
	case IOCMD_QOS_GET_VC_ATTR:
		rc = bfad_iocmd_qos_get_vc_attr(bfad, iocmd);
		break;
	case IOCMD_QOS_GET_STATS:
		rc = bfad_iocmd_qos_get_stats(bfad, iocmd);
		break;
	case IOCMD_QOS_RESET_STATS:
		rc = bfad_iocmd_qos_reset_stats(bfad, iocmd);
		break;
	case IOCMD_QOS_SET_BW:
		rc = bfad_iocmd_qos_set_bw(bfad, iocmd);
		break;
	case IOCMD_VF_GET_STATS:
		rc = bfad_iocmd_vf_get_stats(bfad, iocmd);
		break;
	case IOCMD_VF_RESET_STATS:
		rc = bfad_iocmd_vf_clr_stats(bfad, iocmd);
		break;
	case IOCMD_FCPIM_LUNMASK_ENABLE:
	case IOCMD_FCPIM_LUNMASK_DISABLE:
	case IOCMD_FCPIM_LUNMASK_CLEAR:
		rc = bfad_iocmd_lunmask(bfad, iocmd, cmd);
		break;
	case IOCMD_FCPIM_LUNMASK_QUERY:
		rc = bfad_iocmd_fcpim_lunmask_query(bfad, iocmd);
		break;
	case IOCMD_FCPIM_LUNMASK_ADD:
	case IOCMD_FCPIM_LUNMASK_DELETE:
		rc = bfad_iocmd_fcpim_cfg_lunmask(bfad, iocmd, cmd);
		break;
	case IOCMD_FCPIM_THROTTLE_QUERY:
		rc = bfad_iocmd_fcpim_throttle_query(bfad, iocmd);
		break;
	case IOCMD_FCPIM_THROTTLE_SET:
		rc = bfad_iocmd_fcpim_throttle_set(bfad, iocmd);
		break;
	/* TFRU */
	case IOCMD_TFRU_READ:
		rc = bfad_iocmd_tfru_read(bfad, iocmd);
		break;
	case IOCMD_TFRU_WRITE:
		rc = bfad_iocmd_tfru_write(bfad, iocmd);
		break;
	/* FRU */
	case IOCMD_FRUVPD_READ:
		rc = bfad_iocmd_fruvpd_read(bfad, iocmd);
		break;
	case IOCMD_FRUVPD_UPDATE:
		rc = bfad_iocmd_fruvpd_update(bfad, iocmd);
		break;
	case IOCMD_FRUVPD_GET_MAX_SIZE:
		rc = bfad_iocmd_fruvpd_get_max_size(bfad, iocmd);
		break;
	default:
		rc = -EINVAL;
		break;
	}

	return rc;
}

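/*
 * bfad_im_bsg_vendor_request - handle an FC_BSG_HST_VENDOR request.
 *
 * The vendor command code is taken from the first word of the vendor_cmd
 * array.  The scatter/gather request payload is linearized into a temporary
 * kernel buffer, dispatched through bfad_iocmd_handler(), and on success the
 * same buffer is copied back out as the reply payload.
 */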
static int
bfad_im_bsg_vendor_request(struct fc_bsg_job *job)
{
	uint32_t vendor_cmd = job->request->rqst_data.h_vendor.vendor_cmd[0];
	struct bfad_im_port_s *im_port =
			(struct bfad_im_port_s *) job->shost->hostdata[0];
	struct bfad_s *bfad = im_port->bfad;
	struct request_queue *request_q = job->req->q;
	void *payload_kbuf;
	int rc = -EINVAL;

	/*
	 * Raise the BSG device request_queue segment limit to 256 to support
	 * payloads larger than 512*1024 bytes.
	 */
	blk_queue_max_segments(request_q, 256);

	/* Allocate a temp buffer to hold the passed in user space command */
	payload_kbuf = kzalloc(job->request_payload.payload_len, GFP_KERNEL);
	if (!payload_kbuf) {
		rc = -ENOMEM;
		goto out;
	}

	/* Copy the sg_list passed in to a linear buffer: holds the cmnd data */
	sg_copy_to_buffer(job->request_payload.sg_list,
			  job->request_payload.sg_cnt, payload_kbuf,
			  job->request_payload.payload_len);

	/* Invoke IOCMD handler - to handle all the vendor command requests */
	rc = bfad_iocmd_handler(bfad, vendor_cmd, payload_kbuf,
				job->request_payload.payload_len);
	if (rc != BFA_STATUS_OK)
		goto error;

	/* Copy the response data to the job->reply_payload sg_list */
	sg_copy_from_buffer(job->reply_payload.sg_list,
			    job->reply_payload.sg_cnt,
			    payload_kbuf,
			    job->reply_payload.payload_len);

	/* Free the command buffer */
	kfree(payload_kbuf);

	/* Fill the BSG job reply data */
	job->reply_len = job->reply_payload.payload_len;
	job->reply->reply_payload_rcv_len = job->reply_payload.payload_len;
	job->reply->result = rc;

	job->job_done(job);
	return rc;
error:
	/* Free the command buffer */
	kfree(payload_kbuf);
out:
	job->reply->result = rc;
	job->reply_len = sizeof(uint32_t);
	job->reply->reply_payload_rcv_len = 0;
	return rc;
}

/* FC passthru callbacks */
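/*
 * The BFA FCXP layer does not walk the driver's buffer lists itself; it
 * calls back into the driver for the address and length of each request
 * and response scatter/gather element, identified by index (sgeid).
 * bfad_send_fcpt_cb() is the completion callback for the exchange.
 */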
u64
bfad_fcxp_get_req_sgaddr_cb(void *bfad_fcxp, int sgeid)
{
	struct bfad_fcxp *drv_fcxp = bfad_fcxp;
	struct bfa_sge_s *sge;
	u64 addr;

	sge = drv_fcxp->req_sge + sgeid;
	addr = (u64)(size_t) sge->sg_addr;
	return addr;
}

u32
bfad_fcxp_get_req_sglen_cb(void *bfad_fcxp, int sgeid)
{
	struct bfad_fcxp *drv_fcxp = bfad_fcxp;
	struct bfa_sge_s *sge;

	sge = drv_fcxp->req_sge + sgeid;
	return sge->sg_len;
}

u64
bfad_fcxp_get_rsp_sgaddr_cb(void *bfad_fcxp, int sgeid)
{
	struct bfad_fcxp *drv_fcxp = bfad_fcxp;
	struct bfa_sge_s *sge;
	u64 addr;

	sge = drv_fcxp->rsp_sge + sgeid;
	addr = (u64)(size_t) sge->sg_addr;
	return addr;
}

u32
bfad_fcxp_get_rsp_sglen_cb(void *bfad_fcxp, int sgeid)
{
	struct bfad_fcxp *drv_fcxp = bfad_fcxp;
	struct bfa_sge_s *sge;

	sge = drv_fcxp->rsp_sge + sgeid;
	return sge->sg_len;
}

void
bfad_send_fcpt_cb(void *bfad_fcxp, struct bfa_fcxp_s *fcxp, void *cbarg,
		bfa_status_t req_status, u32 rsp_len, u32 resid_len,
		struct fchs_s *rsp_fchs)
{
	struct bfad_fcxp *drv_fcxp = bfad_fcxp;

	drv_fcxp->req_status = req_status;
	drv_fcxp->rsp_len = rsp_len;

	/* bfa_fcxp will be automatically freed by BFA */
	drv_fcxp->bfa_fcxp = NULL;
	complete(&drv_fcxp->comp);
}

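/*
 * bfad_fcxp_map_sg - allocate a single DMA-coherent buffer for a passthru
 * payload and build a one-entry SG table for it.
 *
 * The bfad_buf_info descriptor and the bfa_sge_s table are carved out of
 * one kzalloc'ed block; the caller releases everything through
 * bfad_fcxp_free_mem().  Returns NULL on allocation failure.
 */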
struct bfad_buf_info *
bfad_fcxp_map_sg(struct bfad_s *bfad, void *payload_kbuf,
		 uint32_t payload_len, uint32_t *num_sgles)
{
	struct bfad_buf_info *buf_base, *buf_info;
	struct bfa_sge_s *sg_table;
	int sge_num = 1;

	buf_base = kzalloc((sizeof(struct bfad_buf_info) +
			    sizeof(struct bfa_sge_s)) * sge_num, GFP_KERNEL);
	if (!buf_base)
		return NULL;

	sg_table = (struct bfa_sge_s *) (((uint8_t *)buf_base) +
			(sizeof(struct bfad_buf_info) * sge_num));

	/* Allocate dma coherent memory */
	buf_info = buf_base;
	buf_info->size = payload_len;
	buf_info->virt = dma_alloc_coherent(&bfad->pcidev->dev, buf_info->size,
					    &buf_info->phys, GFP_KERNEL);
	if (!buf_info->virt)
		goto out_free_mem;

	/* copy the linear bsg buffer to buf_info */
	memset(buf_info->virt, 0, buf_info->size);
	memcpy(buf_info->virt, payload_kbuf, buf_info->size);

	/*
	 * Setup SG table
	 */
	sg_table->sg_len = buf_info->size;
	sg_table->sg_addr = (void *)(size_t) buf_info->phys;

	*num_sgles = sge_num;

	return buf_base;

out_free_mem:
	kfree(buf_base);
	return NULL;
}

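/*
 * bfad_fcxp_free_mem - release the DMA-coherent payload buffers and the
 * bfad_buf_info/SG-table block allocated by bfad_fcxp_map_sg().
 */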
void
bfad_fcxp_free_mem(struct bfad_s *bfad, struct bfad_buf_info *buf_base,
		   uint32_t num_sgles)
{
	int i;
	struct bfad_buf_info *buf_info = buf_base;

	if (buf_base) {
		for (i = 0; i < num_sgles; buf_info++, i++) {
			if (buf_info->virt != NULL)
				dma_free_coherent(&bfad->pcidev->dev,
					buf_info->size, buf_info->virt,
					buf_info->phys);
		}
		kfree(buf_base);
	}
}

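/*
 * bfad_fcxp_bsg_send - allocate a BFA FCXP and start the FC passthru
 * exchange.
 *
 * Runs under bfad_lock.  The request/response SG callbacks above let the
 * FCXP layer pull the DMA addresses mapped by bfad_fcxp_map_sg().  The
 * lport tag is derived from the source FC address in the caller-supplied
 * FC header, and bfad_send_fcpt_cb() completes drv_fcxp->comp when the
 * exchange finishes.
 */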
int
bfad_fcxp_bsg_send(struct fc_bsg_job *job, struct bfad_fcxp *drv_fcxp,
		   bfa_bsg_fcpt_t *bsg_fcpt)
{
	struct bfa_fcxp_s *hal_fcxp;
	struct bfad_s *bfad = drv_fcxp->port->bfad;
	unsigned long flags;
	uint8_t lp_tag;

	spin_lock_irqsave(&bfad->bfad_lock, flags);

	/* Allocate bfa_fcxp structure */
	hal_fcxp = bfa_fcxp_req_rsp_alloc(drv_fcxp, &bfad->bfa,
				  drv_fcxp->num_req_sgles,
				  drv_fcxp->num_rsp_sgles,
				  bfad_fcxp_get_req_sgaddr_cb,
				  bfad_fcxp_get_req_sglen_cb,
				  bfad_fcxp_get_rsp_sgaddr_cb,
				  bfad_fcxp_get_rsp_sglen_cb, BFA_TRUE);
	if (!hal_fcxp) {
		bfa_trc(bfad, 0);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		return BFA_STATUS_ENOMEM;
	}

	drv_fcxp->bfa_fcxp = hal_fcxp;

	lp_tag = bfa_lps_get_tag_from_pid(&bfad->bfa, bsg_fcpt->fchs.s_id);

	bfa_fcxp_send(hal_fcxp, drv_fcxp->bfa_rport, bsg_fcpt->vf_id, lp_tag,
		      bsg_fcpt->cts, bsg_fcpt->cos,
		      job->request_payload.payload_len,
		      &bsg_fcpt->fchs, bfad_send_fcpt_cb, bfad,
		      job->reply_payload.payload_len, bsg_fcpt->tsecs);

	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return BFA_STATUS_OK;
}

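/*
 * bfad_im_bsg_els_ct_request - handle BSG ELS/CT passthru requests.
 *
 * The userspace payload (a bfa_bsg_fcpt_t plus command data) is copied in,
 * the local port (and, for RPT requests, the remote port) is looked up,
 * the request/response payloads are staged in DMA-coherent buffers, and
 * the exchange is started through bfad_fcxp_bsg_send().  The call blocks
 * on drv_fcxp->comp until bfad_send_fcpt_cb() signals completion, then
 * copies the response data and the updated bfa_bsg_fcpt_t status back to
 * userspace.
 */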
int
bfad_im_bsg_els_ct_request(struct fc_bsg_job *job)
{
	struct bfa_bsg_data *bsg_data;
	struct bfad_im_port_s *im_port =
			(struct bfad_im_port_s *) job->shost->hostdata[0];
	struct bfad_s *bfad = im_port->bfad;
	bfa_bsg_fcpt_t *bsg_fcpt;
	struct bfad_fcxp *drv_fcxp;
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_fcs_rport_s *fcs_rport;
	uint32_t command_type = job->request->msgcode;
	unsigned long flags;
	struct bfad_buf_info *rsp_buf_info;
	void *req_kbuf = NULL, *rsp_kbuf = NULL;
	int rc = -EINVAL;

	job->reply_len = sizeof(uint32_t);	/* At least uint32_t reply_len */
	job->reply->reply_payload_rcv_len = 0;

	/* Get the payload passed in from userspace */
	bsg_data = (struct bfa_bsg_data *) (((char *)job->request) +
					    sizeof(struct fc_bsg_request));
	if (bsg_data == NULL)
		goto out;

	/*
	 * Allocate buffer for bsg_fcpt and do a copy_from_user op for payload
	 * buffer of size bsg_data->payload_len
	 */
	bsg_fcpt = kzalloc(bsg_data->payload_len, GFP_KERNEL);
	if (!bsg_fcpt) {
		rc = -ENOMEM;
		goto out;
	}

	if (copy_from_user((uint8_t *)bsg_fcpt,
			   (void *)(unsigned long)bsg_data->payload,
			   bsg_data->payload_len)) {
		kfree(bsg_fcpt);
		rc = -EIO;
		goto out;
	}

	drv_fcxp = kzalloc(sizeof(struct bfad_fcxp), GFP_KERNEL);
	if (drv_fcxp == NULL) {
		kfree(bsg_fcpt);
		rc = -ENOMEM;
		goto out;
	}

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs, bsg_fcpt->vf_id,
				       bsg_fcpt->lpwwn);
	if (fcs_port == NULL) {
		bsg_fcpt->status = BFA_STATUS_UNKNOWN_LWWN;
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		goto out_free_mem;
	}

	/* Check if the port is online before sending FC Passthru cmd */
	if (!bfa_fcs_lport_is_online(fcs_port)) {
		bsg_fcpt->status = BFA_STATUS_PORT_OFFLINE;
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		goto out_free_mem;
	}

	drv_fcxp->port = fcs_port->bfad_port;

	if (!drv_fcxp->port->bfad)
		drv_fcxp->port->bfad = bfad;

	/* Fetch the bfa_rport - if nexus needed */
	if (command_type == FC_BSG_HST_ELS_NOLOGIN ||
	    command_type == FC_BSG_HST_CT) {
		/* BSG HST commands: no nexus needed */
		drv_fcxp->bfa_rport = NULL;

	} else if (command_type == FC_BSG_RPT_ELS ||
		   command_type == FC_BSG_RPT_CT) {
		/* BSG RPT commands: nexus needed */
		fcs_rport = bfa_fcs_lport_get_rport_by_pwwn(fcs_port,
							    bsg_fcpt->dpwwn);
		if (fcs_rport == NULL) {
			bsg_fcpt->status = BFA_STATUS_UNKNOWN_RWWN;
			spin_unlock_irqrestore(&bfad->bfad_lock, flags);
			goto out_free_mem;
		}

		drv_fcxp->bfa_rport = fcs_rport->bfa_rport;

	} else { /* Unknown BSG msgcode; return -EINVAL */
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		goto out_free_mem;
	}

	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	/* allocate memory for req / rsp buffers */
	req_kbuf = kzalloc(job->request_payload.payload_len, GFP_KERNEL);
	if (!req_kbuf) {
		printk(KERN_INFO "bfa %s: fcpt request buffer alloc failed\n",
		       bfad->pci_name);
		rc = -ENOMEM;
		goto out_free_mem;
	}

	rsp_kbuf = kzalloc(job->reply_payload.payload_len, GFP_KERNEL);
	if (!rsp_kbuf) {
		printk(KERN_INFO "bfa %s: fcpt response buffer alloc failed\n",
		       bfad->pci_name);
		rc = -ENOMEM;
		goto out_free_mem;
	}

	/* map req sg - copy the sg_list passed in to the linear buffer */
	sg_copy_to_buffer(job->request_payload.sg_list,
			  job->request_payload.sg_cnt, req_kbuf,
			  job->request_payload.payload_len);

	drv_fcxp->reqbuf_info = bfad_fcxp_map_sg(bfad, req_kbuf,
					job->request_payload.payload_len,
					&drv_fcxp->num_req_sgles);
	if (!drv_fcxp->reqbuf_info) {
		printk(KERN_INFO "bfa %s: fcpt request fcxp_map_sg failed\n",
		       bfad->pci_name);
		rc = -ENOMEM;
		goto out_free_mem;
	}

	drv_fcxp->req_sge = (struct bfa_sge_s *)
			    (((uint8_t *)drv_fcxp->reqbuf_info) +
			    (sizeof(struct bfad_buf_info) *
					drv_fcxp->num_req_sgles));

	/* map rsp sg */
	drv_fcxp->rspbuf_info = bfad_fcxp_map_sg(bfad, rsp_kbuf,
					job->reply_payload.payload_len,
					&drv_fcxp->num_rsp_sgles);
	if (!drv_fcxp->rspbuf_info) {
		printk(KERN_INFO "bfa %s: fcpt response fcxp_map_sg failed\n",
		       bfad->pci_name);
		rc = -ENOMEM;
		goto out_free_mem;
	}

	rsp_buf_info = (struct bfad_buf_info *)drv_fcxp->rspbuf_info;
	drv_fcxp->rsp_sge = (struct bfa_sge_s *)
			    (((uint8_t *)drv_fcxp->rspbuf_info) +
			    (sizeof(struct bfad_buf_info) *
					drv_fcxp->num_rsp_sgles));

	/* fcxp send */
	init_completion(&drv_fcxp->comp);
	rc = bfad_fcxp_bsg_send(job, drv_fcxp, bsg_fcpt);
	if (rc == BFA_STATUS_OK) {
		wait_for_completion(&drv_fcxp->comp);
		bsg_fcpt->status = drv_fcxp->req_status;
	} else {
		bsg_fcpt->status = rc;
		goto out_free_mem;
	}

	/* fill the job->reply data */
	if (drv_fcxp->req_status == BFA_STATUS_OK) {
		job->reply_len = drv_fcxp->rsp_len;
		job->reply->reply_payload_rcv_len = drv_fcxp->rsp_len;
		job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
	} else {
		job->reply->reply_payload_rcv_len =
				sizeof(struct fc_bsg_ctels_reply);
		job->reply_len = sizeof(uint32_t);
		job->reply->reply_data.ctels_reply.status =
						FC_CTELS_STATUS_REJECT;
	}

	/* Copy the response data to the reply_payload sg list */
	sg_copy_from_buffer(job->reply_payload.sg_list,
			    job->reply_payload.sg_cnt,
			    (uint8_t *)rsp_buf_info->virt,
			    job->reply_payload.payload_len);

out_free_mem:
	bfad_fcxp_free_mem(bfad, drv_fcxp->rspbuf_info,
			   drv_fcxp->num_rsp_sgles);
	bfad_fcxp_free_mem(bfad, drv_fcxp->reqbuf_info,
			   drv_fcxp->num_req_sgles);
	kfree(req_kbuf);
	kfree(rsp_kbuf);

	/* Need a copy to user op */
	if (copy_to_user((void *)(unsigned long)bsg_data->payload,
			 (void *)bsg_fcpt, bsg_data->payload_len))
		rc = -EIO;

	kfree(bsg_fcpt);
	kfree(drv_fcxp);
out:
	job->reply->result = rc;

	if (rc == BFA_STATUS_OK)
		job->job_done(job);

	return rc;
}

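/*
 * bfad_im_bsg_request - FC BSG entry point: dispatch vendor-specific
 * requests to bfad_im_bsg_vendor_request() and ELS/CT passthru requests
 * to bfad_im_bsg_els_ct_request().
 */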
int
bfad_im_bsg_request(struct fc_bsg_job *job)
{
	uint32_t rc = BFA_STATUS_OK;

	switch (job->request->msgcode) {
	case FC_BSG_HST_VENDOR:
		/* Process BSG HST Vendor requests */
		rc = bfad_im_bsg_vendor_request(job);
		break;
	case FC_BSG_HST_ELS_NOLOGIN:
	case FC_BSG_RPT_ELS:
	case FC_BSG_HST_CT:
	case FC_BSG_RPT_CT:
		/* Process BSG ELS/CT commands */
		rc = bfad_im_bsg_els_ct_request(job);
		break;
	default:
		job->reply->result = rc = -EINVAL;
		job->reply->reply_payload_rcv_len = 0;
		break;
	}

	return rc;
}

int
bfad_im_bsg_timeout(struct fc_bsg_job *job)
{
	/* Don't complete the BSG job request - return -EAGAIN
	 * to reset the bsg job timeout; for ELS/CT passthru we
	 * already have a timer to track the request.
	 */
	return -EAGAIN;
}
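
/*
 * Note (illustrative, not part of this file): bfad_im_bsg_request() and
 * bfad_im_bsg_timeout() are expected to be hooked into the FC transport
 * template elsewhere in the driver, roughly as:
 *
 *	static struct fc_function_template bfad_im_fc_function_template = {
 *		...
 *		.bsg_request	= bfad_im_bsg_request,
 *		.bsg_timeout	= bfad_im_bsg_timeout,
 *	};
 *
 * The field names follow the fc_function_template definition in
 * include/scsi/scsi_transport_fc.h.
 */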