/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 *
 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */

#include <linux/uaccess.h>
#include "bfad_drv.h"
#include "bfad_im.h"
#include "bfad_bsg.h"

BFA_TRC_FILE(LDRV, BSG);
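
/*
 * IOC enable/disable: issue the request under bfad_lock, then block on a
 * driver completion until the IOC state change finishes.
 */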
int
bfad_iocmd_ioc_enable(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	int rc = 0;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	/* If IOC is not in disabled state - return */
	if (!bfa_ioc_is_disabled(&bfad->bfa.ioc)) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_IOC_FAILURE;
		return rc;
	}

	init_completion(&bfad->enable_comp);
	bfa_iocfc_enable(&bfad->bfa);
	iocmd->status = BFA_STATUS_OK;
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	wait_for_completion(&bfad->enable_comp);
	return rc;
}

int
bfad_iocmd_ioc_disable(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	int rc = 0;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	if (bfad->disable_active) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		return -EBUSY;
	}
	bfad->disable_active = BFA_TRUE;
	init_completion(&bfad->disable_comp);
	bfa_iocfc_disable(&bfad->bfa);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	wait_for_completion(&bfad->disable_comp);
	bfad->disable_active = BFA_FALSE;
	iocmd->status = BFA_STATUS_OK;
	return rc;
}

static int
bfad_iocmd_ioc_get_info(struct bfad_s *bfad, void *cmd)
{
	int i;
	struct bfa_bsg_ioc_info_s *iocmd = (struct bfa_bsg_ioc_info_s *)cmd;
	struct bfad_im_port_s *im_port;
	struct bfa_port_attr_s pattr;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	bfa_fcport_get_attr(&bfad->bfa, &pattr);
	iocmd->nwwn = pattr.nwwn;
	iocmd->pwwn = pattr.pwwn;
	iocmd->ioc_type = bfa_get_type(&bfad->bfa);
	iocmd->mac = bfa_get_mac(&bfad->bfa);
	iocmd->factory_mac = bfa_get_mfg_mac(&bfad->bfa);
	bfa_get_adapter_serial_num(&bfad->bfa, iocmd->serialnum);
	iocmd->factorynwwn = pattr.factorynwwn;
	iocmd->factorypwwn = pattr.factorypwwn;
	iocmd->bfad_num = bfad->inst_no;
	im_port = bfad->pport.im_port;
	iocmd->host = im_port->shost->host_no;
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	strcpy(iocmd->name, bfad->adapter_name);
	strcpy(iocmd->port_name, bfad->port_name);
	strcpy(iocmd->hwpath, bfad->pci_name);

	/* set adapter hw path */
	strcpy(iocmd->adapter_hwpath, bfad->pci_name);
	i = strlen(iocmd->adapter_hwpath) - 1;
	while (iocmd->adapter_hwpath[i] != '.')
		i--;
	iocmd->adapter_hwpath[i] = '\0';
	iocmd->status = BFA_STATUS_OK;
	return 0;
}

static int
bfad_iocmd_ioc_get_attr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_ioc_attr_s *iocmd = (struct bfa_bsg_ioc_attr_s *)cmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	bfa_ioc_get_attr(&bfad->bfa.ioc, &iocmd->ioc_attr);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	/* fill in driver attr info */
	strcpy(iocmd->ioc_attr.driver_attr.driver, BFAD_DRIVER_NAME);
	strncpy(iocmd->ioc_attr.driver_attr.driver_ver,
		BFAD_DRIVER_VERSION, BFA_VERSION_LEN);
	strcpy(iocmd->ioc_attr.driver_attr.fw_ver,
		iocmd->ioc_attr.adapter_attr.fw_ver);
	strcpy(iocmd->ioc_attr.driver_attr.bios_ver,
		iocmd->ioc_attr.adapter_attr.optrom_ver);

	/* copy chip rev info first otherwise it will be overwritten */
	memcpy(bfad->pci_attr.chip_rev, iocmd->ioc_attr.pci_attr.chip_rev,
		sizeof(bfad->pci_attr.chip_rev));
	memcpy(&iocmd->ioc_attr.pci_attr, &bfad->pci_attr,
		sizeof(struct bfa_ioc_pci_attr_s));

	iocmd->status = BFA_STATUS_OK;
	return 0;
}

int
bfad_iocmd_ioc_get_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_ioc_stats_s *iocmd = (struct bfa_bsg_ioc_stats_s *)cmd;

	bfa_ioc_get_stats(&bfad->bfa, &iocmd->ioc_stats);
	iocmd->status = BFA_STATUS_OK;
	return 0;
}

int
bfad_iocmd_ioc_get_fwstats(struct bfad_s *bfad, void *cmd,
			unsigned int payload_len)
{
	struct bfa_bsg_ioc_fwstats_s *iocmd =
			(struct bfa_bsg_ioc_fwstats_s *)cmd;
	void *iocmd_bufptr;
	unsigned long flags;

	if (bfad_chk_iocmd_sz(payload_len,
			sizeof(struct bfa_bsg_ioc_fwstats_s),
			sizeof(struct bfa_fw_stats_s)) != BFA_STATUS_OK) {
		iocmd->status = BFA_STATUS_VERSION_FAIL;
		goto out;
	}

	iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_ioc_fwstats_s);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_ioc_fw_stats_get(&bfad->bfa.ioc, iocmd_bufptr);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK) {
		bfa_trc(bfad, iocmd->status);
		goto out;
	}
out:
	bfa_trc(bfad, 0x6666);
	return 0;
}

int
bfad_iocmd_ioc_reset_stats(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	unsigned long flags;

	if (v_cmd == IOCMD_IOC_RESET_STATS) {
		bfa_ioc_clear_stats(&bfad->bfa);
		iocmd->status = BFA_STATUS_OK;
	} else if (v_cmd == IOCMD_IOC_RESET_FWSTATS) {
		spin_lock_irqsave(&bfad->bfad_lock, flags);
		iocmd->status = bfa_ioc_fw_stats_clear(&bfad->bfa.ioc);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	}
	return 0;
}

int
bfad_iocmd_ioc_set_name(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
{
	struct bfa_bsg_ioc_name_s *iocmd = (struct bfa_bsg_ioc_name_s *) cmd;

	if (v_cmd == IOCMD_IOC_SET_ADAPTER_NAME)
		strcpy(bfad->adapter_name, iocmd->name);
	else if (v_cmd == IOCMD_IOC_SET_PORT_NAME)
		strcpy(bfad->port_name, iocmd->name);

	iocmd->status = BFA_STATUS_OK;
	return 0;
}

int
bfad_iocmd_iocfc_get_attr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_iocfc_attr_s *iocmd = (struct bfa_bsg_iocfc_attr_s *)cmd;

	iocmd->status = BFA_STATUS_OK;
	bfa_iocfc_get_attr(&bfad->bfa, &iocmd->iocfc_attr);
	return 0;
}

int
bfad_iocmd_iocfc_set_intr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_iocfc_intr_s *iocmd = (struct bfa_bsg_iocfc_intr_s *)cmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_iocfc_israttr_set(&bfad->bfa, &iocmd->attr);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	return 0;
}
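
/*
 * Base port enable/disable and statistics: each request is issued under
 * bfad_lock with bfad_hcb_comp as the callback, then the handler waits on
 * the local completion and reports the callback status back to user space.
 */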
int
bfad_iocmd_port_enable(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_port_enable(&bfad->bfa.modules.port,
					bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK) {
		bfa_trc(bfad, iocmd->status);
		return 0;
	}
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
	return 0;
}

int
bfad_iocmd_port_disable(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_port_disable(&bfad->bfa.modules.port,
					bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK) {
		bfa_trc(bfad, iocmd->status);
		return 0;
	}
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
	return 0;
}

static int
bfad_iocmd_port_get_attr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_port_attr_s *iocmd = (struct bfa_bsg_port_attr_s *)cmd;
	struct bfa_lport_attr_s port_attr;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	bfa_fcport_get_attr(&bfad->bfa, &iocmd->attr);
	bfa_fcs_lport_get_attr(&bfad->bfa_fcs.fabric.bport, &port_attr);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	if (iocmd->attr.topology != BFA_PORT_TOPOLOGY_NONE)
		iocmd->attr.pid = port_attr.pid;
	else
		iocmd->attr.pid = 0;

	iocmd->attr.port_type = port_attr.port_type;
	iocmd->attr.loopback = port_attr.loopback;
	iocmd->attr.authfail = port_attr.authfail;
	strncpy(iocmd->attr.port_symname.symname,
		port_attr.port_cfg.sym_name.symname,
		sizeof(port_attr.port_cfg.sym_name.symname));

	iocmd->status = BFA_STATUS_OK;
	return 0;
}

int
bfad_iocmd_port_get_stats(struct bfad_s *bfad, void *cmd,
			unsigned int payload_len)
{
	struct bfa_bsg_port_stats_s *iocmd = (struct bfa_bsg_port_stats_s *)cmd;
	struct bfad_hal_comp fcomp;
	void *iocmd_bufptr;
	unsigned long flags;

	if (bfad_chk_iocmd_sz(payload_len,
			sizeof(struct bfa_bsg_port_stats_s),
			sizeof(union bfa_port_stats_u)) != BFA_STATUS_OK) {
		iocmd->status = BFA_STATUS_VERSION_FAIL;
		return 0;
	}

	iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_port_stats_s);

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_port_get_stats(&bfad->bfa.modules.port,
				iocmd_bufptr, bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK) {
		bfa_trc(bfad, iocmd->status);
		goto out;
	}
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_port_reset_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_port_clear_stats(&bfad->bfa.modules.port,
					bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK) {
		bfa_trc(bfad, iocmd->status);
		return 0;
	}
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
	return 0;
}

int
bfad_iocmd_set_port_cfg(struct bfad_s *bfad, void *iocmd, unsigned int v_cmd)
{
	struct bfa_bsg_port_cfg_s *cmd = (struct bfa_bsg_port_cfg_s *)iocmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	if (v_cmd == IOCMD_PORT_CFG_TOPO)
		cmd->status = bfa_fcport_cfg_topology(&bfad->bfa, cmd->param);
	else if (v_cmd == IOCMD_PORT_CFG_SPEED)
		cmd->status = bfa_fcport_cfg_speed(&bfad->bfa, cmd->param);
	else if (v_cmd == IOCMD_PORT_CFG_ALPA)
		cmd->status = bfa_fcport_cfg_hardalpa(&bfad->bfa, cmd->param);
	else if (v_cmd == IOCMD_PORT_CLR_ALPA)
		cmd->status = bfa_fcport_clr_hardalpa(&bfad->bfa);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	return 0;
}

int
bfad_iocmd_port_cfg_maxfrsize(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_port_cfg_maxfrsize_s *iocmd =
			(struct bfa_bsg_port_cfg_maxfrsize_s *)cmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_fcport_cfg_maxfrsize(&bfad->bfa, iocmd->maxfrsize);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	return 0;
}

int
bfad_iocmd_port_cfg_bbsc(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	if (bfa_ioc_get_type(&bfad->bfa.ioc) == BFA_IOC_TYPE_FC) {
		if (v_cmd == IOCMD_PORT_BBSC_ENABLE)
			fcport->cfg.bb_scn_state = BFA_TRUE;
		else if (v_cmd == IOCMD_PORT_BBSC_DISABLE)
			fcport->cfg.bb_scn_state = BFA_FALSE;
	}
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
	return 0;
}
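
/*
 * Logical port (lport) queries: each handler looks up the FCS port by
 * vf_id/pwwn under bfad_lock and fails with BFA_STATUS_UNKNOWN_LWWN when
 * the port does not exist.
 */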
static int
bfad_iocmd_lport_get_attr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_bsg_lport_attr_s *iocmd = (struct bfa_bsg_lport_attr_s *)cmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->pwwn);
	if (fcs_port == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
		goto out;
	}

	bfa_fcs_lport_get_attr(fcs_port, &iocmd->port_attr);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}

int
bfad_iocmd_lport_get_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_bsg_lport_stats_s *iocmd =
			(struct bfa_bsg_lport_stats_s *)cmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->pwwn);
	if (fcs_port == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
		goto out;
	}

	bfa_fcs_lport_get_stats(fcs_port, &iocmd->port_stats);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}

int
bfad_iocmd_lport_reset_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_bsg_reset_stats_s *iocmd =
			(struct bfa_bsg_reset_stats_s *)cmd;
	struct bfa_fcpim_s *fcpim = BFA_FCPIM(&bfad->bfa);
	struct list_head *qe, *qen;
	struct bfa_itnim_s *itnim;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->vpwwn);
	if (fcs_port == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
		goto out;
	}

	bfa_fcs_lport_clear_stats(fcs_port);
	/* clear IO stats from all active itnims */
	list_for_each_safe(qe, qen, &fcpim->itnim_q) {
		itnim = (struct bfa_itnim_s *) qe;
		if (itnim->rport->rport_info.lp_tag != fcs_port->lp_tag)
			continue;
		bfa_itnim_clear_stats(itnim);
	}
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}

int
bfad_iocmd_lport_get_iostats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_bsg_lport_iostats_s *iocmd =
			(struct bfa_bsg_lport_iostats_s *)cmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->pwwn);
	if (fcs_port == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
		goto out;
	}

	bfa_fcpim_port_iostats(&bfad->bfa, &iocmd->iostats,
			fcs_port->lp_tag);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}

int
bfad_iocmd_lport_get_rports(struct bfad_s *bfad, void *cmd,
			unsigned int payload_len)
{
	struct bfa_bsg_lport_get_rports_s *iocmd =
			(struct bfa_bsg_lport_get_rports_s *)cmd;
	struct bfa_fcs_lport_s *fcs_port;
	unsigned long flags;
	void *iocmd_bufptr;

	if (iocmd->nrports == 0)
		return -EINVAL;

	if (bfad_chk_iocmd_sz(payload_len,
			sizeof(struct bfa_bsg_lport_get_rports_s),
			sizeof(struct bfa_rport_qualifier_s) * iocmd->nrports)
			!= BFA_STATUS_OK) {
		iocmd->status = BFA_STATUS_VERSION_FAIL;
		return 0;
	}

	iocmd_bufptr = (char *)iocmd +
			sizeof(struct bfa_bsg_lport_get_rports_s);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->pwwn);
	if (fcs_port == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		bfa_trc(bfad, 0);
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
		goto out;
	}

	bfa_fcs_lport_get_rport_quals(fcs_port,
			(struct bfa_rport_qualifier_s *)iocmd_bufptr,
			&iocmd->nrports);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}
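
/*
 * Remote port (rport) queries: resolve the local port first, then the
 * rport by rpwwn (or by rpwwn plus pid qualifier) before reading or
 * clearing its attributes, SCSI address or statistics.
 */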
int
bfad_iocmd_rport_get_attr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_rport_attr_s *iocmd = (struct bfa_bsg_rport_attr_s *)cmd;
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_fcs_rport_s *fcs_rport;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->pwwn);
	if (fcs_port == NULL) {
		bfa_trc(bfad, 0);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
		goto out;
	}

	if (iocmd->pid)
		fcs_rport = bfa_fcs_lport_get_rport_by_qualifier(fcs_port,
						iocmd->rpwwn, iocmd->pid);
	else
		fcs_rport = bfa_fcs_rport_lookup(fcs_port, iocmd->rpwwn);
	if (fcs_rport == NULL) {
		bfa_trc(bfad, 0);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
		goto out;
	}

	bfa_fcs_rport_get_attr(fcs_rport, &iocmd->attr);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}

static int
bfad_iocmd_rport_get_addr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_rport_scsi_addr_s *iocmd =
			(struct bfa_bsg_rport_scsi_addr_s *)cmd;
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_fcs_itnim_s *fcs_itnim;
	struct bfad_itnim_s *drv_itnim;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->pwwn);
	if (fcs_port == NULL) {
		bfa_trc(bfad, 0);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
		goto out;
	}

	fcs_itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn);
	if (fcs_itnim == NULL) {
		bfa_trc(bfad, 0);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
		goto out;
	}

	drv_itnim = fcs_itnim->itnim_drv;
	if (drv_itnim && drv_itnim->im_port)
		iocmd->host = drv_itnim->im_port->shost->host_no;
	else {
		bfa_trc(bfad, 0);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
		goto out;
	}

	iocmd->target = drv_itnim->scsi_tgt_id;
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	iocmd->bus = 0;
	iocmd->lun = 0;
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}

int
bfad_iocmd_rport_get_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_rport_stats_s *iocmd =
			(struct bfa_bsg_rport_stats_s *)cmd;
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_fcs_rport_s *fcs_rport;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->pwwn);
	if (fcs_port == NULL) {
		bfa_trc(bfad, 0);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
		goto out;
	}

	fcs_rport = bfa_fcs_rport_lookup(fcs_port, iocmd->rpwwn);
	if (fcs_rport == NULL) {
		bfa_trc(bfad, 0);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
		goto out;
	}

	memcpy((void *)&iocmd->stats, (void *)&fcs_rport->stats,
		sizeof(struct bfa_rport_stats_s));
	if (bfa_fcs_rport_get_halrport(fcs_rport)) {
		memcpy((void *)&iocmd->stats.hal_stats,
			(void *)&(bfa_fcs_rport_get_halrport(fcs_rport)->stats),
			sizeof(struct bfa_rport_hal_stats_s));
	}

	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}

int
bfad_iocmd_rport_clr_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_rport_reset_stats_s *iocmd =
			(struct bfa_bsg_rport_reset_stats_s *)cmd;
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_fcs_rport_s *fcs_rport;
	struct bfa_rport_s *rport;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->pwwn);
	if (fcs_port == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
		goto out;
	}

	fcs_rport = bfa_fcs_rport_lookup(fcs_port, iocmd->rpwwn);
	if (fcs_rport == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
		goto out;
	}

	memset((char *)&fcs_rport->stats, 0, sizeof(struct bfa_rport_stats_s));
	rport = bfa_fcs_rport_get_halrport(fcs_rport);
	if (rport)
		memset(&rport->stats, 0, sizeof(rport->stats));
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}

int
bfad_iocmd_rport_set_speed(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_rport_set_speed_s *iocmd =
			(struct bfa_bsg_rport_set_speed_s *)cmd;
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_fcs_rport_s *fcs_rport;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->pwwn);
	if (fcs_port == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
		goto out;
	}

	fcs_rport = bfa_fcs_rport_lookup(fcs_port, iocmd->rpwwn);
	if (fcs_rport == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
		goto out;
	}

	fcs_rport->rpf.assigned_speed = iocmd->speed;
	/* Set this speed in f/w only if the RPSC speed is not available */
	if (fcs_rport->rpf.rpsc_speed == BFA_PORT_SPEED_UNKNOWN)
		if (fcs_rport->bfa_rport)
			bfa_rport_speed(fcs_rport->bfa_rport, iocmd->speed);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}
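
/*
 * Virtual port (vport) queries: look up the vport by vf_id/vpwwn and
 * return BFA_STATUS_UNKNOWN_VWWN when it is not found.
 */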
int
bfad_iocmd_vport_get_attr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_fcs_vport_s *fcs_vport;
	struct bfa_bsg_vport_attr_s *iocmd = (struct bfa_bsg_vport_attr_s *)cmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->vpwwn);
	if (fcs_vport == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_VWWN;
		goto out;
	}

	bfa_fcs_vport_get_attr(fcs_vport, &iocmd->vport_attr);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}

int
bfad_iocmd_vport_get_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_fcs_vport_s *fcs_vport;
	struct bfa_bsg_vport_stats_s *iocmd =
			(struct bfa_bsg_vport_stats_s *)cmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->vpwwn);
	if (fcs_vport == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_VWWN;
		goto out;
	}

	memcpy((void *)&iocmd->vport_stats, (void *)&fcs_vport->vport_stats,
		sizeof(struct bfa_vport_stats_s));
	memcpy((void *)&iocmd->vport_stats.port_stats,
		(void *)&fcs_vport->lport.stats,
		sizeof(struct bfa_lport_stats_s));
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}

int
bfad_iocmd_vport_clr_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_fcs_vport_s *fcs_vport;
	struct bfa_bsg_reset_stats_s *iocmd =
			(struct bfa_bsg_reset_stats_s *)cmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->vpwwn);
	if (fcs_vport == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_VWWN;
		goto out;
	}

	memset(&fcs_vport->vport_stats, 0, sizeof(struct bfa_vport_stats_s));
	memset(&fcs_vport->lport.stats, 0, sizeof(struct bfa_lport_stats_s));
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}
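
/*
 * Fabric query: copy the WWNs of all logical ports on a virtual fabric
 * into the payload area that follows the bsg command header.
 */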
static int
bfad_iocmd_fabric_get_lports(struct bfad_s *bfad, void *cmd,
			unsigned int payload_len)
{
	struct bfa_bsg_fabric_get_lports_s *iocmd =
			(struct bfa_bsg_fabric_get_lports_s *)cmd;
	bfa_fcs_vf_t *fcs_vf;
	uint32_t nports = iocmd->nports;
	unsigned long flags;
	void *iocmd_bufptr;

	if (nports == 0) {
		iocmd->status = BFA_STATUS_EINVAL;
		goto out;
	}

	if (bfad_chk_iocmd_sz(payload_len,
			sizeof(struct bfa_bsg_fabric_get_lports_s),
			sizeof(wwn_t[iocmd->nports])) != BFA_STATUS_OK) {
		iocmd->status = BFA_STATUS_VERSION_FAIL;
		goto out;
	}

	iocmd_bufptr = (char *)iocmd +
			sizeof(struct bfa_bsg_fabric_get_lports_s);

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_vf = bfa_fcs_vf_lookup(&bfad->bfa_fcs, iocmd->vf_id);
	if (fcs_vf == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_VFID;
		goto out;
	}
	bfa_fcs_vf_get_ports(fcs_vf, (wwn_t *)iocmd_bufptr, &nports);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	iocmd->nports = nports;
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}
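
/*
 * Rate limit and FCP-IM path timeout configuration: these handlers only
 * touch the cached fcport/fcpim configuration, so they complete
 * synchronously under bfad_lock.
 */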
int
bfad_iocmd_ratelim(struct bfad_s *bfad, unsigned int cmd, void *pcmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd;
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);

	if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
	    (fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
		iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
	else {
		if (cmd == IOCMD_RATELIM_ENABLE)
			fcport->cfg.ratelimit = BFA_TRUE;
		else if (cmd == IOCMD_RATELIM_DISABLE)
			fcport->cfg.ratelimit = BFA_FALSE;

		if (fcport->cfg.trl_def_speed == BFA_PORT_SPEED_UNKNOWN)
			fcport->cfg.trl_def_speed = BFA_PORT_SPEED_1GBPS;

		iocmd->status = BFA_STATUS_OK;
	}

	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	return 0;
}

int
bfad_iocmd_ratelim_speed(struct bfad_s *bfad, unsigned int cmd, void *pcmd)
{
	struct bfa_bsg_trl_speed_s *iocmd = (struct bfa_bsg_trl_speed_s *)pcmd;
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);

	/* Auto and speeds greater than the supported speed, are invalid */
	if ((iocmd->speed == BFA_PORT_SPEED_AUTO) ||
	    (iocmd->speed > fcport->speed_sup)) {
		iocmd->status = BFA_STATUS_UNSUPP_SPEED;
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		return 0;
	}

	if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
	    (fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
		iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
	else {
		fcport->cfg.trl_def_speed = iocmd->speed;
		iocmd->status = BFA_STATUS_OK;
	}

	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	return 0;
}

int
bfad_iocmd_cfg_fcpim(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_fcpim_s *iocmd = (struct bfa_bsg_fcpim_s *)cmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	bfa_fcpim_path_tov_set(&bfad->bfa, iocmd->param);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
	return 0;
}
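
/*
 * FCP-IM module statistics: walk the active itnim queue to accumulate or
 * clear per-itnim IO stats, and copy the deleted-itnim counters kept by
 * the fcpim module.
 */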
int
bfad_iocmd_fcpim_get_modstats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_fcpim_modstats_s *iocmd =
			(struct bfa_bsg_fcpim_modstats_s *)cmd;
	struct bfa_fcpim_s *fcpim = BFA_FCPIM(&bfad->bfa);
	struct list_head *qe, *qen;
	struct bfa_itnim_s *itnim;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);

	/* accumulate IO stats from itnim */
	memset((void *)&iocmd->modstats, 0, sizeof(struct bfa_itnim_iostats_s));
	list_for_each_safe(qe, qen, &fcpim->itnim_q) {
		itnim = (struct bfa_itnim_s *) qe;
		bfa_fcpim_add_stats(&iocmd->modstats, &(itnim->stats));
	}
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
	return 0;
}

int
bfad_iocmd_fcpim_clr_modstats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_fcpim_modstatsclr_s *iocmd =
			(struct bfa_bsg_fcpim_modstatsclr_s *)cmd;
	struct bfa_fcpim_s *fcpim = BFA_FCPIM(&bfad->bfa);
	struct list_head *qe, *qen;
	struct bfa_itnim_s *itnim;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	list_for_each_safe(qe, qen, &fcpim->itnim_q) {
		itnim = (struct bfa_itnim_s *) qe;
		bfa_itnim_clear_stats(itnim);
	}
	memset(&fcpim->del_itn_stats, 0,
		sizeof(struct bfa_fcpim_del_itn_stats_s));
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
	return 0;
}

int
bfad_iocmd_fcpim_get_del_itn_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_fcpim_del_itn_stats_s *iocmd =
			(struct bfa_bsg_fcpim_del_itn_stats_s *)cmd;
	struct bfa_fcpim_s *fcpim = BFA_FCPIM(&bfad->bfa);
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	memcpy((void *)&iocmd->modstats, (void *)&fcpim->del_itn_stats,
		sizeof(struct bfa_fcpim_del_itn_stats_s));
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
	return 0;
}
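
/*
 * Per-itnim attribute and statistics handlers, plus synchronous FC port
 * enable/disable wrappers.
 */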
static int
bfad_iocmd_itnim_get_attr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_itnim_attr_s *iocmd = (struct bfa_bsg_itnim_attr_s *)cmd;
	struct bfa_fcs_lport_s *fcs_port;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->lpwwn);
	if (!fcs_port)
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
	else
		iocmd->status = bfa_fcs_itnim_attr_get(fcs_port,
					iocmd->rpwwn, &iocmd->attr);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	return 0;
}

static int
bfad_iocmd_itnim_get_iostats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_itnim_iostats_s *iocmd =
			(struct bfa_bsg_itnim_iostats_s *)cmd;
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_fcs_itnim_s *itnim;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->lpwwn);
	if (!fcs_port) {
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
		bfa_trc(bfad, 0);
	} else {
		itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn);
		if (itnim == NULL)
			iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
		else {
			iocmd->status = BFA_STATUS_OK;
			if (bfa_fcs_itnim_get_halitn(itnim))
				memcpy((void *)&iocmd->iostats, (void *)
				&(bfa_fcs_itnim_get_halitn(itnim)->stats),
				sizeof(struct bfa_itnim_iostats_s));
		}
	}
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	return 0;
}

static int
bfad_iocmd_itnim_reset_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_rport_reset_stats_s *iocmd =
			(struct bfa_bsg_rport_reset_stats_s *)cmd;
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_fcs_itnim_s *itnim;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->pwwn);
	if (!fcs_port)
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
	else {
		itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn);
		if (itnim == NULL)
			iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
		else {
			iocmd->status = BFA_STATUS_OK;
			bfa_fcs_itnim_stats_clear(fcs_port, iocmd->rpwwn);
			bfa_itnim_clear_stats(bfa_fcs_itnim_get_halitn(itnim));
		}
	}
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	return 0;
}

static int
bfad_iocmd_itnim_get_itnstats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_itnim_itnstats_s *iocmd =
			(struct bfa_bsg_itnim_itnstats_s *)cmd;
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_fcs_itnim_s *itnim;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->lpwwn);
	if (!fcs_port) {
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
		bfa_trc(bfad, 0);
	} else {
		itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn);
		if (itnim == NULL)
			iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
		else {
			iocmd->status = BFA_STATUS_OK;
			bfa_fcs_itnim_stats_get(fcs_port, iocmd->rpwwn,
					&iocmd->itnstats);
		}
	}
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	return 0;
}

int
bfad_iocmd_fcport_enable(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_fcport_enable(&bfad->bfa);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	return 0;
}

int
bfad_iocmd_fcport_disable(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_fcport_disable(&bfad->bfa);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	return 0;
}
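
/*
 * PCI function (ablk) management: query, create, delete and bandwidth
 * update requests are forwarded to the ablk module and completed through
 * bfad_hcb_comp, as are the adapter/port mode, option ROM and FAA
 * commands that follow.
 */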
int
bfad_iocmd_ioc_get_pcifn_cfg(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_pcifn_cfg_s *iocmd = (struct bfa_bsg_pcifn_cfg_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_ablk_query(&bfad->bfa.modules.ablk,
				&iocmd->pcifn_cfg,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;

	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_pcifn_create(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_pcifn_s *iocmd = (struct bfa_bsg_pcifn_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_ablk_pf_create(&bfad->bfa.modules.ablk,
				&iocmd->pcifn_id, iocmd->port,
				iocmd->pcifn_class, iocmd->bandwidth,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;

	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_pcifn_delete(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_pcifn_s *iocmd = (struct bfa_bsg_pcifn_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_ablk_pf_delete(&bfad->bfa.modules.ablk,
				iocmd->pcifn_id,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;

	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_pcifn_bw(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_pcifn_s *iocmd = (struct bfa_bsg_pcifn_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_ablk_pf_update(&bfad->bfa.modules.ablk,
				iocmd->pcifn_id, iocmd->bandwidth,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	bfa_trc(bfad, iocmd->status);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;

	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
	bfa_trc(bfad, iocmd->status);
out:
	return 0;
}

int
bfad_iocmd_adapter_cfg_mode(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_adapter_cfg_mode_s *iocmd =
			(struct bfa_bsg_adapter_cfg_mode_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags = 0;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_ablk_adapter_config(&bfad->bfa.modules.ablk,
				iocmd->cfg.mode, iocmd->cfg.max_pf,
				iocmd->cfg.max_vf, bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;

	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_port_cfg_mode(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_port_cfg_mode_s *iocmd =
			(struct bfa_bsg_port_cfg_mode_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags = 0;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_ablk_port_config(&bfad->bfa.modules.ablk,
				iocmd->instance, iocmd->cfg.mode,
				iocmd->cfg.max_pf, iocmd->cfg.max_vf,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;

	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_ablk_optrom(struct bfad_s *bfad, unsigned int cmd, void *pcmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	if (cmd == IOCMD_FLASH_ENABLE_OPTROM)
		iocmd->status = bfa_ablk_optrom_en(&bfad->bfa.modules.ablk,
					bfad_hcb_comp, &fcomp);
	else
		iocmd->status = bfa_ablk_optrom_dis(&bfad->bfa.modules.ablk,
					bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;

	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_faa_query(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_faa_attr_s *iocmd = (struct bfa_bsg_faa_attr_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	iocmd->status = BFA_STATUS_OK;
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_faa_query(&bfad->bfa, &iocmd->faa_attr,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;

	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}
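
/*
 * CEE attribute and statistics queries: the response buffer follows the
 * bsg header, and bfad_mutex serializes these requests across concurrent
 * callers.
 */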
  1152. int
  1153. bfad_iocmd_cee_attr(struct bfad_s *bfad, void *cmd, unsigned int payload_len)
  1154. {
  1155. struct bfa_bsg_cee_attr_s *iocmd =
  1156. (struct bfa_bsg_cee_attr_s *)cmd;
  1157. void *iocmd_bufptr;
  1158. struct bfad_hal_comp cee_comp;
  1159. unsigned long flags;
  1160. if (bfad_chk_iocmd_sz(payload_len,
  1161. sizeof(struct bfa_bsg_cee_attr_s),
  1162. sizeof(struct bfa_cee_attr_s)) != BFA_STATUS_OK) {
  1163. iocmd->status = BFA_STATUS_VERSION_FAIL;
  1164. return 0;
  1165. }
  1166. iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_cee_attr_s);
  1167. cee_comp.status = 0;
  1168. init_completion(&cee_comp.comp);
  1169. mutex_lock(&bfad_mutex);
  1170. spin_lock_irqsave(&bfad->bfad_lock, flags);
  1171. iocmd->status = bfa_cee_get_attr(&bfad->bfa.modules.cee, iocmd_bufptr,
  1172. bfad_hcb_comp, &cee_comp);
  1173. spin_unlock_irqrestore(&bfad->bfad_lock, flags);
  1174. if (iocmd->status != BFA_STATUS_OK) {
  1175. mutex_unlock(&bfad_mutex);
  1176. bfa_trc(bfad, 0x5555);
  1177. goto out;
  1178. }
  1179. wait_for_completion(&cee_comp.comp);
  1180. mutex_unlock(&bfad_mutex);
  1181. out:
  1182. return 0;
  1183. }
  1184. int
  1185. bfad_iocmd_cee_get_stats(struct bfad_s *bfad, void *cmd,
  1186. unsigned int payload_len)
  1187. {
  1188. struct bfa_bsg_cee_stats_s *iocmd =
  1189. (struct bfa_bsg_cee_stats_s *)cmd;
  1190. void *iocmd_bufptr;
  1191. struct bfad_hal_comp cee_comp;
  1192. unsigned long flags;
  1193. if (bfad_chk_iocmd_sz(payload_len,
  1194. sizeof(struct bfa_bsg_cee_stats_s),
  1195. sizeof(struct bfa_cee_stats_s)) != BFA_STATUS_OK) {
  1196. iocmd->status = BFA_STATUS_VERSION_FAIL;
  1197. return 0;
  1198. }
  1199. iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_cee_stats_s);
  1200. cee_comp.status = 0;
  1201. init_completion(&cee_comp.comp);
  1202. mutex_lock(&bfad_mutex);
  1203. spin_lock_irqsave(&bfad->bfad_lock, flags);
  1204. iocmd->status = bfa_cee_get_stats(&bfad->bfa.modules.cee, iocmd_bufptr,
  1205. bfad_hcb_comp, &cee_comp);
  1206. spin_unlock_irqrestore(&bfad->bfad_lock, flags);
  1207. if (iocmd->status != BFA_STATUS_OK) {
  1208. mutex_unlock(&bfad_mutex);
  1209. bfa_trc(bfad, 0x5555);
  1210. goto out;
  1211. }
  1212. wait_for_completion(&cee_comp.comp);
  1213. mutex_unlock(&bfad_mutex);
  1214. out:
  1215. return 0;
  1216. }
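/* Reset the CEE statistics; no completion callback is waited for. */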
  1217. int
  1218. bfad_iocmd_cee_reset_stats(struct bfad_s *bfad, void *cmd)
  1219. {
  1220. struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
  1221. unsigned long flags;
  1222. spin_lock_irqsave(&bfad->bfad_lock, flags);
  1223. iocmd->status = bfa_cee_reset_stats(&bfad->bfa.modules.cee, NULL, NULL);
  1224. spin_unlock_irqrestore(&bfad->bfad_lock, flags);
  1225. if (iocmd->status != BFA_STATUS_OK)
  1226. bfa_trc(bfad, 0x5555);
  1227. return 0;
  1228. }
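/*
 * Query the SFP media type. The completion is waited for only when the
 * SFP module reports BFA_STATUS_SFP_NOT_READY.
 */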
  1229. int
  1230. bfad_iocmd_sfp_media(struct bfad_s *bfad, void *cmd)
  1231. {
  1232. struct bfa_bsg_sfp_media_s *iocmd = (struct bfa_bsg_sfp_media_s *)cmd;
  1233. struct bfad_hal_comp fcomp;
  1234. unsigned long flags;
  1235. init_completion(&fcomp.comp);
  1236. spin_lock_irqsave(&bfad->bfad_lock, flags);
  1237. iocmd->status = bfa_sfp_media(BFA_SFP_MOD(&bfad->bfa), &iocmd->media,
  1238. bfad_hcb_comp, &fcomp);
  1239. spin_unlock_irqrestore(&bfad->bfad_lock, flags);
  1240. bfa_trc(bfad, iocmd->status);
  1241. if (iocmd->status != BFA_STATUS_SFP_NOT_READY)
  1242. goto out;
  1243. wait_for_completion(&fcomp.comp);
  1244. iocmd->status = fcomp.status;
  1245. out:
  1246. return 0;
  1247. }
  1248. int
  1249. bfad_iocmd_sfp_speed(struct bfad_s *bfad, void *cmd)
  1250. {
  1251. struct bfa_bsg_sfp_speed_s *iocmd = (struct bfa_bsg_sfp_speed_s *)cmd;
  1252. struct bfad_hal_comp fcomp;
  1253. unsigned long flags;
  1254. init_completion(&fcomp.comp);
  1255. spin_lock_irqsave(&bfad->bfad_lock, flags);
  1256. iocmd->status = bfa_sfp_speed(BFA_SFP_MOD(&bfad->bfa), iocmd->speed,
  1257. bfad_hcb_comp, &fcomp);
  1258. spin_unlock_irqrestore(&bfad->bfad_lock, flags);
  1259. bfa_trc(bfad, iocmd->status);
  1260. if (iocmd->status != BFA_STATUS_SFP_NOT_READY)
  1261. goto out;
  1262. wait_for_completion(&fcomp.comp);
  1263. iocmd->status = fcomp.status;
  1264. out:
  1265. return 0;
  1266. }
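/* Read the flash attributes and wait for the firmware completion. */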
  1267. int
  1268. bfad_iocmd_flash_get_attr(struct bfad_s *bfad, void *cmd)
  1269. {
  1270. struct bfa_bsg_flash_attr_s *iocmd =
  1271. (struct bfa_bsg_flash_attr_s *)cmd;
  1272. struct bfad_hal_comp fcomp;
  1273. unsigned long flags;
  1274. init_completion(&fcomp.comp);
  1275. spin_lock_irqsave(&bfad->bfad_lock, flags);
  1276. iocmd->status = bfa_flash_get_attr(BFA_FLASH(&bfad->bfa), &iocmd->attr,
  1277. bfad_hcb_comp, &fcomp);
  1278. spin_unlock_irqrestore(&bfad->bfad_lock, flags);
  1279. if (iocmd->status != BFA_STATUS_OK)
  1280. goto out;
  1281. wait_for_completion(&fcomp.comp);
  1282. iocmd->status = fcomp.status;
  1283. out:
  1284. return 0;
  1285. }
  1286. int
  1287. bfad_iocmd_flash_erase_part(struct bfad_s *bfad, void *cmd)
  1288. {
  1289. struct bfa_bsg_flash_s *iocmd = (struct bfa_bsg_flash_s *)cmd;
  1290. struct bfad_hal_comp fcomp;
  1291. unsigned long flags;
  1292. init_completion(&fcomp.comp);
  1293. spin_lock_irqsave(&bfad->bfad_lock, flags);
  1294. iocmd->status = bfa_flash_erase_part(BFA_FLASH(&bfad->bfa), iocmd->type,
  1295. iocmd->instance, bfad_hcb_comp, &fcomp);
  1296. spin_unlock_irqrestore(&bfad->bfad_lock, flags);
  1297. if (iocmd->status != BFA_STATUS_OK)
  1298. goto out;
  1299. wait_for_completion(&fcomp.comp);
  1300. iocmd->status = fcomp.status;
  1301. out:
  1302. return 0;
  1303. }
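/*
 * Write a flash partition using the data that follows the bsg header;
 * the payload length is checked against iocmd->bufsz first.
 */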
  1304. int
  1305. bfad_iocmd_flash_update_part(struct bfad_s *bfad, void *cmd,
  1306. unsigned int payload_len)
  1307. {
  1308. struct bfa_bsg_flash_s *iocmd = (struct bfa_bsg_flash_s *)cmd;
  1309. void *iocmd_bufptr;
  1310. struct bfad_hal_comp fcomp;
  1311. unsigned long flags;
  1312. if (bfad_chk_iocmd_sz(payload_len,
  1313. sizeof(struct bfa_bsg_flash_s),
  1314. iocmd->bufsz) != BFA_STATUS_OK) {
  1315. iocmd->status = BFA_STATUS_VERSION_FAIL;
  1316. return 0;
  1317. }
  1318. iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_flash_s);
  1319. init_completion(&fcomp.comp);
  1320. spin_lock_irqsave(&bfad->bfad_lock, flags);
  1321. iocmd->status = bfa_flash_update_part(BFA_FLASH(&bfad->bfa),
  1322. iocmd->type, iocmd->instance, iocmd_bufptr,
  1323. iocmd->bufsz, 0, bfad_hcb_comp, &fcomp);
  1324. spin_unlock_irqrestore(&bfad->bfad_lock, flags);
  1325. if (iocmd->status != BFA_STATUS_OK)
  1326. goto out;
  1327. wait_for_completion(&fcomp.comp);
  1328. iocmd->status = fcomp.status;
  1329. out:
  1330. return 0;
  1331. }
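/* Read a flash partition into the buffer that follows the bsg header. */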
  1332. int
  1333. bfad_iocmd_flash_read_part(struct bfad_s *bfad, void *cmd,
  1334. unsigned int payload_len)
  1335. {
  1336. struct bfa_bsg_flash_s *iocmd = (struct bfa_bsg_flash_s *)cmd;
  1337. struct bfad_hal_comp fcomp;
  1338. void *iocmd_bufptr;
  1339. unsigned long flags;
  1340. if (bfad_chk_iocmd_sz(payload_len,
  1341. sizeof(struct bfa_bsg_flash_s),
  1342. iocmd->bufsz) != BFA_STATUS_OK) {
  1343. iocmd->status = BFA_STATUS_VERSION_FAIL;
  1344. return 0;
  1345. }
  1346. iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_flash_s);
  1347. init_completion(&fcomp.comp);
  1348. spin_lock_irqsave(&bfad->bfad_lock, flags);
  1349. iocmd->status = bfa_flash_read_part(BFA_FLASH(&bfad->bfa), iocmd->type,
  1350. iocmd->instance, iocmd_bufptr, iocmd->bufsz, 0,
  1351. bfad_hcb_comp, &fcomp);
  1352. spin_unlock_irqrestore(&bfad->bfad_lock, flags);
  1353. if (iocmd->status != BFA_STATUS_OK)
  1354. goto out;
  1355. wait_for_completion(&fcomp.comp);
  1356. iocmd->status = fcomp.status;
  1357. out:
  1358. return 0;
  1359. }
  1360. int
  1361. bfad_iocmd_diag_temp(struct bfad_s *bfad, void *cmd)
  1362. {
  1363. struct bfa_bsg_diag_get_temp_s *iocmd =
  1364. (struct bfa_bsg_diag_get_temp_s *)cmd;
  1365. struct bfad_hal_comp fcomp;
  1366. unsigned long flags;
  1367. init_completion(&fcomp.comp);
  1368. spin_lock_irqsave(&bfad->bfad_lock, flags);
  1369. iocmd->status = bfa_diag_tsensor_query(BFA_DIAG_MOD(&bfad->bfa),
  1370. &iocmd->result, bfad_hcb_comp, &fcomp);
  1371. spin_unlock_irqrestore(&bfad->bfad_lock, flags);
  1372. bfa_trc(bfad, iocmd->status);
  1373. if (iocmd->status != BFA_STATUS_OK)
  1374. goto out;
  1375. wait_for_completion(&fcomp.comp);
  1376. iocmd->status = fcomp.status;
  1377. out:
  1378. return 0;
  1379. }
  1380. int
  1381. bfad_iocmd_diag_memtest(struct bfad_s *bfad, void *cmd)
  1382. {
  1383. struct bfa_bsg_diag_memtest_s *iocmd =
  1384. (struct bfa_bsg_diag_memtest_s *)cmd;
  1385. struct bfad_hal_comp fcomp;
  1386. unsigned long flags;
  1387. init_completion(&fcomp.comp);
  1388. spin_lock_irqsave(&bfad->bfad_lock, flags);
  1389. iocmd->status = bfa_diag_memtest(BFA_DIAG_MOD(&bfad->bfa),
  1390. &iocmd->memtest, iocmd->pat,
  1391. &iocmd->result, bfad_hcb_comp, &fcomp);
  1392. spin_unlock_irqrestore(&bfad->bfad_lock, flags);
  1393. bfa_trc(bfad, iocmd->status);
  1394. if (iocmd->status != BFA_STATUS_OK)
  1395. goto out;
  1396. wait_for_completion(&fcomp.comp);
  1397. iocmd->status = fcomp.status;
  1398. out:
  1399. return 0;
  1400. }
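/*
 * Run the diagnostic loopback test with the requested opmode, speed,
 * frame count and data pattern.
 */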
  1401. int
  1402. bfad_iocmd_diag_loopback(struct bfad_s *bfad, void *cmd)
  1403. {
  1404. struct bfa_bsg_diag_loopback_s *iocmd =
  1405. (struct bfa_bsg_diag_loopback_s *)cmd;
  1406. struct bfad_hal_comp fcomp;
  1407. unsigned long flags;
  1408. init_completion(&fcomp.comp);
  1409. spin_lock_irqsave(&bfad->bfad_lock, flags);
  1410. iocmd->status = bfa_fcdiag_loopback(&bfad->bfa, iocmd->opmode,
  1411. iocmd->speed, iocmd->lpcnt, iocmd->pat,
  1412. &iocmd->result, bfad_hcb_comp, &fcomp);
  1413. spin_unlock_irqrestore(&bfad->bfad_lock, flags);
  1414. bfa_trc(bfad, iocmd->status);
  1415. if (iocmd->status != BFA_STATUS_OK)
  1416. goto out;
  1417. wait_for_completion(&fcomp.comp);
  1418. iocmd->status = fcomp.status;
  1419. out:
  1420. return 0;
  1421. }
  1422. int
  1423. bfad_iocmd_diag_fwping(struct bfad_s *bfad, void *cmd)
  1424. {
  1425. struct bfa_bsg_diag_fwping_s *iocmd =
  1426. (struct bfa_bsg_diag_fwping_s *)cmd;
  1427. struct bfad_hal_comp fcomp;
  1428. unsigned long flags;
  1429. init_completion(&fcomp.comp);
  1430. spin_lock_irqsave(&bfad->bfad_lock, flags);
  1431. iocmd->status = bfa_diag_fwping(BFA_DIAG_MOD(&bfad->bfa), iocmd->cnt,
  1432. iocmd->pattern, &iocmd->result,
  1433. bfad_hcb_comp, &fcomp);
  1434. spin_unlock_irqrestore(&bfad->bfad_lock, flags);
  1435. bfa_trc(bfad, iocmd->status);
  1436. if (iocmd->status != BFA_STATUS_OK)
  1437. goto out;
  1438. bfa_trc(bfad, 0x77771);
  1439. wait_for_completion(&fcomp.comp);
  1440. iocmd->status = fcomp.status;
  1441. out:
  1442. return 0;
  1443. }
  1444. int
  1445. bfad_iocmd_diag_queuetest(struct bfad_s *bfad, void *cmd)
  1446. {
  1447. struct bfa_bsg_diag_qtest_s *iocmd = (struct bfa_bsg_diag_qtest_s *)cmd;
  1448. struct bfad_hal_comp fcomp;
  1449. unsigned long flags;
  1450. init_completion(&fcomp.comp);
  1451. spin_lock_irqsave(&bfad->bfad_lock, flags);
  1452. iocmd->status = bfa_fcdiag_queuetest(&bfad->bfa, iocmd->force,
  1453. iocmd->queue, &iocmd->result,
  1454. bfad_hcb_comp, &fcomp);
  1455. spin_unlock_irqrestore(&bfad->bfad_lock, flags);
  1456. if (iocmd->status != BFA_STATUS_OK)
  1457. goto out;
  1458. wait_for_completion(&fcomp.comp);
  1459. iocmd->status = fcomp.status;
  1460. out:
  1461. return 0;
  1462. }
  1463. int
  1464. bfad_iocmd_diag_sfp(struct bfad_s *bfad, void *cmd)
  1465. {
  1466. struct bfa_bsg_sfp_show_s *iocmd =
  1467. (struct bfa_bsg_sfp_show_s *)cmd;
  1468. struct bfad_hal_comp fcomp;
  1469. unsigned long flags;
  1470. init_completion(&fcomp.comp);
  1471. spin_lock_irqsave(&bfad->bfad_lock, flags);
  1472. iocmd->status = bfa_sfp_show(BFA_SFP_MOD(&bfad->bfa), &iocmd->sfp,
  1473. bfad_hcb_comp, &fcomp);
  1474. spin_unlock_irqrestore(&bfad->bfad_lock, flags);
  1475. bfa_trc(bfad, iocmd->status);
  1476. if (iocmd->status != BFA_STATUS_OK)
  1477. goto out;
  1478. wait_for_completion(&fcomp.comp);
  1479. iocmd->status = fcomp.status;
  1480. bfa_trc(bfad, iocmd->status);
  1481. out:
  1482. return 0;
  1483. }
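/* Issue the diagnostic LED test request; no completion is waited for. */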
  1484. int
  1485. bfad_iocmd_diag_led(struct bfad_s *bfad, void *cmd)
  1486. {
  1487. struct bfa_bsg_diag_led_s *iocmd = (struct bfa_bsg_diag_led_s *)cmd;
  1488. unsigned long flags;
  1489. spin_lock_irqsave(&bfad->bfad_lock, flags);
  1490. iocmd->status = bfa_diag_ledtest(BFA_DIAG_MOD(&bfad->bfa),
  1491. &iocmd->ledtest);
  1492. spin_unlock_irqrestore(&bfad->bfad_lock, flags);
  1493. return 0;
  1494. }
  1495. int
  1496. bfad_iocmd_diag_beacon_lport(struct bfad_s *bfad, void *cmd)
  1497. {
  1498. struct bfa_bsg_diag_beacon_s *iocmd =
  1499. (struct bfa_bsg_diag_beacon_s *)cmd;
  1500. unsigned long flags;
  1501. spin_lock_irqsave(&bfad->bfad_lock, flags);
  1502. iocmd->status = bfa_diag_beacon_port(BFA_DIAG_MOD(&bfad->bfa),
  1503. iocmd->beacon, iocmd->link_e2e_beacon,
  1504. iocmd->second);
  1505. spin_unlock_irqrestore(&bfad->bfad_lock, flags);
  1506. return 0;
  1507. }
  1508. int
  1509. bfad_iocmd_diag_lb_stat(struct bfad_s *bfad, void *cmd)
  1510. {
  1511. struct bfa_bsg_diag_lb_stat_s *iocmd =
  1512. (struct bfa_bsg_diag_lb_stat_s *)cmd;
  1513. unsigned long flags;
  1514. spin_lock_irqsave(&bfad->bfad_lock, flags);
  1515. iocmd->status = bfa_fcdiag_lb_is_running(&bfad->bfa);
  1516. spin_unlock_irqrestore(&bfad->bfad_lock, flags);
  1517. bfa_trc(bfad, iocmd->status);
  1518. return 0;
  1519. }
  1520. int
  1521. bfad_iocmd_phy_get_attr(struct bfad_s *bfad, void *cmd)
  1522. {
  1523. struct bfa_bsg_phy_attr_s *iocmd =
  1524. (struct bfa_bsg_phy_attr_s *)cmd;
  1525. struct bfad_hal_comp fcomp;
  1526. unsigned long flags;
  1527. init_completion(&fcomp.comp);
  1528. spin_lock_irqsave(&bfad->bfad_lock, flags);
  1529. iocmd->status = bfa_phy_get_attr(BFA_PHY(&bfad->bfa), iocmd->instance,
  1530. &iocmd->attr, bfad_hcb_comp, &fcomp);
  1531. spin_unlock_irqrestore(&bfad->bfad_lock, flags);
  1532. if (iocmd->status != BFA_STATUS_OK)
  1533. goto out;
  1534. wait_for_completion(&fcomp.comp);
  1535. iocmd->status = fcomp.status;
  1536. out:
  1537. return 0;
  1538. }
  1539. int
  1540. bfad_iocmd_phy_get_stats(struct bfad_s *bfad, void *cmd)
  1541. {
  1542. struct bfa_bsg_phy_stats_s *iocmd =
  1543. (struct bfa_bsg_phy_stats_s *)cmd;
  1544. struct bfad_hal_comp fcomp;
  1545. unsigned long flags;
  1546. init_completion(&fcomp.comp);
  1547. spin_lock_irqsave(&bfad->bfad_lock, flags);
  1548. iocmd->status = bfa_phy_get_stats(BFA_PHY(&bfad->bfa), iocmd->instance,
  1549. &iocmd->stats, bfad_hcb_comp, &fcomp);
  1550. spin_unlock_irqrestore(&bfad->bfad_lock, flags);
  1551. if (iocmd->status != BFA_STATUS_OK)
  1552. goto out;
  1553. wait_for_completion(&fcomp.comp);
  1554. iocmd->status = fcomp.status;
  1555. out:
  1556. return 0;
  1557. }
  1558. int
  1559. bfad_iocmd_phy_read(struct bfad_s *bfad, void *cmd, unsigned int payload_len)
  1560. {
  1561. struct bfa_bsg_phy_s *iocmd = (struct bfa_bsg_phy_s *)cmd;
  1562. struct bfad_hal_comp fcomp;
  1563. void *iocmd_bufptr;
  1564. unsigned long flags;
  1565. if (bfad_chk_iocmd_sz(payload_len,
  1566. sizeof(struct bfa_bsg_phy_s),
  1567. iocmd->bufsz) != BFA_STATUS_OK) {
  1568. iocmd->status = BFA_STATUS_VERSION_FAIL;
  1569. return 0;
  1570. }
  1571. iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_phy_s);
  1572. init_completion(&fcomp.comp);
  1573. spin_lock_irqsave(&bfad->bfad_lock, flags);
  1574. iocmd->status = bfa_phy_read(BFA_PHY(&bfad->bfa),
  1575. iocmd->instance, iocmd_bufptr, iocmd->bufsz,
  1576. 0, bfad_hcb_comp, &fcomp);
  1577. spin_unlock_irqrestore(&bfad->bfad_lock, flags);
  1578. if (iocmd->status != BFA_STATUS_OK)
  1579. goto out;
  1580. wait_for_completion(&fcomp.comp);
  1581. iocmd->status = fcomp.status;
  1584. out:
  1585. return 0;
  1586. }
  1587. int
  1588. bfad_iocmd_vhba_query(struct bfad_s *bfad, void *cmd)
  1589. {
  1590. struct bfa_bsg_vhba_attr_s *iocmd =
  1591. (struct bfa_bsg_vhba_attr_s *)cmd;
  1592. struct bfa_vhba_attr_s *attr = &iocmd->attr;
  1593. unsigned long flags;
  1594. spin_lock_irqsave(&bfad->bfad_lock, flags);
  1595. attr->pwwn = bfad->bfa.ioc.attr->pwwn;
  1596. attr->nwwn = bfad->bfa.ioc.attr->nwwn;
  1597. attr->plog_enabled = (bfa_boolean_t)bfad->bfa.plog->plog_enabled;
  1598. attr->io_profile = bfa_fcpim_get_io_profile(&bfad->bfa);
  1599. attr->path_tov = bfa_fcpim_path_tov_get(&bfad->bfa);
  1600. iocmd->status = BFA_STATUS_OK;
  1601. spin_unlock_irqrestore(&bfad->bfad_lock, flags);
  1602. return 0;
  1603. }
  1604. int
  1605. bfad_iocmd_phy_update(struct bfad_s *bfad, void *cmd, unsigned int payload_len)
  1606. {
  1607. struct bfa_bsg_phy_s *iocmd = (struct bfa_bsg_phy_s *)cmd;
  1608. void *iocmd_bufptr;
  1609. struct bfad_hal_comp fcomp;
  1610. unsigned long flags;
  1611. if (bfad_chk_iocmd_sz(payload_len,
  1612. sizeof(struct bfa_bsg_phy_s),
  1613. iocmd->bufsz) != BFA_STATUS_OK) {
  1614. iocmd->status = BFA_STATUS_VERSION_FAIL;
  1615. return 0;
  1616. }
  1617. iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_phy_s);
  1618. init_completion(&fcomp.comp);
  1619. spin_lock_irqsave(&bfad->bfad_lock, flags);
  1620. iocmd->status = bfa_phy_update(BFA_PHY(&bfad->bfa),
  1621. iocmd->instance, iocmd_bufptr, iocmd->bufsz,
  1622. 0, bfad_hcb_comp, &fcomp);
  1623. spin_unlock_irqrestore(&bfad->bfad_lock, flags);
  1624. if (iocmd->status != BFA_STATUS_OK)
  1625. goto out;
  1626. wait_for_completion(&fcomp.comp);
  1627. iocmd->status = fcomp.status;
  1628. out:
  1629. return 0;
  1630. }
  1631. int
  1632. bfad_iocmd_porglog_get(struct bfad_s *bfad, void *cmd)
  1633. {
  1634. struct bfa_bsg_debug_s *iocmd = (struct bfa_bsg_debug_s *)cmd;
  1635. void *iocmd_bufptr;
  1636. if (iocmd->bufsz < sizeof(struct bfa_plog_s)) {
  1637. bfa_trc(bfad, sizeof(struct bfa_plog_s));
  1638. iocmd->status = BFA_STATUS_EINVAL;
  1639. goto out;
  1640. }
  1641. iocmd->status = BFA_STATUS_OK;
  1642. iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_debug_s);
  1643. memcpy(iocmd_bufptr, (u8 *) &bfad->plog_buf, sizeof(struct bfa_plog_s));
  1644. out:
  1645. return 0;
  1646. }
  1647. #define BFA_DEBUG_FW_CORE_CHUNK_SZ 0x4000U /* 16K chunks for FW dump */
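/*
 * Read one chunk of the firmware core from the IOC. The buffer must be
 * at least one chunk long and the offset/size suitably aligned.
 */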
  1648. int
  1649. bfad_iocmd_debug_fw_core(struct bfad_s *bfad, void *cmd,
  1650. unsigned int payload_len)
  1651. {
  1652. struct bfa_bsg_debug_s *iocmd = (struct bfa_bsg_debug_s *)cmd;
  1653. void *iocmd_bufptr;
  1654. unsigned long flags;
  1655. u32 offset;
  1656. if (bfad_chk_iocmd_sz(payload_len, sizeof(struct bfa_bsg_debug_s),
  1657. BFA_DEBUG_FW_CORE_CHUNK_SZ) != BFA_STATUS_OK) {
  1658. iocmd->status = BFA_STATUS_VERSION_FAIL;
  1659. return 0;
  1660. }
  1661. if (iocmd->bufsz < BFA_DEBUG_FW_CORE_CHUNK_SZ ||
  1662. !IS_ALIGNED(iocmd->bufsz, sizeof(u16)) ||
  1663. !IS_ALIGNED(iocmd->offset, sizeof(u32))) {
  1664. bfa_trc(bfad, BFA_DEBUG_FW_CORE_CHUNK_SZ);
  1665. iocmd->status = BFA_STATUS_EINVAL;
  1666. goto out;
  1667. }
  1668. iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_debug_s);
  1669. spin_lock_irqsave(&bfad->bfad_lock, flags);
  1670. offset = iocmd->offset;
  1671. iocmd->status = bfa_ioc_debug_fwcore(&bfad->bfa.ioc, iocmd_bufptr,
  1672. &offset, &iocmd->bufsz);
  1673. iocmd->offset = offset;
  1674. spin_unlock_irqrestore(&bfad->bfad_lock, flags);
  1675. out:
  1676. return 0;
  1677. }
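/*
 * Debug controls: re-arm the one-shot firmware state save, clear the
 * port log buffer, or start/stop the driver trace module.
 */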
  1678. int
  1679. bfad_iocmd_debug_ctl(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
  1680. {
  1681. struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
  1682. unsigned long flags;
  1683. if (v_cmd == IOCMD_DEBUG_FW_STATE_CLR) {
  1684. spin_lock_irqsave(&bfad->bfad_lock, flags);
  1685. bfad->bfa.ioc.dbg_fwsave_once = BFA_TRUE;
  1686. spin_unlock_irqrestore(&bfad->bfad_lock, flags);
  1687. } else if (v_cmd == IOCMD_DEBUG_PORTLOG_CLR)
  1688. bfad->plog_buf.head = bfad->plog_buf.tail = 0;
  1689. else if (v_cmd == IOCMD_DEBUG_START_DTRC)
  1690. bfa_trc_init(bfad->trcmod);
  1691. else if (v_cmd == IOCMD_DEBUG_STOP_DTRC)
  1692. bfa_trc_stop(bfad->trcmod);
  1693. iocmd->status = BFA_STATUS_OK;
  1694. return 0;
  1695. }
  1696. int
  1697. bfad_iocmd_porglog_ctl(struct bfad_s *bfad, void *cmd)
  1698. {
  1699. struct bfa_bsg_portlogctl_s *iocmd = (struct bfa_bsg_portlogctl_s *)cmd;
  1700. if (iocmd->ctl == BFA_TRUE)
  1701. bfad->plog_buf.plog_enabled = 1;
  1702. else
  1703. bfad->plog_buf.plog_enabled = 0;
  1704. iocmd->status = BFA_STATUS_OK;
  1705. return 0;
  1706. }
  1707. int
  1708. bfad_iocmd_fcpim_cfg_profile(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
  1709. {
  1710. struct bfa_bsg_fcpim_profile_s *iocmd =
  1711. (struct bfa_bsg_fcpim_profile_s *)cmd;
  1712. struct timeval tv;
  1713. unsigned long flags;
  1714. do_gettimeofday(&tv);
  1715. spin_lock_irqsave(&bfad->bfad_lock, flags);
  1716. if (v_cmd == IOCMD_FCPIM_PROFILE_ON)
  1717. iocmd->status = bfa_fcpim_profile_on(&bfad->bfa, tv.tv_sec);
  1718. else if (v_cmd == IOCMD_FCPIM_PROFILE_OFF)
  1719. iocmd->status = bfa_fcpim_profile_off(&bfad->bfa);
  1720. spin_unlock_irqrestore(&bfad->bfad_lock, flags);
  1721. return 0;
  1722. }
  1723. static int
  1724. bfad_iocmd_itnim_get_ioprofile(struct bfad_s *bfad, void *cmd)
  1725. {
  1726. struct bfa_bsg_itnim_ioprofile_s *iocmd =
  1727. (struct bfa_bsg_itnim_ioprofile_s *)cmd;
  1728. struct bfa_fcs_lport_s *fcs_port;
  1729. struct bfa_fcs_itnim_s *itnim;
  1730. unsigned long flags;
  1731. spin_lock_irqsave(&bfad->bfad_lock, flags);
  1732. fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
  1733. iocmd->vf_id, iocmd->lpwwn);
  1734. if (!fcs_port)
  1735. iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
  1736. else {
  1737. itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn);
  1738. if (itnim == NULL)
  1739. iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
  1740. else
  1741. iocmd->status = bfa_itnim_get_ioprofile(
  1742. bfa_fcs_itnim_get_halitn(itnim),
  1743. &iocmd->ioprofile);
  1744. }
  1745. spin_unlock_irqrestore(&bfad->bfad_lock, flags);
  1746. return 0;
  1747. }
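/*
 * Fetch the FC port statistics through a pending-queue callback and
 * wait for the result.
 */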
  1748. int
  1749. bfad_iocmd_fcport_get_stats(struct bfad_s *bfad, void *cmd)
  1750. {
  1751. struct bfa_bsg_fcport_stats_s *iocmd =
  1752. (struct bfa_bsg_fcport_stats_s *)cmd;
  1753. struct bfad_hal_comp fcomp;
  1754. unsigned long flags;
  1755. struct bfa_cb_pending_q_s cb_qe;
  1756. init_completion(&fcomp.comp);
  1757. bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp,
  1758. &fcomp, &iocmd->stats);
  1759. spin_lock_irqsave(&bfad->bfad_lock, flags);
  1760. iocmd->status = bfa_fcport_get_stats(&bfad->bfa, &cb_qe);
  1761. spin_unlock_irqrestore(&bfad->bfad_lock, flags);
  1762. if (iocmd->status != BFA_STATUS_OK) {
  1763. bfa_trc(bfad, iocmd->status);
  1764. goto out;
  1765. }
  1766. wait_for_completion(&fcomp.comp);
  1767. iocmd->status = fcomp.status;
  1768. out:
  1769. return 0;
  1770. }
  1771. int
  1772. bfad_iocmd_fcport_reset_stats(struct bfad_s *bfad, void *cmd)
  1773. {
  1774. struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
  1775. struct bfad_hal_comp fcomp;
  1776. unsigned long flags;
  1777. struct bfa_cb_pending_q_s cb_qe;
  1778. init_completion(&fcomp.comp);
  1779. bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp, &fcomp, NULL);
  1780. spin_lock_irqsave(&bfad->bfad_lock, flags);
  1781. iocmd->status = bfa_fcport_clear_stats(&bfad->bfa, &cb_qe);
  1782. spin_unlock_irqrestore(&bfad->bfad_lock, flags);
  1783. if (iocmd->status != BFA_STATUS_OK) {
  1784. bfa_trc(bfad, iocmd->status);
  1785. goto out;
  1786. }
  1787. wait_for_completion(&fcomp.comp);
  1788. iocmd->status = fcomp.status;
  1789. out:
  1790. return 0;
  1791. }
  1792. int
  1793. bfad_iocmd_boot_cfg(struct bfad_s *bfad, void *cmd)
  1794. {
  1795. struct bfa_bsg_boot_s *iocmd = (struct bfa_bsg_boot_s *)cmd;
  1796. struct bfad_hal_comp fcomp;
  1797. unsigned long flags;
  1798. init_completion(&fcomp.comp);
  1799. spin_lock_irqsave(&bfad->bfad_lock, flags);
  1800. iocmd->status = bfa_flash_update_part(BFA_FLASH(&bfad->bfa),
  1801. BFA_FLASH_PART_BOOT, PCI_FUNC(bfad->pcidev->devfn),
  1802. &iocmd->cfg, sizeof(struct bfa_boot_cfg_s), 0,
  1803. bfad_hcb_comp, &fcomp);
  1804. spin_unlock_irqrestore(&bfad->bfad_lock, flags);
  1805. if (iocmd->status != BFA_STATUS_OK)
  1806. goto out;
  1807. wait_for_completion(&fcomp.comp);
  1808. iocmd->status = fcomp.status;
  1809. out:
  1810. return 0;
  1811. }
  1812. int
  1813. bfad_iocmd_boot_query(struct bfad_s *bfad, void *cmd)
  1814. {
  1815. struct bfa_bsg_boot_s *iocmd = (struct bfa_bsg_boot_s *)cmd;
  1816. struct bfad_hal_comp fcomp;
  1817. unsigned long flags;
  1818. init_completion(&fcomp.comp);
  1819. spin_lock_irqsave(&bfad->bfad_lock, flags);
  1820. iocmd->status = bfa_flash_read_part(BFA_FLASH(&bfad->bfa),
  1821. BFA_FLASH_PART_BOOT, PCI_FUNC(bfad->pcidev->devfn),
  1822. &iocmd->cfg, sizeof(struct bfa_boot_cfg_s), 0,
  1823. bfad_hcb_comp, &fcomp);
  1824. spin_unlock_irqrestore(&bfad->bfad_lock, flags);
  1825. if (iocmd->status != BFA_STATUS_OK)
  1826. goto out;
  1827. wait_for_completion(&fcomp.comp);
  1828. iocmd->status = fcomp.status;
  1829. out:
  1830. return 0;
  1831. }
  1832. int
  1833. bfad_iocmd_preboot_query(struct bfad_s *bfad, void *cmd)
  1834. {
  1835. struct bfa_bsg_preboot_s *iocmd = (struct bfa_bsg_preboot_s *)cmd;
  1836. struct bfi_iocfc_cfgrsp_s *cfgrsp = bfad->bfa.iocfc.cfgrsp;
  1837. struct bfa_boot_pbc_s *pbcfg = &iocmd->cfg;
  1838. unsigned long flags;
  1839. spin_lock_irqsave(&bfad->bfad_lock, flags);
  1840. pbcfg->enable = cfgrsp->pbc_cfg.boot_enabled;
  1841. pbcfg->nbluns = cfgrsp->pbc_cfg.nbluns;
  1842. pbcfg->speed = cfgrsp->pbc_cfg.port_speed;
  1843. memcpy(pbcfg->pblun, cfgrsp->pbc_cfg.blun, sizeof(pbcfg->pblun));
  1844. iocmd->status = BFA_STATUS_OK;
  1845. spin_unlock_irqrestore(&bfad->bfad_lock, flags);
  1846. return 0;
  1847. }
  1848. int
  1849. bfad_iocmd_ethboot_cfg(struct bfad_s *bfad, void *cmd)
  1850. {
  1851. struct bfa_bsg_ethboot_s *iocmd = (struct bfa_bsg_ethboot_s *)cmd;
  1852. struct bfad_hal_comp fcomp;
  1853. unsigned long flags;
  1854. init_completion(&fcomp.comp);
  1855. spin_lock_irqsave(&bfad->bfad_lock, flags);
  1856. iocmd->status = bfa_flash_update_part(BFA_FLASH(&bfad->bfa),
  1857. BFA_FLASH_PART_PXECFG,
  1858. bfad->bfa.ioc.port_id, &iocmd->cfg,
  1859. sizeof(struct bfa_ethboot_cfg_s), 0,
  1860. bfad_hcb_comp, &fcomp);
  1861. spin_unlock_irqrestore(&bfad->bfad_lock, flags);
  1862. if (iocmd->status != BFA_STATUS_OK)
  1863. goto out;
  1864. wait_for_completion(&fcomp.comp);
  1865. iocmd->status = fcomp.status;
  1866. out:
  1867. return 0;
  1868. }
  1869. int
  1870. bfad_iocmd_ethboot_query(struct bfad_s *bfad, void *cmd)
  1871. {
  1872. struct bfa_bsg_ethboot_s *iocmd = (struct bfa_bsg_ethboot_s *)cmd;
  1873. struct bfad_hal_comp fcomp;
  1874. unsigned long flags;
  1875. init_completion(&fcomp.comp);
  1876. spin_lock_irqsave(&bfad->bfad_lock, flags);
  1877. iocmd->status = bfa_flash_read_part(BFA_FLASH(&bfad->bfa),
  1878. BFA_FLASH_PART_PXECFG,
  1879. bfad->bfa.ioc.port_id, &iocmd->cfg,
  1880. sizeof(struct bfa_ethboot_cfg_s), 0,
  1881. bfad_hcb_comp, &fcomp);
  1882. spin_unlock_irqrestore(&bfad->bfad_lock, flags);
  1883. if (iocmd->status != BFA_STATUS_OK)
  1884. goto out;
  1885. wait_for_completion(&fcomp.comp);
  1886. iocmd->status = fcomp.status;
  1887. out:
  1888. return 0;
  1889. }
  1890. int
  1891. bfad_iocmd_cfg_trunk(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
  1892. {
  1893. struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
  1894. struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
  1895. struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
  1896. unsigned long flags;
  1897. spin_lock_irqsave(&bfad->bfad_lock, flags);
  1898. if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) ||
  1899. (fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
  1900. iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
  1901. else {
  1902. if (v_cmd == IOCMD_TRUNK_ENABLE) {
  1903. trunk->attr.state = BFA_TRUNK_OFFLINE;
  1904. bfa_fcport_disable(&bfad->bfa);
  1905. fcport->cfg.trunked = BFA_TRUE;
  1906. } else if (v_cmd == IOCMD_TRUNK_DISABLE) {
  1907. trunk->attr.state = BFA_TRUNK_DISABLED;
  1908. bfa_fcport_disable(&bfad->bfa);
  1909. fcport->cfg.trunked = BFA_FALSE;
  1910. }
  1911. if (!bfa_fcport_is_disabled(&bfad->bfa))
  1912. bfa_fcport_enable(&bfad->bfa);
  1913. iocmd->status = BFA_STATUS_OK;
  1914. }
  1915. spin_unlock_irqrestore(&bfad->bfad_lock, flags);
  1916. return 0;
  1917. }
  1918. int
  1919. bfad_iocmd_trunk_get_attr(struct bfad_s *bfad, void *cmd)
  1920. {
  1921. struct bfa_bsg_trunk_attr_s *iocmd = (struct bfa_bsg_trunk_attr_s *)cmd;
  1922. struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
  1923. struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
  1924. unsigned long flags;
  1925. spin_lock_irqsave(&bfad->bfad_lock, flags);
  1926. if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) ||
  1927. (fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
  1928. iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
  1929. else {
  1930. memcpy((void *)&iocmd->attr, (void *)&trunk->attr,
  1931. sizeof(struct bfa_trunk_attr_s));
  1932. iocmd->attr.port_id = bfa_lps_get_base_pid(&bfad->bfa);
  1933. iocmd->status = BFA_STATUS_OK;
  1934. }
  1935. spin_unlock_irqrestore(&bfad->bfad_lock, flags);
  1936. return 0;
  1937. }
  1938. int
  1939. bfad_iocmd_qos(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
  1940. {
  1941. struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
  1942. struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
  1943. unsigned long flags;
  1944. spin_lock_irqsave(&bfad->bfad_lock, flags);
  1945. if (bfa_ioc_get_type(&bfad->bfa.ioc) == BFA_IOC_TYPE_FC) {
  1946. if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
  1947. (fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
  1948. iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
  1949. else {
  1950. if (v_cmd == IOCMD_QOS_ENABLE)
  1951. fcport->cfg.qos_enabled = BFA_TRUE;
  1952. else if (v_cmd == IOCMD_QOS_DISABLE)
  1953. fcport->cfg.qos_enabled = BFA_FALSE;
  1954. }
  1955. }
  1956. spin_unlock_irqrestore(&bfad->bfad_lock, flags);
  1957. return 0;
  1958. }
  1959. int
  1960. bfad_iocmd_qos_get_attr(struct bfad_s *bfad, void *cmd)
  1961. {
  1962. struct bfa_bsg_qos_attr_s *iocmd = (struct bfa_bsg_qos_attr_s *)cmd;
  1963. struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
  1964. unsigned long flags;
  1965. spin_lock_irqsave(&bfad->bfad_lock, flags);
  1966. if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
  1967. (fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
  1968. iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
  1969. else {
  1970. iocmd->attr.state = fcport->qos_attr.state;
  1971. iocmd->attr.total_bb_cr =
  1972. be32_to_cpu(fcport->qos_attr.total_bb_cr);
  1973. iocmd->status = BFA_STATUS_OK;
  1974. }
  1975. spin_unlock_irqrestore(&bfad->bfad_lock, flags);
  1976. return 0;
  1977. }
  1978. int
  1979. bfad_iocmd_qos_get_vc_attr(struct bfad_s *bfad, void *cmd)
  1980. {
  1981. struct bfa_bsg_qos_vc_attr_s *iocmd =
  1982. (struct bfa_bsg_qos_vc_attr_s *)cmd;
  1983. struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
  1984. struct bfa_qos_vc_attr_s *bfa_vc_attr = &fcport->qos_vc_attr;
  1985. unsigned long flags;
  1986. u32 i = 0;
  1987. spin_lock_irqsave(&bfad->bfad_lock, flags);
  1988. iocmd->attr.total_vc_count = be16_to_cpu(bfa_vc_attr->total_vc_count);
  1989. iocmd->attr.shared_credit = be16_to_cpu(bfa_vc_attr->shared_credit);
  1990. iocmd->attr.elp_opmode_flags =
  1991. be32_to_cpu(bfa_vc_attr->elp_opmode_flags);
  1992. /* Individual VC info */
  1993. while (i < iocmd->attr.total_vc_count) {
  1994. iocmd->attr.vc_info[i].vc_credit =
  1995. bfa_vc_attr->vc_info[i].vc_credit;
  1996. iocmd->attr.vc_info[i].borrow_credit =
  1997. bfa_vc_attr->vc_info[i].borrow_credit;
  1998. iocmd->attr.vc_info[i].priority =
  1999. bfa_vc_attr->vc_info[i].priority;
  2000. i++;
  2001. }
  2002. spin_unlock_irqrestore(&bfad->bfad_lock, flags);
  2003. iocmd->status = BFA_STATUS_OK;
  2004. return 0;
  2005. }
  2006. int
  2007. bfad_iocmd_qos_get_stats(struct bfad_s *bfad, void *cmd)
  2008. {
  2009. struct bfa_bsg_fcport_stats_s *iocmd =
  2010. (struct bfa_bsg_fcport_stats_s *)cmd;
  2011. struct bfad_hal_comp fcomp;
  2012. unsigned long flags;
  2013. struct bfa_cb_pending_q_s cb_qe;
  2014. struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
  2015. init_completion(&fcomp.comp);
  2016. bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp,
  2017. &fcomp, &iocmd->stats);
  2018. spin_lock_irqsave(&bfad->bfad_lock, flags);
  2019. WARN_ON(!bfa_ioc_get_fcmode(&bfad->bfa.ioc));
  2020. if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
  2021. (fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
  2022. iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
  2023. else
  2024. iocmd->status = bfa_fcport_get_stats(&bfad->bfa, &cb_qe);
  2025. spin_unlock_irqrestore(&bfad->bfad_lock, flags);
  2026. if (iocmd->status != BFA_STATUS_OK) {
  2027. bfa_trc(bfad, iocmd->status);
  2028. goto out;
  2029. }
  2030. wait_for_completion(&fcomp.comp);
  2031. iocmd->status = fcomp.status;
  2032. out:
  2033. return 0;
  2034. }
  2035. int
  2036. bfad_iocmd_qos_reset_stats(struct bfad_s *bfad, void *cmd)
  2037. {
  2038. struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
  2039. struct bfad_hal_comp fcomp;
  2040. unsigned long flags;
  2041. struct bfa_cb_pending_q_s cb_qe;
  2042. struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
  2043. init_completion(&fcomp.comp);
  2044. bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp,
  2045. &fcomp, NULL);
  2046. spin_lock_irqsave(&bfad->bfad_lock, flags);
  2047. WARN_ON(!bfa_ioc_get_fcmode(&bfad->bfa.ioc));
  2048. if ((fcport->cfg.topology == BFA_PORT_TOPOLOGY_LOOP) &&
  2049. (fcport->topology == BFA_PORT_TOPOLOGY_LOOP))
  2050. iocmd->status = BFA_STATUS_TOPOLOGY_LOOP;
  2051. else
  2052. iocmd->status = bfa_fcport_clear_stats(&bfad->bfa, &cb_qe);
  2053. spin_unlock_irqrestore(&bfad->bfad_lock, flags);
  2054. if (iocmd->status != BFA_STATUS_OK) {
  2055. bfa_trc(bfad, iocmd->status);
  2056. goto out;
  2057. }
  2058. wait_for_completion(&fcomp.comp);
  2059. iocmd->status = fcomp.status;
  2060. out:
  2061. return 0;
  2062. }
  2063. int
  2064. bfad_iocmd_vf_get_stats(struct bfad_s *bfad, void *cmd)
  2065. {
  2066. struct bfa_bsg_vf_stats_s *iocmd =
  2067. (struct bfa_bsg_vf_stats_s *)cmd;
  2068. struct bfa_fcs_fabric_s *fcs_vf;
  2069. unsigned long flags;
  2070. spin_lock_irqsave(&bfad->bfad_lock, flags);
  2071. fcs_vf = bfa_fcs_vf_lookup(&bfad->bfa_fcs, iocmd->vf_id);
  2072. if (fcs_vf == NULL) {
  2073. spin_unlock_irqrestore(&bfad->bfad_lock, flags);
  2074. iocmd->status = BFA_STATUS_UNKNOWN_VFID;
  2075. goto out;
  2076. }
  2077. memcpy((void *)&iocmd->stats, (void *)&fcs_vf->stats,
  2078. sizeof(struct bfa_vf_stats_s));
  2079. spin_unlock_irqrestore(&bfad->bfad_lock, flags);
  2080. iocmd->status = BFA_STATUS_OK;
  2081. out:
  2082. return 0;
  2083. }
  2084. int
  2085. bfad_iocmd_vf_clr_stats(struct bfad_s *bfad, void *cmd)
  2086. {
  2087. struct bfa_bsg_vf_reset_stats_s *iocmd =
  2088. (struct bfa_bsg_vf_reset_stats_s *)cmd;
  2089. struct bfa_fcs_fabric_s *fcs_vf;
  2090. unsigned long flags;
  2091. spin_lock_irqsave(&bfad->bfad_lock, flags);
  2092. fcs_vf = bfa_fcs_vf_lookup(&bfad->bfa_fcs, iocmd->vf_id);
  2093. if (fcs_vf == NULL) {
  2094. spin_unlock_irqrestore(&bfad->bfad_lock, flags);
  2095. iocmd->status = BFA_STATUS_UNKNOWN_VFID;
  2096. goto out;
  2097. }
  2098. memset((void *)&fcs_vf->stats, 0, sizeof(struct bfa_vf_stats_s));
  2099. spin_unlock_irqrestore(&bfad->bfad_lock, flags);
  2100. iocmd->status = BFA_STATUS_OK;
  2101. out:
  2102. return 0;
  2103. }
  2104. /* Function to reset the LUN SCAN mode */
  2105. static void
  2106. bfad_iocmd_lunmask_reset_lunscan_mode(struct bfad_s *bfad, int lunmask_cfg)
  2107. {
  2108. struct bfad_im_port_s *pport_im = bfad->pport.im_port;
  2109. struct bfad_vport_s *vport = NULL;
  2110. /* Set the scsi device LUN SCAN flags for base port */
  2111. bfad_reset_sdev_bflags(pport_im, lunmask_cfg);
  2112. /* Set the scsi device LUN SCAN flags for the vports */
  2113. list_for_each_entry(vport, &bfad->vport_list, list_entry)
  2114. bfad_reset_sdev_bflags(vport->drv_port.im_port, lunmask_cfg);
  2115. }
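/*
 * Enable, disable or clear LUN masking; on a successful update the SCSI
 * device LUN scan mode is adjusted for the base port and all vports.
 */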
  2116. int
  2117. bfad_iocmd_lunmask(struct bfad_s *bfad, void *pcmd, unsigned int v_cmd)
  2118. {
  2119. struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd;
  2120. unsigned long flags;
  2121. spin_lock_irqsave(&bfad->bfad_lock, flags);
  2122. if (v_cmd == IOCMD_FCPIM_LUNMASK_ENABLE) {
  2123. iocmd->status = bfa_fcpim_lunmask_update(&bfad->bfa, BFA_TRUE);
  2124. /* Set the LUN Scanning mode to be Sequential scan */
  2125. if (iocmd->status == BFA_STATUS_OK)
  2126. bfad_iocmd_lunmask_reset_lunscan_mode(bfad, BFA_TRUE);
  2127. } else if (v_cmd == IOCMD_FCPIM_LUNMASK_DISABLE) {
  2128. iocmd->status = bfa_fcpim_lunmask_update(&bfad->bfa, BFA_FALSE);
  2129. /* Set the LUN Scanning mode to default REPORT_LUNS scan */
  2130. if (iocmd->status == BFA_STATUS_OK)
  2131. bfad_iocmd_lunmask_reset_lunscan_mode(bfad, BFA_FALSE);
  2132. } else if (v_cmd == IOCMD_FCPIM_LUNMASK_CLEAR)
  2133. iocmd->status = bfa_fcpim_lunmask_clear(&bfad->bfa);
  2134. spin_unlock_irqrestore(&bfad->bfad_lock, flags);
  2135. return 0;
  2136. }
  2137. int
  2138. bfad_iocmd_fcpim_lunmask_query(struct bfad_s *bfad, void *cmd)
  2139. {
  2140. struct bfa_bsg_fcpim_lunmask_query_s *iocmd =
  2141. (struct bfa_bsg_fcpim_lunmask_query_s *)cmd;
  2142. struct bfa_lunmask_cfg_s *lun_mask = &iocmd->lun_mask;
  2143. unsigned long flags;
  2144. spin_lock_irqsave(&bfad->bfad_lock, flags);
  2145. iocmd->status = bfa_fcpim_lunmask_query(&bfad->bfa, lun_mask);
  2146. spin_unlock_irqrestore(&bfad->bfad_lock, flags);
  2147. return 0;
  2148. }
  2149. int
  2150. bfad_iocmd_fcpim_cfg_lunmask(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
  2151. {
  2152. struct bfa_bsg_fcpim_lunmask_s *iocmd =
  2153. (struct bfa_bsg_fcpim_lunmask_s *)cmd;
  2154. unsigned long flags;
  2155. spin_lock_irqsave(&bfad->bfad_lock, flags);
  2156. if (v_cmd == IOCMD_FCPIM_LUNMASK_ADD)
  2157. iocmd->status = bfa_fcpim_lunmask_add(&bfad->bfa, iocmd->vf_id,
  2158. &iocmd->pwwn, iocmd->rpwwn, iocmd->lun);
  2159. else if (v_cmd == IOCMD_FCPIM_LUNMASK_DELETE)
  2160. iocmd->status = bfa_fcpim_lunmask_delete(&bfad->bfa,
  2161. iocmd->vf_id, &iocmd->pwwn,
  2162. iocmd->rpwwn, iocmd->lun);
  2163. spin_unlock_irqrestore(&bfad->bfad_lock, flags);
  2164. return 0;
  2165. }
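/* Dispatch a vendor-unique bsg command to the matching IOCMD handler. */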
  2166. static int
  2167. bfad_iocmd_handler(struct bfad_s *bfad, unsigned int cmd, void *iocmd,
  2168. unsigned int payload_len)
  2169. {
  2170. int rc = -EINVAL;
  2171. switch (cmd) {
  2172. case IOCMD_IOC_ENABLE:
  2173. rc = bfad_iocmd_ioc_enable(bfad, iocmd);
  2174. break;
  2175. case IOCMD_IOC_DISABLE:
  2176. rc = bfad_iocmd_ioc_disable(bfad, iocmd);
  2177. break;
  2178. case IOCMD_IOC_GET_INFO:
  2179. rc = bfad_iocmd_ioc_get_info(bfad, iocmd);
  2180. break;
  2181. case IOCMD_IOC_GET_ATTR:
  2182. rc = bfad_iocmd_ioc_get_attr(bfad, iocmd);
  2183. break;
  2184. case IOCMD_IOC_GET_STATS:
  2185. rc = bfad_iocmd_ioc_get_stats(bfad, iocmd);
  2186. break;
  2187. case IOCMD_IOC_GET_FWSTATS:
  2188. rc = bfad_iocmd_ioc_get_fwstats(bfad, iocmd, payload_len);
  2189. break;
  2190. case IOCMD_IOC_RESET_STATS:
  2191. case IOCMD_IOC_RESET_FWSTATS:
  2192. rc = bfad_iocmd_ioc_reset_stats(bfad, iocmd, cmd);
  2193. break;
  2194. case IOCMD_IOC_SET_ADAPTER_NAME:
  2195. case IOCMD_IOC_SET_PORT_NAME:
  2196. rc = bfad_iocmd_ioc_set_name(bfad, iocmd, cmd);
  2197. break;
  2198. case IOCMD_IOCFC_GET_ATTR:
  2199. rc = bfad_iocmd_iocfc_get_attr(bfad, iocmd);
  2200. break;
  2201. case IOCMD_IOCFC_SET_INTR:
  2202. rc = bfad_iocmd_iocfc_set_intr(bfad, iocmd);
  2203. break;
  2204. case IOCMD_PORT_ENABLE:
  2205. rc = bfad_iocmd_port_enable(bfad, iocmd);
  2206. break;
  2207. case IOCMD_PORT_DISABLE:
  2208. rc = bfad_iocmd_port_disable(bfad, iocmd);
  2209. break;
  2210. case IOCMD_PORT_GET_ATTR:
  2211. rc = bfad_iocmd_port_get_attr(bfad, iocmd);
  2212. break;
  2213. case IOCMD_PORT_GET_STATS:
  2214. rc = bfad_iocmd_port_get_stats(bfad, iocmd, payload_len);
  2215. break;
  2216. case IOCMD_PORT_RESET_STATS:
  2217. rc = bfad_iocmd_port_reset_stats(bfad, iocmd);
  2218. break;
  2219. case IOCMD_PORT_CFG_TOPO:
  2220. case IOCMD_PORT_CFG_SPEED:
  2221. case IOCMD_PORT_CFG_ALPA:
  2222. case IOCMD_PORT_CLR_ALPA:
  2223. rc = bfad_iocmd_set_port_cfg(bfad, iocmd, cmd);
  2224. break;
  2225. case IOCMD_PORT_CFG_MAXFRSZ:
  2226. rc = bfad_iocmd_port_cfg_maxfrsize(bfad, iocmd);
  2227. break;
  2228. case IOCMD_PORT_BBSC_ENABLE:
  2229. case IOCMD_PORT_BBSC_DISABLE:
  2230. rc = bfad_iocmd_port_cfg_bbsc(bfad, iocmd, cmd);
  2231. break;
  2232. case IOCMD_LPORT_GET_ATTR:
  2233. rc = bfad_iocmd_lport_get_attr(bfad, iocmd);
  2234. break;
  2235. case IOCMD_LPORT_GET_STATS:
  2236. rc = bfad_iocmd_lport_get_stats(bfad, iocmd);
  2237. break;
  2238. case IOCMD_LPORT_RESET_STATS:
  2239. rc = bfad_iocmd_lport_reset_stats(bfad, iocmd);
  2240. break;
  2241. case IOCMD_LPORT_GET_IOSTATS:
  2242. rc = bfad_iocmd_lport_get_iostats(bfad, iocmd);
  2243. break;
  2244. case IOCMD_LPORT_GET_RPORTS:
  2245. rc = bfad_iocmd_lport_get_rports(bfad, iocmd, payload_len);
  2246. break;
  2247. case IOCMD_RPORT_GET_ATTR:
  2248. rc = bfad_iocmd_rport_get_attr(bfad, iocmd);
  2249. break;
  2250. case IOCMD_RPORT_GET_ADDR:
  2251. rc = bfad_iocmd_rport_get_addr(bfad, iocmd);
  2252. break;
  2253. case IOCMD_RPORT_GET_STATS:
  2254. rc = bfad_iocmd_rport_get_stats(bfad, iocmd);
  2255. break;
  2256. case IOCMD_RPORT_RESET_STATS:
  2257. rc = bfad_iocmd_rport_clr_stats(bfad, iocmd);
  2258. break;
  2259. case IOCMD_RPORT_SET_SPEED:
  2260. rc = bfad_iocmd_rport_set_speed(bfad, iocmd);
  2261. break;
  2262. case IOCMD_VPORT_GET_ATTR:
  2263. rc = bfad_iocmd_vport_get_attr(bfad, iocmd);
  2264. break;
  2265. case IOCMD_VPORT_GET_STATS:
  2266. rc = bfad_iocmd_vport_get_stats(bfad, iocmd);
  2267. break;
  2268. case IOCMD_VPORT_RESET_STATS:
  2269. rc = bfad_iocmd_vport_clr_stats(bfad, iocmd);
  2270. break;
  2271. case IOCMD_FABRIC_GET_LPORTS:
  2272. rc = bfad_iocmd_fabric_get_lports(bfad, iocmd, payload_len);
  2273. break;
  2274. case IOCMD_RATELIM_ENABLE:
  2275. case IOCMD_RATELIM_DISABLE:
  2276. rc = bfad_iocmd_ratelim(bfad, cmd, iocmd);
  2277. break;
  2278. case IOCMD_RATELIM_DEF_SPEED:
  2279. rc = bfad_iocmd_ratelim_speed(bfad, cmd, iocmd);
  2280. break;
  2281. case IOCMD_FCPIM_FAILOVER:
  2282. rc = bfad_iocmd_cfg_fcpim(bfad, iocmd);
  2283. break;
  2284. case IOCMD_FCPIM_MODSTATS:
  2285. rc = bfad_iocmd_fcpim_get_modstats(bfad, iocmd);
  2286. break;
  2287. case IOCMD_FCPIM_MODSTATSCLR:
  2288. rc = bfad_iocmd_fcpim_clr_modstats(bfad, iocmd);
  2289. break;
  2290. case IOCMD_FCPIM_DEL_ITN_STATS:
  2291. rc = bfad_iocmd_fcpim_get_del_itn_stats(bfad, iocmd);
  2292. break;
  2293. case IOCMD_ITNIM_GET_ATTR:
  2294. rc = bfad_iocmd_itnim_get_attr(bfad, iocmd);
  2295. break;
  2296. case IOCMD_ITNIM_GET_IOSTATS:
  2297. rc = bfad_iocmd_itnim_get_iostats(bfad, iocmd);
  2298. break;
  2299. case IOCMD_ITNIM_RESET_STATS:
  2300. rc = bfad_iocmd_itnim_reset_stats(bfad, iocmd);
  2301. break;
  2302. case IOCMD_ITNIM_GET_ITNSTATS:
  2303. rc = bfad_iocmd_itnim_get_itnstats(bfad, iocmd);
  2304. break;
  2305. case IOCMD_FCPORT_ENABLE:
  2306. rc = bfad_iocmd_fcport_enable(bfad, iocmd);
  2307. break;
  2308. case IOCMD_FCPORT_DISABLE:
  2309. rc = bfad_iocmd_fcport_disable(bfad, iocmd);
  2310. break;
  2311. case IOCMD_IOC_PCIFN_CFG:
  2312. rc = bfad_iocmd_ioc_get_pcifn_cfg(bfad, iocmd);
  2313. break;
  2314. case IOCMD_PCIFN_CREATE:
  2315. rc = bfad_iocmd_pcifn_create(bfad, iocmd);
  2316. break;
  2317. case IOCMD_PCIFN_DELETE:
  2318. rc = bfad_iocmd_pcifn_delete(bfad, iocmd);
  2319. break;
  2320. case IOCMD_PCIFN_BW:
  2321. rc = bfad_iocmd_pcifn_bw(bfad, iocmd);
  2322. break;
  2323. case IOCMD_ADAPTER_CFG_MODE:
  2324. rc = bfad_iocmd_adapter_cfg_mode(bfad, iocmd);
  2325. break;
  2326. case IOCMD_PORT_CFG_MODE:
  2327. rc = bfad_iocmd_port_cfg_mode(bfad, iocmd);
  2328. break;
  2329. case IOCMD_FLASH_ENABLE_OPTROM:
  2330. case IOCMD_FLASH_DISABLE_OPTROM:
  2331. rc = bfad_iocmd_ablk_optrom(bfad, cmd, iocmd);
  2332. break;
  2333. case IOCMD_FAA_QUERY:
  2334. rc = bfad_iocmd_faa_query(bfad, iocmd);
  2335. break;
  2336. case IOCMD_CEE_GET_ATTR:
  2337. rc = bfad_iocmd_cee_attr(bfad, iocmd, payload_len);
  2338. break;
  2339. case IOCMD_CEE_GET_STATS:
  2340. rc = bfad_iocmd_cee_get_stats(bfad, iocmd, payload_len);
  2341. break;
  2342. case IOCMD_CEE_RESET_STATS:
  2343. rc = bfad_iocmd_cee_reset_stats(bfad, iocmd);
  2344. break;
  2345. case IOCMD_SFP_MEDIA:
  2346. rc = bfad_iocmd_sfp_media(bfad, iocmd);
  2347. break;
  2348. case IOCMD_SFP_SPEED:
  2349. rc = bfad_iocmd_sfp_speed(bfad, iocmd);
  2350. break;
  2351. case IOCMD_FLASH_GET_ATTR:
  2352. rc = bfad_iocmd_flash_get_attr(bfad, iocmd);
  2353. break;
  2354. case IOCMD_FLASH_ERASE_PART:
  2355. rc = bfad_iocmd_flash_erase_part(bfad, iocmd);
  2356. break;
  2357. case IOCMD_FLASH_UPDATE_PART:
  2358. rc = bfad_iocmd_flash_update_part(bfad, iocmd, payload_len);
  2359. break;
  2360. case IOCMD_FLASH_READ_PART:
  2361. rc = bfad_iocmd_flash_read_part(bfad, iocmd, payload_len);
  2362. break;
  2363. case IOCMD_DIAG_TEMP:
  2364. rc = bfad_iocmd_diag_temp(bfad, iocmd);
  2365. break;
  2366. case IOCMD_DIAG_MEMTEST:
  2367. rc = bfad_iocmd_diag_memtest(bfad, iocmd);
  2368. break;
  2369. case IOCMD_DIAG_LOOPBACK:
  2370. rc = bfad_iocmd_diag_loopback(bfad, iocmd);
  2371. break;
  2372. case IOCMD_DIAG_FWPING:
  2373. rc = bfad_iocmd_diag_fwping(bfad, iocmd);
  2374. break;
  2375. case IOCMD_DIAG_QUEUETEST:
  2376. rc = bfad_iocmd_diag_queuetest(bfad, iocmd);
  2377. break;
  2378. case IOCMD_DIAG_SFP:
  2379. rc = bfad_iocmd_diag_sfp(bfad, iocmd);
  2380. break;
  2381. case IOCMD_DIAG_LED:
  2382. rc = bfad_iocmd_diag_led(bfad, iocmd);
  2383. break;
  2384. case IOCMD_DIAG_BEACON_LPORT:
  2385. rc = bfad_iocmd_diag_beacon_lport(bfad, iocmd);
  2386. break;
  2387. case IOCMD_DIAG_LB_STAT:
  2388. rc = bfad_iocmd_diag_lb_stat(bfad, iocmd);
  2389. break;
  2390. case IOCMD_PHY_GET_ATTR:
  2391. rc = bfad_iocmd_phy_get_attr(bfad, iocmd);
  2392. break;
  2393. case IOCMD_PHY_GET_STATS:
  2394. rc = bfad_iocmd_phy_get_stats(bfad, iocmd);
  2395. break;
  2396. case IOCMD_PHY_UPDATE_FW:
  2397. rc = bfad_iocmd_phy_update(bfad, iocmd, payload_len);
  2398. break;
  2399. case IOCMD_PHY_READ_FW:
  2400. rc = bfad_iocmd_phy_read(bfad, iocmd, payload_len);
  2401. break;
  2402. case IOCMD_VHBA_QUERY:
  2403. rc = bfad_iocmd_vhba_query(bfad, iocmd);
  2404. break;
  2405. case IOCMD_DEBUG_PORTLOG:
  2406. rc = bfad_iocmd_porglog_get(bfad, iocmd);
  2407. break;
  2408. case IOCMD_DEBUG_FW_CORE:
  2409. rc = bfad_iocmd_debug_fw_core(bfad, iocmd, payload_len);
  2410. break;
  2411. case IOCMD_DEBUG_FW_STATE_CLR:
  2412. case IOCMD_DEBUG_PORTLOG_CLR:
  2413. case IOCMD_DEBUG_START_DTRC:
  2414. case IOCMD_DEBUG_STOP_DTRC:
  2415. rc = bfad_iocmd_debug_ctl(bfad, iocmd, cmd);
  2416. break;
  2417. case IOCMD_DEBUG_PORTLOG_CTL:
  2418. rc = bfad_iocmd_porglog_ctl(bfad, iocmd);
  2419. break;
  2420. case IOCMD_FCPIM_PROFILE_ON:
  2421. case IOCMD_FCPIM_PROFILE_OFF:
  2422. rc = bfad_iocmd_fcpim_cfg_profile(bfad, iocmd, cmd);
  2423. break;
  2424. case IOCMD_ITNIM_GET_IOPROFILE:
  2425. rc = bfad_iocmd_itnim_get_ioprofile(bfad, iocmd);
  2426. break;
  2427. case IOCMD_FCPORT_GET_STATS:
  2428. rc = bfad_iocmd_fcport_get_stats(bfad, iocmd);
  2429. break;
  2430. case IOCMD_FCPORT_RESET_STATS:
  2431. rc = bfad_iocmd_fcport_reset_stats(bfad, iocmd);
  2432. break;
  2433. case IOCMD_BOOT_CFG:
  2434. rc = bfad_iocmd_boot_cfg(bfad, iocmd);
  2435. break;
  2436. case IOCMD_BOOT_QUERY:
  2437. rc = bfad_iocmd_boot_query(bfad, iocmd);
  2438. break;
  2439. case IOCMD_PREBOOT_QUERY:
  2440. rc = bfad_iocmd_preboot_query(bfad, iocmd);
  2441. break;
  2442. case IOCMD_ETHBOOT_CFG:
  2443. rc = bfad_iocmd_ethboot_cfg(bfad, iocmd);
  2444. break;
  2445. case IOCMD_ETHBOOT_QUERY:
  2446. rc = bfad_iocmd_ethboot_query(bfad, iocmd);
  2447. break;
  2448. case IOCMD_TRUNK_ENABLE:
  2449. case IOCMD_TRUNK_DISABLE:
  2450. rc = bfad_iocmd_cfg_trunk(bfad, iocmd, cmd);
  2451. break;
  2452. case IOCMD_TRUNK_GET_ATTR:
  2453. rc = bfad_iocmd_trunk_get_attr(bfad, iocmd);
  2454. break;
  2455. case IOCMD_QOS_ENABLE:
  2456. case IOCMD_QOS_DISABLE:
  2457. rc = bfad_iocmd_qos(bfad, iocmd, cmd);
  2458. break;
  2459. case IOCMD_QOS_GET_ATTR:
  2460. rc = bfad_iocmd_qos_get_attr(bfad, iocmd);
  2461. break;
  2462. case IOCMD_QOS_GET_VC_ATTR:
  2463. rc = bfad_iocmd_qos_get_vc_attr(bfad, iocmd);
  2464. break;
  2465. case IOCMD_QOS_GET_STATS:
  2466. rc = bfad_iocmd_qos_get_stats(bfad, iocmd);
  2467. break;
  2468. case IOCMD_QOS_RESET_STATS:
  2469. rc = bfad_iocmd_qos_reset_stats(bfad, iocmd);
  2470. break;
  2471. case IOCMD_VF_GET_STATS:
  2472. rc = bfad_iocmd_vf_get_stats(bfad, iocmd);
  2473. break;
  2474. case IOCMD_VF_RESET_STATS:
  2475. rc = bfad_iocmd_vf_clr_stats(bfad, iocmd);
  2476. break;
  2477. case IOCMD_FCPIM_LUNMASK_ENABLE:
  2478. case IOCMD_FCPIM_LUNMASK_DISABLE:
  2479. case IOCMD_FCPIM_LUNMASK_CLEAR:
  2480. rc = bfad_iocmd_lunmask(bfad, iocmd, cmd);
  2481. break;
  2482. case IOCMD_FCPIM_LUNMASK_QUERY:
  2483. rc = bfad_iocmd_fcpim_lunmask_query(bfad, iocmd);
  2484. break;
  2485. case IOCMD_FCPIM_LUNMASK_ADD:
  2486. case IOCMD_FCPIM_LUNMASK_DELETE:
  2487. rc = bfad_iocmd_fcpim_cfg_lunmask(bfad, iocmd, cmd);
  2488. break;
  2489. default:
  2490. rc = -EINVAL;
  2491. break;
  2492. }
  2493. return rc;
  2494. }
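/*
 * bsg vendor request entry point: copy the scatter/gather payload into a
 * linear buffer, run the IOCMD handler and copy the result back into the
 * reply payload.
 */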
  2495. static int
  2496. bfad_im_bsg_vendor_request(struct fc_bsg_job *job)
  2497. {
  2498. uint32_t vendor_cmd = job->request->rqst_data.h_vendor.vendor_cmd[0];
  2499. struct bfad_im_port_s *im_port =
  2500. (struct bfad_im_port_s *) job->shost->hostdata[0];
  2501. struct bfad_s *bfad = im_port->bfad;
  2502. struct request_queue *request_q = job->req->q;
  2503. void *payload_kbuf;
  2504. int rc = -EINVAL;
2505. /*
2506. * Raise the BSG request_queue segment limit to 256 so that payloads
2507. * larger than 512 * 1024 bytes can be handled.
2508. */
  2509. blk_queue_max_segments(request_q, 256);
  2510. /* Allocate a temp buffer to hold the passed in user space command */
  2511. payload_kbuf = kzalloc(job->request_payload.payload_len, GFP_KERNEL);
  2512. if (!payload_kbuf) {
  2513. rc = -ENOMEM;
  2514. goto out;
  2515. }
  2516. /* Copy the sg_list passed in to a linear buffer: holds the cmnd data */
  2517. sg_copy_to_buffer(job->request_payload.sg_list,
  2518. job->request_payload.sg_cnt, payload_kbuf,
  2519. job->request_payload.payload_len);
  2520. /* Invoke IOCMD handler - to handle all the vendor command requests */
  2521. rc = bfad_iocmd_handler(bfad, vendor_cmd, payload_kbuf,
  2522. job->request_payload.payload_len);
  2523. if (rc != BFA_STATUS_OK)
  2524. goto error;
  2525. /* Copy the response data to the job->reply_payload sg_list */
  2526. sg_copy_from_buffer(job->reply_payload.sg_list,
  2527. job->reply_payload.sg_cnt,
  2528. payload_kbuf,
  2529. job->reply_payload.payload_len);
  2530. /* free the command buffer */
  2531. kfree(payload_kbuf);
  2532. /* Fill the BSG job reply data */
  2533. job->reply_len = job->reply_payload.payload_len;
  2534. job->reply->reply_payload_rcv_len = job->reply_payload.payload_len;
  2535. job->reply->result = rc;
  2536. job->job_done(job);
  2537. return rc;
  2538. error:
  2539. /* free the command buffer */
  2540. kfree(payload_kbuf);
  2541. out:
  2542. job->reply->result = rc;
  2543. job->reply_len = sizeof(uint32_t);
  2544. job->reply->reply_payload_rcv_len = 0;
  2545. return rc;
  2546. }
  2547. /* FC passthru call backs */
  2548. u64
  2549. bfad_fcxp_get_req_sgaddr_cb(void *bfad_fcxp, int sgeid)
  2550. {
  2551. struct bfad_fcxp *drv_fcxp = bfad_fcxp;
  2552. struct bfa_sge_s *sge;
  2553. u64 addr;
  2554. sge = drv_fcxp->req_sge + sgeid;
  2555. addr = (u64)(size_t) sge->sg_addr;
  2556. return addr;
  2557. }
  2558. u32
  2559. bfad_fcxp_get_req_sglen_cb(void *bfad_fcxp, int sgeid)
  2560. {
  2561. struct bfad_fcxp *drv_fcxp = bfad_fcxp;
  2562. struct bfa_sge_s *sge;
  2563. sge = drv_fcxp->req_sge + sgeid;
  2564. return sge->sg_len;
  2565. }
  2566. u64
  2567. bfad_fcxp_get_rsp_sgaddr_cb(void *bfad_fcxp, int sgeid)
  2568. {
  2569. struct bfad_fcxp *drv_fcxp = bfad_fcxp;
  2570. struct bfa_sge_s *sge;
  2571. u64 addr;
  2572. sge = drv_fcxp->rsp_sge + sgeid;
  2573. addr = (u64)(size_t) sge->sg_addr;
  2574. return addr;
  2575. }
  2576. u32
  2577. bfad_fcxp_get_rsp_sglen_cb(void *bfad_fcxp, int sgeid)
  2578. {
  2579. struct bfad_fcxp *drv_fcxp = bfad_fcxp;
  2580. struct bfa_sge_s *sge;
  2581. sge = drv_fcxp->rsp_sge + sgeid;
  2582. return sge->sg_len;
  2583. }
  2584. void
  2585. bfad_send_fcpt_cb(void *bfad_fcxp, struct bfa_fcxp_s *fcxp, void *cbarg,
  2586. bfa_status_t req_status, u32 rsp_len, u32 resid_len,
  2587. struct fchs_s *rsp_fchs)
  2588. {
  2589. struct bfad_fcxp *drv_fcxp = bfad_fcxp;
  2590. drv_fcxp->req_status = req_status;
  2591. drv_fcxp->rsp_len = rsp_len;
  2592. /* bfa_fcxp will be automatically freed by BFA */
  2593. drv_fcxp->bfa_fcxp = NULL;
  2594. complete(&drv_fcxp->comp);
  2595. }
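/*
 * Allocate a DMA-coherent bounce buffer for the bsg payload and build a
 * single-entry SG table for the FCXP request/response.
 */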
  2596. struct bfad_buf_info *
  2597. bfad_fcxp_map_sg(struct bfad_s *bfad, void *payload_kbuf,
  2598. uint32_t payload_len, uint32_t *num_sgles)
  2599. {
  2600. struct bfad_buf_info *buf_base, *buf_info;
  2601. struct bfa_sge_s *sg_table;
  2602. int sge_num = 1;
  2603. buf_base = kzalloc((sizeof(struct bfad_buf_info) +
  2604. sizeof(struct bfa_sge_s)) * sge_num, GFP_KERNEL);
  2605. if (!buf_base)
  2606. return NULL;
  2607. sg_table = (struct bfa_sge_s *) (((uint8_t *)buf_base) +
  2608. (sizeof(struct bfad_buf_info) * sge_num));
  2609. /* Allocate dma coherent memory */
  2610. buf_info = buf_base;
  2611. buf_info->size = payload_len;
  2612. buf_info->virt = dma_alloc_coherent(&bfad->pcidev->dev, buf_info->size,
  2613. &buf_info->phys, GFP_KERNEL);
  2614. if (!buf_info->virt)
  2615. goto out_free_mem;
  2616. /* copy the linear bsg buffer to buf_info */
  2617. memset(buf_info->virt, 0, buf_info->size);
  2618. memcpy(buf_info->virt, payload_kbuf, buf_info->size);
  2619. /*
  2620. * Setup SG table
  2621. */
  2622. sg_table->sg_len = buf_info->size;
  2623. sg_table->sg_addr = (void *)(size_t) buf_info->phys;
  2624. *num_sgles = sge_num;
  2625. return buf_base;
  2626. out_free_mem:
  2627. kfree(buf_base);
  2628. return NULL;
  2629. }
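/* Free the DMA-coherent buffers allocated by bfad_fcxp_map_sg(). */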
  2630. void
  2631. bfad_fcxp_free_mem(struct bfad_s *bfad, struct bfad_buf_info *buf_base,
  2632. uint32_t num_sgles)
  2633. {
  2634. int i;
  2635. struct bfad_buf_info *buf_info = buf_base;
  2636. if (buf_base) {
  2637. for (i = 0; i < num_sgles; buf_info++, i++) {
  2638. if (buf_info->virt != NULL)
  2639. dma_free_coherent(&bfad->pcidev->dev,
  2640. buf_info->size, buf_info->virt,
  2641. buf_info->phys);
  2642. }
  2643. kfree(buf_base);
  2644. }
  2645. }
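/*
 * Allocate an FCXP from the BFA and send the FC passthru frame; the
 * completion is signalled from bfad_send_fcpt_cb().
 */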
  2646. int
  2647. bfad_fcxp_bsg_send(struct fc_bsg_job *job, struct bfad_fcxp *drv_fcxp,
  2648. bfa_bsg_fcpt_t *bsg_fcpt)
  2649. {
  2650. struct bfa_fcxp_s *hal_fcxp;
  2651. struct bfad_s *bfad = drv_fcxp->port->bfad;
  2652. unsigned long flags;
  2653. uint8_t lp_tag;
  2654. spin_lock_irqsave(&bfad->bfad_lock, flags);
  2655. /* Allocate bfa_fcxp structure */
  2656. hal_fcxp = bfa_fcxp_req_rsp_alloc(drv_fcxp, &bfad->bfa,
  2657. drv_fcxp->num_req_sgles,
  2658. drv_fcxp->num_rsp_sgles,
  2659. bfad_fcxp_get_req_sgaddr_cb,
  2660. bfad_fcxp_get_req_sglen_cb,
  2661. bfad_fcxp_get_rsp_sgaddr_cb,
  2662. bfad_fcxp_get_rsp_sglen_cb, BFA_TRUE);
  2663. if (!hal_fcxp) {
  2664. bfa_trc(bfad, 0);
  2665. spin_unlock_irqrestore(&bfad->bfad_lock, flags);
  2666. return BFA_STATUS_ENOMEM;
  2667. }
  2668. drv_fcxp->bfa_fcxp = hal_fcxp;
  2669. lp_tag = bfa_lps_get_tag_from_pid(&bfad->bfa, bsg_fcpt->fchs.s_id);
  2670. bfa_fcxp_send(hal_fcxp, drv_fcxp->bfa_rport, bsg_fcpt->vf_id, lp_tag,
  2671. bsg_fcpt->cts, bsg_fcpt->cos,
  2672. job->request_payload.payload_len,
  2673. &bsg_fcpt->fchs, bfad_send_fcpt_cb, bfad,
  2674. job->reply_payload.payload_len, bsg_fcpt->tsecs);
  2675. spin_unlock_irqrestore(&bfad->bfad_lock, flags);
  2676. return BFA_STATUS_OK;
  2677. }
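/*
 * bsg ELS/CT passthru entry point: copy in the user payload, resolve the
 * lport (and the rport nexus for RPT commands), map the request/response
 * buffers for DMA and send the FCXP.
 */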
  2678. int
  2679. bfad_im_bsg_els_ct_request(struct fc_bsg_job *job)
  2680. {
  2681. struct bfa_bsg_data *bsg_data;
  2682. struct bfad_im_port_s *im_port =
  2683. (struct bfad_im_port_s *) job->shost->hostdata[0];
  2684. struct bfad_s *bfad = im_port->bfad;
  2685. bfa_bsg_fcpt_t *bsg_fcpt;
  2686. struct bfad_fcxp *drv_fcxp;
  2687. struct bfa_fcs_lport_s *fcs_port;
  2688. struct bfa_fcs_rport_s *fcs_rport;
  2689. uint32_t command_type = job->request->msgcode;
  2690. unsigned long flags;
  2691. struct bfad_buf_info *rsp_buf_info;
  2692. void *req_kbuf = NULL, *rsp_kbuf = NULL;
  2693. int rc = -EINVAL;
2694. job->reply_len = sizeof(uint32_t); /* At least a uint32_t reply_len */
  2695. job->reply->reply_payload_rcv_len = 0;
  2696. /* Get the payload passed in from userspace */
  2697. bsg_data = (struct bfa_bsg_data *) (((char *)job->request) +
  2698. sizeof(struct fc_bsg_request));
  2699. if (bsg_data == NULL)
  2700. goto out;
  2701. /*
  2702. * Allocate buffer for bsg_fcpt and do a copy_from_user op for payload
  2703. * buffer of size bsg_data->payload_len
  2704. */
  2705. bsg_fcpt = kzalloc(bsg_data->payload_len, GFP_KERNEL);
  2706. if (!bsg_fcpt) {
  2707. rc = -ENOMEM;
  2708. goto out;
  2709. }
  2710. if (copy_from_user((uint8_t *)bsg_fcpt, bsg_data->payload,
  2711. bsg_data->payload_len)) {
  2712. kfree(bsg_fcpt);
  2713. rc = -EIO;
  2714. goto out;
  2715. }
  2716. drv_fcxp = kzalloc(sizeof(struct bfad_fcxp), GFP_KERNEL);
  2717. if (drv_fcxp == NULL) {
  2718. kfree(bsg_fcpt);
  2719. rc = -ENOMEM;
  2720. goto out;
  2721. }
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs, bsg_fcpt->vf_id,
					bsg_fcpt->lpwwn);
	if (fcs_port == NULL) {
		bsg_fcpt->status = BFA_STATUS_UNKNOWN_LWWN;
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		goto out_free_mem;
	}

	/* Check if the port is online before sending FC Passthru cmd */
	if (!bfa_fcs_lport_is_online(fcs_port)) {
		bsg_fcpt->status = BFA_STATUS_PORT_OFFLINE;
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		goto out_free_mem;
	}

	drv_fcxp->port = fcs_port->bfad_port;

	if (drv_fcxp->port->bfad == NULL)
		drv_fcxp->port->bfad = bfad;

	/* Fetch the bfa_rport - if nexus needed */
	if (command_type == FC_BSG_HST_ELS_NOLOGIN ||
	    command_type == FC_BSG_HST_CT) {
		/* BSG HST commands: no nexus needed */
		drv_fcxp->bfa_rport = NULL;
	} else if (command_type == FC_BSG_RPT_ELS ||
		   command_type == FC_BSG_RPT_CT) {
		/* BSG RPT commands: nexus needed */
		fcs_rport = bfa_fcs_lport_get_rport_by_pwwn(fcs_port,
							    bsg_fcpt->dpwwn);
		if (fcs_rport == NULL) {
			bsg_fcpt->status = BFA_STATUS_UNKNOWN_RWWN;
			spin_unlock_irqrestore(&bfad->bfad_lock, flags);
			goto out_free_mem;
		}
		drv_fcxp->bfa_rport = fcs_rport->bfa_rport;
	} else { /* Unknown BSG msgcode; return -EINVAL */
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		goto out_free_mem;
	}

	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	/* allocate memory for req / rsp buffers */
	req_kbuf = kzalloc(job->request_payload.payload_len, GFP_KERNEL);
	if (!req_kbuf) {
		printk(KERN_INFO "bfa %s: fcpt request buffer alloc failed\n",
				bfad->pci_name);
		rc = -ENOMEM;
		goto out_free_mem;
	}

	rsp_kbuf = kzalloc(job->reply_payload.payload_len, GFP_KERNEL);
	if (!rsp_kbuf) {
		printk(KERN_INFO "bfa %s: fcpt response buffer alloc failed\n",
				bfad->pci_name);
		rc = -ENOMEM;
		goto out_free_mem;
	}

	/* map req sg - copy the sg_list passed in to the linear buffer */
	sg_copy_to_buffer(job->request_payload.sg_list,
			  job->request_payload.sg_cnt, req_kbuf,
			  job->request_payload.payload_len);

	drv_fcxp->reqbuf_info = bfad_fcxp_map_sg(bfad, req_kbuf,
					job->request_payload.payload_len,
					&drv_fcxp->num_req_sgles);
	if (!drv_fcxp->reqbuf_info) {
		printk(KERN_INFO "bfa %s: fcpt request fcxp_map_sg failed\n",
				bfad->pci_name);
		rc = -ENOMEM;
		goto out_free_mem;
	}

	drv_fcxp->req_sge = (struct bfa_sge_s *)
			(((uint8_t *)drv_fcxp->reqbuf_info) +
			 (sizeof(struct bfad_buf_info) *
			  drv_fcxp->num_req_sgles));

	/* map rsp sg */
	drv_fcxp->rspbuf_info = bfad_fcxp_map_sg(bfad, rsp_kbuf,
					job->reply_payload.payload_len,
					&drv_fcxp->num_rsp_sgles);
	if (!drv_fcxp->rspbuf_info) {
		printk(KERN_INFO "bfa %s: fcpt response fcxp_map_sg failed\n",
				bfad->pci_name);
		rc = -ENOMEM;
		goto out_free_mem;
	}

	rsp_buf_info = (struct bfad_buf_info *)drv_fcxp->rspbuf_info;
	drv_fcxp->rsp_sge = (struct bfa_sge_s *)
			(((uint8_t *)drv_fcxp->rspbuf_info) +
			 (sizeof(struct bfad_buf_info) *
			  drv_fcxp->num_rsp_sgles));
	/* fcxp send */
	init_completion(&drv_fcxp->comp);
	rc = bfad_fcxp_bsg_send(job, drv_fcxp, bsg_fcpt);
	if (rc == BFA_STATUS_OK) {
		wait_for_completion(&drv_fcxp->comp);
		bsg_fcpt->status = drv_fcxp->req_status;
	} else {
		bsg_fcpt->status = rc;
		goto out_free_mem;
	}

	/* fill the job->reply data */
	if (drv_fcxp->req_status == BFA_STATUS_OK) {
		job->reply_len = drv_fcxp->rsp_len;
		job->reply->reply_payload_rcv_len = drv_fcxp->rsp_len;
		job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
	} else {
		job->reply->reply_payload_rcv_len =
				sizeof(struct fc_bsg_ctels_reply);
		job->reply_len = sizeof(uint32_t);
		job->reply->reply_data.ctels_reply.status =
				FC_CTELS_STATUS_REJECT;
	}

	/* Copy the response data to the reply_payload sg list */
	sg_copy_from_buffer(job->reply_payload.sg_list,
			    job->reply_payload.sg_cnt,
			    (uint8_t *)rsp_buf_info->virt,
			    job->reply_payload.payload_len);
out_free_mem:
	bfad_fcxp_free_mem(bfad, drv_fcxp->rspbuf_info,
			   drv_fcxp->num_rsp_sgles);
	bfad_fcxp_free_mem(bfad, drv_fcxp->reqbuf_info,
			   drv_fcxp->num_req_sgles);
	kfree(req_kbuf);
	kfree(rsp_kbuf);

	/* Need a copy to user op */
	if (copy_to_user(bsg_data->payload, (void *)bsg_fcpt,
			 bsg_data->payload_len))
		rc = -EIO;

	kfree(bsg_fcpt);
	kfree(drv_fcxp);
out:
	job->reply->result = rc;

	if (rc == BFA_STATUS_OK)
		job->job_done(job);

	return rc;
}
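/* Entry point for FC BSG requests - dispatch on the BSG message code. */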
int
bfad_im_bsg_request(struct fc_bsg_job *job)
{
	uint32_t rc = BFA_STATUS_OK;

	switch (job->request->msgcode) {
	case FC_BSG_HST_VENDOR:
		/* Process BSG HST Vendor requests */
		rc = bfad_im_bsg_vendor_request(job);
		break;
	case FC_BSG_HST_ELS_NOLOGIN:
	case FC_BSG_RPT_ELS:
	case FC_BSG_HST_CT:
	case FC_BSG_RPT_CT:
		/* Process BSG ELS/CT commands */
		rc = bfad_im_bsg_els_ct_request(job);
		break;
	default:
		job->reply->result = rc = -EINVAL;
		job->reply->reply_payload_rcv_len = 0;
		break;
	}

	return rc;
}
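/*
 * BSG job timeout handler: the ELS/CT passthru path tracks its own timeout,
 * so keep the BSG job alive and let the transport rearm its timer.
 */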
int
bfad_im_bsg_timeout(struct fc_bsg_job *job)
{
	/*
	 * Don't complete the BSG job request - return -EAGAIN to reset the
	 * bsg job timeout; for ELS/CT passthru we already have a timer to
	 * track the request.
	 */
	return -EAGAIN;
}