bfad_bsg.c

/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 *
 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */

#include <linux/uaccess.h>
#include "bfad_drv.h"
#include "bfad_im.h"
#include "bfad_bsg.h"

BFA_TRC_FILE(LDRV, BSG);

/* bfad_im_bsg_get_kobject - increment the bfa refcnt */
static void
bfad_im_bsg_get_kobject(struct fc_bsg_job *job)
{
	struct Scsi_Host *shost = job->shost;
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	__module_get(shost->dma_dev->driver->owner);
	spin_unlock_irqrestore(shost->host_lock, flags);
}

/* bfad_im_bsg_put_kobject - decrement the bfa refcnt */
static void
bfad_im_bsg_put_kobject(struct fc_bsg_job *job)
{
	struct Scsi_Host *shost = job->shost;
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	module_put(shost->dma_dev->driver->owner);
	spin_unlock_irqrestore(shost->host_lock, flags);
}
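
/* bfad_iocmd_ioc_enable - enable the IOC if it is currently disabled and wait for it to come up */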
int
bfad_iocmd_ioc_enable(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	int rc = 0;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	/* If IOC is not in disabled state - return */
	if (!bfa_ioc_is_disabled(&bfad->bfa.ioc)) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_IOC_FAILURE;
		return rc;
	}

	init_completion(&bfad->enable_comp);
	bfa_iocfc_enable(&bfad->bfa);
	iocmd->status = BFA_STATUS_OK;
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	wait_for_completion(&bfad->enable_comp);

	return rc;
}

int
bfad_iocmd_ioc_disable(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	int rc = 0;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	if (bfad->disable_active) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		return EBUSY;
	}

	bfad->disable_active = BFA_TRUE;
	init_completion(&bfad->disable_comp);
	bfa_iocfc_disable(&bfad->bfa);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	wait_for_completion(&bfad->disable_comp);
	bfad->disable_active = BFA_FALSE;
	iocmd->status = BFA_STATUS_OK;

	return rc;
}
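
/* bfad_iocmd_ioc_get_info - report WWNs, MAC addresses, serial number and hardware path of the IOC */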
static int
bfad_iocmd_ioc_get_info(struct bfad_s *bfad, void *cmd)
{
	int i;
	struct bfa_bsg_ioc_info_s *iocmd = (struct bfa_bsg_ioc_info_s *)cmd;
	struct bfad_im_port_s *im_port;
	struct bfa_port_attr_s pattr;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	bfa_fcport_get_attr(&bfad->bfa, &pattr);
	iocmd->nwwn = pattr.nwwn;
	iocmd->pwwn = pattr.pwwn;
	iocmd->ioc_type = bfa_get_type(&bfad->bfa);
	iocmd->mac = bfa_get_mac(&bfad->bfa);
	iocmd->factory_mac = bfa_get_mfg_mac(&bfad->bfa);
	bfa_get_adapter_serial_num(&bfad->bfa, iocmd->serialnum);
	iocmd->factorynwwn = pattr.factorynwwn;
	iocmd->factorypwwn = pattr.factorypwwn;
	im_port = bfad->pport.im_port;
	iocmd->host = im_port->shost->host_no;
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	strcpy(iocmd->name, bfad->adapter_name);
	strcpy(iocmd->port_name, bfad->port_name);
	strcpy(iocmd->hwpath, bfad->pci_name);

	/* set adapter hw path */
	strcpy(iocmd->adapter_hwpath, bfad->pci_name);
	i = strlen(iocmd->adapter_hwpath) - 1;
	while (iocmd->adapter_hwpath[i] != '.')
		i--;
	iocmd->adapter_hwpath[i] = '\0';
	iocmd->status = BFA_STATUS_OK;
	return 0;
}

static int
bfad_iocmd_ioc_get_attr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_ioc_attr_s *iocmd = (struct bfa_bsg_ioc_attr_s *)cmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	bfa_ioc_get_attr(&bfad->bfa.ioc, &iocmd->ioc_attr);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	/* fill in driver attr info */
	strcpy(iocmd->ioc_attr.driver_attr.driver, BFAD_DRIVER_NAME);
	strncpy(iocmd->ioc_attr.driver_attr.driver_ver,
		BFAD_DRIVER_VERSION, BFA_VERSION_LEN);
	strcpy(iocmd->ioc_attr.driver_attr.fw_ver,
		iocmd->ioc_attr.adapter_attr.fw_ver);
	strcpy(iocmd->ioc_attr.driver_attr.bios_ver,
		iocmd->ioc_attr.adapter_attr.optrom_ver);

	/* copy chip rev info first otherwise it will be overwritten */
	memcpy(bfad->pci_attr.chip_rev, iocmd->ioc_attr.pci_attr.chip_rev,
		sizeof(bfad->pci_attr.chip_rev));
	memcpy(&iocmd->ioc_attr.pci_attr, &bfad->pci_attr,
		sizeof(struct bfa_ioc_pci_attr_s));

	iocmd->status = BFA_STATUS_OK;
	return 0;
}

int
bfad_iocmd_ioc_get_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_ioc_stats_s *iocmd = (struct bfa_bsg_ioc_stats_s *)cmd;

	bfa_ioc_get_stats(&bfad->bfa, &iocmd->ioc_stats);
	iocmd->status = BFA_STATUS_OK;
	return 0;
}
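
/* bfad_iocmd_ioc_get_fwstats - copy firmware statistics into the buffer that follows the bsg command header */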
int
bfad_iocmd_ioc_get_fwstats(struct bfad_s *bfad, void *cmd,
			unsigned int payload_len)
{
	struct bfa_bsg_ioc_fwstats_s *iocmd =
			(struct bfa_bsg_ioc_fwstats_s *)cmd;
	void *iocmd_bufptr;
	unsigned long flags;

	if (bfad_chk_iocmd_sz(payload_len,
			sizeof(struct bfa_bsg_ioc_fwstats_s),
			sizeof(struct bfa_fw_stats_s)) != BFA_STATUS_OK) {
		iocmd->status = BFA_STATUS_VERSION_FAIL;
		goto out;
	}

	iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_ioc_fwstats_s);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_ioc_fw_stats_get(&bfad->bfa.ioc, iocmd_bufptr);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	if (iocmd->status != BFA_STATUS_OK) {
		bfa_trc(bfad, iocmd->status);
		goto out;
	}
out:
	bfa_trc(bfad, 0x6666);
	return 0;
}

int
bfad_iocmd_iocfc_get_attr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_iocfc_attr_s *iocmd = (struct bfa_bsg_iocfc_attr_s *)cmd;

	iocmd->status = BFA_STATUS_OK;
	bfa_iocfc_get_attr(&bfad->bfa, &iocmd->iocfc_attr);

	return 0;
}

int
bfad_iocmd_iocfc_set_intr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_iocfc_intr_s *iocmd = (struct bfa_bsg_iocfc_intr_s *)cmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_iocfc_israttr_set(&bfad->bfa, &iocmd->attr);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;
}
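
/* bfad_iocmd_port_enable - enable the base port and wait for the HAL completion callback */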
int
bfad_iocmd_port_enable(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_port_enable(&bfad->bfa.modules.port,
					bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK) {
		bfa_trc(bfad, iocmd->status);
		return 0;
	}
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
	return 0;
}

int
bfad_iocmd_port_disable(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_port_disable(&bfad->bfa.modules.port,
					bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK) {
		bfa_trc(bfad, iocmd->status);
		return 0;
	}
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
	return 0;
}

static int
bfad_iocmd_port_get_attr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_port_attr_s *iocmd = (struct bfa_bsg_port_attr_s *)cmd;
	struct bfa_lport_attr_s port_attr;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	bfa_fcport_get_attr(&bfad->bfa, &iocmd->attr);
	bfa_fcs_lport_get_attr(&bfad->bfa_fcs.fabric.bport, &port_attr);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	if (iocmd->attr.topology != BFA_PORT_TOPOLOGY_NONE)
		iocmd->attr.pid = port_attr.pid;
	else
		iocmd->attr.pid = 0;

	iocmd->attr.port_type = port_attr.port_type;
	iocmd->attr.loopback = port_attr.loopback;
	iocmd->attr.authfail = port_attr.authfail;
	strncpy(iocmd->attr.port_symname.symname,
		port_attr.port_cfg.sym_name.symname,
		sizeof(port_attr.port_cfg.sym_name.symname));

	iocmd->status = BFA_STATUS_OK;
	return 0;
}

int
bfad_iocmd_port_get_stats(struct bfad_s *bfad, void *cmd,
			unsigned int payload_len)
{
	struct bfa_bsg_port_stats_s *iocmd = (struct bfa_bsg_port_stats_s *)cmd;
	struct bfad_hal_comp fcomp;
	void *iocmd_bufptr;
	unsigned long flags;

	if (bfad_chk_iocmd_sz(payload_len,
			sizeof(struct bfa_bsg_port_stats_s),
			sizeof(union bfa_port_stats_u)) != BFA_STATUS_OK) {
		iocmd->status = BFA_STATUS_VERSION_FAIL;
		return 0;
	}

	iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_port_stats_s);

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_port_get_stats(&bfad->bfa.modules.port,
				iocmd_bufptr, bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK) {
		bfa_trc(bfad, iocmd->status);
		goto out;
	}
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}
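
/* bfad_iocmd_lport_get_attr - look up the logical port by vf_id/pwwn and return its attributes */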
static int
bfad_iocmd_lport_get_attr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_bsg_lport_attr_s *iocmd = (struct bfa_bsg_lport_attr_s *)cmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->pwwn);
	if (fcs_port == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
		goto out;
	}

	bfa_fcs_lport_get_attr(fcs_port, &iocmd->port_attr);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}

int
bfad_iocmd_lport_get_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_bsg_lport_stats_s *iocmd =
			(struct bfa_bsg_lport_stats_s *)cmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->pwwn);
	if (fcs_port == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
		goto out;
	}

	bfa_fcs_lport_get_stats(fcs_port, &iocmd->port_stats);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}

int
bfad_iocmd_lport_get_iostats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_bsg_lport_iostats_s *iocmd =
			(struct bfa_bsg_lport_iostats_s *)cmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->pwwn);
	if (fcs_port == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
		goto out;
	}

	bfa_fcpim_port_iostats(&bfad->bfa, &iocmd->iostats,
			fcs_port->lp_tag);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}

int
bfad_iocmd_lport_get_rports(struct bfad_s *bfad, void *cmd,
			unsigned int payload_len)
{
	struct bfa_bsg_lport_get_rports_s *iocmd =
			(struct bfa_bsg_lport_get_rports_s *)cmd;
	struct bfa_fcs_lport_s *fcs_port;
	unsigned long flags;
	void *iocmd_bufptr;

	if (iocmd->nrports == 0)
		return EINVAL;

	if (bfad_chk_iocmd_sz(payload_len,
			sizeof(struct bfa_bsg_lport_get_rports_s),
			sizeof(wwn_t) * iocmd->nrports) != BFA_STATUS_OK) {
		iocmd->status = BFA_STATUS_VERSION_FAIL;
		return 0;
	}

	iocmd_bufptr = (char *)iocmd +
			sizeof(struct bfa_bsg_lport_get_rports_s);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->pwwn);
	if (fcs_port == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		bfa_trc(bfad, 0);
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
		goto out;
	}

	bfa_fcs_lport_get_rports(fcs_port, (wwn_t *)iocmd_bufptr,
				&iocmd->nrports);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}
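
/* bfad_iocmd_rport_get_attr - look up the remote port by rpwwn and return its attributes */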
int
bfad_iocmd_rport_get_attr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_rport_attr_s *iocmd = (struct bfa_bsg_rport_attr_s *)cmd;
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_fcs_rport_s *fcs_rport;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->pwwn);
	if (fcs_port == NULL) {
		bfa_trc(bfad, 0);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
		goto out;
	}

	fcs_rport = bfa_fcs_rport_lookup(fcs_port, iocmd->rpwwn);
	if (fcs_rport == NULL) {
		bfa_trc(bfad, 0);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
		goto out;
	}

	bfa_fcs_rport_get_attr(fcs_rport, &iocmd->attr);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}

static int
bfad_iocmd_rport_get_addr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_rport_scsi_addr_s *iocmd =
			(struct bfa_bsg_rport_scsi_addr_s *)cmd;
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_fcs_itnim_s *fcs_itnim;
	struct bfad_itnim_s *drv_itnim;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->pwwn);
	if (fcs_port == NULL) {
		bfa_trc(bfad, 0);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
		goto out;
	}

	fcs_itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn);
	if (fcs_itnim == NULL) {
		bfa_trc(bfad, 0);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
		goto out;
	}

	drv_itnim = fcs_itnim->itnim_drv;

	if (drv_itnim && drv_itnim->im_port)
		iocmd->host = drv_itnim->im_port->shost->host_no;
	else {
		bfa_trc(bfad, 0);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
		goto out;
	}

	iocmd->target = drv_itnim->scsi_tgt_id;
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	iocmd->bus = 0;
	iocmd->lun = 0;
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}

int
bfad_iocmd_rport_get_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_rport_stats_s *iocmd =
			(struct bfa_bsg_rport_stats_s *)cmd;
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_fcs_rport_s *fcs_rport;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->pwwn);
	if (fcs_port == NULL) {
		bfa_trc(bfad, 0);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
		goto out;
	}

	fcs_rport = bfa_fcs_rport_lookup(fcs_port, iocmd->rpwwn);
	if (fcs_rport == NULL) {
		bfa_trc(bfad, 0);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
		goto out;
	}

	memcpy((void *)&iocmd->stats, (void *)&fcs_rport->stats,
		sizeof(struct bfa_rport_stats_s));
	memcpy((void *)&iocmd->stats.hal_stats,
		(void *)&(bfa_fcs_rport_get_halrport(fcs_rport)->stats),
		sizeof(struct bfa_rport_hal_stats_s));

	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}
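
/* bfad_iocmd_fabric_get_lports - return the WWNs of the logical ports on the given virtual fabric */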
static int
bfad_iocmd_fabric_get_lports(struct bfad_s *bfad, void *cmd,
			unsigned int payload_len)
{
	struct bfa_bsg_fabric_get_lports_s *iocmd =
			(struct bfa_bsg_fabric_get_lports_s *)cmd;
	bfa_fcs_vf_t *fcs_vf;
	uint32_t nports = iocmd->nports;
	unsigned long flags;
	void *iocmd_bufptr;

	if (nports == 0) {
		iocmd->status = BFA_STATUS_EINVAL;
		goto out;
	}

	if (bfad_chk_iocmd_sz(payload_len,
			sizeof(struct bfa_bsg_fabric_get_lports_s),
			sizeof(wwn_t[iocmd->nports])) != BFA_STATUS_OK) {
		iocmd->status = BFA_STATUS_VERSION_FAIL;
		goto out;
	}

	iocmd_bufptr = (char *)iocmd +
			sizeof(struct bfa_bsg_fabric_get_lports_s);

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_vf = bfa_fcs_vf_lookup(&bfad->bfa_fcs, iocmd->vf_id);
	if (fcs_vf == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_VFID;
		goto out;
	}
	bfa_fcs_vf_get_ports(fcs_vf, (wwn_t *)iocmd_bufptr, &nports);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	iocmd->nports = nports;
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}

int
bfad_iocmd_fcpim_get_modstats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_fcpim_modstats_s *iocmd =
			(struct bfa_bsg_fcpim_modstats_s *)cmd;
	struct bfa_fcpim_s *fcpim = BFA_FCPIM(&bfad->bfa);
	struct list_head *qe, *qen;
	struct bfa_itnim_s *itnim;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	/* accumulate IO stats from itnim */
	memset((void *)&iocmd->modstats, 0, sizeof(struct bfa_itnim_iostats_s));
	list_for_each_safe(qe, qen, &fcpim->itnim_q) {
		itnim = (struct bfa_itnim_s *) qe;
		bfa_fcpim_add_stats(&iocmd->modstats, &(itnim->stats));
	}
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
	return 0;
}

int
bfad_iocmd_fcpim_get_del_itn_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_fcpim_del_itn_stats_s *iocmd =
			(struct bfa_bsg_fcpim_del_itn_stats_s *)cmd;
	struct bfa_fcpim_s *fcpim = BFA_FCPIM(&bfad->bfa);
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	memcpy((void *)&iocmd->modstats, (void *)&fcpim->del_itn_stats,
		sizeof(struct bfa_fcpim_del_itn_stats_s));
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
	return 0;
}
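
/* bfad_iocmd_itnim_get_attr - return initiator-target nexus attributes for the given rpwwn */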
static int
bfad_iocmd_itnim_get_attr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_itnim_attr_s *iocmd = (struct bfa_bsg_itnim_attr_s *)cmd;
	struct bfa_fcs_lport_s *fcs_port;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->lpwwn);
	if (!fcs_port)
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
	else
		iocmd->status = bfa_fcs_itnim_attr_get(fcs_port,
					iocmd->rpwwn, &iocmd->attr);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	return 0;
}

static int
bfad_iocmd_itnim_get_iostats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_itnim_iostats_s *iocmd =
			(struct bfa_bsg_itnim_iostats_s *)cmd;
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_fcs_itnim_s *itnim;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->lpwwn);
	if (!fcs_port) {
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
		bfa_trc(bfad, 0);
	} else {
		itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn);
		if (itnim == NULL)
			iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
		else {
			iocmd->status = BFA_STATUS_OK;
			memcpy((void *)&iocmd->iostats, (void *)
				&(bfa_fcs_itnim_get_halitn(itnim)->stats),
				sizeof(struct bfa_itnim_iostats_s));
		}
	}
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	return 0;
}

static int
bfad_iocmd_itnim_get_itnstats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_itnim_itnstats_s *iocmd =
			(struct bfa_bsg_itnim_itnstats_s *)cmd;
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_fcs_itnim_s *itnim;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->lpwwn);
	if (!fcs_port) {
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
		bfa_trc(bfad, 0);
	} else {
		itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn);
		if (itnim == NULL)
			iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
		else {
			iocmd->status = BFA_STATUS_OK;
			bfa_fcs_itnim_stats_get(fcs_port, iocmd->rpwwn,
					&iocmd->itnstats);
		}
	}
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	return 0;
}

int
bfad_iocmd_fcport_enable(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_fcport_enable(&bfad->bfa);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;
}

int
bfad_iocmd_fcport_disable(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_fcport_disable(&bfad->bfa);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;
}
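
/* bfad_iocmd_ioc_get_pcifn_cfg - query the PCI function configuration through the adapter block (ablk) module */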
int
bfad_iocmd_ioc_get_pcifn_cfg(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_pcifn_cfg_s *iocmd = (struct bfa_bsg_pcifn_cfg_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_ablk_query(&bfad->bfa.modules.ablk,
				&iocmd->pcifn_cfg,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;

	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_pcifn_create(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_pcifn_s *iocmd = (struct bfa_bsg_pcifn_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_ablk_pf_create(&bfad->bfa.modules.ablk,
				&iocmd->pcifn_id, iocmd->port,
				iocmd->pcifn_class, iocmd->bandwidth,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;

	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_pcifn_delete(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_pcifn_s *iocmd = (struct bfa_bsg_pcifn_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_ablk_pf_delete(&bfad->bfa.modules.ablk,
				iocmd->pcifn_id,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;

	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_pcifn_bw(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_pcifn_s *iocmd = (struct bfa_bsg_pcifn_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_ablk_pf_update(&bfad->bfa.modules.ablk,
				iocmd->pcifn_id, iocmd->bandwidth,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	bfa_trc(bfad, iocmd->status);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;

	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
	bfa_trc(bfad, iocmd->status);
out:
	return 0;
}

int
bfad_iocmd_adapter_cfg_mode(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_adapter_cfg_mode_s *iocmd =
			(struct bfa_bsg_adapter_cfg_mode_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags = 0;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_ablk_adapter_config(&bfad->bfa.modules.ablk,
				iocmd->cfg.mode, iocmd->cfg.max_pf,
				iocmd->cfg.max_vf, bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;

	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_port_cfg_mode(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_port_cfg_mode_s *iocmd =
			(struct bfa_bsg_port_cfg_mode_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags = 0;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_ablk_port_config(&bfad->bfa.modules.ablk,
				iocmd->instance, iocmd->cfg.mode,
				iocmd->cfg.max_pf, iocmd->cfg.max_vf,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;

	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_ablk_optrom(struct bfad_s *bfad, unsigned int cmd, void *pcmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	if (cmd == IOCMD_FLASH_ENABLE_OPTROM)
		iocmd->status = bfa_ablk_optrom_en(&bfad->bfa.modules.ablk,
					bfad_hcb_comp, &fcomp);
	else
		iocmd->status = bfa_ablk_optrom_dis(&bfad->bfa.modules.ablk,
					bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;

	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}
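
/* bfad_iocmd_faa_enable - enable FAA (fabric assigned address) and wait for the firmware callback */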
int
bfad_iocmd_faa_enable(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	unsigned long flags;
	struct bfad_hal_comp fcomp;

	init_completion(&fcomp.comp);
	iocmd->status = BFA_STATUS_OK;
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_faa_enable(&bfad->bfa, bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;

	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_faa_disable(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	unsigned long flags;
	struct bfad_hal_comp fcomp;

	init_completion(&fcomp.comp);
	iocmd->status = BFA_STATUS_OK;
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_faa_disable(&bfad->bfa, bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;

	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_faa_query(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_faa_attr_s *iocmd = (struct bfa_bsg_faa_attr_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	iocmd->status = BFA_STATUS_OK;
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_faa_query(&bfad->bfa, &iocmd->faa_attr,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;

	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}
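
/* bfad_iocmd_cee_attr - fetch CEE attributes into the buffer that follows the bsg command header */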
int
bfad_iocmd_cee_attr(struct bfad_s *bfad, void *cmd, unsigned int payload_len)
{
	struct bfa_bsg_cee_attr_s *iocmd =
				(struct bfa_bsg_cee_attr_s *)cmd;
	void *iocmd_bufptr;
	struct bfad_hal_comp cee_comp;
	unsigned long flags;

	if (bfad_chk_iocmd_sz(payload_len,
			sizeof(struct bfa_bsg_cee_attr_s),
			sizeof(struct bfa_cee_attr_s)) != BFA_STATUS_OK) {
		iocmd->status = BFA_STATUS_VERSION_FAIL;
		return 0;
	}

	iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_cee_attr_s);

	cee_comp.status = 0;
	init_completion(&cee_comp.comp);
	mutex_lock(&bfad_mutex);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_cee_get_attr(&bfad->bfa.modules.cee, iocmd_bufptr,
					bfad_hcb_comp, &cee_comp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK) {
		mutex_unlock(&bfad_mutex);
		bfa_trc(bfad, 0x5555);
		goto out;
	}
	wait_for_completion(&cee_comp.comp);
	mutex_unlock(&bfad_mutex);
out:
	return 0;
}

int
bfad_iocmd_cee_get_stats(struct bfad_s *bfad, void *cmd,
			unsigned int payload_len)
{
	struct bfa_bsg_cee_stats_s *iocmd =
				(struct bfa_bsg_cee_stats_s *)cmd;
	void *iocmd_bufptr;
	struct bfad_hal_comp cee_comp;
	unsigned long flags;

	if (bfad_chk_iocmd_sz(payload_len,
			sizeof(struct bfa_bsg_cee_stats_s),
			sizeof(struct bfa_cee_stats_s)) != BFA_STATUS_OK) {
		iocmd->status = BFA_STATUS_VERSION_FAIL;
		return 0;
	}

	iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_cee_stats_s);

	cee_comp.status = 0;
	init_completion(&cee_comp.comp);
	mutex_lock(&bfad_mutex);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_cee_get_stats(&bfad->bfa.modules.cee, iocmd_bufptr,
					bfad_hcb_comp, &cee_comp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK) {
		mutex_unlock(&bfad_mutex);
		bfa_trc(bfad, 0x5555);
		goto out;
	}
	wait_for_completion(&cee_comp.comp);
	mutex_unlock(&bfad_mutex);
out:
	return 0;
}

int
bfad_iocmd_cee_reset_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_cee_reset_stats(&bfad->bfa.modules.cee, NULL, NULL);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		bfa_trc(bfad, 0x5555);
	return 0;
}
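
/* bfad_iocmd_sfp_media - query the SFP media type; waits for the callback only when the SFP reports not-ready */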
int
bfad_iocmd_sfp_media(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_sfp_media_s *iocmd = (struct bfa_bsg_sfp_media_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_sfp_media(BFA_SFP_MOD(&bfad->bfa), &iocmd->media,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	bfa_trc(bfad, iocmd->status);
	if (iocmd->status != BFA_STATUS_SFP_NOT_READY)
		goto out;

	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_sfp_speed(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_sfp_speed_s *iocmd = (struct bfa_bsg_sfp_speed_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_sfp_speed(BFA_SFP_MOD(&bfad->bfa), iocmd->speed,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	bfa_trc(bfad, iocmd->status);
	if (iocmd->status != BFA_STATUS_SFP_NOT_READY)
		goto out;
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_flash_get_attr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_flash_attr_s *iocmd =
			(struct bfa_bsg_flash_attr_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_flash_get_attr(BFA_FLASH(&bfad->bfa), &iocmd->attr,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_flash_erase_part(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_flash_s *iocmd = (struct bfa_bsg_flash_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_flash_erase_part(BFA_FLASH(&bfad->bfa), iocmd->type,
				iocmd->instance, bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}
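
/* bfad_iocmd_flash_update_part - write a flash partition from the buffer that follows the bsg command header */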
int
bfad_iocmd_flash_update_part(struct bfad_s *bfad, void *cmd,
			unsigned int payload_len)
{
	struct bfa_bsg_flash_s *iocmd = (struct bfa_bsg_flash_s *)cmd;
	void *iocmd_bufptr;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	if (bfad_chk_iocmd_sz(payload_len,
			sizeof(struct bfa_bsg_flash_s),
			iocmd->bufsz) != BFA_STATUS_OK) {
		iocmd->status = BFA_STATUS_VERSION_FAIL;
		return 0;
	}

	iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_flash_s);

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_flash_update_part(BFA_FLASH(&bfad->bfa),
				iocmd->type, iocmd->instance, iocmd_bufptr,
				iocmd->bufsz, 0, bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_flash_read_part(struct bfad_s *bfad, void *cmd,
			unsigned int payload_len)
{
	struct bfa_bsg_flash_s *iocmd = (struct bfa_bsg_flash_s *)cmd;
	struct bfad_hal_comp fcomp;
	void *iocmd_bufptr;
	unsigned long flags;

	if (bfad_chk_iocmd_sz(payload_len,
			sizeof(struct bfa_bsg_flash_s),
			iocmd->bufsz) != BFA_STATUS_OK) {
		iocmd->status = BFA_STATUS_VERSION_FAIL;
		return 0;
	}

	iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_flash_s);

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_flash_read_part(BFA_FLASH(&bfad->bfa), iocmd->type,
				iocmd->instance, iocmd_bufptr, iocmd->bufsz, 0,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}
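
/* bfad_iocmd_diag_temp - read the adapter temperature sensor through the diag module */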
int
bfad_iocmd_diag_temp(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_diag_get_temp_s *iocmd =
			(struct bfa_bsg_diag_get_temp_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_diag_tsensor_query(BFA_DIAG_MOD(&bfad->bfa),
				&iocmd->result, bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	bfa_trc(bfad, iocmd->status);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_diag_memtest(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_diag_memtest_s *iocmd =
			(struct bfa_bsg_diag_memtest_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_diag_memtest(BFA_DIAG_MOD(&bfad->bfa),
				&iocmd->memtest, iocmd->pat,
				&iocmd->result, bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	bfa_trc(bfad, iocmd->status);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_diag_loopback(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_diag_loopback_s *iocmd =
			(struct bfa_bsg_diag_loopback_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_fcdiag_loopback(&bfad->bfa, iocmd->opmode,
				iocmd->speed, iocmd->lpcnt, iocmd->pat,
				&iocmd->result, bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	bfa_trc(bfad, iocmd->status);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_diag_fwping(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_diag_fwping_s *iocmd =
			(struct bfa_bsg_diag_fwping_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_diag_fwping(BFA_DIAG_MOD(&bfad->bfa), iocmd->cnt,
				iocmd->pattern, &iocmd->result,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	bfa_trc(bfad, iocmd->status);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;
	bfa_trc(bfad, 0x77771);
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_diag_queuetest(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_diag_qtest_s *iocmd = (struct bfa_bsg_diag_qtest_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_fcdiag_queuetest(&bfad->bfa, iocmd->force,
				iocmd->queue, &iocmd->result,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_diag_sfp(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_sfp_show_s *iocmd =
			(struct bfa_bsg_sfp_show_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_sfp_show(BFA_SFP_MOD(&bfad->bfa), &iocmd->sfp,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	bfa_trc(bfad, iocmd->status);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
	bfa_trc(bfad, iocmd->status);
out:
	return 0;
}

int
bfad_iocmd_diag_led(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_diag_led_s *iocmd = (struct bfa_bsg_diag_led_s *)cmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_diag_ledtest(BFA_DIAG_MOD(&bfad->bfa),
				&iocmd->ledtest);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	return 0;
}

int
bfad_iocmd_diag_beacon_lport(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_diag_beacon_s *iocmd =
			(struct bfa_bsg_diag_beacon_s *)cmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_diag_beacon_port(BFA_DIAG_MOD(&bfad->bfa),
				iocmd->beacon, iocmd->link_e2e_beacon,
				iocmd->second);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	return 0;
}

int
bfad_iocmd_diag_lb_stat(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_diag_lb_stat_s *iocmd =
			(struct bfa_bsg_diag_lb_stat_s *)cmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_fcdiag_lb_is_running(&bfad->bfa);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	bfa_trc(bfad, iocmd->status);

	return 0;
}
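
/* bfad_iocmd_phy_get_attr - return attributes of the requested PHY instance */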
int
bfad_iocmd_phy_get_attr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_phy_attr_s *iocmd =
			(struct bfa_bsg_phy_attr_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_phy_get_attr(BFA_PHY(&bfad->bfa), iocmd->instance,
				&iocmd->attr, bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_phy_get_stats(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_phy_stats_s *iocmd =
			(struct bfa_bsg_phy_stats_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_phy_get_stats(BFA_PHY(&bfad->bfa), iocmd->instance,
				&iocmd->stats, bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_phy_read(struct bfad_s *bfad, void *cmd, unsigned int payload_len)
{
	struct bfa_bsg_phy_s *iocmd = (struct bfa_bsg_phy_s *)cmd;
	struct bfad_hal_comp fcomp;
	void *iocmd_bufptr;
	unsigned long flags;

	if (bfad_chk_iocmd_sz(payload_len,
			sizeof(struct bfa_bsg_phy_s),
			iocmd->bufsz) != BFA_STATUS_OK) {
		iocmd->status = BFA_STATUS_VERSION_FAIL;
		return 0;
	}

	iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_phy_s);
	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_phy_read(BFA_PHY(&bfad->bfa),
				iocmd->instance, iocmd_bufptr, iocmd->bufsz,
				0, bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
	if (iocmd->status != BFA_STATUS_OK)
		goto out;
out:
	return 0;
}

int
bfad_iocmd_phy_update(struct bfad_s *bfad, void *cmd, unsigned int payload_len)
{
	struct bfa_bsg_phy_s *iocmd = (struct bfa_bsg_phy_s *)cmd;
	void *iocmd_bufptr;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	if (bfad_chk_iocmd_sz(payload_len,
			sizeof(struct bfa_bsg_phy_s),
			iocmd->bufsz) != BFA_STATUS_OK) {
		iocmd->status = BFA_STATUS_VERSION_FAIL;
		return 0;
	}

	iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_phy_s);
	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_phy_update(BFA_PHY(&bfad->bfa),
				iocmd->instance, iocmd_bufptr, iocmd->bufsz,
				0, bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;
	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}
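
/* bfad_iocmd_handler - dispatch a vendor-specific bsg command to its handler */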
static int
bfad_iocmd_handler(struct bfad_s *bfad, unsigned int cmd, void *iocmd,
		unsigned int payload_len)
{
	int rc = EINVAL;

	switch (cmd) {
	case IOCMD_IOC_ENABLE:
		rc = bfad_iocmd_ioc_enable(bfad, iocmd);
		break;
	case IOCMD_IOC_DISABLE:
		rc = bfad_iocmd_ioc_disable(bfad, iocmd);
		break;
	case IOCMD_IOC_GET_INFO:
		rc = bfad_iocmd_ioc_get_info(bfad, iocmd);
		break;
	case IOCMD_IOC_GET_ATTR:
		rc = bfad_iocmd_ioc_get_attr(bfad, iocmd);
		break;
	case IOCMD_IOC_GET_STATS:
		rc = bfad_iocmd_ioc_get_stats(bfad, iocmd);
		break;
	case IOCMD_IOC_GET_FWSTATS:
		rc = bfad_iocmd_ioc_get_fwstats(bfad, iocmd, payload_len);
		break;
	case IOCMD_IOCFC_GET_ATTR:
		rc = bfad_iocmd_iocfc_get_attr(bfad, iocmd);
		break;
	case IOCMD_IOCFC_SET_INTR:
		rc = bfad_iocmd_iocfc_set_intr(bfad, iocmd);
		break;
	case IOCMD_PORT_ENABLE:
		rc = bfad_iocmd_port_enable(bfad, iocmd);
		break;
	case IOCMD_PORT_DISABLE:
		rc = bfad_iocmd_port_disable(bfad, iocmd);
		break;
	case IOCMD_PORT_GET_ATTR:
		rc = bfad_iocmd_port_get_attr(bfad, iocmd);
		break;
	case IOCMD_PORT_GET_STATS:
		rc = bfad_iocmd_port_get_stats(bfad, iocmd, payload_len);
		break;
	case IOCMD_LPORT_GET_ATTR:
		rc = bfad_iocmd_lport_get_attr(bfad, iocmd);
		break;
	case IOCMD_LPORT_GET_STATS:
		rc = bfad_iocmd_lport_get_stats(bfad, iocmd);
		break;
	case IOCMD_LPORT_GET_IOSTATS:
		rc = bfad_iocmd_lport_get_iostats(bfad, iocmd);
		break;
	case IOCMD_LPORT_GET_RPORTS:
		rc = bfad_iocmd_lport_get_rports(bfad, iocmd, payload_len);
		break;
	case IOCMD_RPORT_GET_ATTR:
		rc = bfad_iocmd_rport_get_attr(bfad, iocmd);
		break;
	case IOCMD_RPORT_GET_ADDR:
		rc = bfad_iocmd_rport_get_addr(bfad, iocmd);
		break;
	case IOCMD_RPORT_GET_STATS:
		rc = bfad_iocmd_rport_get_stats(bfad, iocmd);
		break;
	case IOCMD_FABRIC_GET_LPORTS:
		rc = bfad_iocmd_fabric_get_lports(bfad, iocmd, payload_len);
		break;
	case IOCMD_FCPIM_MODSTATS:
		rc = bfad_iocmd_fcpim_get_modstats(bfad, iocmd);
		break;
	case IOCMD_FCPIM_DEL_ITN_STATS:
		rc = bfad_iocmd_fcpim_get_del_itn_stats(bfad, iocmd);
		break;
	case IOCMD_ITNIM_GET_ATTR:
		rc = bfad_iocmd_itnim_get_attr(bfad, iocmd);
		break;
	case IOCMD_ITNIM_GET_IOSTATS:
		rc = bfad_iocmd_itnim_get_iostats(bfad, iocmd);
		break;
	case IOCMD_ITNIM_GET_ITNSTATS:
		rc = bfad_iocmd_itnim_get_itnstats(bfad, iocmd);
		break;
	case IOCMD_FCPORT_ENABLE:
		rc = bfad_iocmd_fcport_enable(bfad, iocmd);
		break;
	case IOCMD_FCPORT_DISABLE:
		rc = bfad_iocmd_fcport_disable(bfad, iocmd);
		break;
	case IOCMD_IOC_PCIFN_CFG:
		rc = bfad_iocmd_ioc_get_pcifn_cfg(bfad, iocmd);
		break;
	case IOCMD_PCIFN_CREATE:
		rc = bfad_iocmd_pcifn_create(bfad, iocmd);
		break;
	case IOCMD_PCIFN_DELETE:
		rc = bfad_iocmd_pcifn_delete(bfad, iocmd);
		break;
	case IOCMD_PCIFN_BW:
		rc = bfad_iocmd_pcifn_bw(bfad, iocmd);
		break;
	case IOCMD_ADAPTER_CFG_MODE:
		rc = bfad_iocmd_adapter_cfg_mode(bfad, iocmd);
		break;
	case IOCMD_PORT_CFG_MODE:
		rc = bfad_iocmd_port_cfg_mode(bfad, iocmd);
		break;
	case IOCMD_FLASH_ENABLE_OPTROM:
	case IOCMD_FLASH_DISABLE_OPTROM:
		rc = bfad_iocmd_ablk_optrom(bfad, cmd, iocmd);
		break;
	case IOCMD_FAA_ENABLE:
		rc = bfad_iocmd_faa_enable(bfad, iocmd);
		break;
	case IOCMD_FAA_DISABLE:
		rc = bfad_iocmd_faa_disable(bfad, iocmd);
		break;
	case IOCMD_FAA_QUERY:
		rc = bfad_iocmd_faa_query(bfad, iocmd);
		break;
	case IOCMD_CEE_GET_ATTR:
		rc = bfad_iocmd_cee_attr(bfad, iocmd, payload_len);
		break;
	case IOCMD_CEE_GET_STATS:
		rc = bfad_iocmd_cee_get_stats(bfad, iocmd, payload_len);
		break;
	case IOCMD_CEE_RESET_STATS:
		rc = bfad_iocmd_cee_reset_stats(bfad, iocmd);
		break;
	case IOCMD_SFP_MEDIA:
		rc = bfad_iocmd_sfp_media(bfad, iocmd);
		break;
	case IOCMD_SFP_SPEED:
		rc = bfad_iocmd_sfp_speed(bfad, iocmd);
		break;
	case IOCMD_FLASH_GET_ATTR:
		rc = bfad_iocmd_flash_get_attr(bfad, iocmd);
		break;
	case IOCMD_FLASH_ERASE_PART:
		rc = bfad_iocmd_flash_erase_part(bfad, iocmd);
		break;
	case IOCMD_FLASH_UPDATE_PART:
		rc = bfad_iocmd_flash_update_part(bfad, iocmd, payload_len);
		break;
	case IOCMD_FLASH_READ_PART:
		rc = bfad_iocmd_flash_read_part(bfad, iocmd, payload_len);
		break;
	case IOCMD_DIAG_TEMP:
		rc = bfad_iocmd_diag_temp(bfad, iocmd);
		break;
	case IOCMD_DIAG_MEMTEST:
		rc = bfad_iocmd_diag_memtest(bfad, iocmd);
		break;
	case IOCMD_DIAG_LOOPBACK:
		rc = bfad_iocmd_diag_loopback(bfad, iocmd);
		break;
	case IOCMD_DIAG_FWPING:
		rc = bfad_iocmd_diag_fwping(bfad, iocmd);
		break;
	case IOCMD_DIAG_QUEUETEST:
		rc = bfad_iocmd_diag_queuetest(bfad, iocmd);
		break;
	case IOCMD_DIAG_SFP:
		rc = bfad_iocmd_diag_sfp(bfad, iocmd);
		break;
	case IOCMD_DIAG_LED:
		rc = bfad_iocmd_diag_led(bfad, iocmd);
		break;
	case IOCMD_DIAG_BEACON_LPORT:
		rc = bfad_iocmd_diag_beacon_lport(bfad, iocmd);
		break;
	case IOCMD_DIAG_LB_STAT:
		rc = bfad_iocmd_diag_lb_stat(bfad, iocmd);
		break;
	case IOCMD_PHY_GET_ATTR:
		rc = bfad_iocmd_phy_get_attr(bfad, iocmd);
		break;
	case IOCMD_PHY_GET_STATS:
		rc = bfad_iocmd_phy_get_stats(bfad, iocmd);
		break;
	case IOCMD_PHY_UPDATE_FW:
		rc = bfad_iocmd_phy_update(bfad, iocmd, payload_len);
		break;
	case IOCMD_PHY_READ_FW:
		rc = bfad_iocmd_phy_read(bfad, iocmd, payload_len);
		break;
	default:
		rc = EINVAL;
		break;
	}
	return -rc;
}
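
/*
 * bfad_im_bsg_vendor_request - service an FC_BSG_HST_VENDOR job.  The
 * request payload is linearized into a kernel buffer, handed to
 * bfad_iocmd_handler(), and the result (written in place) is copied
 * back into the reply payload scatter-gather list before the job is
 * completed.
 */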
static int
bfad_im_bsg_vendor_request(struct fc_bsg_job *job)
{
	uint32_t vendor_cmd = job->request->rqst_data.h_vendor.vendor_cmd[0];
	struct bfad_im_port_s *im_port =
			(struct bfad_im_port_s *) job->shost->hostdata[0];
	struct bfad_s *bfad = im_port->bfad;
	void *payload_kbuf;
	int rc = -EINVAL;

	/* Allocate a temp buffer to hold the passed in user space command */
	payload_kbuf = kzalloc(job->request_payload.payload_len, GFP_KERNEL);
	if (!payload_kbuf) {
		rc = -ENOMEM;
		goto out;
	}

	/* Copy the sg_list passed in to a linear buffer: holds the cmnd data */
	sg_copy_to_buffer(job->request_payload.sg_list,
			  job->request_payload.sg_cnt, payload_kbuf,
			  job->request_payload.payload_len);

	/* Invoke IOCMD handler - to handle all the vendor command requests */
	rc = bfad_iocmd_handler(bfad, vendor_cmd, payload_kbuf,
				job->request_payload.payload_len);
	if (rc != BFA_STATUS_OK)
		goto error;

	/* Copy the response data to the job->reply_payload sg_list */
	sg_copy_from_buffer(job->reply_payload.sg_list,
			    job->reply_payload.sg_cnt,
			    payload_kbuf,
			    job->reply_payload.payload_len);

	/* free the command buffer */
	kfree(payload_kbuf);

	/* Fill the BSG job reply data */
	job->reply_len = job->reply_payload.payload_len;
	job->reply->reply_payload_rcv_len = job->reply_payload.payload_len;
	job->reply->result = rc;

	job->job_done(job);
	return rc;
error:
	/* free the command buffer */
	kfree(payload_kbuf);
out:
	job->reply->result = rc;
	job->reply_len = sizeof(uint32_t);
	job->reply->reply_payload_rcv_len = 0;
	return rc;
}

/*
 * FC passthru call backs.  The four *_sgaddr/_sglen helpers are handed
 * to bfa_fcxp_alloc() so BFA can walk the request/response SG entries;
 * bfad_send_fcpt_cb() runs on I/O completion and wakes the waiter.
 */
u64
bfad_fcxp_get_req_sgaddr_cb(void *bfad_fcxp, int sgeid)
{
	struct bfad_fcxp	*drv_fcxp = bfad_fcxp;
	struct bfa_sge_s	*sge;
	u64	addr;

	sge = drv_fcxp->req_sge + sgeid;
	addr = (u64)(size_t) sge->sg_addr;
	return addr;
}

u32
bfad_fcxp_get_req_sglen_cb(void *bfad_fcxp, int sgeid)
{
	struct bfad_fcxp	*drv_fcxp = bfad_fcxp;
	struct bfa_sge_s	*sge;

	sge = drv_fcxp->req_sge + sgeid;
	return sge->sg_len;
}

u64
bfad_fcxp_get_rsp_sgaddr_cb(void *bfad_fcxp, int sgeid)
{
	struct bfad_fcxp	*drv_fcxp = bfad_fcxp;
	struct bfa_sge_s	*sge;
	u64	addr;

	sge = drv_fcxp->rsp_sge + sgeid;
	addr = (u64)(size_t) sge->sg_addr;
	return addr;
}

u32
bfad_fcxp_get_rsp_sglen_cb(void *bfad_fcxp, int sgeid)
{
	struct bfad_fcxp	*drv_fcxp = bfad_fcxp;
	struct bfa_sge_s	*sge;

	sge = drv_fcxp->rsp_sge + sgeid;
	return sge->sg_len;
}

void
bfad_send_fcpt_cb(void *bfad_fcxp, struct bfa_fcxp_s *fcxp, void *cbarg,
		bfa_status_t req_status, u32 rsp_len, u32 resid_len,
		struct fchs_s *rsp_fchs)
{
	struct bfad_fcxp *drv_fcxp = bfad_fcxp;

	drv_fcxp->req_status = req_status;
	drv_fcxp->rsp_len = rsp_len;

	/* bfa_fcxp will be automatically freed by BFA */
	drv_fcxp->bfa_fcxp = NULL;
	complete(&drv_fcxp->comp);
}
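
/*
 * bfad_fcxp_map_sg - allocate a bfad_buf_info + bfa_sge_s table, back
 * it with a single DMA-coherent buffer, and copy the linear bsg
 * payload into it.  Returns the buffer table (released later by
 * bfad_fcxp_free_mem()) or NULL on allocation failure.
 */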
struct bfad_buf_info *
bfad_fcxp_map_sg(struct bfad_s *bfad, void *payload_kbuf,
		 uint32_t payload_len, uint32_t *num_sgles)
{
	struct bfad_buf_info	*buf_base, *buf_info;
	struct bfa_sge_s	*sg_table;
	int sge_num = 1;

	buf_base = kzalloc((sizeof(struct bfad_buf_info) +
			   sizeof(struct bfa_sge_s)) * sge_num, GFP_KERNEL);
	if (!buf_base)
		return NULL;

	sg_table = (struct bfa_sge_s *) (((uint8_t *)buf_base) +
			(sizeof(struct bfad_buf_info) * sge_num));

	/* Allocate dma coherent memory */
	buf_info = buf_base;
	buf_info->size = payload_len;
	buf_info->virt = dma_alloc_coherent(&bfad->pcidev->dev, buf_info->size,
					    &buf_info->phys, GFP_KERNEL);
	if (!buf_info->virt)
		goto out_free_mem;

	/* copy the linear bsg buffer to buf_info */
	memset(buf_info->virt, 0, buf_info->size);
	memcpy(buf_info->virt, payload_kbuf, buf_info->size);

	/*
	 * Setup SG table
	 */
	sg_table->sg_len = buf_info->size;
	sg_table->sg_addr = (void *)(size_t) buf_info->phys;

	*num_sgles = sge_num;

	return buf_base;

out_free_mem:
	kfree(buf_base);
	return NULL;
}
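
/*
 * bfad_fcxp_free_mem - release the DMA-coherent buffers and the
 * bfad_buf_info table built by bfad_fcxp_map_sg().
 */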
void
bfad_fcxp_free_mem(struct bfad_s *bfad, struct bfad_buf_info *buf_base,
		   uint32_t num_sgles)
{
	int i;
	struct bfad_buf_info *buf_info = buf_base;

	if (buf_base) {
		for (i = 0; i < num_sgles; buf_info++, i++) {
			if (buf_info->virt != NULL)
				dma_free_coherent(&bfad->pcidev->dev,
					buf_info->size, buf_info->virt,
					buf_info->phys);
		}
		kfree(buf_base);
	}
}
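
/*
 * bfad_fcxp_bsg_send - allocate an FCXP from BFA and send the passthru
 * frame described by bsg_fcpt.  Completion is reported asynchronously
 * through bfad_send_fcpt_cb(); the caller waits on drv_fcxp->comp.
 */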
int
bfad_fcxp_bsg_send(struct fc_bsg_job *job, struct bfad_fcxp *drv_fcxp,
		   bfa_bsg_fcpt_t *bsg_fcpt)
{
	struct bfa_fcxp_s *hal_fcxp;
	struct bfad_s	*bfad = drv_fcxp->port->bfad;
	unsigned long	flags;
	uint8_t	lp_tag;

	spin_lock_irqsave(&bfad->bfad_lock, flags);

	/* Allocate bfa_fcxp structure */
	hal_fcxp = bfa_fcxp_alloc(drv_fcxp, &bfad->bfa,
				  drv_fcxp->num_req_sgles,
				  drv_fcxp->num_rsp_sgles,
				  bfad_fcxp_get_req_sgaddr_cb,
				  bfad_fcxp_get_req_sglen_cb,
				  bfad_fcxp_get_rsp_sgaddr_cb,
				  bfad_fcxp_get_rsp_sglen_cb);
	if (!hal_fcxp) {
		bfa_trc(bfad, 0);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		return BFA_STATUS_ENOMEM;
	}

	drv_fcxp->bfa_fcxp = hal_fcxp;

	lp_tag = bfa_lps_get_tag_from_pid(&bfad->bfa, bsg_fcpt->fchs.s_id);

	bfa_fcxp_send(hal_fcxp, drv_fcxp->bfa_rport, bsg_fcpt->vf_id, lp_tag,
		      bsg_fcpt->cts, bsg_fcpt->cos,
		      job->request_payload.payload_len,
		      &bsg_fcpt->fchs, bfad_send_fcpt_cb, bfad,
		      job->reply_payload.payload_len, bsg_fcpt->tsecs);

	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return BFA_STATUS_OK;
}
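
/*
 * bfad_im_bsg_els_ct_request - handle ELS/CT passthru bsg jobs.  Copies
 * the user bfa_bsg_fcpt_s descriptor in, resolves the local (and, for
 * RPT commands, remote) port, maps the request/response payloads into
 * DMA-able buffers, sends the FCXP and waits for completion, then
 * copies the response and the updated descriptor back to userspace.
 */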
int
bfad_im_bsg_els_ct_request(struct fc_bsg_job *job)
{
	struct bfa_bsg_data *bsg_data;
	struct bfad_im_port_s *im_port =
			(struct bfad_im_port_s *) job->shost->hostdata[0];
	struct bfad_s *bfad = im_port->bfad;
	bfa_bsg_fcpt_t *bsg_fcpt;
	struct bfad_fcxp    *drv_fcxp;
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_fcs_rport_s *fcs_rport;
	uint32_t command_type = job->request->msgcode;
	unsigned long flags;
	struct bfad_buf_info *rsp_buf_info;
	void *req_kbuf = NULL, *rsp_kbuf = NULL;
	int rc = -EINVAL;

	job->reply_len  = sizeof(uint32_t);	/* At least uint32_t reply_len */
	job->reply->reply_payload_rcv_len = 0;

	/* Get the payload passed in from userspace */
	bsg_data = (struct bfa_bsg_data *) (((char *)job->request) +
					    sizeof(struct fc_bsg_request));
	if (bsg_data == NULL)
		goto out;

	/*
	 * Allocate buffer for bsg_fcpt and do a copy_from_user op for payload
	 * buffer of size bsg_data->payload_len
	 */
	bsg_fcpt = (struct bfa_bsg_fcpt_s *)
		   kzalloc(bsg_data->payload_len, GFP_KERNEL);
	if (!bsg_fcpt)
		goto out;

	if (copy_from_user((uint8_t *)bsg_fcpt, bsg_data->payload,
				bsg_data->payload_len)) {
		kfree(bsg_fcpt);
		goto out;
	}

	drv_fcxp = kzalloc(sizeof(struct bfad_fcxp), GFP_KERNEL);
	if (drv_fcxp == NULL) {
		rc = -ENOMEM;
		goto out;
	}

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs, bsg_fcpt->vf_id,
					bsg_fcpt->lpwwn);
	if (fcs_port == NULL) {
		bsg_fcpt->status = BFA_STATUS_UNKNOWN_LWWN;
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		goto out_free_mem;
	}

	/* Check if the port is online before sending FC Passthru cmd */
	if (!bfa_fcs_lport_is_online(fcs_port)) {
		bsg_fcpt->status = BFA_STATUS_PORT_OFFLINE;
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		goto out_free_mem;
	}

	drv_fcxp->port = fcs_port->bfad_port;

	if (drv_fcxp->port->bfad == NULL)
		drv_fcxp->port->bfad = bfad;

	/* Fetch the bfa_rport - if nexus needed */
	if (command_type == FC_BSG_HST_ELS_NOLOGIN ||
	    command_type == FC_BSG_HST_CT) {
		/* BSG HST commands: no nexus needed */
		drv_fcxp->bfa_rport = NULL;

	} else if (command_type == FC_BSG_RPT_ELS ||
		   command_type == FC_BSG_RPT_CT) {
		/* BSG RPT commands: nexus needed */
		fcs_rport = bfa_fcs_lport_get_rport_by_pwwn(fcs_port,
							    bsg_fcpt->dpwwn);
		if (fcs_rport == NULL) {
			bsg_fcpt->status = BFA_STATUS_UNKNOWN_RWWN;
			spin_unlock_irqrestore(&bfad->bfad_lock, flags);
			goto out_free_mem;
		}

		drv_fcxp->bfa_rport = fcs_rport->bfa_rport;

	} else { /* Unknown BSG msgcode; return -EINVAL */
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		goto out_free_mem;
	}

	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	/* allocate memory for req / rsp buffers */
	req_kbuf = kzalloc(job->request_payload.payload_len, GFP_KERNEL);
	if (!req_kbuf) {
		printk(KERN_INFO "bfa %s: fcpt request buffer alloc failed\n",
		       bfad->pci_name);
		rc = -ENOMEM;
		goto out_free_mem;
	}

	rsp_kbuf = kzalloc(job->reply_payload.payload_len, GFP_KERNEL);
	if (!rsp_kbuf) {
		printk(KERN_INFO "bfa %s: fcpt response buffer alloc failed\n",
		       bfad->pci_name);
		rc = -ENOMEM;
		goto out_free_mem;
	}

	/* map req sg - copy the sg_list passed in to the linear buffer */
	sg_copy_to_buffer(job->request_payload.sg_list,
			  job->request_payload.sg_cnt, req_kbuf,
			  job->request_payload.payload_len);

	drv_fcxp->reqbuf_info = bfad_fcxp_map_sg(bfad, req_kbuf,
					job->request_payload.payload_len,
					&drv_fcxp->num_req_sgles);
	if (!drv_fcxp->reqbuf_info) {
		printk(KERN_INFO "bfa %s: fcpt request fcxp_map_sg failed\n",
		       bfad->pci_name);
		rc = -ENOMEM;
		goto out_free_mem;
	}

	drv_fcxp->req_sge = (struct bfa_sge_s *)
			    (((uint8_t *)drv_fcxp->reqbuf_info) +
			    (sizeof(struct bfad_buf_info) *
					drv_fcxp->num_req_sgles));

	/* map rsp sg */
	drv_fcxp->rspbuf_info = bfad_fcxp_map_sg(bfad, rsp_kbuf,
					job->reply_payload.payload_len,
					&drv_fcxp->num_rsp_sgles);
	if (!drv_fcxp->rspbuf_info) {
		printk(KERN_INFO "bfa %s: fcpt response fcxp_map_sg failed\n",
		       bfad->pci_name);
		rc = -ENOMEM;
		goto out_free_mem;
	}

	rsp_buf_info = (struct bfad_buf_info *)drv_fcxp->rspbuf_info;
	drv_fcxp->rsp_sge = (struct bfa_sge_s  *)
			    (((uint8_t *)drv_fcxp->rspbuf_info) +
			    (sizeof(struct bfad_buf_info) *
					drv_fcxp->num_rsp_sgles));

	/* fcxp send */
	init_completion(&drv_fcxp->comp);
	rc = bfad_fcxp_bsg_send(job, drv_fcxp, bsg_fcpt);
	if (rc == BFA_STATUS_OK) {
		wait_for_completion(&drv_fcxp->comp);
		bsg_fcpt->status = drv_fcxp->req_status;
	} else {
		bsg_fcpt->status = rc;
		goto out_free_mem;
	}

	/* fill the job->reply data */
	if (drv_fcxp->req_status == BFA_STATUS_OK) {
		job->reply_len = drv_fcxp->rsp_len;
		job->reply->reply_payload_rcv_len = drv_fcxp->rsp_len;
		job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
	} else {
		job->reply->reply_payload_rcv_len =
					sizeof(struct fc_bsg_ctels_reply);
		job->reply_len = sizeof(uint32_t);
		job->reply->reply_data.ctels_reply.status =
						FC_CTELS_STATUS_REJECT;
	}

	/* Copy the response data to the reply_payload sg list */
	sg_copy_from_buffer(job->reply_payload.sg_list,
			    job->reply_payload.sg_cnt,
			    (uint8_t *)rsp_buf_info->virt,
			    job->reply_payload.payload_len);

out_free_mem:
	bfad_fcxp_free_mem(bfad, drv_fcxp->rspbuf_info,
			   drv_fcxp->num_rsp_sgles);
	bfad_fcxp_free_mem(bfad, drv_fcxp->reqbuf_info,
			   drv_fcxp->num_req_sgles);
	kfree(req_kbuf);
	kfree(rsp_kbuf);

	/* Need a copy to user op */
	if (copy_to_user(bsg_data->payload, (void *) bsg_fcpt,
			 bsg_data->payload_len))
		rc = -EIO;

	kfree(bsg_fcpt);
	kfree(drv_fcxp);
out:
	job->reply->result = rc;

	if (rc == BFA_STATUS_OK)
		job->job_done(job);

	return rc;
}
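
/*
 * bfad_im_bsg_request - fc_bsg entry point.  Pins the bfa module while
 * a job is in flight and routes it to the vendor or ELS/CT handler
 * based on the bsg msgcode.
 */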
int
bfad_im_bsg_request(struct fc_bsg_job *job)
{
	uint32_t rc = BFA_STATUS_OK;

	/* Increment the bfa module refcnt - if bsg request is in service */
	bfad_im_bsg_get_kobject(job);

	switch (job->request->msgcode) {
	case FC_BSG_HST_VENDOR:
		/* Process BSG HST Vendor requests */
		rc = bfad_im_bsg_vendor_request(job);
		break;
	case FC_BSG_HST_ELS_NOLOGIN:
	case FC_BSG_RPT_ELS:
	case FC_BSG_HST_CT:
	case FC_BSG_RPT_CT:
		/* Process BSG ELS/CT commands */
		rc = bfad_im_bsg_els_ct_request(job);
		break;
	default:
		job->reply->result = rc = -EINVAL;
		job->reply->reply_payload_rcv_len = 0;
		break;
	}

	/* Decrement the bfa module refcnt - on completion of bsg request */
	bfad_im_bsg_put_kobject(job);

	return rc;
}

int
bfad_im_bsg_timeout(struct fc_bsg_job *job)
{
	/* Don't complete the BSG job request - return -EAGAIN
	 * to restart the bsg job timeout; for ELS/CT pass thru we
	 * already have a timer to track the request.
	 */
	return -EAGAIN;
}