bfad_bsg.c

/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 *
 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */

#include <linux/uaccess.h>
#include "bfad_drv.h"
#include "bfad_im.h"
#include "bfad_bsg.h"

BFA_TRC_FILE(LDRV, BSG);

/* bfad_im_bsg_get_kobject - increment the bfa refcnt */
static void
bfad_im_bsg_get_kobject(struct fc_bsg_job *job)
{
	struct Scsi_Host *shost = job->shost;
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	__module_get(shost->dma_dev->driver->owner);
	spin_unlock_irqrestore(shost->host_lock, flags);
}

/* bfad_im_bsg_put_kobject - decrement the bfa refcnt */
static void
bfad_im_bsg_put_kobject(struct fc_bsg_job *job)
{
	struct Scsi_Host *shost = job->shost;
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	module_put(shost->dma_dev->driver->owner);
	spin_unlock_irqrestore(shost->host_lock, flags);
}

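/* bfad_iocmd_ioc_get_info - report IOC identity (WWNs, MAC, serial number) */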
static int
bfad_iocmd_ioc_get_info(struct bfad_s *bfad, void *cmd)
{
	int i;
	struct bfa_bsg_ioc_info_s *iocmd = (struct bfa_bsg_ioc_info_s *)cmd;
	struct bfad_im_port_s *im_port;
	struct bfa_port_attr_s pattr;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	bfa_fcport_get_attr(&bfad->bfa, &pattr);
	iocmd->nwwn = pattr.nwwn;
	iocmd->pwwn = pattr.pwwn;
	iocmd->ioc_type = bfa_get_type(&bfad->bfa);
	iocmd->mac = bfa_get_mac(&bfad->bfa);
	iocmd->factory_mac = bfa_get_mfg_mac(&bfad->bfa);
	bfa_get_adapter_serial_num(&bfad->bfa, iocmd->serialnum);
	iocmd->factorynwwn = pattr.factorynwwn;
	iocmd->factorypwwn = pattr.factorypwwn;
	im_port = bfad->pport.im_port;
	iocmd->host = im_port->shost->host_no;
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	strcpy(iocmd->name, bfad->adapter_name);
	strcpy(iocmd->port_name, bfad->port_name);
	strcpy(iocmd->hwpath, bfad->pci_name);

	/* set adapter hw path */
	strcpy(iocmd->adapter_hwpath, bfad->pci_name);
	i = strlen(iocmd->adapter_hwpath) - 1;
	while (iocmd->adapter_hwpath[i] != '.')
		i--;
	iocmd->adapter_hwpath[i] = '\0';
	iocmd->status = BFA_STATUS_OK;
	return 0;
}

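/* bfad_iocmd_ioc_get_attr - report IOC, driver and PCI attributes */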
static int
bfad_iocmd_ioc_get_attr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_ioc_attr_s *iocmd = (struct bfa_bsg_ioc_attr_s *)cmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	bfa_ioc_get_attr(&bfad->bfa.ioc, &iocmd->ioc_attr);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	/* fill in driver attr info */
	strcpy(iocmd->ioc_attr.driver_attr.driver, BFAD_DRIVER_NAME);
	strncpy(iocmd->ioc_attr.driver_attr.driver_ver,
		BFAD_DRIVER_VERSION, BFA_VERSION_LEN);
	strcpy(iocmd->ioc_attr.driver_attr.fw_ver,
		iocmd->ioc_attr.adapter_attr.fw_ver);
	strcpy(iocmd->ioc_attr.driver_attr.bios_ver,
		iocmd->ioc_attr.adapter_attr.optrom_ver);

	/* copy chip rev info first otherwise it will be overwritten */
	memcpy(bfad->pci_attr.chip_rev, iocmd->ioc_attr.pci_attr.chip_rev,
		sizeof(bfad->pci_attr.chip_rev));
	memcpy(&iocmd->ioc_attr.pci_attr, &bfad->pci_attr,
		sizeof(struct bfa_ioc_pci_attr_s));

	iocmd->status = BFA_STATUS_OK;
	return 0;
}

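/* bfad_iocmd_port_get_attr - report physical port and base lport attributes */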
static int
bfad_iocmd_port_get_attr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_port_attr_s *iocmd = (struct bfa_bsg_port_attr_s *)cmd;
	struct bfa_lport_attr_s port_attr;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	bfa_fcport_get_attr(&bfad->bfa, &iocmd->attr);
	bfa_fcs_lport_get_attr(&bfad->bfa_fcs.fabric.bport, &port_attr);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	if (iocmd->attr.topology != BFA_PORT_TOPOLOGY_NONE)
		iocmd->attr.pid = port_attr.pid;
	else
		iocmd->attr.pid = 0;

	iocmd->attr.port_type = port_attr.port_type;
	iocmd->attr.loopback = port_attr.loopback;
	iocmd->attr.authfail = port_attr.authfail;
	strncpy(iocmd->attr.port_symname.symname,
		port_attr.port_cfg.sym_name.symname,
		sizeof(port_attr.port_cfg.sym_name.symname));

	iocmd->status = BFA_STATUS_OK;
	return 0;
}

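/* bfad_iocmd_lport_get_attr - report attributes of the given logical port */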
static int
bfad_iocmd_lport_get_attr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_bsg_lport_attr_s *iocmd = (struct bfa_bsg_lport_attr_s *)cmd;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->pwwn);
	if (fcs_port == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
		goto out;
	}

	bfa_fcs_lport_get_attr(fcs_port, &iocmd->port_attr);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}

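/* bfad_iocmd_rport_get_addr - map a remote port WWN to its SCSI address */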
static int
bfad_iocmd_rport_get_addr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_rport_scsi_addr_s *iocmd =
			(struct bfa_bsg_rport_scsi_addr_s *)cmd;
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_fcs_itnim_s *fcs_itnim;
	struct bfad_itnim_s *drv_itnim;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->pwwn);
	if (fcs_port == NULL) {
		bfa_trc(bfad, 0);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
		goto out;
	}

	fcs_itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn);
	if (fcs_itnim == NULL) {
		bfa_trc(bfad, 0);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
		goto out;
	}

	drv_itnim = fcs_itnim->itnim_drv;

	if (drv_itnim && drv_itnim->im_port)
		iocmd->host = drv_itnim->im_port->shost->host_no;
	else {
		bfa_trc(bfad, 0);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
		goto out;
	}

	iocmd->target = drv_itnim->scsi_tgt_id;
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	iocmd->bus = 0;
	iocmd->lun = 0;
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}

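/* bfad_iocmd_fabric_get_lports - return the WWNs of the lports on a fabric */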
static int
bfad_iocmd_fabric_get_lports(struct bfad_s *bfad, void *cmd,
			unsigned int payload_len)
{
	struct bfa_bsg_fabric_get_lports_s *iocmd =
			(struct bfa_bsg_fabric_get_lports_s *)cmd;
	bfa_fcs_vf_t *fcs_vf;
	uint32_t nports = iocmd->nports;
	unsigned long flags;
	void *iocmd_bufptr;

	if (nports == 0) {
		iocmd->status = BFA_STATUS_EINVAL;
		goto out;
	}

	if (bfad_chk_iocmd_sz(payload_len,
		sizeof(struct bfa_bsg_fabric_get_lports_s),
		sizeof(wwn_t[iocmd->nports])) != BFA_STATUS_OK) {
		iocmd->status = BFA_STATUS_VERSION_FAIL;
		goto out;
	}

	iocmd_bufptr = (char *)iocmd +
			sizeof(struct bfa_bsg_fabric_get_lports_s);

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_vf = bfa_fcs_vf_lookup(&bfad->bfa_fcs, iocmd->vf_id);
	if (fcs_vf == NULL) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		iocmd->status = BFA_STATUS_UNKNOWN_VFID;
		goto out;
	}
	bfa_fcs_vf_get_ports(fcs_vf, (wwn_t *)iocmd_bufptr, &nports);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	iocmd->nports = nports;
	iocmd->status = BFA_STATUS_OK;
out:
	return 0;
}

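/* bfad_iocmd_itnim_get_attr - report attributes of the itnim given by rpwwn */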
static int
bfad_iocmd_itnim_get_attr(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_itnim_attr_s *iocmd = (struct bfa_bsg_itnim_attr_s *)cmd;
	struct bfa_fcs_lport_s *fcs_port;
	unsigned long flags;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
				iocmd->vf_id, iocmd->lpwwn);
	if (!fcs_port)
		iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
	else
		iocmd->status = bfa_fcs_itnim_attr_get(fcs_port,
					iocmd->rpwwn, &iocmd->attr);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	return 0;
}

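/* bfad_iocmd_ioc_get_pcifn_cfg - query the PCI function config via ablk */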
int
bfad_iocmd_ioc_get_pcifn_cfg(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_pcifn_cfg_s *iocmd = (struct bfa_bsg_pcifn_cfg_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_ablk_query(&bfad->bfa.modules.ablk,
				&iocmd->pcifn_cfg,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;

	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

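/* bfad_iocmd_pcifn_create - create a PCI function (class, bandwidth) via ablk */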
int
bfad_iocmd_pcifn_create(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_pcifn_s *iocmd = (struct bfa_bsg_pcifn_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_ablk_pf_create(&bfad->bfa.modules.ablk,
				&iocmd->pcifn_id, iocmd->port,
				iocmd->pcifn_class, iocmd->bandwidth,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;

	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_pcifn_delete(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_pcifn_s *iocmd = (struct bfa_bsg_pcifn_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_ablk_pf_delete(&bfad->bfa.modules.ablk,
				iocmd->pcifn_id,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;

	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_pcifn_bw(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_pcifn_s *iocmd = (struct bfa_bsg_pcifn_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_ablk_pf_update(&bfad->bfa.modules.ablk,
				iocmd->pcifn_id, iocmd->bandwidth,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	bfa_trc(bfad, iocmd->status);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;

	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
	bfa_trc(bfad, iocmd->status);
out:
	return 0;
}

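/* bfad_iocmd_adapter_cfg_mode - set adapter mode and max PF/VF counts via ablk */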
int
bfad_iocmd_adapter_cfg_mode(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_adapter_cfg_mode_s *iocmd =
			(struct bfa_bsg_adapter_cfg_mode_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags = 0;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_ablk_adapter_config(&bfad->bfa.modules.ablk,
				iocmd->cfg.mode, iocmd->cfg.max_pf,
				iocmd->cfg.max_vf, bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;

	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

int
bfad_iocmd_port_cfg_mode(struct bfad_s *bfad, void *cmd)
{
	struct bfa_bsg_port_cfg_mode_s *iocmd =
			(struct bfa_bsg_port_cfg_mode_s *)cmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags = 0;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	iocmd->status = bfa_ablk_port_config(&bfad->bfa.modules.ablk,
				iocmd->instance, iocmd->cfg.mode,
				iocmd->cfg.max_pf, iocmd->cfg.max_vf,
				bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	if (iocmd->status != BFA_STATUS_OK)
		goto out;

	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

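/* bfad_iocmd_ablk_optrom - enable or disable the adapter option ROM */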
int
bfad_iocmd_ablk_optrom(struct bfad_s *bfad, unsigned int cmd, void *pcmd)
{
	struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd;
	struct bfad_hal_comp fcomp;
	unsigned long flags;

	init_completion(&fcomp.comp);
	spin_lock_irqsave(&bfad->bfad_lock, flags);
	if (cmd == IOCMD_FLASH_ENABLE_OPTROM)
		iocmd->status = bfa_ablk_optrom_en(&bfad->bfa.modules.ablk,
					bfad_hcb_comp, &fcomp);
	else
		iocmd->status = bfa_ablk_optrom_dis(&bfad->bfa.modules.ablk,
					bfad_hcb_comp, &fcomp);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	if (iocmd->status != BFA_STATUS_OK)
		goto out;

	wait_for_completion(&fcomp.comp);
	iocmd->status = fcomp.status;
out:
	return 0;
}

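/* bfad_iocmd_handler - dispatch an IOCMD_* vendor command to its handler */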
static int
bfad_iocmd_handler(struct bfad_s *bfad, unsigned int cmd, void *iocmd,
		unsigned int payload_len)
{
	int rc = EINVAL;

	switch (cmd) {
	case IOCMD_IOC_GET_INFO:
		rc = bfad_iocmd_ioc_get_info(bfad, iocmd);
		break;
	case IOCMD_IOC_GET_ATTR:
		rc = bfad_iocmd_ioc_get_attr(bfad, iocmd);
		break;
	case IOCMD_PORT_GET_ATTR:
		rc = bfad_iocmd_port_get_attr(bfad, iocmd);
		break;
	case IOCMD_LPORT_GET_ATTR:
		rc = bfad_iocmd_lport_get_attr(bfad, iocmd);
		break;
	case IOCMD_RPORT_GET_ADDR:
		rc = bfad_iocmd_rport_get_addr(bfad, iocmd);
		break;
	case IOCMD_FABRIC_GET_LPORTS:
		rc = bfad_iocmd_fabric_get_lports(bfad, iocmd, payload_len);
		break;
	case IOCMD_ITNIM_GET_ATTR:
		rc = bfad_iocmd_itnim_get_attr(bfad, iocmd);
		break;
	case IOCMD_IOC_PCIFN_CFG:
		rc = bfad_iocmd_ioc_get_pcifn_cfg(bfad, iocmd);
		break;
	case IOCMD_PCIFN_CREATE:
		rc = bfad_iocmd_pcifn_create(bfad, iocmd);
		break;
	case IOCMD_PCIFN_DELETE:
		rc = bfad_iocmd_pcifn_delete(bfad, iocmd);
		break;
	case IOCMD_PCIFN_BW:
		rc = bfad_iocmd_pcifn_bw(bfad, iocmd);
		break;
	case IOCMD_ADAPTER_CFG_MODE:
		rc = bfad_iocmd_adapter_cfg_mode(bfad, iocmd);
		break;
	case IOCMD_PORT_CFG_MODE:
		rc = bfad_iocmd_port_cfg_mode(bfad, iocmd);
		break;
	case IOCMD_FLASH_ENABLE_OPTROM:
	case IOCMD_FLASH_DISABLE_OPTROM:
		rc = bfad_iocmd_ablk_optrom(bfad, cmd, iocmd);
		break;
	default:
		rc = EINVAL;
		break;
	}
	return -rc;
}

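/*
 * bfad_im_bsg_vendor_request - copy the BSG request payload into a linear
 * buffer, run the IOCMD handler on it and copy the result back into the
 * reply sg_list.
 */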
static int
bfad_im_bsg_vendor_request(struct fc_bsg_job *job)
{
	uint32_t vendor_cmd = job->request->rqst_data.h_vendor.vendor_cmd[0];
	struct bfad_im_port_s *im_port =
			(struct bfad_im_port_s *) job->shost->hostdata[0];
	struct bfad_s *bfad = im_port->bfad;
	void *payload_kbuf;
	int rc = -EINVAL;

	/* Allocate a temp buffer to hold the passed in user space command */
	payload_kbuf = kzalloc(job->request_payload.payload_len, GFP_KERNEL);
	if (!payload_kbuf) {
		rc = -ENOMEM;
		goto out;
	}

	/* Copy the sg_list passed in to a linear buffer: holds the cmnd data */
	sg_copy_to_buffer(job->request_payload.sg_list,
			  job->request_payload.sg_cnt, payload_kbuf,
			  job->request_payload.payload_len);

	/* Invoke IOCMD handler - to handle all the vendor command requests */
	rc = bfad_iocmd_handler(bfad, vendor_cmd, payload_kbuf,
				job->request_payload.payload_len);
	if (rc != BFA_STATUS_OK)
		goto error;

	/* Copy the response data to the job->reply_payload sg_list */
	sg_copy_from_buffer(job->reply_payload.sg_list,
			    job->reply_payload.sg_cnt,
			    payload_kbuf,
			    job->reply_payload.payload_len);

	/* free the command buffer */
	kfree(payload_kbuf);

	/* Fill the BSG job reply data */
	job->reply_len = job->reply_payload.payload_len;
	job->reply->reply_payload_rcv_len = job->reply_payload.payload_len;
	job->reply->result = rc;

	job->job_done(job);
	return rc;
error:
	/* free the command buffer */
	kfree(payload_kbuf);
out:
	job->reply->result = rc;
	job->reply_len = sizeof(uint32_t);
	job->reply->reply_payload_rcv_len = 0;
	return rc;
}

/* FC passthru callbacks */
u64
bfad_fcxp_get_req_sgaddr_cb(void *bfad_fcxp, int sgeid)
{
	struct bfad_fcxp *drv_fcxp = bfad_fcxp;
	struct bfa_sge_s *sge;
	u64 addr;

	sge = drv_fcxp->req_sge + sgeid;
	addr = (u64)(size_t) sge->sg_addr;
	return addr;
}

u32
bfad_fcxp_get_req_sglen_cb(void *bfad_fcxp, int sgeid)
{
	struct bfad_fcxp *drv_fcxp = bfad_fcxp;
	struct bfa_sge_s *sge;

	sge = drv_fcxp->req_sge + sgeid;
	return sge->sg_len;
}

u64
bfad_fcxp_get_rsp_sgaddr_cb(void *bfad_fcxp, int sgeid)
{
	struct bfad_fcxp *drv_fcxp = bfad_fcxp;
	struct bfa_sge_s *sge;
	u64 addr;

	sge = drv_fcxp->rsp_sge + sgeid;
	addr = (u64)(size_t) sge->sg_addr;
	return addr;
}

u32
bfad_fcxp_get_rsp_sglen_cb(void *bfad_fcxp, int sgeid)
{
	struct bfad_fcxp *drv_fcxp = bfad_fcxp;
	struct bfa_sge_s *sge;

	sge = drv_fcxp->rsp_sge + sgeid;
	return sge->sg_len;
}

void
bfad_send_fcpt_cb(void *bfad_fcxp, struct bfa_fcxp_s *fcxp, void *cbarg,
		bfa_status_t req_status, u32 rsp_len, u32 resid_len,
		struct fchs_s *rsp_fchs)
{
	struct bfad_fcxp *drv_fcxp = bfad_fcxp;

	drv_fcxp->req_status = req_status;
	drv_fcxp->rsp_len = rsp_len;

	/* bfa_fcxp will be automatically freed by BFA */
	drv_fcxp->bfa_fcxp = NULL;
	complete(&drv_fcxp->comp);
}

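/*
 * bfad_fcxp_map_sg - copy the linear payload into a DMA-coherent buffer
 * and build the SG table used by the FCXP send path.
 */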
struct bfad_buf_info *
bfad_fcxp_map_sg(struct bfad_s *bfad, void *payload_kbuf,
		 uint32_t payload_len, uint32_t *num_sgles)
{
	struct bfad_buf_info *buf_base, *buf_info;
	struct bfa_sge_s *sg_table;
	int sge_num = 1;

	buf_base = kzalloc((sizeof(struct bfad_buf_info) +
			   sizeof(struct bfa_sge_s)) * sge_num, GFP_KERNEL);
	if (!buf_base)
		return NULL;

	sg_table = (struct bfa_sge_s *) (((uint8_t *)buf_base) +
			(sizeof(struct bfad_buf_info) * sge_num));

	/* Allocate dma coherent memory */
	buf_info = buf_base;
	buf_info->size = payload_len;
	buf_info->virt = dma_alloc_coherent(&bfad->pcidev->dev, buf_info->size,
					    &buf_info->phys, GFP_KERNEL);
	if (!buf_info->virt)
		goto out_free_mem;

	/* copy the linear bsg buffer to buf_info */
	memset(buf_info->virt, 0, buf_info->size);
	memcpy(buf_info->virt, payload_kbuf, buf_info->size);

	/*
	 * Setup SG table
	 */
	sg_table->sg_len = buf_info->size;
	sg_table->sg_addr = (void *)(size_t) buf_info->phys;

	*num_sgles = sge_num;

	return buf_base;

out_free_mem:
	kfree(buf_base);
	return NULL;
}

void
bfad_fcxp_free_mem(struct bfad_s *bfad, struct bfad_buf_info *buf_base,
		   uint32_t num_sgles)
{
	int i;
	struct bfad_buf_info *buf_info = buf_base;

	if (buf_base) {
		for (i = 0; i < num_sgles; buf_info++, i++) {
			if (buf_info->virt != NULL)
				dma_free_coherent(&bfad->pcidev->dev,
					buf_info->size, buf_info->virt,
					buf_info->phys);
		}
		kfree(buf_base);
	}
}

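/* bfad_fcxp_bsg_send - allocate a bfa_fcxp and issue the FC passthru request */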
int
bfad_fcxp_bsg_send(struct fc_bsg_job *job, struct bfad_fcxp *drv_fcxp,
		   bfa_bsg_fcpt_t *bsg_fcpt)
{
	struct bfa_fcxp_s *hal_fcxp;
	struct bfad_s *bfad = drv_fcxp->port->bfad;
	unsigned long flags;
	uint8_t lp_tag;

	spin_lock_irqsave(&bfad->bfad_lock, flags);

	/* Allocate bfa_fcxp structure */
	hal_fcxp = bfa_fcxp_alloc(drv_fcxp, &bfad->bfa,
				  drv_fcxp->num_req_sgles,
				  drv_fcxp->num_rsp_sgles,
				  bfad_fcxp_get_req_sgaddr_cb,
				  bfad_fcxp_get_req_sglen_cb,
				  bfad_fcxp_get_rsp_sgaddr_cb,
				  bfad_fcxp_get_rsp_sglen_cb);
	if (!hal_fcxp) {
		bfa_trc(bfad, 0);
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		return BFA_STATUS_ENOMEM;
	}

	drv_fcxp->bfa_fcxp = hal_fcxp;

	lp_tag = bfa_lps_get_tag_from_pid(&bfad->bfa, bsg_fcpt->fchs.s_id);

	bfa_fcxp_send(hal_fcxp, drv_fcxp->bfa_rport, bsg_fcpt->vf_id, lp_tag,
		      bsg_fcpt->cts, bsg_fcpt->cos,
		      job->request_payload.payload_len,
		      &bsg_fcpt->fchs, bfad_send_fcpt_cb, bfad,
		      job->reply_payload.payload_len, bsg_fcpt->tsecs);

	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return BFA_STATUS_OK;
}

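/*
 * bfad_im_bsg_els_ct_request - handle BSG ELS/CT passthru: copy in the user
 * payload, resolve the lport/rport, map the req/rsp buffers and send the
 * request through the FCXP interface.
 */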
int
bfad_im_bsg_els_ct_request(struct fc_bsg_job *job)
{
	struct bfa_bsg_data *bsg_data;
	struct bfad_im_port_s *im_port =
			(struct bfad_im_port_s *) job->shost->hostdata[0];
	struct bfad_s *bfad = im_port->bfad;
	bfa_bsg_fcpt_t *bsg_fcpt;
	struct bfad_fcxp *drv_fcxp;
	struct bfa_fcs_lport_s *fcs_port;
	struct bfa_fcs_rport_s *fcs_rport;
	uint32_t command_type = job->request->msgcode;
	unsigned long flags;
	struct bfad_buf_info *rsp_buf_info;
	void *req_kbuf = NULL, *rsp_kbuf = NULL;
	int rc = -EINVAL;

	job->reply_len = sizeof(uint32_t);	/* At least uint32_t reply_len */
	job->reply->reply_payload_rcv_len = 0;

	/* Get the payload passed in from userspace */
	bsg_data = (struct bfa_bsg_data *) (((char *)job->request) +
				sizeof(struct fc_bsg_request));
	if (bsg_data == NULL)
		goto out;

	/*
	 * Allocate buffer for bsg_fcpt and do a copy_from_user op for payload
	 * buffer of size bsg_data->payload_len
	 */
	bsg_fcpt = (struct bfa_bsg_fcpt_s *)
		   kzalloc(bsg_data->payload_len, GFP_KERNEL);
	if (!bsg_fcpt)
		goto out;

	if (copy_from_user((uint8_t *)bsg_fcpt, bsg_data->payload,
				bsg_data->payload_len)) {
		kfree(bsg_fcpt);
		goto out;
	}

	drv_fcxp = kzalloc(sizeof(struct bfad_fcxp), GFP_KERNEL);
	if (drv_fcxp == NULL) {
		rc = -ENOMEM;
		goto out;
	}

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs, bsg_fcpt->vf_id,
					bsg_fcpt->lpwwn);
	if (fcs_port == NULL) {
		bsg_fcpt->status = BFA_STATUS_UNKNOWN_LWWN;
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		goto out_free_mem;
	}

	/* Check if the port is online before sending FC Passthru cmd */
	if (!bfa_fcs_lport_is_online(fcs_port)) {
		bsg_fcpt->status = BFA_STATUS_PORT_OFFLINE;
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		goto out_free_mem;
	}

	drv_fcxp->port = fcs_port->bfad_port;

	if (drv_fcxp->port->bfad == 0)
		drv_fcxp->port->bfad = bfad;

	/* Fetch the bfa_rport - if nexus needed */
	if (command_type == FC_BSG_HST_ELS_NOLOGIN ||
	    command_type == FC_BSG_HST_CT) {
		/* BSG HST commands: no nexus needed */
		drv_fcxp->bfa_rport = NULL;

	} else if (command_type == FC_BSG_RPT_ELS ||
		   command_type == FC_BSG_RPT_CT) {
		/* BSG RPT commands: nexus needed */
		fcs_rport = bfa_fcs_lport_get_rport_by_pwwn(fcs_port,
							    bsg_fcpt->dpwwn);
		if (fcs_rport == NULL) {
			bsg_fcpt->status = BFA_STATUS_UNKNOWN_RWWN;
			spin_unlock_irqrestore(&bfad->bfad_lock, flags);
			goto out_free_mem;
		}

		drv_fcxp->bfa_rport = fcs_rport->bfa_rport;

	} else { /* Unknown BSG msgcode; return -EINVAL */
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		goto out_free_mem;
	}

	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	/* allocate memory for req / rsp buffers */
	req_kbuf = kzalloc(job->request_payload.payload_len, GFP_KERNEL);
	if (!req_kbuf) {
		printk(KERN_INFO "bfa %s: fcpt request buffer alloc failed\n",
		       bfad->pci_name);
		rc = -ENOMEM;
		goto out_free_mem;
	}

	rsp_kbuf = kzalloc(job->reply_payload.payload_len, GFP_KERNEL);
	if (!rsp_kbuf) {
		printk(KERN_INFO "bfa %s: fcpt response buffer alloc failed\n",
		       bfad->pci_name);
		rc = -ENOMEM;
		goto out_free_mem;
	}

	/* map req sg - copy the sg_list passed in to the linear buffer */
	sg_copy_to_buffer(job->request_payload.sg_list,
			  job->request_payload.sg_cnt, req_kbuf,
			  job->request_payload.payload_len);

	drv_fcxp->reqbuf_info = bfad_fcxp_map_sg(bfad, req_kbuf,
					job->request_payload.payload_len,
					&drv_fcxp->num_req_sgles);
	if (!drv_fcxp->reqbuf_info) {
		printk(KERN_INFO "bfa %s: fcpt request fcxp_map_sg failed\n",
		       bfad->pci_name);
		rc = -ENOMEM;
		goto out_free_mem;
	}

	drv_fcxp->req_sge = (struct bfa_sge_s *)
			    (((uint8_t *)drv_fcxp->reqbuf_info) +
			    (sizeof(struct bfad_buf_info) *
					drv_fcxp->num_req_sgles));

	/* map rsp sg */
	drv_fcxp->rspbuf_info = bfad_fcxp_map_sg(bfad, rsp_kbuf,
					job->reply_payload.payload_len,
					&drv_fcxp->num_rsp_sgles);
	if (!drv_fcxp->rspbuf_info) {
		printk(KERN_INFO "bfa %s: fcpt response fcxp_map_sg failed\n",
		       bfad->pci_name);
		rc = -ENOMEM;
		goto out_free_mem;
	}

	rsp_buf_info = (struct bfad_buf_info *)drv_fcxp->rspbuf_info;
	drv_fcxp->rsp_sge = (struct bfa_sge_s *)
			    (((uint8_t *)drv_fcxp->rspbuf_info) +
			    (sizeof(struct bfad_buf_info) *
					drv_fcxp->num_rsp_sgles));

	/* fcxp send */
	init_completion(&drv_fcxp->comp);
	rc = bfad_fcxp_bsg_send(job, drv_fcxp, bsg_fcpt);
	if (rc == BFA_STATUS_OK) {
		wait_for_completion(&drv_fcxp->comp);
		bsg_fcpt->status = drv_fcxp->req_status;
	} else {
		bsg_fcpt->status = rc;
		goto out_free_mem;
	}

	/* fill the job->reply data */
	if (drv_fcxp->req_status == BFA_STATUS_OK) {
		job->reply_len = drv_fcxp->rsp_len;
		job->reply->reply_payload_rcv_len = drv_fcxp->rsp_len;
		job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
	} else {
		job->reply->reply_payload_rcv_len =
					sizeof(struct fc_bsg_ctels_reply);
		job->reply_len = sizeof(uint32_t);
		job->reply->reply_data.ctels_reply.status =
						FC_CTELS_STATUS_REJECT;
	}

	/* Copy the response data to the reply_payload sg list */
	sg_copy_from_buffer(job->reply_payload.sg_list,
			    job->reply_payload.sg_cnt,
			    (uint8_t *)rsp_buf_info->virt,
			    job->reply_payload.payload_len);

out_free_mem:
	bfad_fcxp_free_mem(bfad, drv_fcxp->rspbuf_info,
			   drv_fcxp->num_rsp_sgles);
	bfad_fcxp_free_mem(bfad, drv_fcxp->reqbuf_info,
			   drv_fcxp->num_req_sgles);
	kfree(req_kbuf);
	kfree(rsp_kbuf);

	/* Need a copy to user op */
	if (copy_to_user(bsg_data->payload, (void *) bsg_fcpt,
			 bsg_data->payload_len))
		rc = -EIO;

	kfree(bsg_fcpt);
	kfree(drv_fcxp);
out:
	job->reply->result = rc;

	if (rc == BFA_STATUS_OK)
		job->job_done(job);

	return rc;
}

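/* bfad_im_bsg_request - entry point for FC BSG requests (vendor and ELS/CT) */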
int
bfad_im_bsg_request(struct fc_bsg_job *job)
{
	uint32_t rc = BFA_STATUS_OK;

	/* Increment the bfa module refcnt - if bsg request is in service */
	bfad_im_bsg_get_kobject(job);

	switch (job->request->msgcode) {
	case FC_BSG_HST_VENDOR:
		/* Process BSG HST Vendor requests */
		rc = bfad_im_bsg_vendor_request(job);
		break;
	case FC_BSG_HST_ELS_NOLOGIN:
	case FC_BSG_RPT_ELS:
	case FC_BSG_HST_CT:
	case FC_BSG_RPT_CT:
		/* Process BSG ELS/CT commands */
		rc = bfad_im_bsg_els_ct_request(job);
		break;
	default:
		job->reply->result = rc = -EINVAL;
		job->reply->reply_payload_rcv_len = 0;
		break;
	}

	/* Decrement the bfa module refcnt - on completion of bsg request */
	bfad_im_bsg_put_kobject(job);

	return rc;
}

int
bfad_im_bsg_timeout(struct fc_bsg_job *job)
{
	/* Don't complete the BSG job request - return -EAGAIN
	 * to reset the bsg job timeout: for ELS/CT pass thru we
	 * already have a timer to track the request.
	 */
	return -EAGAIN;
}