bfad_im.c
/*
 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 *
 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */

/**
 *  bfad_im.c Linux driver IM module.
 */

#include "bfad_drv.h"
#include "bfad_im.h"
#include "bfad_trcmod.h"
#include "bfa_cb_ioim_macros.h"
#include <fcb/bfa_fcb_fcpim.h>

BFA_TRC_FILE(LDRV, IM);

DEFINE_IDR(bfad_im_port_index);
struct scsi_transport_template *bfad_im_scsi_transport_template;

static void bfad_im_itnim_work_handler(struct work_struct *work);
static int bfad_im_queuecommand(struct scsi_cmnd *cmnd,
		void (*done)(struct scsi_cmnd *));
static int bfad_im_slave_alloc(struct scsi_device *sdev);
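
/**
 * BFA IO completion callback: translate the BFA/BFI completion status into
 * a SCSI midlayer result, copy any sense data, adjust the LUN queue depth
 * on good or queue-full completions, then complete the command.
 */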
void
bfa_cb_ioim_done(void *drv, struct bfad_ioim_s *dio,
		enum bfi_ioim_status io_status, u8 scsi_status,
		int sns_len, u8 *sns_info, s32 residue)
{
	struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio;
	struct bfad_s *bfad = drv;
	struct bfad_itnim_data_s *itnim_data;
	struct bfad_itnim_s *itnim;

	switch (io_status) {
	case BFI_IOIM_STS_OK:
		bfa_trc(bfad, scsi_status);
		cmnd->result = ScsiResult(DID_OK, scsi_status);
		scsi_set_resid(cmnd, 0);

		if (sns_len > 0) {
			bfa_trc(bfad, sns_len);
			if (sns_len > SCSI_SENSE_BUFFERSIZE)
				sns_len = SCSI_SENSE_BUFFERSIZE;
			memcpy(cmnd->sense_buffer, sns_info, sns_len);
		}
		if (residue > 0)
			scsi_set_resid(cmnd, residue);
		break;

	case BFI_IOIM_STS_ABORTED:
	case BFI_IOIM_STS_TIMEDOUT:
	case BFI_IOIM_STS_PATHTOV:
	default:
		cmnd->result = ScsiResult(DID_ERROR, 0);
	}

	/* Unmap DMA, if host is NULL, it means a scsi passthru cmd */
	if (cmnd->device->host != NULL)
		scsi_dma_unmap(cmnd);

	cmnd->host_scribble = NULL;
	bfa_trc(bfad, cmnd->result);

	itnim_data = cmnd->device->hostdata;
	if (itnim_data) {
		itnim = itnim_data->itnim;
		if (!cmnd->result && itnim &&
			 (bfa_lun_queue_depth > cmnd->device->queue_depth)) {
			/* Queue depth adjustment for good status completion */
			bfad_os_ramp_up_qdepth(itnim, cmnd->device);
		} else if (cmnd->result == SAM_STAT_TASK_SET_FULL && itnim) {
			/* qfull handling */
			bfad_os_handle_qfull(itnim, cmnd->device);
		}
	}

	cmnd->scsi_done(cmnd);
}
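
/**
 * BFA IO good-completion fast path: report DID_OK/GOOD to the midlayer,
 * ramp the LUN queue depth back up if it was previously lowered, and
 * complete the command.
 */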
void
bfa_cb_ioim_good_comp(void *drv, struct bfad_ioim_s *dio)
{
	struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio;
	struct bfad_itnim_data_s *itnim_data;
	struct bfad_itnim_s *itnim;

	cmnd->result = ScsiResult(DID_OK, SCSI_STATUS_GOOD);

	/* Unmap DMA, if host is NULL, it means a scsi passthru cmd */
	if (cmnd->device->host != NULL)
		scsi_dma_unmap(cmnd);

	cmnd->host_scribble = NULL;

	/* Queue depth adjustment */
	if (bfa_lun_queue_depth > cmnd->device->queue_depth) {
		itnim_data = cmnd->device->hostdata;
		if (itnim_data) {
			itnim = itnim_data->itnim;
			if (itnim)
				bfad_os_ramp_up_qdepth(itnim, cmnd->device);
		}
	}

	cmnd->scsi_done(cmnd);
}
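
/**
 * BFA IO abort callback: the IO was aborted in the HAL/firmware, so fail
 * it back to the midlayer with DID_ERROR.
 */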
void
bfa_cb_ioim_abort(void *drv, struct bfad_ioim_s *dio)
{
	struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio;
	struct bfad_s *bfad = drv;

	cmnd->result = ScsiResult(DID_ERROR, 0);

	/* Unmap DMA, if host is NULL, it means a scsi passthru cmd */
	if (cmnd->device->host != NULL)
		scsi_dma_unmap(cmnd);

	bfa_trc(bfad, cmnd->result);
	cmnd->host_scribble = NULL;
}
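
/**
 * BFA task management completion callback: record the TM status in
 * cmnd->SCp.Status, set IO_DONE_BIT and wake up the waiter (the reset
 * handlers wait on a local wait queue pointed to by cmnd->SCp.ptr).
 */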
void
bfa_cb_tskim_done(void *bfad, struct bfad_tskim_s *dtsk,
		enum bfi_tskim_status tsk_status)
{
	struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dtsk;
	wait_queue_head_t *wq;

	cmnd->SCp.Status |= tsk_status << 1;
	set_bit(IO_DONE_BIT, (unsigned long *)&cmnd->SCp.Status);
	wq = (wait_queue_head_t *) cmnd->SCp.ptr;
	cmnd->SCp.ptr = NULL;

	if (wq)
		wake_up(wq);
}
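
/**
 * BFA IO resource free callback -- nothing to do for Linux.
 */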
void
bfa_cb_ioim_resfree(void *drv)
{
}

/**
 *  Scsi_Host_template SCSI host template
 */
/**
 * Scsi_Host template entry, returns BFAD PCI info.
 */
static const char *
bfad_im_info(struct Scsi_Host *shost)
{
	static char bfa_buf[256];
	struct bfad_im_port_s *im_port =
			(struct bfad_im_port_s *) shost->hostdata[0];
	struct bfad_s *bfad = im_port->bfad;
	char model[BFA_ADAPTER_MODEL_NAME_LEN];

	bfa_get_adapter_model(&bfad->bfa, model);

	memset(bfa_buf, 0, sizeof(bfa_buf));
	snprintf(bfa_buf, sizeof(bfa_buf),
		"Brocade FC/FCOE Adapter, model: %s hwpath: %s driver: %s",
		model, bfad->pci_name, BFAD_DRIVER_VERSION);
	return bfa_buf;
}
/**
 * Scsi_Host template entry, aborts the specified SCSI command.
 *
 * Returns: SUCCESS or FAILED.
 */
static int
bfad_im_abort_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct bfad_im_port_s *im_port =
			(struct bfad_im_port_s *) shost->hostdata[0];
	struct bfad_s *bfad = im_port->bfad;
	struct bfa_ioim_s *hal_io;
	unsigned long flags;
	u32 timeout;
	int rc = FAILED;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	hal_io = (struct bfa_ioim_s *) cmnd->host_scribble;
	if (!hal_io) {
		/* IO has been completed, return success */
		rc = SUCCESS;
		goto out;
	}
	if (hal_io->dio != (struct bfad_ioim_s *) cmnd) {
		rc = FAILED;
		goto out;
	}

	bfa_trc(bfad, hal_io->iotag);
	bfa_log(bfad->logmod, BFA_LOG_LINUX_SCSI_ABORT,
		im_port->shost->host_no, cmnd, hal_io->iotag);
	bfa_ioim_abort(hal_io);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	/* Need to wait until the command gets aborted */
	timeout = 10;
	while ((struct bfa_ioim_s *) cmnd->host_scribble == hal_io) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(timeout);
		if (timeout < 4 * HZ)
			timeout *= 2;
	}

	cmnd->scsi_done(cmnd);
	bfa_trc(bfad, hal_io->iotag);
	bfa_log(bfad->logmod, BFA_LOG_LINUX_SCSI_ABORT_COMP,
		im_port->shost->host_no, cmnd, hal_io->iotag);
	return SUCCESS;
out:
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	return rc;
}
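
/**
 * Allocate and start a target reset task management command for the given
 * itnim; completion is reported through bfa_cb_tskim_done() using
 * cmnd->SCp.
 */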
static bfa_status_t
bfad_im_target_reset_send(struct bfad_s *bfad, struct scsi_cmnd *cmnd,
			struct bfad_itnim_s *itnim)
{
	struct bfa_tskim_s *tskim;
	struct bfa_itnim_s *bfa_itnim;
	bfa_status_t rc = BFA_STATUS_OK;

	bfa_itnim = bfa_fcs_itnim_get_halitn(&itnim->fcs_itnim);
	tskim = bfa_tskim_alloc(&bfad->bfa, (struct bfad_tskim_s *) cmnd);
	if (!tskim) {
		BFA_DEV_PRINTF(bfad, BFA_ERR,
			"target reset, fail to allocate tskim\n");
		rc = BFA_STATUS_FAILED;
		goto out;
	}

	/*
	 * Set host_scribble to NULL to avoid aborting a task command if it
	 * happens.
	 */
	cmnd->host_scribble = NULL;
	cmnd->SCp.Status = 0;
	bfa_itnim = bfa_fcs_itnim_get_halitn(&itnim->fcs_itnim);
	bfa_tskim_start(tskim, bfa_itnim, (lun_t)0,
			FCP_TM_TARGET_RESET, BFAD_TARGET_RESET_TMO);
out:
	return rc;
}
/**
 * Scsi_Host template entry, resets a LUN and aborts all of its commands.
 *
 * Returns: SUCCESS or FAILED.
 *
 */
static int
bfad_im_reset_lun_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct bfad_im_port_s *im_port =
			(struct bfad_im_port_s *) shost->hostdata[0];
	struct bfad_itnim_data_s *itnim_data = cmnd->device->hostdata;
	struct bfad_s *bfad = im_port->bfad;
	struct bfa_tskim_s *tskim;
	struct bfad_itnim_s *itnim;
	struct bfa_itnim_s *bfa_itnim;
	DECLARE_WAIT_QUEUE_HEAD(wq);
	int rc = SUCCESS;
	unsigned long flags;
	enum bfi_tskim_status task_status;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	itnim = itnim_data->itnim;
	if (!itnim) {
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		rc = FAILED;
		goto out;
	}

	tskim = bfa_tskim_alloc(&bfad->bfa, (struct bfad_tskim_s *) cmnd);
	if (!tskim) {
		BFA_DEV_PRINTF(bfad, BFA_ERR,
				"LUN reset, fail to allocate tskim");
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		rc = FAILED;
		goto out;
	}

	/**
	 * Set host_scribble to NULL to avoid aborting a task command if it
	 * happens.
	 */
	cmnd->host_scribble = NULL;
	cmnd->SCp.ptr = (char *)&wq;
	cmnd->SCp.Status = 0;
	bfa_itnim = bfa_fcs_itnim_get_halitn(&itnim->fcs_itnim);
	bfa_tskim_start(tskim, bfa_itnim,
			bfad_int_to_lun(cmnd->device->lun),
			FCP_TM_LUN_RESET, BFAD_LUN_RESET_TMO);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	wait_event(wq, test_bit(IO_DONE_BIT,
			(unsigned long *)&cmnd->SCp.Status));

	task_status = cmnd->SCp.Status >> 1;
	if (task_status != BFI_TSKIM_STS_OK) {
		BFA_DEV_PRINTF(bfad, BFA_ERR, "LUN reset failure, status: %d\n",
			task_status);
		rc = FAILED;
	}
out:
	return rc;
}
/**
 * Scsi_Host template entry, resets the bus and aborts all commands.
 */
static int
bfad_im_reset_bus_handler(struct scsi_cmnd *cmnd)
{
	struct Scsi_Host *shost = cmnd->device->host;
	struct bfad_im_port_s *im_port =
			(struct bfad_im_port_s *) shost->hostdata[0];
	struct bfad_s *bfad = im_port->bfad;
	struct bfad_itnim_s *itnim;
	unsigned long flags;
	u32 i, rc, err_cnt = 0;
	DECLARE_WAIT_QUEUE_HEAD(wq);
	enum bfi_tskim_status task_status;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	for (i = 0; i < MAX_FCP_TARGET; i++) {
		itnim = bfad_os_get_itnim(im_port, i);
		if (itnim) {
			cmnd->SCp.ptr = (char *)&wq;
			rc = bfad_im_target_reset_send(bfad, cmnd, itnim);
			if (rc != BFA_STATUS_OK) {
				err_cnt++;
				continue;
			}

			/* wait for the target reset to complete */
			spin_unlock_irqrestore(&bfad->bfad_lock, flags);
			wait_event(wq, test_bit(IO_DONE_BIT,
					(unsigned long *)&cmnd->SCp.Status));
			spin_lock_irqsave(&bfad->bfad_lock, flags);

			task_status = cmnd->SCp.Status >> 1;
			if (task_status != BFI_TSKIM_STS_OK) {
				BFA_DEV_PRINTF(bfad, BFA_ERR,
					"target reset failure,"
					" status: %d\n", task_status);
				err_cnt++;
			}
		}
	}
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	if (err_cnt)
		return FAILED;

	return SUCCESS;
}
/**
 * Scsi_Host template entry slave_destroy.
 */
static void
bfad_im_slave_destroy(struct scsi_device *sdev)
{
	sdev->hostdata = NULL;
	return;
}

/**
 *  BFA FCS itnim callbacks
 */

/**
 * BFA FCS itnim alloc callback, after successful PRLI
 * Context: Interrupt
 */
void
bfa_fcb_itnim_alloc(struct bfad_s *bfad, struct bfa_fcs_itnim_s **itnim,
		    struct bfad_itnim_s **itnim_drv)
{
	*itnim_drv = kzalloc(sizeof(struct bfad_itnim_s), GFP_ATOMIC);
	if (*itnim_drv == NULL)
		return;

	(*itnim_drv)->im = bfad->im;
	*itnim = &(*itnim_drv)->fcs_itnim;
	(*itnim_drv)->state = ITNIM_STATE_NONE;

	/*
	 * Initialize the itnim_work
	 */
	INIT_WORK(&(*itnim_drv)->itnim_work, bfad_im_itnim_work_handler);
	bfad->bfad_flags |= BFAD_RPORT_ONLINE;
}
/**
 * BFA FCS itnim free callback.
 * Context: Interrupt. bfad_lock is held
 */
void
bfa_fcb_itnim_free(struct bfad_s *bfad, struct bfad_itnim_s *itnim_drv)
{
	struct bfad_port_s *port;
	wwn_t wwpn;
	u32 fcid;
	char wwpn_str[32], fcid_str[16];

	/* online to free state transition should not happen */
	bfa_assert(itnim_drv->state != ITNIM_STATE_ONLINE);

	itnim_drv->queue_work = 1;
	/* offline request is not yet done, use the same request to free */
	if (itnim_drv->state == ITNIM_STATE_OFFLINE_PENDING)
		itnim_drv->queue_work = 0;

	itnim_drv->state = ITNIM_STATE_FREE;
	port = bfa_fcs_itnim_get_drvport(&itnim_drv->fcs_itnim);
	itnim_drv->im_port = port->im_port;
	wwpn = bfa_fcs_itnim_get_pwwn(&itnim_drv->fcs_itnim);
	fcid = bfa_fcs_itnim_get_fcid(&itnim_drv->fcs_itnim);
	wwn2str(wwpn_str, wwpn);
	fcid2str(fcid_str, fcid);
	bfa_log(bfad->logmod, BFA_LOG_LINUX_ITNIM_FREE,
		port->im_port->shost->host_no,
		fcid_str, wwpn_str);
	bfad_os_itnim_process(itnim_drv);
}
/**
 * BFA FCS itnim online callback.
 * Context: Interrupt. bfad_lock is held
 */
void
bfa_fcb_itnim_online(struct bfad_itnim_s *itnim_drv)
{
	struct bfad_port_s *port;

	itnim_drv->bfa_itnim = bfa_fcs_itnim_get_halitn(&itnim_drv->fcs_itnim);
	port = bfa_fcs_itnim_get_drvport(&itnim_drv->fcs_itnim);
	itnim_drv->state = ITNIM_STATE_ONLINE;
	itnim_drv->queue_work = 1;
	itnim_drv->im_port = port->im_port;
	bfad_os_itnim_process(itnim_drv);
}

/**
 * BFA FCS itnim offline callback.
 * Context: Interrupt. bfad_lock is held
 */
void
bfa_fcb_itnim_offline(struct bfad_itnim_s *itnim_drv)
{
	struct bfad_port_s *port;
	struct bfad_s *bfad;

	port = bfa_fcs_itnim_get_drvport(&itnim_drv->fcs_itnim);
	bfad = port->bfad;
	if ((bfad->pport.flags & BFAD_PORT_DELETE) ||
		 (port->flags & BFAD_PORT_DELETE)) {
		itnim_drv->state = ITNIM_STATE_OFFLINE;
		return;
	}
	itnim_drv->im_port = port->im_port;
	itnim_drv->state = ITNIM_STATE_OFFLINE_PENDING;
	itnim_drv->queue_work = 1;
	bfad_os_itnim_process(itnim_drv);
}

/**
 * BFA FCS itnim timeout callback.
 * Context: Interrupt. bfad_lock is held
 */
void bfa_fcb_itnim_tov(struct bfad_itnim_s *itnim)
{
	itnim->state = ITNIM_STATE_TIMEOUT;
}

/**
 * Path TOV processing begin notification -- dummy for linux
 */
void
bfa_fcb_itnim_tov_begin(struct bfad_itnim_s *itnim)
{
}
/**
 * Allocate a Scsi_Host for a port.
 */
int
bfad_im_scsi_host_alloc(struct bfad_s *bfad, struct bfad_im_port_s *im_port)
{
	int error = 1;

	if (!idr_pre_get(&bfad_im_port_index, GFP_KERNEL)) {
		printk(KERN_WARNING "idr_pre_get failure\n");
		goto out;
	}

	error = idr_get_new(&bfad_im_port_index, im_port,
					 &im_port->idr_id);
	if (error) {
		printk(KERN_WARNING "idr_get_new failure\n");
		goto out;
	}

	im_port->shost = bfad_os_scsi_host_alloc(im_port, bfad);
	if (!im_port->shost) {
		error = 1;
		goto out_free_idr;
	}

	im_port->shost->hostdata[0] = (unsigned long)im_port;
	im_port->shost->unique_id = im_port->idr_id;
	im_port->shost->this_id = -1;
	im_port->shost->max_id = MAX_FCP_TARGET;
	im_port->shost->max_lun = MAX_FCP_LUN;
	im_port->shost->max_cmd_len = 16;
	im_port->shost->can_queue = bfad->cfg_data.ioc_queue_depth;
	im_port->shost->transportt = bfad_im_scsi_transport_template;

	error = bfad_os_scsi_add_host(im_port->shost, im_port, bfad);
	if (error) {
		printk(KERN_WARNING "bfad_os_scsi_add_host failure %d\n",
							error);
		goto out_fc_rel;
	}

	/* setup host fixed attributes if the Linux kernel supports them */
	bfad_os_fc_host_init(im_port);

	return 0;

out_fc_rel:
	scsi_host_put(im_port->shost);
out_free_idr:
	idr_remove(&bfad_im_port_index, im_port->idr_id);
out:
	return error;
}
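
/**
 * Free the Scsi_Host allocated for a port: unregister it from the FC
 * transport and SCSI midlayer, drop the host reference and release the
 * IDR index.
 */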
void
bfad_im_scsi_host_free(struct bfad_s *bfad, struct bfad_im_port_s *im_port)
{
	unsigned long flags;

	bfa_trc(bfad, bfad->inst_no);
	bfa_log(bfad->logmod, BFA_LOG_LINUX_SCSI_HOST_FREE,
			im_port->shost->host_no);

	fc_remove_host(im_port->shost);
	scsi_remove_host(im_port->shost);
	scsi_host_put(im_port->shost);

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	idr_remove(&bfad_im_port_index, im_port->idr_id);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
}
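
/**
 * Deferred port delete work: free the SCSI host, clean up FCP bindings and
 * free the im_port itself.
 * Context: kernel (workqueue)
 */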
static void
bfad_im_port_delete_handler(struct work_struct *work)
{
	struct bfad_im_port_s *im_port =
		container_of(work, struct bfad_im_port_s, port_delete_work);

	bfad_im_scsi_host_free(im_port->bfad, im_port);
	bfad_im_port_clean(im_port);
	kfree(im_port);
}
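
/**
 * Allocate and initialize the IM port structure for a new BFAD port.
 */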
bfa_status_t
bfad_im_port_new(struct bfad_s *bfad, struct bfad_port_s *port)
{
	int rc = BFA_STATUS_OK;
	struct bfad_im_port_s *im_port;

	im_port = kzalloc(sizeof(struct bfad_im_port_s), GFP_ATOMIC);
	if (im_port == NULL) {
		rc = BFA_STATUS_ENOMEM;
		goto ext;
	}

	port->im_port = im_port;
	im_port->port = port;
	im_port->bfad = bfad;

	INIT_WORK(&im_port->port_delete_work, bfad_im_port_delete_handler);
	INIT_LIST_HEAD(&im_port->itnim_mapped_list);
	INIT_LIST_HEAD(&im_port->binding_list);

ext:
	return rc;
}
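
/**
 * Schedule the deferred delete of an IM port on the driver workqueue.
 */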
void
bfad_im_port_delete(struct bfad_s *bfad, struct bfad_port_s *port)
{
	struct bfad_im_port_s *im_port = port->im_port;

	queue_work(bfad->im->drv_workq,
		&im_port->port_delete_work);
}
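
/**
 * Release all FCP persistent bindings queued on the port; the
 * itnim_mapped_list is expected to be empty by the time this runs.
 */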
void
bfad_im_port_clean(struct bfad_im_port_s *im_port)
{
	struct bfad_fcp_binding *bp, *bp_new;
	unsigned long flags;
	struct bfad_s *bfad = im_port->bfad;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	list_for_each_entry_safe(bp, bp_new, &im_port->binding_list,
					list_entry) {
		list_del(&bp->list_entry);
		kfree(bp);
	}

	/* the itnim_mapped_list must be empty at this time */
	bfa_assert(list_empty(&im_port->itnim_mapped_list));

	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
}

void
bfad_im_port_online(struct bfad_s *bfad, struct bfad_port_s *port)
{
}

void
bfad_im_port_offline(struct bfad_s *bfad, struct bfad_port_s *port)
{
}
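
/**
 * IM module probe: allocate the per-instance IM structure and create the
 * driver workqueue.
 */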
bfa_status_t
bfad_im_probe(struct bfad_s *bfad)
{
	struct bfad_im_s *im;
	bfa_status_t rc = BFA_STATUS_OK;

	im = kzalloc(sizeof(struct bfad_im_s), GFP_KERNEL);
	if (im == NULL) {
		rc = BFA_STATUS_ENOMEM;
		goto ext;
	}

	bfad->im = im;
	im->bfad = bfad;

	if (bfad_os_thread_workq(bfad) != BFA_STATUS_OK) {
		kfree(im);
		rc = BFA_STATUS_FAILED;
	}

ext:
	return rc;
}

void
bfad_im_probe_undo(struct bfad_s *bfad)
{
	if (bfad->im) {
		bfad_os_destroy_workq(bfad->im);
		kfree(bfad->im);
		bfad->im = NULL;
	}
}
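
/**
 * Register the Scsi_Host with the midlayer; the physical base port hangs
 * off the PCI device, vports hang off the base port's host device.
 */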
int
bfad_os_scsi_add_host(struct Scsi_Host *shost, struct bfad_im_port_s *im_port,
			struct bfad_s *bfad)
{
	struct device *dev;

	if (im_port->port->pvb_type == BFAD_PORT_PHYS_BASE)
		dev = &bfad->pcidev->dev;
	else
		dev = &bfad->pport.im_port->shost->shost_gendev;

	return scsi_add_host(shost, dev);
}
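
/**
 * Allocate a Scsi_Host using the physical-port or vport template and the
 * configured SG table size.
 */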
struct Scsi_Host *
bfad_os_scsi_host_alloc(struct bfad_im_port_s *im_port, struct bfad_s *bfad)
{
	struct scsi_host_template *sht;

	if (im_port->port->pvb_type == BFAD_PORT_PHYS_BASE)
		sht = &bfad_im_scsi_host_template;
	else
		sht = &bfad_im_vport_template;

	sht->sg_tablesize = bfad->cfg_data.io_max_sge;

	return scsi_host_alloc(sht, sizeof(unsigned long));
}
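
/**
 * Flush pending itnim/port work and free the SCSI host for a port.
 */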
void
bfad_os_scsi_host_free(struct bfad_s *bfad, struct bfad_im_port_s *im_port)
{
	flush_workqueue(bfad->im->drv_workq);
	bfad_im_scsi_host_free(im_port->bfad, im_port);
	bfad_im_port_clean(im_port);
	kfree(im_port);
}

void
bfad_os_destroy_workq(struct bfad_im_s *im)
{
	if (im && im->drv_workq) {
		destroy_workqueue(im->drv_workq);
		im->drv_workq = NULL;
	}
}
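
/**
 * Create the per-instance single-threaded driver workqueue
 * ("bfad_wq_<inst>") used for itnim and port delete work.
 */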
bfa_status_t
bfad_os_thread_workq(struct bfad_s *bfad)
{
	struct bfad_im_s *im = bfad->im;

	bfa_trc(bfad, 0);
	snprintf(im->drv_workq_name, BFAD_KOBJ_NAME_LEN, "bfad_wq_%d",
		 bfad->inst_no);
	im->drv_workq = create_singlethread_workqueue(im->drv_workq_name);
	if (!im->drv_workq)
		return BFA_STATUS_FAILED;

	return BFA_STATUS_OK;
}
/**
 * Scsi_Host template entry.
 *
 * Description:
 * OS entry point to adjust the queue_depths on a per-device basis.
 * Called once per device during the bus scan.
 * Returns non-zero on failure.
 */
static int
bfad_im_slave_configure(struct scsi_device *sdev)
{
	if (sdev->tagged_supported)
		scsi_activate_tcq(sdev, bfa_lun_queue_depth);
	else
		scsi_deactivate_tcq(sdev, bfa_lun_queue_depth);

	return 0;
}
struct scsi_host_template bfad_im_scsi_host_template = {
	.module = THIS_MODULE,
	.name = BFAD_DRIVER_NAME,
	.info = bfad_im_info,
	.queuecommand = bfad_im_queuecommand,
	.eh_abort_handler = bfad_im_abort_handler,
	.eh_device_reset_handler = bfad_im_reset_lun_handler,
	.eh_bus_reset_handler = bfad_im_reset_bus_handler,

	.slave_alloc = bfad_im_slave_alloc,
	.slave_configure = bfad_im_slave_configure,
	.slave_destroy = bfad_im_slave_destroy,

	.this_id = -1,
	.sg_tablesize = BFAD_IO_MAX_SGE,
	.cmd_per_lun = 3,
	.use_clustering = ENABLE_CLUSTERING,
	.shost_attrs = bfad_im_host_attrs,
	.max_sectors = 0xFFFF,
};

struct scsi_host_template bfad_im_vport_template = {
	.module = THIS_MODULE,
	.name = BFAD_DRIVER_NAME,
	.info = bfad_im_info,
	.queuecommand = bfad_im_queuecommand,
	.eh_abort_handler = bfad_im_abort_handler,
	.eh_device_reset_handler = bfad_im_reset_lun_handler,
	.eh_bus_reset_handler = bfad_im_reset_bus_handler,

	.slave_alloc = bfad_im_slave_alloc,
	.slave_configure = bfad_im_slave_configure,
	.slave_destroy = bfad_im_slave_destroy,

	.this_id = -1,
	.sg_tablesize = BFAD_IO_MAX_SGE,
	.cmd_per_lun = 3,
	.use_clustering = ENABLE_CLUSTERING,
	.shost_attrs = bfad_im_vport_attrs,
	.max_sectors = 0xFFFF,
};

void
bfad_im_probe_post(struct bfad_im_s *im)
{
	flush_workqueue(im->drv_workq);
}
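
/**
 * Module-wide IM init: attach the FC transport template used by all
 * Scsi_Hosts created by this driver.
 */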
bfa_status_t
bfad_im_module_init(void)
{
	bfad_im_scsi_transport_template =
		fc_attach_transport(&bfad_im_fc_function_template);
	if (!bfad_im_scsi_transport_template)
		return BFA_STATUS_ENOMEM;

	return BFA_STATUS_OK;
}

void
bfad_im_module_exit(void)
{
	if (bfad_im_scsi_transport_template)
		fc_release_transport(bfad_im_scsi_transport_template);
}

void
bfad_os_itnim_process(struct bfad_itnim_s *itnim_drv)
{
	struct bfad_im_s *im = itnim_drv->im;

	if (itnim_drv->queue_work)
		queue_work(im->drv_workq, &itnim_drv->itnim_work);
}
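
/**
 * Ramp the queue depth of all LUNs on this target back up by one, at most
 * once per BFA_QUEUE_FULL_RAMP_UP_TIME interval and only if no queue-full
 * condition was seen within that interval.
 */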
void
bfad_os_ramp_up_qdepth(struct bfad_itnim_s *itnim, struct scsi_device *sdev)
{
	struct scsi_device *tmp_sdev;

	if (((jiffies - itnim->last_ramp_up_time) >
		BFA_QUEUE_FULL_RAMP_UP_TIME * HZ) &&
		((jiffies - itnim->last_queue_full_time) >
		BFA_QUEUE_FULL_RAMP_UP_TIME * HZ)) {
		shost_for_each_device(tmp_sdev, sdev->host) {
			if (bfa_lun_queue_depth > tmp_sdev->queue_depth) {
				if (tmp_sdev->id != sdev->id)
					continue;
				if (tmp_sdev->ordered_tags)
					scsi_adjust_queue_depth(tmp_sdev,
						MSG_ORDERED_TAG,
						tmp_sdev->queue_depth + 1);
				else
					scsi_adjust_queue_depth(tmp_sdev,
						MSG_SIMPLE_TAG,
						tmp_sdev->queue_depth + 1);

				itnim->last_ramp_up_time = jiffies;
			}
		}
	}
}
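
/**
 * Queue-full handling: record the time and lower the queue depth of every
 * LUN on this target via scsi_track_queue_full().
 */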
void
bfad_os_handle_qfull(struct bfad_itnim_s *itnim, struct scsi_device *sdev)
{
	struct scsi_device *tmp_sdev;

	itnim->last_queue_full_time = jiffies;

	shost_for_each_device(tmp_sdev, sdev->host) {
		if (tmp_sdev->id != sdev->id)
			continue;
		scsi_track_queue_full(tmp_sdev, tmp_sdev->queue_depth - 1);
	}
}
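
/**
 * Look up the itnim mapped to the given SCSI target id on this port;
 * returns NULL if no target with that id is currently mapped.
 */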
struct bfad_itnim_s *
bfad_os_get_itnim(struct bfad_im_port_s *im_port, int id)
{
	struct bfad_itnim_s *itnim = NULL;

	/* Search the mapped list for this target ID */
	list_for_each_entry(itnim, &im_port->itnim_mapped_list, list_entry) {
		if (id == itnim->scsi_tgt_id)
			return itnim;
	}

	return NULL;
}

/**
 * Scsi_Host template entry slave_alloc
 */
static int
bfad_im_slave_alloc(struct scsi_device *sdev)
{
	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));

	if (!rport || fc_remote_port_chkready(rport))
		return -ENXIO;

	sdev->hostdata = rport->dd_data;

	return 0;
}
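
/**
 * Initialize the fc_host attributes (WWNN/WWPN, supported classes, FC-4
 * types, symbolic name, supported speeds and max frame size) for a newly
 * added Scsi_Host.
 */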
void
bfad_os_fc_host_init(struct bfad_im_port_s *im_port)
{
	struct Scsi_Host *host = im_port->shost;
	struct bfad_s *bfad = im_port->bfad;
	struct bfad_port_s *port = im_port->port;
	struct bfa_pport_attr_s pattr;
	char model[BFA_ADAPTER_MODEL_NAME_LEN];
	char fw_ver[BFA_VERSION_LEN];

	fc_host_node_name(host) =
		bfa_os_htonll((bfa_fcs_port_get_nwwn(port->fcs_port)));
	fc_host_port_name(host) =
		bfa_os_htonll((bfa_fcs_port_get_pwwn(port->fcs_port)));

	fc_host_supported_classes(host) = FC_COS_CLASS3;

	memset(fc_host_supported_fc4s(host), 0,
	       sizeof(fc_host_supported_fc4s(host)));
	if (bfad_supported_fc4s & (BFA_PORT_ROLE_FCP_IM | BFA_PORT_ROLE_FCP_TM))
		/* For FCP type 0x08 */
		fc_host_supported_fc4s(host)[2] = 1;
	if (bfad_supported_fc4s & BFA_PORT_ROLE_FCP_IPFC)
		/* For LLC/SNAP type 0x05 */
		fc_host_supported_fc4s(host)[3] = 0x20;
	/* For fibre channel services type 0x20 */
	fc_host_supported_fc4s(host)[7] = 1;

	bfa_get_adapter_model(&bfad->bfa, model);
	bfa_get_adapter_fw_ver(&bfad->bfa, fw_ver);
	sprintf(fc_host_symbolic_name(host), "Brocade %s FV%s DV%s",
		model, fw_ver, BFAD_DRIVER_VERSION);

	fc_host_supported_speeds(host) = 0;
	fc_host_supported_speeds(host) |=
		FC_PORTSPEED_8GBIT | FC_PORTSPEED_4GBIT | FC_PORTSPEED_2GBIT |
		FC_PORTSPEED_1GBIT;

	bfa_fcport_get_attr(&bfad->bfa, &pattr);
	fc_host_maxframe_size(host) = pattr.pport_cfg.maxfrsize;
}
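
/**
 * Register the itnim with the FC transport as a remote port, change its
 * role to FCP target and record the SCSI target id assigned by the
 * midlayer.
 */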
static void
bfad_im_fc_rport_add(struct bfad_im_port_s *im_port, struct bfad_itnim_s *itnim)
{
	struct fc_rport_identifiers rport_ids;
	struct fc_rport *fc_rport;
	struct bfad_itnim_data_s *itnim_data;

	rport_ids.node_name =
		bfa_os_htonll(bfa_fcs_itnim_get_nwwn(&itnim->fcs_itnim));
	rport_ids.port_name =
		bfa_os_htonll(bfa_fcs_itnim_get_pwwn(&itnim->fcs_itnim));
	rport_ids.port_id =
		bfa_os_hton3b(bfa_fcs_itnim_get_fcid(&itnim->fcs_itnim));
	rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;

	itnim->fc_rport = fc_rport =
		fc_remote_port_add(im_port->shost, 0, &rport_ids);

	if (!fc_rport)
		return;

	fc_rport->maxframe_size =
		bfa_fcs_itnim_get_maxfrsize(&itnim->fcs_itnim);
	fc_rport->supported_classes = bfa_fcs_itnim_get_cos(&itnim->fcs_itnim);

	itnim_data = fc_rport->dd_data;
	itnim_data->itnim = itnim;

	rport_ids.roles |= FC_RPORT_ROLE_FCP_TARGET;

	if (rport_ids.roles != FC_RPORT_ROLE_UNKNOWN)
		fc_remote_port_rolechg(fc_rport, rport_ids.roles);

	if ((fc_rport->scsi_target_id != -1)
	    && (fc_rport->scsi_target_id < MAX_FCP_TARGET))
		itnim->scsi_tgt_id = fc_rport->scsi_target_id;

	return;
}
/**
 * Work queue handler using FC transport service
 * Context: kernel
 */
static void
bfad_im_itnim_work_handler(struct work_struct *work)
{
	struct bfad_itnim_s *itnim = container_of(work, struct bfad_itnim_s,
							 itnim_work);
	struct bfad_im_s *im = itnim->im;
	struct bfad_s *bfad = im->bfad;
	struct bfad_im_port_s *im_port;
	unsigned long flags;
	struct fc_rport *fc_rport;
	wwn_t wwpn;
	u32 fcid;
	char wwpn_str[32], fcid_str[16];

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	im_port = itnim->im_port;
	bfa_trc(bfad, itnim->state);
	switch (itnim->state) {
	case ITNIM_STATE_ONLINE:
		if (!itnim->fc_rport) {
			spin_unlock_irqrestore(&bfad->bfad_lock, flags);
			bfad_im_fc_rport_add(im_port, itnim);
			spin_lock_irqsave(&bfad->bfad_lock, flags);
			wwpn = bfa_fcs_itnim_get_pwwn(&itnim->fcs_itnim);
			fcid = bfa_fcs_itnim_get_fcid(&itnim->fcs_itnim);
			wwn2str(wwpn_str, wwpn);
			fcid2str(fcid_str, fcid);
			list_add_tail(&itnim->list_entry,
				&im_port->itnim_mapped_list);
			bfa_log(bfad->logmod, BFA_LOG_LINUX_ITNIM_ONLINE,
				im_port->shost->host_no,
				itnim->scsi_tgt_id,
				fcid_str, wwpn_str);
		} else {
			printk(KERN_WARNING
				"%s: itnim %llx is already in online state\n",
				__func__,
				bfa_fcs_itnim_get_pwwn(&itnim->fcs_itnim));
		}

		break;
	case ITNIM_STATE_OFFLINE_PENDING:
		itnim->state = ITNIM_STATE_OFFLINE;
		if (itnim->fc_rport) {
			fc_rport = itnim->fc_rport;
			((struct bfad_itnim_data_s *)
				fc_rport->dd_data)->itnim = NULL;
			itnim->fc_rport = NULL;
			if (!(im_port->port->flags & BFAD_PORT_DELETE)) {
				spin_unlock_irqrestore(&bfad->bfad_lock, flags);
				fc_rport->dev_loss_tmo =
					bfa_fcpim_path_tov_get(&bfad->bfa) + 1;
				fc_remote_port_delete(fc_rport);
				spin_lock_irqsave(&bfad->bfad_lock, flags);
			}
			wwpn = bfa_fcs_itnim_get_pwwn(&itnim->fcs_itnim);
			fcid = bfa_fcs_itnim_get_fcid(&itnim->fcs_itnim);
			wwn2str(wwpn_str, wwpn);
			fcid2str(fcid_str, fcid);
			list_del(&itnim->list_entry);
			bfa_log(bfad->logmod, BFA_LOG_LINUX_ITNIM_OFFLINE,
				im_port->shost->host_no,
				itnim->scsi_tgt_id,
				fcid_str, wwpn_str);
		}
		break;
	case ITNIM_STATE_FREE:
		if (itnim->fc_rport) {
			fc_rport = itnim->fc_rport;
			((struct bfad_itnim_data_s *)
				fc_rport->dd_data)->itnim = NULL;
			itnim->fc_rport = NULL;
			if (!(im_port->port->flags & BFAD_PORT_DELETE)) {
				spin_unlock_irqrestore(&bfad->bfad_lock, flags);
				fc_rport->dev_loss_tmo =
					bfa_fcpim_path_tov_get(&bfad->bfa) + 1;
				fc_remote_port_delete(fc_rport);
				spin_lock_irqsave(&bfad->bfad_lock, flags);
			}
			list_del(&itnim->list_entry);
		}

		kfree(itnim);
		break;
	default:
		bfa_assert(0);
		break;
	}

	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
}
/**
 * Scsi_Host template entry, queue a SCSI command to the BFAD.
 */
static int
bfad_im_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
{
	struct bfad_im_port_s *im_port =
		(struct bfad_im_port_s *) cmnd->device->host->hostdata[0];
	struct bfad_s *bfad = im_port->bfad;
	struct bfad_itnim_data_s *itnim_data = cmnd->device->hostdata;
	struct bfad_itnim_s *itnim;
	struct bfa_ioim_s *hal_io;
	unsigned long flags;
	int rc;
	s16 sg_cnt = 0;
	struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));

	rc = fc_remote_port_chkready(rport);
	if (rc) {
		cmnd->result = rc;
		done(cmnd);
		return 0;
	}

	sg_cnt = scsi_dma_map(cmnd);

	if (sg_cnt < 0)
		return SCSI_MLQUEUE_HOST_BUSY;

	cmnd->scsi_done = done;

	spin_lock_irqsave(&bfad->bfad_lock, flags);
	if (!(bfad->bfad_flags & BFAD_HAL_START_DONE)) {
		printk(KERN_WARNING
			"bfad%d, queuecommand %p %x failed, BFA stopped\n",
		       bfad->inst_no, cmnd, cmnd->cmnd[0]);
		cmnd->result = ScsiResult(DID_NO_CONNECT, 0);
		goto out_fail_cmd;
	}

	itnim = itnim_data->itnim;
	if (!itnim) {
		cmnd->result = ScsiResult(DID_IMM_RETRY, 0);
		goto out_fail_cmd;
	}

	hal_io = bfa_ioim_alloc(&bfad->bfa, (struct bfad_ioim_s *) cmnd,
				    itnim->bfa_itnim, sg_cnt);
	if (!hal_io) {
		printk(KERN_WARNING "hal_io failure\n");
		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
		scsi_dma_unmap(cmnd);
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	cmnd->host_scribble = (char *)hal_io;
	bfa_trc_fp(bfad, hal_io->iotag);
	bfa_ioim_start(hal_io);
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

	return 0;

out_fail_cmd:
	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
	scsi_dma_unmap(cmnd);
	if (done)
		done(cmnd);

	return 0;
}
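
/**
 * During driver load, wait (up to bfa_linkup_delay seconds) for the local
 * port to come online, then briefly for remote ports to be discovered,
 * before the SCSI scan starts.
 */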
void
bfad_os_rport_online_wait(struct bfad_s *bfad)
{
	int i;
	int rport_delay = 10;

	for (i = 0; !(bfad->bfad_flags & BFAD_PORT_ONLINE)
		 && i < bfa_linkup_delay; i++)
		schedule_timeout_uninterruptible(HZ);

	if (bfad->bfad_flags & BFAD_PORT_ONLINE) {
		rport_delay = rport_delay < bfa_linkup_delay ?
				 rport_delay : bfa_linkup_delay;
		for (i = 0; !(bfad->bfad_flags & BFAD_RPORT_ONLINE)
			 && i < rport_delay; i++)
			schedule_timeout_uninterruptible(HZ);

		if (rport_delay > 0 && (bfad->bfad_flags & BFAD_RPORT_ONLINE))
			schedule_timeout_uninterruptible(rport_delay * HZ);
	}
}
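
/**
 * Pick the link-up delay based on the boot configuration read from flash:
 * 30 seconds when booting over SAN (boot target WWNs present), otherwise
 * no extra delay.
 */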
int
bfad_os_get_linkup_delay(struct bfad_s *bfad)
{
	u8 nwwns = 0;
	wwn_t *wwns;
	int ldelay;

	/*
	 * Querying for the boot target port wwns
	 * -- read from boot information in flash.
	 * If nwwns > 0 => boot over SAN and set bfa_linkup_delay = 30
	 * else => local boot machine, no extra link-up delay
	 */
	bfa_iocfc_get_bootwwns(&bfad->bfa, &nwwns, &wwns);
	if (nwwns > 0) {
		/* If boot over SAN; linkup_delay = 30sec */
		ldelay = 30;
	} else {
		/* If local boot; no linkup_delay */
		ldelay = 0;
	}

	return ldelay;
}