Merge git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6: (84 commits)
  [SCSI] be2iscsi: SGE Len == 64K
  [SCSI] be2iscsi: Remove premature free of cid
  [SCSI] be2iscsi: More time for FW
  [SCSI] libsas: fix bug for vacant phy
  [SCSI] sd: Fix overflow with big physical blocks
  [SCSI] st: add MTWEOFI to write filemarks without flushing drive buffer
  [SCSI] libsas: Don't issue commands to devices that have been hot-removed
  [SCSI] megaraid_sas: Add Online Controller Reset to MegaRAID SAS drive
  [SCSI] lpfc 8.3.17: Update lpfc driver version to 8.3.17
  [SCSI] lpfc 8.3.17: Replace function reset methodology
  [SCSI] lpfc 8.3.17: SCSI fixes
  [SCSI] lpfc 8.3.17: BSG fixes
  [SCSI] lpfc 8.3.17: SLI Additions and Fixes
  [SCSI] lpfc 8.3.17: Code Cleanup and Locking fixes
  [SCSI] zfcp: Remove scsi_cmnd->serial_number from debug traces
  [SCSI] ipr: fix array error logging
  [SCSI] aha152x: enable PCMCIA on 64bit
  [SCSI] scsi_dh_alua: Handle all states correctly
  [SCSI] cxgb4i: connection and ddp setting update
  [SCSI] cxgb3i: fixed connection over vlan
  ...
Linus Torvalds, 14 years ago
commit c70b5296e7
100 files changed, 21431 insertions(+), 16300 deletions(-)
  1. 14 1
      Documentation/scsi/st.txt
  2. 3 1
      drivers/message/fusion/mptbase.c
  3. 3 2
      drivers/s390/scsi/Makefile
  4. 6 120
      drivers/s390/scsi/zfcp_aux.c
  5. 4 13
      drivers/s390/scsi/zfcp_ccw.c
  6. 184 2
      drivers/s390/scsi/zfcp_cfdc.c
  7. 15 17
      drivers/s390/scsi/zfcp_dbf.c
  8. 6 8
      drivers/s390/scsi/zfcp_dbf.h
  9. 58 20
      drivers/s390/scsi/zfcp_def.h
  10. 287 344
      drivers/s390/scsi/zfcp_erp.c
  11. 33 30
      drivers/s390/scsi/zfcp_ext.h
  12. 1 1
      drivers/s390/scsi/zfcp_fc.c
  13. 223 319
      drivers/s390/scsi/zfcp_fsf.c
  14. 10 8
      drivers/s390/scsi/zfcp_qdio.c
  15. 55 103
      drivers/s390/scsi/zfcp_scsi.c
  16. 111 110
      drivers/s390/scsi/zfcp_sysfs.c
  17. 244 0
      drivers/s390/scsi/zfcp_unit.c
  18. 3 2
      drivers/scsi/Kconfig
  19. 2 1
      drivers/scsi/Makefile
  20. 1 1
      drivers/scsi/aacraid/commctrl.c
  21. 1 1
      drivers/scsi/aacraid/commsup.c
  22. 2 2
      drivers/scsi/arcmsr/arcmsr_hba.c
  23. 1 1
      drivers/scsi/be2iscsi/be_cmds.c
  24. 0 3
      drivers/scsi/be2iscsi/be_iscsi.c
  25. 1 1
      drivers/scsi/be2iscsi/be_main.c
  26. 5 12
      drivers/scsi/bfa/Makefile
  27. 438 0
      drivers/scsi/bfa/bfa.h
  28. 0 57
      drivers/scsi/bfa/bfa_callback_priv.h
  29. 9 21
      drivers/scsi/bfa/bfa_cb_ioim.h
  30. 0 492
      drivers/scsi/bfa/bfa_cee.c
  31. 985 20
      drivers/scsi/bfa/bfa_core.c
  32. 364 0
      drivers/scsi/bfa/bfa_cs.h
  33. 0 58
      drivers/scsi/bfa/bfa_csdebug.c
  34. 466 0
      drivers/scsi/bfa/bfa_defs.h
  35. 457 0
      drivers/scsi/bfa/bfa_defs_fcs.h
  36. 1081 0
      drivers/scsi/bfa/bfa_defs_svc.h
  37. 29 12
      drivers/scsi/bfa/bfa_drv.c
  38. 908 103
      drivers/scsi/bfa/bfa_fc.h
  39. 127 166
      drivers/scsi/bfa/bfa_fcbuild.c
  40. 316 0
      drivers/scsi/bfa/bfa_fcbuild.h
  41. 3439 21
      drivers/scsi/bfa/bfa_fcpim.c
  42. 401 0
      drivers/scsi/bfa/bfa_fcpim.h
  43. 0 192
      drivers/scsi/bfa/bfa_fcpim_priv.h
  44. 0 1962
      drivers/scsi/bfa/bfa_fcport.c
  45. 1565 46
      drivers/scsi/bfa/bfa_fcs.c
  46. 779 0
      drivers/scsi/bfa/bfa_fcs.h
  47. 98 139
      drivers/scsi/bfa/bfa_fcs_fcpim.c
  48. 263 262
      drivers/scsi/bfa/bfa_fcs_lport.c
  49. 0 61
      drivers/scsi/bfa/bfa_fcs_port.c
  50. 250 271
      drivers/scsi/bfa/bfa_fcs_rport.c
  51. 0 99
      drivers/scsi/bfa/bfa_fcs_uf.c
  52. 0 774
      drivers/scsi/bfa/bfa_fcxp.c
  53. 0 138
      drivers/scsi/bfa/bfa_fcxp_priv.h
  54. 0 44
      drivers/scsi/bfa/bfa_fwimg_priv.h
  55. 4 4
      drivers/scsi/bfa/bfa_hw_cb.c
  56. 5 6
      drivers/scsi/bfa/bfa_hw_ct.c
  57. 0 270
      drivers/scsi/bfa/bfa_intr.c
  58. 0 117
      drivers/scsi/bfa/bfa_intr_priv.h
  59. 627 187
      drivers/scsi/bfa/bfa_ioc.c
  60. 196 52
      drivers/scsi/bfa/bfa_ioc.h
  61. 51 73
      drivers/scsi/bfa/bfa_ioc_cb.c
  62. 55 82
      drivers/scsi/bfa/bfa_ioc_ct.c
  63. 0 927
      drivers/scsi/bfa/bfa_iocfc.c
  64. 0 184
      drivers/scsi/bfa/bfa_iocfc.h
  65. 0 44
      drivers/scsi/bfa/bfa_iocfc_q.c
  66. 0 1364
      drivers/scsi/bfa/bfa_ioim.c
  67. 0 1088
      drivers/scsi/bfa/bfa_itnim.c
  68. 0 346
      drivers/scsi/bfa/bfa_log.c
  69. 0 537
      drivers/scsi/bfa/bfa_log_module.c
  70. 0 892
      drivers/scsi/bfa/bfa_lps.c
  71. 0 38
      drivers/scsi/bfa/bfa_lps_priv.h
  72. 42 22
      drivers/scsi/bfa/bfa_modules.h
  73. 0 43
      drivers/scsi/bfa/bfa_modules_priv.h
  74. 57 87
      drivers/scsi/bfa/bfa_os_inc.h
  75. 56 64
      drivers/scsi/bfa/bfa_plog.h
  76. 62 72
      drivers/scsi/bfa/bfa_port.c
  77. 66 0
      drivers/scsi/bfa/bfa_port.h
  78. 0 94
      drivers/scsi/bfa/bfa_port_priv.h
  79. 0 906
      drivers/scsi/bfa/bfa_rport.c
  80. 0 45
      drivers/scsi/bfa/bfa_rport_priv.h
  81. 0 226
      drivers/scsi/bfa/bfa_sgpg.c
  82. 0 79
      drivers/scsi/bfa/bfa_sgpg_priv.h
  83. 0 38
      drivers/scsi/bfa/bfa_sm.c
  84. 5423 0
      drivers/scsi/bfa/bfa_svc.c
  85. 657 0
      drivers/scsi/bfa/bfa_svc.h
  86. 0 90
      drivers/scsi/bfa/bfa_timer.c
  87. 0 64
      drivers/scsi/bfa/bfa_trcmod_priv.h
  88. 0 690
      drivers/scsi/bfa/bfa_tskim.c
  89. 0 343
      drivers/scsi/bfa/bfa_uf.c
  90. 0 47
      drivers/scsi/bfa/bfa_uf_priv.h
  91. 420 308
      drivers/scsi/bfa/bfad.c
  92. 145 96
      drivers/scsi/bfa/bfad_attr.c
  93. 0 56
      drivers/scsi/bfa/bfad_attr.h
  94. 5 5
      drivers/scsi/bfa/bfad_debugfs.c
  95. 151 103
      drivers/scsi/bfa/bfad_drv.h
  96. 0 131
      drivers/scsi/bfa/bfad_fwimg.c
  97. 123 134
      drivers/scsi/bfa/bfad_im.c
  98. 38 18
      drivers/scsi/bfa/bfad_im.h
  99. 0 45
      drivers/scsi/bfa/bfad_im_compat.h
  100. 0 222
      drivers/scsi/bfa/bfad_intr.c

+ 14 - 1
Documentation/scsi/st.txt

@@ -2,7 +2,7 @@ This file contains brief information about the SCSI tape driver.
 The driver is currently maintained by Kai Mäkisara (email
 Kai.Makisara@kolumbus.fi)
 
-Last modified: Sun Feb 24 21:59:07 2008 by kai.makisara
+Last modified: Sun Aug 29 18:25:47 2010 by kai.makisara
 
 
 BASICS
@@ -85,6 +85,17 @@ writing and the last operation has been a write. Two filemarks can be
 optionally written. In both cases end of data is signified by
 returning zero bytes for two consecutive reads.
 
+Writing filemarks without the immediate bit set in the SCSI command block acts
+as a synchronization point, i.e., all remaining data form the drive buffers is
+written to tape before the command returns. This makes sure that write errors
+are caught at that point, but this takes time. In some applications, several
+consecutive files must be written fast. The MTWEOFI operation can be used to
+write the filemarks without flushing the drive buffer. Writing filemark at
+close() is always flushing the drive buffers. However, if the previous
+operation is MTWEOFI, close() does not write a filemark. This can be used if
+the program wants to close/open the tape device between files and wants to
+skip waiting.
+
 If rewind, offline, bsf, or seek is done and previous tape operation was
 write, a filemark is written before moving tape.
 
@@ -301,6 +312,8 @@ MTBSR   Space backward over count records.
 MTFSS   Space forward over count setmarks.
 MTBSS   Space backward over count setmarks.
 MTWEOF  Write count filemarks.
+MTWEOFI	Write count filemarks with immediate bit set (i.e., does not
+	wait until data is on tape)
 MTWSM   Write count setmarks.
 MTREW   Rewind tape.
 MTOFFL  Set device off line (often rewind plus eject).

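For reference (not part of the patch itself): the MTWEOFI operation documented above is driven from user space through the st driver's MTIOCTOP ioctl, just like MTWEOF. A minimal sketch, assuming a kernel and <sys/mtio.h> that already carry this patch and a non-rewinding tape node at /dev/nst0 (both assumptions, not taken from the commit):

    /* Write one filemark with the immediate bit set; per the text above,
     * a following close() will not flush or add another filemark. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <sys/mtio.h>
    #include <unistd.h>

    int main(void)
    {
    	struct mtop op = { .mt_op = MTWEOFI, .mt_count = 1 };
    	int fd = open("/dev/nst0", O_WRONLY);

    	if (fd < 0 || ioctl(fd, MTIOCTOP, &op) < 0) {
    		perror("MTWEOFI");
    		return 1;
    	}
    	close(fd);
    	return 0;
    }
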
+ 3 - 1
drivers/message/fusion/mptbase.c

@@ -5945,8 +5945,10 @@ mpt_findImVolumes(MPT_ADAPTER *ioc)
 		goto out;
 
 	mem = kmalloc(iocpage2sz, GFP_KERNEL);
-	if (!mem)
+	if (!mem) {
+		rc = -ENOMEM;
 		goto out;
+	}
 
 	memcpy(mem, (u8 *)pIoc2, iocpage2sz);
 	ioc->raid_data.pIocPg2 = (IOCPage2_t *) mem;

+ 3 - 2
drivers/s390/scsi/Makefile

@@ -2,7 +2,8 @@
 # Makefile for the S/390 specific device drivers
 #
 
-zfcp-objs := zfcp_aux.o zfcp_ccw.o zfcp_scsi.o zfcp_erp.o zfcp_qdio.o \
-	     zfcp_fsf.o zfcp_dbf.o zfcp_sysfs.o zfcp_fc.o zfcp_cfdc.o
+zfcp-objs := zfcp_aux.o zfcp_ccw.o zfcp_cfdc.o zfcp_dbf.o zfcp_erp.o \
+	     zfcp_fc.o zfcp_fsf.o zfcp_qdio.o zfcp_scsi.o zfcp_sysfs.o \
+	     zfcp_unit.o
 
 obj-$(CONFIG_ZFCP) += zfcp.o

+ 6 - 120
drivers/s390/scsi/zfcp_aux.c

@@ -56,7 +56,6 @@ static void __init zfcp_init_device_configure(char *busid, u64 wwpn, u64 lun)
 	struct ccw_device *cdev;
 	struct zfcp_adapter *adapter;
 	struct zfcp_port *port;
-	struct zfcp_unit *unit;
 
 	cdev = get_ccwdev_by_busid(&zfcp_ccw_driver, busid);
 	if (!cdev)
@@ -72,17 +71,11 @@ static void __init zfcp_init_device_configure(char *busid, u64 wwpn, u64 lun)
 	port = zfcp_get_port_by_wwpn(adapter, wwpn);
 	if (!port)
 		goto out_port;
+	flush_work(&port->rport_work);
 
-	unit = zfcp_unit_enqueue(port, lun);
-	if (IS_ERR(unit))
-		goto out_unit;
-
-	zfcp_erp_unit_reopen(unit, 0, "auidc_1", NULL);
-	zfcp_erp_wait(adapter);
-	flush_work(&unit->scsi_work);
-
-out_unit:
+	zfcp_unit_add(port, lun);
 	put_device(&port->dev);
+
 out_port:
 	zfcp_ccw_adapter_put(adapter);
 out_ccw_device:
@@ -158,6 +151,9 @@ static int __init zfcp_module_init(void)
 		fc_attach_transport(&zfcp_transport_functions);
 	if (!zfcp_data.scsi_transport_template)
 		goto out_transport;
+	scsi_transport_reserve_device(zfcp_data.scsi_transport_template,
+				      sizeof(struct zfcp_scsi_dev));
+
 
 	retval = misc_register(&zfcp_cfdc_misc);
 	if (retval) {
@@ -210,30 +206,6 @@ static void __exit zfcp_module_exit(void)
 
 module_exit(zfcp_module_exit);
 
-/**
- * zfcp_get_unit_by_lun - find unit in unit list of port by FCP LUN
- * @port: pointer to port to search for unit
- * @fcp_lun: FCP LUN to search for
- *
- * Returns: pointer to zfcp_unit or NULL
- */
-struct zfcp_unit *zfcp_get_unit_by_lun(struct zfcp_port *port, u64 fcp_lun)
-{
-	unsigned long flags;
-	struct zfcp_unit *unit;
-
-	read_lock_irqsave(&port->unit_list_lock, flags);
-	list_for_each_entry(unit, &port->unit_list, list)
-		if (unit->fcp_lun == fcp_lun) {
-			if (!get_device(&unit->dev))
-				unit = NULL;
-			read_unlock_irqrestore(&port->unit_list_lock, flags);
-			return unit;
-		}
-	read_unlock_irqrestore(&port->unit_list_lock, flags);
-	return NULL;
-}
-
 /**
  * zfcp_get_port_by_wwpn - find port in port list of adapter by wwpn
  * @adapter: pointer to adapter to search for port
@@ -259,92 +231,6 @@ struct zfcp_port *zfcp_get_port_by_wwpn(struct zfcp_adapter *adapter,
 	return NULL;
 }
 
-/**
- * zfcp_unit_release - dequeue unit
- * @dev: pointer to device
- *
- * waits until all work is done on unit and removes it then from the unit->list
- * of the associated port.
- */
-static void zfcp_unit_release(struct device *dev)
-{
-	struct zfcp_unit *unit = container_of(dev, struct zfcp_unit, dev);
-
-	put_device(&unit->port->dev);
-	kfree(unit);
-}
-
-/**
- * zfcp_unit_enqueue - enqueue unit to unit list of a port.
- * @port: pointer to port where unit is added
- * @fcp_lun: FCP LUN of unit to be enqueued
- * Returns: pointer to enqueued unit on success, ERR_PTR on error
- *
- * Sets up some unit internal structures and creates sysfs entry.
- */
-struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *port, u64 fcp_lun)
-{
-	struct zfcp_unit *unit;
-	int retval = -ENOMEM;
-
-	get_device(&port->dev);
-
-	unit = zfcp_get_unit_by_lun(port, fcp_lun);
-	if (unit) {
-		put_device(&unit->dev);
-		retval = -EEXIST;
-		goto err_out;
-	}
-
-	unit = kzalloc(sizeof(struct zfcp_unit), GFP_KERNEL);
-	if (!unit)
-		goto err_out;
-
-	unit->port = port;
-	unit->fcp_lun = fcp_lun;
-	unit->dev.parent = &port->dev;
-	unit->dev.release = zfcp_unit_release;
-
-	if (dev_set_name(&unit->dev, "0x%016llx",
-			 (unsigned long long) fcp_lun)) {
-		kfree(unit);
-		goto err_out;
-	}
-	retval = -EINVAL;
-
-	INIT_WORK(&unit->scsi_work, zfcp_scsi_scan_work);
-
-	spin_lock_init(&unit->latencies.lock);
-	unit->latencies.write.channel.min = 0xFFFFFFFF;
-	unit->latencies.write.fabric.min = 0xFFFFFFFF;
-	unit->latencies.read.channel.min = 0xFFFFFFFF;
-	unit->latencies.read.fabric.min = 0xFFFFFFFF;
-	unit->latencies.cmd.channel.min = 0xFFFFFFFF;
-	unit->latencies.cmd.fabric.min = 0xFFFFFFFF;
-
-	if (device_register(&unit->dev)) {
-		put_device(&unit->dev);
-		goto err_out;
-	}
-
-	if (sysfs_create_group(&unit->dev.kobj, &zfcp_sysfs_unit_attrs))
-		goto err_out_put;
-
-	write_lock_irq(&port->unit_list_lock);
-	list_add_tail(&unit->list, &port->unit_list);
-	write_unlock_irq(&port->unit_list_lock);
-
-	atomic_set_mask(ZFCP_STATUS_COMMON_RUNNING, &unit->status);
-
-	return unit;
-
-err_out_put:
-	device_unregister(&unit->dev);
-err_out:
-	put_device(&port->dev);
-	return ERR_PTR(retval);
-}
-
 static int zfcp_allocate_low_mem_buffers(struct zfcp_adapter *adapter)
 {
 	adapter->pool.erp_req =

+ 4 - 13
drivers/s390/scsi/zfcp_ccw.c

@@ -46,8 +46,7 @@ static int zfcp_ccw_activate(struct ccw_device *cdev)
 	if (!adapter)
 		return 0;
 
-	zfcp_erp_modify_adapter_status(adapter, "ccresu1", NULL,
-				       ZFCP_STATUS_COMMON_RUNNING, ZFCP_SET);
+	zfcp_erp_set_adapter_status(adapter, ZFCP_STATUS_COMMON_RUNNING);
 	zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
 				"ccresu2", NULL);
 	zfcp_erp_wait(adapter);
@@ -164,14 +163,7 @@ static int zfcp_ccw_set_online(struct ccw_device *cdev)
 	BUG_ON(!zfcp_reqlist_isempty(adapter->req_list));
 	adapter->req_no = 0;
 
-	zfcp_erp_modify_adapter_status(adapter, "ccsonl1", NULL,
-				       ZFCP_STATUS_COMMON_RUNNING, ZFCP_SET);
-	zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
-				"ccsonl2", NULL);
-	zfcp_erp_wait(adapter);
-
-	flush_work(&adapter->scan_work);
-
+	zfcp_ccw_activate(cdev);
 	zfcp_ccw_adapter_put(adapter);
 	return 0;
 }
@@ -224,9 +216,8 @@ static int zfcp_ccw_notify(struct ccw_device *cdev, int event)
 		break;
 	case CIO_OPER:
 		dev_info(&cdev->dev, "The FCP device is operational again\n");
-		zfcp_erp_modify_adapter_status(adapter, "ccnoti3", NULL,
-					       ZFCP_STATUS_COMMON_RUNNING,
-					       ZFCP_SET);
+		zfcp_erp_set_adapter_status(adapter,
+					    ZFCP_STATUS_COMMON_RUNNING);
 		zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
 					"ccnoti4", NULL);
 		break;

+ 184 - 2
drivers/s390/scsi/zfcp_cfdc.c

@@ -2,9 +2,10 @@
  * zfcp device driver
  *
  * Userspace interface for accessing the
- * Access Control Lists / Control File Data Channel
+ * Access Control Lists / Control File Data Channel;
+ * handling of response code and states for ports and LUNs.
  *
- * Copyright IBM Corporation 2008, 2009
+ * Copyright IBM Corporation 2008, 2010
  */
 
 #define KMSG_COMPONENT "zfcp"
@@ -261,3 +262,184 @@ struct miscdevice zfcp_cfdc_misc = {
 	.name = "zfcp_cfdc",
 	.fops = &zfcp_cfdc_fops,
 };
+
+/**
+ * zfcp_cfdc_adapter_access_changed - Process change in adapter ACT
+ * @adapter: Adapter where the Access Control Table (ACT) changed
+ *
+ * After a change in the adapter ACT, check if access to any
+ * previously denied resources is now possible.
+ */
+void zfcp_cfdc_adapter_access_changed(struct zfcp_adapter *adapter)
+{
+	unsigned long flags;
+	struct zfcp_port *port;
+	struct scsi_device *sdev;
+	struct zfcp_scsi_dev *zfcp_sdev;
+	int status;
+
+	if (adapter->connection_features & FSF_FEATURE_NPIV_MODE)
+		return;
+
+	read_lock_irqsave(&adapter->port_list_lock, flags);
+	list_for_each_entry(port, &adapter->port_list, list) {
+		status = atomic_read(&port->status);
+		if ((status & ZFCP_STATUS_COMMON_ACCESS_DENIED) ||
+		    (status & ZFCP_STATUS_COMMON_ACCESS_BOXED))
+			zfcp_erp_port_reopen(port,
+					     ZFCP_STATUS_COMMON_ERP_FAILED,
+					     "cfaac_1", NULL);
+	}
+	read_unlock_irqrestore(&adapter->port_list_lock, flags);
+
+	shost_for_each_device(sdev, port->adapter->scsi_host) {
+		zfcp_sdev = sdev_to_zfcp(sdev);
+		status = atomic_read(&zfcp_sdev->status);
+		if ((status & ZFCP_STATUS_COMMON_ACCESS_DENIED) ||
+		    (status & ZFCP_STATUS_COMMON_ACCESS_BOXED))
+			zfcp_erp_lun_reopen(sdev,
+					    ZFCP_STATUS_COMMON_ERP_FAILED,
+					    "cfaac_2", NULL);
+	}
+}
+
+static void zfcp_act_eval_err(struct zfcp_adapter *adapter, u32 table)
+{
+	u16 subtable = table >> 16;
+	u16 rule = table & 0xffff;
+	const char *act_type[] = { "unknown", "OS", "WWPN", "DID", "LUN" };
+
+	if (subtable && subtable < ARRAY_SIZE(act_type))
+		dev_warn(&adapter->ccw_device->dev,
+			 "Access denied according to ACT rule type %s, "
+			 "rule %d\n", act_type[subtable], rule);
+}
+
+/**
+ * zfcp_cfdc_port_denied - Process "access denied" for port
+ * @port: The port where the acces has been denied
+ * @qual: The FSF status qualifier for the access denied FSF status
+ */
+void zfcp_cfdc_port_denied(struct zfcp_port *port,
+			   union fsf_status_qual *qual)
+{
+	dev_warn(&port->adapter->ccw_device->dev,
+		 "Access denied to port 0x%016Lx\n",
+		 (unsigned long long)port->wwpn);
+
+	zfcp_act_eval_err(port->adapter, qual->halfword[0]);
+	zfcp_act_eval_err(port->adapter, qual->halfword[1]);
+	zfcp_erp_set_port_status(port,
+				 ZFCP_STATUS_COMMON_ERP_FAILED |
+				 ZFCP_STATUS_COMMON_ACCESS_DENIED);
+}
+
+/**
+ * zfcp_cfdc_lun_denied - Process "access denied" for LUN
+ * @sdev: The SCSI device / LUN where the access has been denied
+ * @qual: The FSF status qualifier for the access denied FSF status
+ */
+void zfcp_cfdc_lun_denied(struct scsi_device *sdev,
+			  union fsf_status_qual *qual)
+{
+	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
+
+	dev_warn(&zfcp_sdev->port->adapter->ccw_device->dev,
+		 "Access denied to LUN 0x%016Lx on port 0x%016Lx\n",
+		 zfcp_scsi_dev_lun(sdev),
+		 (unsigned long long)zfcp_sdev->port->wwpn);
+	zfcp_act_eval_err(zfcp_sdev->port->adapter, qual->halfword[0]);
+	zfcp_act_eval_err(zfcp_sdev->port->adapter, qual->halfword[1]);
+	zfcp_erp_set_lun_status(sdev,
+				ZFCP_STATUS_COMMON_ERP_FAILED |
+				ZFCP_STATUS_COMMON_ACCESS_DENIED);
+
+	atomic_clear_mask(ZFCP_STATUS_LUN_SHARED, &zfcp_sdev->status);
+	atomic_clear_mask(ZFCP_STATUS_LUN_READONLY, &zfcp_sdev->status);
+}
+
+/**
+ * zfcp_cfdc_lun_shrng_vltn - Evaluate LUN sharing violation status
+ * @sdev: The LUN / SCSI device where sharing violation occurred
+ * @qual: The FSF status qualifier from the LUN sharing violation
+ */
+void zfcp_cfdc_lun_shrng_vltn(struct scsi_device *sdev,
+			      union fsf_status_qual *qual)
+{
+	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
+
+	if (qual->word[0])
+		dev_warn(&zfcp_sdev->port->adapter->ccw_device->dev,
+			 "LUN 0x%Lx on port 0x%Lx is already in "
+			 "use by CSS%d, MIF Image ID %x\n",
+			 zfcp_scsi_dev_lun(sdev),
+			 (unsigned long long)zfcp_sdev->port->wwpn,
+			 qual->fsf_queue_designator.cssid,
+			 qual->fsf_queue_designator.hla);
+	else
+		zfcp_act_eval_err(zfcp_sdev->port->adapter, qual->word[2]);
+
+	zfcp_erp_set_lun_status(sdev,
+				ZFCP_STATUS_COMMON_ERP_FAILED |
+				ZFCP_STATUS_COMMON_ACCESS_DENIED);
+	atomic_clear_mask(ZFCP_STATUS_LUN_SHARED, &zfcp_sdev->status);
+	atomic_clear_mask(ZFCP_STATUS_LUN_READONLY, &zfcp_sdev->status);
+}
+
+/**
+ * zfcp_cfdc_open_lun_eval - Eval access ctrl. status for successful "open lun"
+ * @sdev: The SCSI device / LUN where to evaluate the status
+ * @bottom: The qtcb bottom with the status from the "open lun"
+ *
+ * Returns: 0 if LUN is usable, -EACCES if the access control table
+ *          reports an unsupported configuration.
+ */
+int zfcp_cfdc_open_lun_eval(struct scsi_device *sdev,
+			    struct fsf_qtcb_bottom_support *bottom)
+{
+	int shared, rw;
+	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
+	struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
+
+	if ((adapter->connection_features & FSF_FEATURE_NPIV_MODE) ||
+	    !(adapter->adapter_features & FSF_FEATURE_LUN_SHARING) ||
+	    zfcp_ccw_priv_sch(adapter))
+		return 0;
+
+	shared = !(bottom->lun_access_info & FSF_UNIT_ACCESS_EXCLUSIVE);
+	rw = (bottom->lun_access_info & FSF_UNIT_ACCESS_OUTBOUND_TRANSFER);
+
+	if (shared)
+		atomic_set_mask(ZFCP_STATUS_LUN_SHARED, &zfcp_sdev->status);
+
+	if (!rw) {
+		atomic_set_mask(ZFCP_STATUS_LUN_READONLY, &zfcp_sdev->status);
+		dev_info(&adapter->ccw_device->dev, "SCSI device at LUN "
+			 "0x%016Lx on port 0x%016Lx opened read-only\n",
+			 zfcp_scsi_dev_lun(sdev),
+			 (unsigned long long)zfcp_sdev->port->wwpn);
+	}
+
+	if (!shared && !rw) {
+		dev_err(&adapter->ccw_device->dev, "Exclusive read-only access "
+			"not supported (LUN 0x%016Lx, port 0x%016Lx)\n",
+			zfcp_scsi_dev_lun(sdev),
+			(unsigned long long)zfcp_sdev->port->wwpn);
+		zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ERP_FAILED);
+		zfcp_erp_lun_shutdown(sdev, 0, "fsouh_6", NULL);
+		return -EACCES;
+	}
+
+	if (shared && rw) {
+		dev_err(&adapter->ccw_device->dev,
+			"Shared read-write access not supported "
+			"(LUN 0x%016Lx, port 0x%016Lx)\n",
+			zfcp_scsi_dev_lun(sdev),
+			(unsigned long long)zfcp_sdev->port->wwpn);
+		zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_ERP_FAILED);
+		zfcp_erp_lun_shutdown(sdev, 0, "fsosh_8", NULL);
+		return -EACCES;
+	}
+
+	return 0;
+}

+ 15 - 17
drivers/s390/scsi/zfcp_dbf.c

@@ -154,7 +154,6 @@ void _zfcp_dbf_hba_fsf_response(const char *tag2, int level,
 		scsi_cmnd = (struct scsi_cmnd *)fsf_req->data;
 		if (scsi_cmnd) {
 			response->u.fcp.cmnd = (unsigned long)scsi_cmnd;
-			response->u.fcp.serial = scsi_cmnd->serial_number;
 			response->u.fcp.data_dir =
 				qtcb->bottom.io.data_direction;
 		}
@@ -330,7 +329,6 @@ static void zfcp_dbf_hba_view_response(char **p,
 			break;
 		zfcp_dbf_out(p, "data_direction", "0x%04x", r->u.fcp.data_dir);
 		zfcp_dbf_out(p, "scsi_cmnd", "0x%0Lx", r->u.fcp.cmnd);
-		zfcp_dbf_out(p, "scsi_serial", "0x%016Lx", r->u.fcp.serial);
 		*p += sprintf(*p, "\n");
 		break;
 
@@ -482,7 +480,7 @@ static int zfcp_dbf_rec_view_format(debug_info_t *id, struct debug_view *view,
 		zfcp_dbf_out(&p, "fcp_lun", "0x%016Lx", r->u.trigger.fcp_lun);
 		zfcp_dbf_out(&p, "adapter_status", "0x%08x", r->u.trigger.as);
 		zfcp_dbf_out(&p, "port_status", "0x%08x", r->u.trigger.ps);
-		zfcp_dbf_out(&p, "unit_status", "0x%08x", r->u.trigger.us);
+		zfcp_dbf_out(&p, "lun_status", "0x%08x", r->u.trigger.ls);
 		break;
 	case ZFCP_REC_DBF_ID_ACTION:
 		zfcp_dbf_out(&p, "erp_action", "0x%016Lx", r->u.action.action);
@@ -600,19 +598,20 @@ void zfcp_dbf_rec_port(char *id, void *ref, struct zfcp_port *port)
 }
 
 /**
- * zfcp_dbf_rec_unit - trace event for unit state change
+ * zfcp_dbf_rec_lun - trace event for LUN state change
  * @id: identifier for trigger of state change
  * @ref: additional reference (e.g. request)
- * @unit: unit
+ * @sdev: SCSI device
  */
-void zfcp_dbf_rec_unit(char *id, void *ref, struct zfcp_unit *unit)
+void zfcp_dbf_rec_lun(char *id, void *ref, struct scsi_device *sdev)
 {
-	struct zfcp_port *port = unit->port;
+	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
+	struct zfcp_port *port = zfcp_sdev->port;
 	struct zfcp_dbf *dbf = port->adapter->dbf;
 
-	zfcp_dbf_rec_target(id, ref, dbf, &unit->status,
-				  &unit->erp_counter, port->wwpn, port->d_id,
-				  unit->fcp_lun);
+	zfcp_dbf_rec_target(id, ref, dbf, &zfcp_sdev->status,
+			    &zfcp_sdev->erp_counter, port->wwpn, port->d_id,
+			    zfcp_scsi_dev_lun(sdev));
 }
 
 /**
@@ -624,11 +623,11 @@ void zfcp_dbf_rec_unit(char *id, void *ref, struct zfcp_unit *unit)
  * @action: address of error recovery action struct
  * @adapter: adapter
  * @port: port
- * @unit: unit
+ * @sdev: SCSI device
  */
 void zfcp_dbf_rec_trigger(char *id2, void *ref, u8 want, u8 need, void *action,
 			  struct zfcp_adapter *adapter, struct zfcp_port *port,
-			  struct zfcp_unit *unit)
+			  struct scsi_device *sdev)
 {
 	struct zfcp_dbf *dbf = adapter->dbf;
 	struct zfcp_dbf_rec_record *r = &dbf->rec_buf;
@@ -647,9 +646,10 @@ void zfcp_dbf_rec_trigger(char *id2, void *ref, u8 want, u8 need, void *action,
 		r->u.trigger.ps = atomic_read(&port->status);
 		r->u.trigger.wwpn = port->wwpn;
 	}
-	if (unit)
-		r->u.trigger.us = atomic_read(&unit->status);
-	r->u.trigger.fcp_lun = unit ? unit->fcp_lun : ZFCP_DBF_INVALID_LUN;
+	if (sdev)
+		r->u.trigger.ls = atomic_read(&sdev_to_zfcp(sdev)->status);
+	r->u.trigger.fcp_lun = sdev ? zfcp_scsi_dev_lun(sdev) :
+				      ZFCP_DBF_INVALID_LUN;
 	debug_event(dbf->rec, action ? 1 : 4, r, sizeof(*r));
 	spin_unlock_irqrestore(&dbf->rec_lock, flags);
 }
@@ -879,7 +879,6 @@ void _zfcp_dbf_scsi(const char *tag, const char *tag2, int level,
 				}
 				rec->scsi_result = scsi_cmnd->result;
 				rec->scsi_cmnd = (unsigned long)scsi_cmnd;
-				rec->scsi_serial = scsi_cmnd->serial_number;
 				memcpy(rec->scsi_opcode, scsi_cmnd->cmnd,
 					min((int)scsi_cmnd->cmd_len,
 						ZFCP_DBF_SCSI_OPCODE));
@@ -948,7 +947,6 @@ static int zfcp_dbf_scsi_view_format(debug_info_t *id, struct debug_view *view,
 	zfcp_dbf_out(&p, "scsi_lun", "0x%08x", r->scsi_lun);
 	zfcp_dbf_out(&p, "scsi_result", "0x%08x", r->scsi_result);
 	zfcp_dbf_out(&p, "scsi_cmnd", "0x%0Lx", r->scsi_cmnd);
-	zfcp_dbf_out(&p, "scsi_serial", "0x%016Lx", r->scsi_serial);
 	zfcp_dbf_outd(&p, "scsi_opcode", r->scsi_opcode, ZFCP_DBF_SCSI_OPCODE,
 		      0, ZFCP_DBF_SCSI_OPCODE);
 	zfcp_dbf_out(&p, "scsi_retries", "0x%02x", r->scsi_retries);

+ 6 - 8
drivers/s390/scsi/zfcp_dbf.h

@@ -60,7 +60,7 @@ struct zfcp_dbf_rec_record_trigger {
 	u8 need;
 	u32 as;
 	u32 ps;
-	u32 us;
+	u32 ls;
 	u64 ref;
 	u64 action;
 	u64 wwpn;
@@ -110,7 +110,6 @@ struct zfcp_dbf_hba_record_response {
 	union {
 		struct {
 			u64 cmnd;
-			u64 serial;
 			u32 data_dir;
 		} fcp;
 		struct {
@@ -206,7 +205,6 @@ struct zfcp_dbf_scsi_record {
 	u32 scsi_lun;
 	u32 scsi_result;
 	u64 scsi_cmnd;
-	u64 scsi_serial;
 #define ZFCP_DBF_SCSI_OPCODE	16
 	u8 scsi_opcode[ZFCP_DBF_SCSI_OPCODE];
 	u8 scsi_retries;
@@ -350,16 +348,16 @@ void zfcp_dbf_scsi_abort(const char *tag, struct zfcp_dbf *dbf,
 /**
  * zfcp_dbf_scsi_devreset - trace event for Logical Unit or Target Reset
  * @tag: tag indicating success or failure of reset operation
+ * @scmnd: SCSI command which caused this error recovery
  * @flag: indicates type of reset (Target Reset, Logical Unit Reset)
- * @unit: unit that needs reset
- * @scsi_cmnd: SCSI command which caused this error recovery
  */
 static inline
-void zfcp_dbf_scsi_devreset(const char *tag, u8 flag, struct zfcp_unit *unit,
-			    struct scsi_cmnd *scsi_cmnd)
+void zfcp_dbf_scsi_devreset(const char *tag, struct scsi_cmnd *scmnd, u8 flag)
 {
+	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scmnd->device);
+
 	zfcp_dbf_scsi(flag == FCP_TMF_TGT_RESET ? "trst" : "lrst", tag, 1,
-			    unit->port->adapter->dbf, scsi_cmnd, NULL, 0);
+		      zfcp_sdev->port->adapter->dbf, scmnd, NULL, 0);
 }
 
 #endif /* ZFCP_DBF_H */

+ 58 - 20
drivers/s390/scsi/zfcp_def.h

@@ -85,8 +85,8 @@ struct zfcp_reqlist;
 #define ZFCP_STATUS_PORT_LINK_TEST		0x00000002
 
 /* logical unit status */
-#define ZFCP_STATUS_UNIT_SHARED			0x00000004
-#define ZFCP_STATUS_UNIT_READONLY		0x00000008
+#define ZFCP_STATUS_LUN_SHARED			0x00000004
+#define ZFCP_STATUS_LUN_READONLY		0x00000008
 
 /* FSF request status (this does not have a common part) */
 #define ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT	0x00000002
@@ -118,7 +118,7 @@ struct zfcp_erp_action {
 	int action;	              /* requested action code */
 	struct zfcp_adapter *adapter; /* device which should be recovered */
 	struct zfcp_port *port;
-	struct zfcp_unit *unit;
+	struct scsi_device *sdev;
 	u32		status;	      /* recovery status */
 	u32 step;	              /* active step of this erp action */
 	unsigned long		fsf_req_id;
@@ -219,20 +219,65 @@ struct zfcp_port {
 	unsigned int		starget_id;
 };
 
+/**
+ * struct zfcp_unit - LUN configured via zfcp sysfs
+ * @dev: struct device for sysfs representation and reference counting
+ * @list: entry in LUN/unit list per zfcp_port
+ * @port: reference to zfcp_port where this LUN is configured
+ * @fcp_lun: 64 bit LUN value
+ * @scsi_work: for running scsi_scan_target
+ *
+ * This is the representation of a LUN that has been configured for
+ * usage. The main data here is the 64 bit LUN value, data for
+ * running I/O and recovery is in struct zfcp_scsi_dev.
+ */
 struct zfcp_unit {
 struct zfcp_unit {
-	struct list_head       list;	       /* list of logical units */
-	struct zfcp_port       *port;	       /* remote port of unit */
-	atomic_t	       status;	       /* status of this logical unit */
-	u64		       fcp_lun;	       /* own FCP_LUN */
-	u32		       handle;	       /* handle assigned by FSF */
-        struct scsi_device     *device;        /* scsi device struct pointer */
-	struct zfcp_erp_action erp_action;     /* pending error recovery */
-        atomic_t               erp_counter;
-	struct zfcp_latencies	latencies;
+	struct device		dev;
+	struct list_head	list;
+	struct zfcp_port	*port;
+	u64			fcp_lun;
 	struct work_struct	scsi_work;
 	struct work_struct	scsi_work;
 };
 
+ * struct zfcp_scsi_dev - zfcp data per SCSI device
+ * @status: zfcp internal status flags
+ * @lun_handle: handle from "open lun" for issuing FSF requests
+ * @erp_action: zfcp erp data for opening and recovering this LUN
+ * @erp_counter: zfcp erp counter for this LUN
+ * @latencies: FSF channel and fabric latencies
+ * @port: zfcp_port where this LUN belongs to
+ */
+struct zfcp_scsi_dev {
+	atomic_t		status;
+	u32			lun_handle;
+	struct zfcp_erp_action	erp_action;
+	atomic_t		erp_counter;
+	struct zfcp_latencies	latencies;
+	struct zfcp_port	*port;
+};
+
+/**
+ * sdev_to_zfcp - Access zfcp LUN data for SCSI device
+ * @sdev: scsi_device where to get the zfcp_scsi_dev pointer
+ */
+static inline struct zfcp_scsi_dev *sdev_to_zfcp(struct scsi_device *sdev)
+{
+	return scsi_transport_device_data(sdev);
+}
+
+/**
+ * zfcp_scsi_dev_lun - Return SCSI device LUN as 64 bit FCP LUN
+ * @sdev: SCSI device where to get the LUN from
+ */
+static inline u64 zfcp_scsi_dev_lun(struct scsi_device *sdev)
+{
+	u64 fcp_lun;
+
+	int_to_scsilun(sdev->lun, (struct scsi_lun *)&fcp_lun);
+	return fcp_lun;
+}
+
 /**
  * struct zfcp_fsf_req - basic FSF request structure
  * @list: list of FSF requests
@@ -249,7 +294,6 @@ struct zfcp_unit {
  * @erp_action: reference to erp action if request issued on behalf of ERP
  * @pool: reference to memory pool if used for this request
  * @issued: time when request was send (STCK)
- * @unit: reference to unit if this request is a SCSI request
  * @handler: handler which should be called to process response
  */
 struct zfcp_fsf_req {
@@ -267,7 +311,6 @@ struct zfcp_fsf_req {
 	struct zfcp_erp_action	*erp_action;
 	mempool_t		*pool;
 	unsigned long long	issued;
-	struct zfcp_unit	*unit;
 	void			(*handler)(struct zfcp_fsf_req *);
 };
 
@@ -282,9 +325,4 @@ struct zfcp_data {
 	struct kmem_cache	*adisc_cache;
 };
 
-/********************** ZFCP SPECIFIC DEFINES ********************************/
-
-#define ZFCP_SET                0x00000100
-#define ZFCP_CLEAR              0x00000200
-
 #endif /* ZFCP_DEF_H */

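To make the new split concrete: a hedged, illustrative-only sketch of how the helpers introduced above are meant to be used by callers elsewhere in this series (the function name example_report_lun is made up; the access pattern mirrors the zfcp_cfdc.c and zfcp_dbf.c hunks in this merge):

    /* Illustrative only: per-LUN zfcp data now hangs off the scsi_device. */
    static void example_report_lun(struct scsi_device *sdev)
    {
    	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);

    	dev_info(&zfcp_sdev->port->adapter->ccw_device->dev,
    		 "LUN 0x%016Lx, status 0x%08x\n",
    		 (unsigned long long) zfcp_scsi_dev_lun(sdev),
    		 atomic_read(&zfcp_sdev->status));
    }
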
+ 287 - 344
drivers/s390/scsi/zfcp_erp.c

@@ -21,6 +21,7 @@ enum zfcp_erp_act_flags {
 	ZFCP_STATUS_ERP_DISMISSING	= 0x00100000,
 	ZFCP_STATUS_ERP_DISMISSED	= 0x00200000,
 	ZFCP_STATUS_ERP_LOWMEM		= 0x00400000,
+	ZFCP_STATUS_ERP_NO_REF		= 0x00800000,
 };
 
 enum zfcp_erp_steps {
@@ -29,12 +30,12 @@ enum zfcp_erp_steps {
 	ZFCP_ERP_STEP_PHYS_PORT_CLOSING	= 0x0010,
 	ZFCP_ERP_STEP_PORT_CLOSING	= 0x0100,
 	ZFCP_ERP_STEP_PORT_OPENING	= 0x0800,
-	ZFCP_ERP_STEP_UNIT_CLOSING	= 0x1000,
-	ZFCP_ERP_STEP_UNIT_OPENING	= 0x2000,
+	ZFCP_ERP_STEP_LUN_CLOSING	= 0x1000,
+	ZFCP_ERP_STEP_LUN_OPENING	= 0x2000,
 };
 
 enum zfcp_erp_act_type {
-	ZFCP_ERP_ACTION_REOPEN_UNIT        = 1,
+	ZFCP_ERP_ACTION_REOPEN_LUN         = 1,
 	ZFCP_ERP_ACTION_REOPEN_PORT	   = 2,
 	ZFCP_ERP_ACTION_REOPEN_PORT_FORCED = 3,
 	ZFCP_ERP_ACTION_REOPEN_ADAPTER     = 4,
@@ -56,9 +57,8 @@ enum zfcp_erp_act_result {
 
 static void zfcp_erp_adapter_block(struct zfcp_adapter *adapter, int mask)
 {
-	zfcp_erp_modify_adapter_status(adapter, "erablk1", NULL,
-				       ZFCP_STATUS_COMMON_UNBLOCKED | mask,
-				       ZFCP_CLEAR);
+	zfcp_erp_clear_adapter_status(adapter,
+				       ZFCP_STATUS_COMMON_UNBLOCKED | mask);
 }
 
 static int zfcp_erp_action_exists(struct zfcp_erp_action *act)
@@ -88,24 +88,24 @@ static void zfcp_erp_action_dismiss(struct zfcp_erp_action *act)
 		zfcp_erp_action_ready(act);
 }
 
-static void zfcp_erp_action_dismiss_unit(struct zfcp_unit *unit)
+static void zfcp_erp_action_dismiss_lun(struct scsi_device *sdev)
 {
-	if (atomic_read(&unit->status) & ZFCP_STATUS_COMMON_ERP_INUSE)
-		zfcp_erp_action_dismiss(&unit->erp_action);
+	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
+
+	if (atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_ERP_INUSE)
+		zfcp_erp_action_dismiss(&zfcp_sdev->erp_action);
 }
 
 static void zfcp_erp_action_dismiss_port(struct zfcp_port *port)
 {
-	struct zfcp_unit *unit;
+	struct scsi_device *sdev;
 
 	if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_INUSE)
 		zfcp_erp_action_dismiss(&port->erp_action);
-	else {
-		read_lock(&port->unit_list_lock);
-		list_for_each_entry(unit, &port->unit_list, list)
-			zfcp_erp_action_dismiss_unit(unit);
-		read_unlock(&port->unit_list_lock);
-	}
+	else
+		shost_for_each_device(sdev, port->adapter->scsi_host)
+			if (sdev_to_zfcp(sdev)->port == port)
+				zfcp_erp_action_dismiss_lun(sdev);
 }
 
 static void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter)
@@ -124,15 +124,17 @@ static void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter)
 
 static int zfcp_erp_required_act(int want, struct zfcp_adapter *adapter,
 				 struct zfcp_port *port,
-				 struct zfcp_unit *unit)
+				 struct scsi_device *sdev)
 {
 	int need = want;
-	int u_status, p_status, a_status;
+	int l_status, p_status, a_status;
+	struct zfcp_scsi_dev *zfcp_sdev;
 
 	switch (want) {
-	case ZFCP_ERP_ACTION_REOPEN_UNIT:
-		u_status = atomic_read(&unit->status);
-		if (u_status & ZFCP_STATUS_COMMON_ERP_INUSE)
+	case ZFCP_ERP_ACTION_REOPEN_LUN:
+		zfcp_sdev = sdev_to_zfcp(sdev);
+		l_status = atomic_read(&zfcp_sdev->status);
+		if (l_status & ZFCP_STATUS_COMMON_ERP_INUSE)
 			return 0;
 		p_status = atomic_read(&port->status);
 		if (!(p_status & ZFCP_STATUS_COMMON_RUNNING) ||
@@ -169,22 +171,26 @@ static int zfcp_erp_required_act(int want, struct zfcp_adapter *adapter,
 	return need;
 }
 
-static struct zfcp_erp_action *zfcp_erp_setup_act(int need,
+static struct zfcp_erp_action *zfcp_erp_setup_act(int need, u32 act_status,
 						  struct zfcp_adapter *adapter,
 						  struct zfcp_port *port,
-						  struct zfcp_unit *unit)
+						  struct scsi_device *sdev)
 {
 	struct zfcp_erp_action *erp_action;
-	u32 status = 0;
+	struct zfcp_scsi_dev *zfcp_sdev;
 
 	switch (need) {
-	case ZFCP_ERP_ACTION_REOPEN_UNIT:
-		if (!get_device(&unit->dev))
-			return NULL;
-		atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &unit->status);
-		erp_action = &unit->erp_action;
-		if (!(atomic_read(&unit->status) & ZFCP_STATUS_COMMON_RUNNING))
-			status = ZFCP_STATUS_ERP_CLOSE_ONLY;
+	case ZFCP_ERP_ACTION_REOPEN_LUN:
+		zfcp_sdev = sdev_to_zfcp(sdev);
+		if (!(act_status & ZFCP_STATUS_ERP_NO_REF))
+			if (scsi_device_get(sdev))
+				return NULL;
+		atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE,
+				&zfcp_sdev->status);
+		erp_action = &zfcp_sdev->erp_action;
+		if (!(atomic_read(&zfcp_sdev->status) &
+		      ZFCP_STATUS_COMMON_RUNNING))
+			act_status |= ZFCP_STATUS_ERP_CLOSE_ONLY;
 		break;
 
 	case ZFCP_ERP_ACTION_REOPEN_PORT:
@@ -195,7 +201,7 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need,
 		atomic_set_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &port->status);
 		erp_action = &port->erp_action;
 		if (!(atomic_read(&port->status) & ZFCP_STATUS_COMMON_RUNNING))
-			status = ZFCP_STATUS_ERP_CLOSE_ONLY;
+			act_status |= ZFCP_STATUS_ERP_CLOSE_ONLY;
 		break;
 
 	case ZFCP_ERP_ACTION_REOPEN_ADAPTER:
@@ -205,7 +211,7 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need,
 		erp_action = &adapter->erp_action;
 		if (!(atomic_read(&adapter->status) &
 		      ZFCP_STATUS_COMMON_RUNNING))
-			status = ZFCP_STATUS_ERP_CLOSE_ONLY;
+			act_status |= ZFCP_STATUS_ERP_CLOSE_ONLY;
 		break;
 
 	default:
@@ -215,16 +221,17 @@ static struct zfcp_erp_action *zfcp_erp_setup_act(int need,
 	memset(erp_action, 0, sizeof(struct zfcp_erp_action));
 	erp_action->adapter = adapter;
 	erp_action->port = port;
-	erp_action->unit = unit;
+	erp_action->sdev = sdev;
 	erp_action->action = need;
-	erp_action->status = status;
+	erp_action->status = act_status;
 
 	return erp_action;
 }
 
 static int zfcp_erp_action_enqueue(int want, struct zfcp_adapter *adapter,
 				   struct zfcp_port *port,
-				   struct zfcp_unit *unit, char *id, void *ref)
+				   struct scsi_device *sdev,
+				   char *id, void *ref, u32 act_status)
 {
 	int retval = 1, need;
 	struct zfcp_erp_action *act = NULL;
@@ -232,21 +239,21 @@ static int zfcp_erp_action_enqueue(int want, struct zfcp_adapter *adapter,
 	if (!adapter->erp_thread)
 		return -EIO;
 
-	need = zfcp_erp_required_act(want, adapter, port, unit);
+	need = zfcp_erp_required_act(want, adapter, port, sdev);
 	if (!need)
 		goto out;
 
-	atomic_set_mask(ZFCP_STATUS_ADAPTER_ERP_PENDING, &adapter->status);
-	act = zfcp_erp_setup_act(need, adapter, port, unit);
+	act = zfcp_erp_setup_act(need, act_status, adapter, port, sdev);
 	if (!act)
 		goto out;
+	atomic_set_mask(ZFCP_STATUS_ADAPTER_ERP_PENDING, &adapter->status);
 	++adapter->erp_total_count;
 	list_add_tail(&act->list, &adapter->erp_ready_head);
 	wake_up(&adapter->erp_ready_wq);
 	zfcp_dbf_rec_thread("eracte1", adapter->dbf);
 	retval = 0;
  out:
-	zfcp_dbf_rec_trigger(id, ref, want, need, act, adapter, port, unit);
+	zfcp_dbf_rec_trigger(id, ref, want, need, act, adapter, port, sdev);
 	return retval;
 }
 
@@ -258,11 +265,12 @@ static int _zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter,
 
 	/* ensure propagation of failed status to new devices */
 	if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_ERP_FAILED) {
-		zfcp_erp_adapter_failed(adapter, "erareo1", NULL);
+		zfcp_erp_set_adapter_status(adapter,
+					    ZFCP_STATUS_COMMON_ERP_FAILED);
 		return -EIO;
 	}
 	return zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_ADAPTER,
-				       adapter, NULL, NULL, id, ref);
+				       adapter, NULL, NULL, id, ref, 0);
 }
 
 /**
@@ -282,10 +290,11 @@ void zfcp_erp_adapter_reopen(struct zfcp_adapter *adapter, int clear,
 
 	write_lock_irqsave(&adapter->erp_lock, flags);
 	if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_ERP_FAILED)
-		zfcp_erp_adapter_failed(adapter, "erareo1", NULL);
+		zfcp_erp_set_adapter_status(adapter,
+					    ZFCP_STATUS_COMMON_ERP_FAILED);
 	else
 		zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_ADAPTER, adapter,
-					NULL, NULL, id, ref);
+					NULL, NULL, id, ref, 0);
 	write_unlock_irqrestore(&adapter->erp_lock, flags);
 }
 
@@ -317,25 +326,10 @@ void zfcp_erp_port_shutdown(struct zfcp_port *port, int clear, char *id,
 	zfcp_erp_port_reopen(port, clear | flags, id, ref);
 }
 
-/**
- * zfcp_erp_unit_shutdown - Shutdown unit
- * @unit: Unit to shut down.
- * @clear: Status flags to clear.
- * @id: Id for debug trace event.
- * @ref: Reference for debug trace event.
- */
-void zfcp_erp_unit_shutdown(struct zfcp_unit *unit, int clear, char *id,
-			    void *ref)
-{
-	int flags = ZFCP_STATUS_COMMON_RUNNING | ZFCP_STATUS_COMMON_ERP_FAILED;
-	zfcp_erp_unit_reopen(unit, clear | flags, id, ref);
-}
-
 static void zfcp_erp_port_block(struct zfcp_port *port, int clear)
 {
-	zfcp_erp_modify_port_status(port, "erpblk1", NULL,
-				    ZFCP_STATUS_COMMON_UNBLOCKED | clear,
-				    ZFCP_CLEAR);
+	zfcp_erp_clear_port_status(port,
+				    ZFCP_STATUS_COMMON_UNBLOCKED | clear);
 }
 
 static void _zfcp_erp_port_forced_reopen(struct zfcp_port *port,
@@ -348,7 +342,7 @@ static void _zfcp_erp_port_forced_reopen(struct zfcp_port *port,
 		return;
 
 	zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_PORT_FORCED,
-				port->adapter, port, NULL, id, ref);
+				port->adapter, port, NULL, id, ref, 0);
 }
 
 /**
@@ -376,12 +370,12 @@ static int _zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id,
 
 	if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_FAILED) {
 		/* ensure propagation of failed status to new devices */
-		zfcp_erp_port_failed(port, "erpreo1", NULL);
+		zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_ERP_FAILED);
 		return -EIO;
 	}
 
 	return zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_PORT,
-				       port->adapter, port, NULL, id, ref);
+				       port->adapter, port, NULL, id, ref, 0);
 }
 
 /**
@@ -404,53 +398,88 @@ int zfcp_erp_port_reopen(struct zfcp_port *port, int clear, char *id, void *ref)
 	return retval;
 }
 
-static void zfcp_erp_unit_block(struct zfcp_unit *unit, int clear_mask)
+static void zfcp_erp_lun_block(struct scsi_device *sdev, int clear_mask)
 {
-	zfcp_erp_modify_unit_status(unit, "erublk1", NULL,
-				    ZFCP_STATUS_COMMON_UNBLOCKED | clear_mask,
-				    ZFCP_CLEAR);
+	zfcp_erp_clear_lun_status(sdev,
+				  ZFCP_STATUS_COMMON_UNBLOCKED | clear_mask);
 }
 
-static void _zfcp_erp_unit_reopen(struct zfcp_unit *unit, int clear, char *id,
-				  void *ref)
+static void _zfcp_erp_lun_reopen(struct scsi_device *sdev, int clear, char *id,
+				 void *ref, u32 act_status)
 {
-	struct zfcp_adapter *adapter = unit->port->adapter;
+	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
+	struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
 
-	zfcp_erp_unit_block(unit, clear);
+	zfcp_erp_lun_block(sdev, clear);
 
-	if (atomic_read(&unit->status) & ZFCP_STATUS_COMMON_ERP_FAILED)
+	if (atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_ERP_FAILED)
 		return;
 
-	zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_UNIT,
-				adapter, unit->port, unit, id, ref);
+	zfcp_erp_action_enqueue(ZFCP_ERP_ACTION_REOPEN_LUN, adapter,
+				zfcp_sdev->port, sdev, id, ref, act_status);
 }
 
 /**
- * zfcp_erp_unit_reopen - initiate reopen of a unit
- * @unit: unit to be reopened
- * @clear_mask: specifies flags in unit status to be cleared
+ * zfcp_erp_lun_reopen - initiate reopen of a LUN
+ * @sdev: SCSI device / LUN to be reopened
+ * @clear_mask: specifies flags in LUN status to be cleared
  * Return: 0 on success, < 0 on error
  */
-void zfcp_erp_unit_reopen(struct zfcp_unit *unit, int clear, char *id,
-			  void *ref)
+void zfcp_erp_lun_reopen(struct scsi_device *sdev, int clear, char *id,
+			 void *ref)
 {
 	unsigned long flags;
-	struct zfcp_port *port = unit->port;
+	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
+	struct zfcp_port *port = zfcp_sdev->port;
 	struct zfcp_adapter *adapter = port->adapter;
 
 	write_lock_irqsave(&adapter->erp_lock, flags);
-	_zfcp_erp_unit_reopen(unit, clear, id, ref);
+	_zfcp_erp_lun_reopen(sdev, clear, id, ref, 0);
 	write_unlock_irqrestore(&adapter->erp_lock, flags);
 }
 
-static int status_change_set(unsigned long mask, atomic_t *status)
+/**
+ * zfcp_erp_lun_shutdown - Shutdown LUN
+ * @sdev: SCSI device / LUN to shut down.
+ * @clear: Status flags to clear.
+ * @id: Id for debug trace event.
+ * @ref: Reference for debug trace event.
+ */
+void zfcp_erp_lun_shutdown(struct scsi_device *sdev, int clear, char *id,
+			   void *ref)
 {
-	return (atomic_read(status) ^ mask) & mask;
+	int flags = ZFCP_STATUS_COMMON_RUNNING | ZFCP_STATUS_COMMON_ERP_FAILED;
+	zfcp_erp_lun_reopen(sdev, clear | flags, id, ref);
 }
 
-static int status_change_clear(unsigned long mask, atomic_t *status)
+/**
+ * zfcp_erp_lun_shutdown_wait - Shutdown LUN and wait for erp completion
+ * @sdev: SCSI device / LUN to shut down.
+ * @id: Id for debug trace event.
+ *
+ * Do not acquire a reference for the LUN when creating the ERP
+ * action. It is safe, because this function waits for the ERP to
+ * complete first. This allows to shutdown the LUN, even when the SCSI
+ * device is in the state SDEV_DEL when scsi_device_get will fail.
+ */
+void zfcp_erp_lun_shutdown_wait(struct scsi_device *sdev, char *id)
 {
-	return atomic_read(status) & mask;
+	unsigned long flags;
+	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
+	struct zfcp_port *port = zfcp_sdev->port;
+	struct zfcp_adapter *adapter = port->adapter;
+	int clear = ZFCP_STATUS_COMMON_RUNNING | ZFCP_STATUS_COMMON_ERP_FAILED;
+
+	write_lock_irqsave(&adapter->erp_lock, flags);
+	_zfcp_erp_lun_reopen(sdev, clear, id, NULL, ZFCP_STATUS_ERP_NO_REF);
+	write_unlock_irqrestore(&adapter->erp_lock, flags);
+
+	zfcp_erp_wait(adapter);
+}
+
+static int status_change_set(unsigned long mask, atomic_t *status)
+{
+	return (atomic_read(status) ^ mask) & mask;
 }
 
 static void zfcp_erp_adapter_unblock(struct zfcp_adapter *adapter)
@@ -467,11 +496,13 @@ static void zfcp_erp_port_unblock(struct zfcp_port *port)
 	atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &port->status);
 }
 
-static void zfcp_erp_unit_unblock(struct zfcp_unit *unit)
+static void zfcp_erp_lun_unblock(struct scsi_device *sdev)
 {
-	if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &unit->status))
-		zfcp_dbf_rec_unit("eruubl1", NULL, unit);
-	atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &unit->status);
+	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
+
+	if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &zfcp_sdev->status))
+		zfcp_dbf_rec_lun("erlubl1", NULL, sdev);
+	atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &zfcp_sdev->status);
 }
 
 static void zfcp_erp_action_to_running(struct zfcp_erp_action *erp_action)
@@ -559,15 +590,14 @@ static void _zfcp_erp_port_reopen_all(struct zfcp_adapter *adapter,
 	read_unlock(&adapter->port_list_lock);
 }
 
-static void _zfcp_erp_unit_reopen_all(struct zfcp_port *port, int clear,
-				      char *id, void *ref)
+static void _zfcp_erp_lun_reopen_all(struct zfcp_port *port, int clear,
+				     char *id, void *ref)
 {
-	struct zfcp_unit *unit;
+	struct scsi_device *sdev;
 
-	read_lock(&port->unit_list_lock);
-	list_for_each_entry(unit, &port->unit_list, list)
-		_zfcp_erp_unit_reopen(unit, clear, id, ref);
-	read_unlock(&port->unit_list_lock);
+	shost_for_each_device(sdev, port->adapter->scsi_host)
+		if (sdev_to_zfcp(sdev)->port == port)
+			_zfcp_erp_lun_reopen(sdev, clear, id, ref, 0);
 }
 
 static void zfcp_erp_strategy_followup_failed(struct zfcp_erp_action *act)
@@ -582,8 +612,8 @@ static void zfcp_erp_strategy_followup_failed(struct zfcp_erp_action *act)
 	case ZFCP_ERP_ACTION_REOPEN_PORT:
 		_zfcp_erp_port_reopen(act->port, 0, "ersff_3", NULL);
 		break;
-	case ZFCP_ERP_ACTION_REOPEN_UNIT:
-		_zfcp_erp_unit_reopen(act->unit, 0, "ersff_4", NULL);
+	case ZFCP_ERP_ACTION_REOPEN_LUN:
+		_zfcp_erp_lun_reopen(act->sdev, 0, "ersff_4", NULL, 0);
 		break;
 	}
 }
@@ -598,7 +628,7 @@ static void zfcp_erp_strategy_followup_success(struct zfcp_erp_action *act)
 		_zfcp_erp_port_reopen(act->port, 0, "ersfs_2", NULL);
 		_zfcp_erp_port_reopen(act->port, 0, "ersfs_2", NULL);
 		break;
 		break;
 	case ZFCP_ERP_ACTION_REOPEN_PORT:
 	case ZFCP_ERP_ACTION_REOPEN_PORT:
-		_zfcp_erp_unit_reopen_all(act->port, 0, "ersfs_3", NULL);
+		_zfcp_erp_lun_reopen_all(act->port, 0, "ersfs_3", NULL);
 		break;
 		break;
 	}
 	}
 }
 }
@@ -742,9 +772,8 @@ static void zfcp_erp_adapter_strategy_close(struct zfcp_erp_action *act)
 	zfcp_fsf_req_dismiss_all(adapter);
 	zfcp_fsf_req_dismiss_all(adapter);
 	adapter->fsf_req_seq_no = 0;
 	adapter->fsf_req_seq_no = 0;
 	zfcp_fc_wka_ports_force_offline(adapter->gs);
 	zfcp_fc_wka_ports_force_offline(adapter->gs);
-	/* all ports and units are closed */
-	zfcp_erp_modify_adapter_status(adapter, "erascl1", NULL,
-				       ZFCP_STATUS_COMMON_OPEN, ZFCP_CLEAR);
+	/* all ports and LUNs are closed */
+	zfcp_erp_clear_adapter_status(adapter, ZFCP_STATUS_COMMON_OPEN);
 
 
 	atomic_clear_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK |
 	atomic_clear_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK |
 			  ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, &adapter->status);
 			  ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED, &adapter->status);
@@ -861,7 +890,7 @@ static int zfcp_erp_open_ptp_port(struct zfcp_erp_action *act)
 	struct zfcp_port *port = act->port;
 	struct zfcp_port *port = act->port;
 
 
 	if (port->wwpn != adapter->peer_wwpn) {
 	if (port->wwpn != adapter->peer_wwpn) {
-		zfcp_erp_port_failed(port, "eroptp1", NULL);
+		zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_ERP_FAILED);
 		return ZFCP_ERP_FAILED;
 		return ZFCP_ERP_FAILED;
 	}
 	}
 	port->d_id = adapter->peer_d_id;
 	port->d_id = adapter->peer_d_id;
@@ -933,82 +962,87 @@ close_init_done:
 	return zfcp_erp_port_strategy_open_common(erp_action);
 	return zfcp_erp_port_strategy_open_common(erp_action);
 }
 }
 
 
-static void zfcp_erp_unit_strategy_clearstati(struct zfcp_unit *unit)
+static void zfcp_erp_lun_strategy_clearstati(struct scsi_device *sdev)
 {
 {
+	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
+
 	atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED |
 	atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED |
-			  ZFCP_STATUS_UNIT_SHARED |
-			  ZFCP_STATUS_UNIT_READONLY,
-			  &unit->status);
+			  ZFCP_STATUS_LUN_SHARED | ZFCP_STATUS_LUN_READONLY,
+			  &zfcp_sdev->status);
 }
 }
 
 
-static int zfcp_erp_unit_strategy_close(struct zfcp_erp_action *erp_action)
+static int zfcp_erp_lun_strategy_close(struct zfcp_erp_action *erp_action)
 {
 {
-	int retval = zfcp_fsf_close_unit(erp_action);
+	int retval = zfcp_fsf_close_lun(erp_action);
 	if (retval == -ENOMEM)
 	if (retval == -ENOMEM)
 		return ZFCP_ERP_NOMEM;
 		return ZFCP_ERP_NOMEM;
-	erp_action->step = ZFCP_ERP_STEP_UNIT_CLOSING;
+	erp_action->step = ZFCP_ERP_STEP_LUN_CLOSING;
 	if (retval)
 	if (retval)
 		return ZFCP_ERP_FAILED;
 		return ZFCP_ERP_FAILED;
 	return ZFCP_ERP_CONTINUES;
 	return ZFCP_ERP_CONTINUES;
 }
 }
 
 
-static int zfcp_erp_unit_strategy_open(struct zfcp_erp_action *erp_action)
+static int zfcp_erp_lun_strategy_open(struct zfcp_erp_action *erp_action)
 {
 {
-	int retval = zfcp_fsf_open_unit(erp_action);
+	int retval = zfcp_fsf_open_lun(erp_action);
 	if (retval == -ENOMEM)
 	if (retval == -ENOMEM)
 		return ZFCP_ERP_NOMEM;
 		return ZFCP_ERP_NOMEM;
-	erp_action->step = ZFCP_ERP_STEP_UNIT_OPENING;
+	erp_action->step = ZFCP_ERP_STEP_LUN_OPENING;
 	if (retval)
 	if (retval)
 		return  ZFCP_ERP_FAILED;
 		return  ZFCP_ERP_FAILED;
 	return ZFCP_ERP_CONTINUES;
 	return ZFCP_ERP_CONTINUES;
 }
 }
 
 
-static int zfcp_erp_unit_strategy(struct zfcp_erp_action *erp_action)
+static int zfcp_erp_lun_strategy(struct zfcp_erp_action *erp_action)
 {
 {
-	struct zfcp_unit *unit = erp_action->unit;
+	struct scsi_device *sdev = erp_action->sdev;
+	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
 
 
 	switch (erp_action->step) {
 	switch (erp_action->step) {
 	case ZFCP_ERP_STEP_UNINITIALIZED:
 	case ZFCP_ERP_STEP_UNINITIALIZED:
-		zfcp_erp_unit_strategy_clearstati(unit);
-		if (atomic_read(&unit->status) & ZFCP_STATUS_COMMON_OPEN)
-			return zfcp_erp_unit_strategy_close(erp_action);
+		zfcp_erp_lun_strategy_clearstati(sdev);
+		if (atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_OPEN)
+			return zfcp_erp_lun_strategy_close(erp_action);
 		/* already closed, fall through */
 		/* already closed, fall through */
-	case ZFCP_ERP_STEP_UNIT_CLOSING:
-		if (atomic_read(&unit->status) & ZFCP_STATUS_COMMON_OPEN)
+	case ZFCP_ERP_STEP_LUN_CLOSING:
+		if (atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_OPEN)
 			return ZFCP_ERP_FAILED;
 			return ZFCP_ERP_FAILED;
 		if (erp_action->status & ZFCP_STATUS_ERP_CLOSE_ONLY)
 		if (erp_action->status & ZFCP_STATUS_ERP_CLOSE_ONLY)
 			return ZFCP_ERP_EXIT;
 			return ZFCP_ERP_EXIT;
-		return zfcp_erp_unit_strategy_open(erp_action);
+		return zfcp_erp_lun_strategy_open(erp_action);
 
 
-	case ZFCP_ERP_STEP_UNIT_OPENING:
-		if (atomic_read(&unit->status) & ZFCP_STATUS_COMMON_OPEN)
+	case ZFCP_ERP_STEP_LUN_OPENING:
+		if (atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_OPEN)
 			return ZFCP_ERP_SUCCEEDED;
 			return ZFCP_ERP_SUCCEEDED;
 	}
 	}
 	return ZFCP_ERP_FAILED;
 	return ZFCP_ERP_FAILED;
 }
 }
 
 
-static int zfcp_erp_strategy_check_unit(struct zfcp_unit *unit, int result)
+static int zfcp_erp_strategy_check_lun(struct scsi_device *sdev, int result)
 {
 {
+	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
+
 	switch (result) {
 	switch (result) {
 	case ZFCP_ERP_SUCCEEDED :
 	case ZFCP_ERP_SUCCEEDED :
-		atomic_set(&unit->erp_counter, 0);
-		zfcp_erp_unit_unblock(unit);
+		atomic_set(&zfcp_sdev->erp_counter, 0);
+		zfcp_erp_lun_unblock(sdev);
 		break;
 		break;
 	case ZFCP_ERP_FAILED :
 	case ZFCP_ERP_FAILED :
-		atomic_inc(&unit->erp_counter);
-		if (atomic_read(&unit->erp_counter) > ZFCP_MAX_ERPS) {
-			dev_err(&unit->port->adapter->ccw_device->dev,
-				"ERP failed for unit 0x%016Lx on "
+		atomic_inc(&zfcp_sdev->erp_counter);
+		if (atomic_read(&zfcp_sdev->erp_counter) > ZFCP_MAX_ERPS) {
+			dev_err(&zfcp_sdev->port->adapter->ccw_device->dev,
+				"ERP failed for LUN 0x%016Lx on "
 				"port 0x%016Lx\n",
 				"port 0x%016Lx\n",
-				(unsigned long long)unit->fcp_lun,
-				(unsigned long long)unit->port->wwpn);
-			zfcp_erp_unit_failed(unit, "erusck1", NULL);
+				(unsigned long long)zfcp_scsi_dev_lun(sdev),
+				(unsigned long long)zfcp_sdev->port->wwpn);
+			zfcp_erp_set_lun_status(sdev,
+						ZFCP_STATUS_COMMON_ERP_FAILED);
 		}
 		}
 		break;
 		break;
 	}
 	}
 
 
-	if (atomic_read(&unit->status) & ZFCP_STATUS_COMMON_ERP_FAILED) {
-		zfcp_erp_unit_block(unit, 0);
+	if (atomic_read(&zfcp_sdev->status) & ZFCP_STATUS_COMMON_ERP_FAILED) {
+		zfcp_erp_lun_block(sdev, 0);
 		result = ZFCP_ERP_EXIT;
 		result = ZFCP_ERP_EXIT;
 	}
 	}
 	return result;
 	return result;
@@ -1032,7 +1066,8 @@ static int zfcp_erp_strategy_check_port(struct zfcp_port *port, int result)
 			dev_err(&port->adapter->ccw_device->dev,
 			dev_err(&port->adapter->ccw_device->dev,
 				"ERP failed for remote port 0x%016Lx\n",
 				"ERP failed for remote port 0x%016Lx\n",
 				(unsigned long long)port->wwpn);
 				(unsigned long long)port->wwpn);
-			zfcp_erp_port_failed(port, "erpsck1", NULL);
+			zfcp_erp_set_port_status(port,
+					 ZFCP_STATUS_COMMON_ERP_FAILED);
 		}
 		}
 		break;
 		break;
 	}
 	}
@@ -1059,7 +1094,8 @@ static int zfcp_erp_strategy_check_adapter(struct zfcp_adapter *adapter,
 			dev_err(&adapter->ccw_device->dev,
 			dev_err(&adapter->ccw_device->dev,
 				"ERP cannot recover an error "
 				"ERP cannot recover an error "
 				"on the FCP device\n");
 				"on the FCP device\n");
-			zfcp_erp_adapter_failed(adapter, "erasck1", NULL);
+			zfcp_erp_set_adapter_status(adapter,
+					    ZFCP_STATUS_COMMON_ERP_FAILED);
 		}
 		}
 		break;
 		break;
 	}
 	}
@@ -1076,12 +1112,12 @@ static int zfcp_erp_strategy_check_target(struct zfcp_erp_action *erp_action,
 {
 {
 	struct zfcp_adapter *adapter = erp_action->adapter;
 	struct zfcp_adapter *adapter = erp_action->adapter;
 	struct zfcp_port *port = erp_action->port;
 	struct zfcp_port *port = erp_action->port;
-	struct zfcp_unit *unit = erp_action->unit;
+	struct scsi_device *sdev = erp_action->sdev;
 
 
 	switch (erp_action->action) {
 	switch (erp_action->action) {
 
 
-	case ZFCP_ERP_ACTION_REOPEN_UNIT:
-		result = zfcp_erp_strategy_check_unit(unit, result);
+	case ZFCP_ERP_ACTION_REOPEN_LUN:
+		result = zfcp_erp_strategy_check_lun(sdev, result);
 		break;
 		break;
 
 
 	case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
 	case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
@@ -1116,7 +1152,8 @@ static int zfcp_erp_strategy_statechange(struct zfcp_erp_action *act, int ret)
 	int action = act->action;
 	int action = act->action;
 	struct zfcp_adapter *adapter = act->adapter;
 	struct zfcp_adapter *adapter = act->adapter;
 	struct zfcp_port *port = act->port;
 	struct zfcp_port *port = act->port;
-	struct zfcp_unit *unit = act->unit;
+	struct scsi_device *sdev = act->sdev;
+	struct zfcp_scsi_dev *zfcp_sdev;
 	u32 erp_status = act->status;
 	u32 erp_status = act->status;
 
 
 	switch (action) {
 	switch (action) {
@@ -1139,11 +1176,12 @@ static int zfcp_erp_strategy_statechange(struct zfcp_erp_action *act, int ret)
 		}
 		}
 		break;
 		break;
 
 
-	case ZFCP_ERP_ACTION_REOPEN_UNIT:
-		if (zfcp_erp_strat_change_det(&unit->status, erp_status)) {
-			_zfcp_erp_unit_reopen(unit,
-					      ZFCP_STATUS_COMMON_ERP_FAILED,
-					      "ersscg3", NULL);
+	case ZFCP_ERP_ACTION_REOPEN_LUN:
+		zfcp_sdev = sdev_to_zfcp(sdev);
+		if (zfcp_erp_strat_change_det(&zfcp_sdev->status, erp_status)) {
+			_zfcp_erp_lun_reopen(sdev,
+					     ZFCP_STATUS_COMMON_ERP_FAILED,
+					     "ersscg3", NULL, 0);
 			return ZFCP_ERP_EXIT;
 			return ZFCP_ERP_EXIT;
 		}
 		}
 		break;
 		break;
@@ -1154,6 +1192,7 @@ static int zfcp_erp_strategy_statechange(struct zfcp_erp_action *act, int ret)
 static void zfcp_erp_action_dequeue(struct zfcp_erp_action *erp_action)
 static void zfcp_erp_action_dequeue(struct zfcp_erp_action *erp_action)
 {
 {
 	struct zfcp_adapter *adapter = erp_action->adapter;
 	struct zfcp_adapter *adapter = erp_action->adapter;
+	struct zfcp_scsi_dev *zfcp_sdev;
 
 
 	adapter->erp_total_count--;
 	adapter->erp_total_count--;
 	if (erp_action->status & ZFCP_STATUS_ERP_LOWMEM) {
 	if (erp_action->status & ZFCP_STATUS_ERP_LOWMEM) {
@@ -1165,9 +1204,10 @@ static void zfcp_erp_action_dequeue(struct zfcp_erp_action *erp_action)
 	zfcp_dbf_rec_action("eractd1", erp_action);
 	zfcp_dbf_rec_action("eractd1", erp_action);
 
 
 	switch (erp_action->action) {
 	switch (erp_action->action) {
-	case ZFCP_ERP_ACTION_REOPEN_UNIT:
+	case ZFCP_ERP_ACTION_REOPEN_LUN:
+		zfcp_sdev = sdev_to_zfcp(erp_action->sdev);
 		atomic_clear_mask(ZFCP_STATUS_COMMON_ERP_INUSE,
 		atomic_clear_mask(ZFCP_STATUS_COMMON_ERP_INUSE,
-				  &erp_action->unit->status);
+				  &zfcp_sdev->status);
 		break;
 		break;
 
 
 	case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
 	case ZFCP_ERP_ACTION_REOPEN_PORT_FORCED:
@@ -1187,11 +1227,12 @@ static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result)
 {
 {
 	struct zfcp_adapter *adapter = act->adapter;
 	struct zfcp_adapter *adapter = act->adapter;
 	struct zfcp_port *port = act->port;
 	struct zfcp_port *port = act->port;
-	struct zfcp_unit *unit = act->unit;
+	struct scsi_device *sdev = act->sdev;
 
 
 	switch (act->action) {
 	switch (act->action) {
-	case ZFCP_ERP_ACTION_REOPEN_UNIT:
-		put_device(&unit->dev);
+	case ZFCP_ERP_ACTION_REOPEN_LUN:
+		if (!(act->status & ZFCP_STATUS_ERP_NO_REF))
+			scsi_device_put(sdev);
 		break;
 		break;
 
 
 	case ZFCP_ERP_ACTION_REOPEN_PORT:
 	case ZFCP_ERP_ACTION_REOPEN_PORT:
@@ -1222,8 +1263,8 @@ static int zfcp_erp_strategy_do_action(struct zfcp_erp_action *erp_action)
 		return zfcp_erp_port_forced_strategy(erp_action);
 		return zfcp_erp_port_forced_strategy(erp_action);
 	case ZFCP_ERP_ACTION_REOPEN_PORT:
 	case ZFCP_ERP_ACTION_REOPEN_PORT:
 		return zfcp_erp_port_strategy(erp_action);
 		return zfcp_erp_port_strategy(erp_action);
-	case ZFCP_ERP_ACTION_REOPEN_UNIT:
-		return zfcp_erp_unit_strategy(erp_action);
+	case ZFCP_ERP_ACTION_REOPEN_LUN:
+		return zfcp_erp_lun_strategy(erp_action);
 	}
 	}
 	return ZFCP_ERP_FAILED;
 	return ZFCP_ERP_FAILED;
 }
 }
@@ -1375,42 +1416,6 @@ void zfcp_erp_thread_kill(struct zfcp_adapter *adapter)
 	WARN_ON(!list_empty(&adapter->erp_running_head));
 	WARN_ON(!list_empty(&adapter->erp_running_head));
 }
 }
 
 
-/**
- * zfcp_erp_adapter_failed - Set adapter status to failed.
- * @adapter: Failed adapter.
- * @id: Event id for debug trace.
- * @ref: Reference for debug trace.
- */
-void zfcp_erp_adapter_failed(struct zfcp_adapter *adapter, char *id, void *ref)
-{
-	zfcp_erp_modify_adapter_status(adapter, id, ref,
-				       ZFCP_STATUS_COMMON_ERP_FAILED, ZFCP_SET);
-}
-
-/**
- * zfcp_erp_port_failed - Set port status to failed.
- * @port: Failed port.
- * @id: Event id for debug trace.
- * @ref: Reference for debug trace.
- */
-void zfcp_erp_port_failed(struct zfcp_port *port, char *id, void *ref)
-{
-	zfcp_erp_modify_port_status(port, id, ref,
-				    ZFCP_STATUS_COMMON_ERP_FAILED, ZFCP_SET);
-}
-
-/**
- * zfcp_erp_unit_failed - Set unit status to failed.
- * @unit: Failed unit.
- * @id: Event id for debug trace.
- * @ref: Reference for debug trace.
- */
-void zfcp_erp_unit_failed(struct zfcp_unit *unit, char *id, void *ref)
-{
-	zfcp_erp_modify_unit_status(unit, id, ref,
-				    ZFCP_STATUS_COMMON_ERP_FAILED, ZFCP_SET);
-}
-
 /**
 /**
  * zfcp_erp_wait - wait for completion of error recovery on an adapter
  * zfcp_erp_wait - wait for completion of error recovery on an adapter
  * @adapter: adapter for which to wait for completion of its error recovery
  * @adapter: adapter for which to wait for completion of its error recovery
@@ -1423,210 +1428,148 @@ void zfcp_erp_wait(struct zfcp_adapter *adapter)
 }
 }
 
 
 /**
 /**
- * zfcp_erp_modify_adapter_status - change adapter status bits
+ * zfcp_erp_set_adapter_status - set adapter status bits
  * @adapter: adapter to change the status
  * @adapter: adapter to change the status
- * @id: id for the debug trace
- * @ref: reference for the debug trace
  * @mask: status bits to change
  * @mask: status bits to change
- * @set_or_clear: ZFCP_SET or ZFCP_CLEAR
  *
  *
- * Changes in common status bits are propagated to attached ports and units.
+ * Changes in common status bits are propagated to attached ports and LUNs.
  */
  */
-void zfcp_erp_modify_adapter_status(struct zfcp_adapter *adapter, char *id,
-				    void *ref, u32 mask, int set_or_clear)
+void zfcp_erp_set_adapter_status(struct zfcp_adapter *adapter, u32 mask)
 {
 {
 	struct zfcp_port *port;
 	struct zfcp_port *port;
+	struct scsi_device *sdev;
 	unsigned long flags;
 	unsigned long flags;
 	u32 common_mask = mask & ZFCP_COMMON_FLAGS;
 	u32 common_mask = mask & ZFCP_COMMON_FLAGS;
 
 
-	if (set_or_clear == ZFCP_SET) {
-		if (status_change_set(mask, &adapter->status))
-			zfcp_dbf_rec_adapter(id, ref, adapter->dbf);
-		atomic_set_mask(mask, &adapter->status);
-	} else {
-		if (status_change_clear(mask, &adapter->status))
-			zfcp_dbf_rec_adapter(id, ref, adapter->dbf);
-		atomic_clear_mask(mask, &adapter->status);
-		if (mask & ZFCP_STATUS_COMMON_ERP_FAILED)
-			atomic_set(&adapter->erp_counter, 0);
-	}
+	atomic_set_mask(mask, &adapter->status);
 
 
-	if (common_mask) {
-		read_lock_irqsave(&adapter->port_list_lock, flags);
-		list_for_each_entry(port, &adapter->port_list, list)
-			zfcp_erp_modify_port_status(port, id, ref, common_mask,
-						    set_or_clear);
-		read_unlock_irqrestore(&adapter->port_list_lock, flags);
-	}
+	if (!common_mask)
+		return;
+
+	read_lock_irqsave(&adapter->port_list_lock, flags);
+	list_for_each_entry(port, &adapter->port_list, list)
+		atomic_set_mask(common_mask, &port->status);
+	read_unlock_irqrestore(&adapter->port_list_lock, flags);
+
+	shost_for_each_device(sdev, adapter->scsi_host)
+		atomic_set_mask(common_mask, &sdev_to_zfcp(sdev)->status);
 }
 }
 
 
 /**
 /**
- * zfcp_erp_modify_port_status - change port status bits
- * @port: port to change the status bits
- * @id: id for the debug trace
- * @ref: reference for the debug trace
+ * zfcp_erp_clear_adapter_status - clear adapter status bits
+ * @adapter: adapter to change the status
  * @mask: status bits to change
  * @mask: status bits to change
- * @set_or_clear: ZFCP_SET or ZFCP_CLEAR
  *
  *
- * Changes in common status bits are propagated to attached units.
+ * Changes in common status bits are propagated to attached ports and LUNs.
  */
  */
-void zfcp_erp_modify_port_status(struct zfcp_port *port, char *id, void *ref,
-				 u32 mask, int set_or_clear)
+void zfcp_erp_clear_adapter_status(struct zfcp_adapter *adapter, u32 mask)
 {
 {
-	struct zfcp_unit *unit;
+	struct zfcp_port *port;
+	struct scsi_device *sdev;
 	unsigned long flags;
 	unsigned long flags;
 	u32 common_mask = mask & ZFCP_COMMON_FLAGS;
 	u32 common_mask = mask & ZFCP_COMMON_FLAGS;
+	u32 clear_counter = mask & ZFCP_STATUS_COMMON_ERP_FAILED;
+
+	atomic_clear_mask(mask, &adapter->status);
+
+	if (!common_mask)
+		return;
+
+	if (clear_counter)
+		atomic_set(&adapter->erp_counter, 0);
 
 
-	if (set_or_clear == ZFCP_SET) {
-		if (status_change_set(mask, &port->status))
-			zfcp_dbf_rec_port(id, ref, port);
-		atomic_set_mask(mask, &port->status);
-	} else {
-		if (status_change_clear(mask, &port->status))
-			zfcp_dbf_rec_port(id, ref, port);
-		atomic_clear_mask(mask, &port->status);
-		if (mask & ZFCP_STATUS_COMMON_ERP_FAILED)
+	read_lock_irqsave(&adapter->port_list_lock, flags);
+	list_for_each_entry(port, &adapter->port_list, list) {
+		atomic_clear_mask(common_mask, &port->status);
+		if (clear_counter)
 			atomic_set(&port->erp_counter, 0);
 			atomic_set(&port->erp_counter, 0);
 	}
 	}
+	read_unlock_irqrestore(&adapter->port_list_lock, flags);
 
 
-	if (common_mask) {
-		read_lock_irqsave(&port->unit_list_lock, flags);
-		list_for_each_entry(unit, &port->unit_list, list)
-			zfcp_erp_modify_unit_status(unit, id, ref, common_mask,
-						    set_or_clear);
-		read_unlock_irqrestore(&port->unit_list_lock, flags);
+	shost_for_each_device(sdev, adapter->scsi_host) {
+		atomic_clear_mask(common_mask, &sdev_to_zfcp(sdev)->status);
+		if (clear_counter)
+			atomic_set(&sdev_to_zfcp(sdev)->erp_counter, 0);
 	}
 	}
 }
 }
 
 
 /**
 /**
- * zfcp_erp_modify_unit_status - change unit status bits
- * @unit: unit to change the status bits
- * @id: id for the debug trace
- * @ref: reference for the debug trace
+ * zfcp_erp_set_port_status - set port status bits
+ * @port: port to change the status
  * @mask: status bits to change
  * @mask: status bits to change
- * @set_or_clear: ZFCP_SET or ZFCP_CLEAR
- */
-void zfcp_erp_modify_unit_status(struct zfcp_unit *unit, char *id, void *ref,
-				 u32 mask, int set_or_clear)
-{
-	if (set_or_clear == ZFCP_SET) {
-		if (status_change_set(mask, &unit->status))
-			zfcp_dbf_rec_unit(id, ref, unit);
-		atomic_set_mask(mask, &unit->status);
-	} else {
-		if (status_change_clear(mask, &unit->status))
-			zfcp_dbf_rec_unit(id, ref, unit);
-		atomic_clear_mask(mask, &unit->status);
-		if (mask & ZFCP_STATUS_COMMON_ERP_FAILED) {
-			atomic_set(&unit->erp_counter, 0);
-		}
-	}
-}
-
-/**
- * zfcp_erp_port_boxed - Mark port as "boxed" and start ERP
- * @port: The "boxed" port.
- * @id: The debug trace id.
- * @id: Reference for the debug trace.
+ *
+ * Changes in common status bits are propagated to attached LUNs.
  */
  */
-void zfcp_erp_port_boxed(struct zfcp_port *port, char *id, void *ref)
+void zfcp_erp_set_port_status(struct zfcp_port *port, u32 mask)
 {
 {
-	zfcp_erp_modify_port_status(port, id, ref,
-				    ZFCP_STATUS_COMMON_ACCESS_BOXED, ZFCP_SET);
-	zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED, id, ref);
-}
+	struct scsi_device *sdev;
+	u32 common_mask = mask & ZFCP_COMMON_FLAGS;
 
 
-/**
- * zfcp_erp_unit_boxed - Mark unit as "boxed" and start ERP
- * @port: The "boxed" unit.
- * @id: The debug trace id.
- * @id: Reference for the debug trace.
- */
-void zfcp_erp_unit_boxed(struct zfcp_unit *unit, char *id, void *ref)
-{
-	zfcp_erp_modify_unit_status(unit, id, ref,
-				    ZFCP_STATUS_COMMON_ACCESS_BOXED, ZFCP_SET);
-	zfcp_erp_unit_reopen(unit, ZFCP_STATUS_COMMON_ERP_FAILED, id, ref);
-}
+	atomic_set_mask(mask, &port->status);
 
 
-/**
- * zfcp_erp_port_access_denied - Adapter denied access to port.
- * @port: port where access has been denied
- * @id: id for debug trace
- * @ref: reference for debug trace
- *
- * Since the adapter has denied access, stop using the port and the
- * attached units.
- */
-void zfcp_erp_port_access_denied(struct zfcp_port *port, char *id, void *ref)
-{
-	zfcp_erp_modify_port_status(port, id, ref,
-				    ZFCP_STATUS_COMMON_ERP_FAILED |
-				    ZFCP_STATUS_COMMON_ACCESS_DENIED, ZFCP_SET);
+	if (!common_mask)
+		return;
+
+	shost_for_each_device(sdev, port->adapter->scsi_host)
+		if (sdev_to_zfcp(sdev)->port == port)
+			atomic_set_mask(common_mask,
+					&sdev_to_zfcp(sdev)->status);
 }
 }
 
 
 /**
 /**
- * zfcp_erp_unit_access_denied - Adapter denied access to unit.
- * @unit: unit where access has been denied
- * @id: id for debug trace
- * @ref: reference for debug trace
+ * zfcp_erp_clear_port_status - clear port status bits
+ * @port: adapter to change the status
+ * @mask: status bits to change
  *
  *
- * Since the adapter has denied access, stop using the unit.
+ * Changes in common status bits are propagated to attached LUNs.
  */
  */
-void zfcp_erp_unit_access_denied(struct zfcp_unit *unit, char *id, void *ref)
+void zfcp_erp_clear_port_status(struct zfcp_port *port, u32 mask)
 {
 {
-	zfcp_erp_modify_unit_status(unit, id, ref,
-				    ZFCP_STATUS_COMMON_ERP_FAILED |
-				    ZFCP_STATUS_COMMON_ACCESS_DENIED, ZFCP_SET);
-}
+	struct scsi_device *sdev;
+	u32 common_mask = mask & ZFCP_COMMON_FLAGS;
+	u32 clear_counter = mask & ZFCP_STATUS_COMMON_ERP_FAILED;
 
 
-static void zfcp_erp_unit_access_changed(struct zfcp_unit *unit, char *id,
-					 void *ref)
-{
-	int status = atomic_read(&unit->status);
-	if (!(status & (ZFCP_STATUS_COMMON_ACCESS_DENIED |
-			ZFCP_STATUS_COMMON_ACCESS_BOXED)))
+	atomic_clear_mask(mask, &port->status);
+
+	if (!common_mask)
 		return;
 		return;
 
 
-	zfcp_erp_unit_reopen(unit, ZFCP_STATUS_COMMON_ERP_FAILED, id, ref);
+	if (clear_counter)
+		atomic_set(&port->erp_counter, 0);
+
+	shost_for_each_device(sdev, port->adapter->scsi_host)
+		if (sdev_to_zfcp(sdev)->port == port) {
+			atomic_clear_mask(common_mask,
+					  &sdev_to_zfcp(sdev)->status);
+			if (clear_counter)
+				atomic_set(&sdev_to_zfcp(sdev)->erp_counter, 0);
+		}
 }
 }
 
 
-static void zfcp_erp_port_access_changed(struct zfcp_port *port, char *id,
-					 void *ref)
+/**
+ * zfcp_erp_set_lun_status - set lun status bits
+ * @sdev: SCSI device / lun to set the status bits
+ * @mask: status bits to change
+ */
+void zfcp_erp_set_lun_status(struct scsi_device *sdev, u32 mask)
 {
 {
-	struct zfcp_unit *unit;
-	unsigned long flags;
-	int status = atomic_read(&port->status);
+	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
 
 
-	if (!(status & (ZFCP_STATUS_COMMON_ACCESS_DENIED |
-			ZFCP_STATUS_COMMON_ACCESS_BOXED))) {
-		read_lock_irqsave(&port->unit_list_lock, flags);
-		list_for_each_entry(unit, &port->unit_list, list)
-				    zfcp_erp_unit_access_changed(unit, id, ref);
-		read_unlock_irqrestore(&port->unit_list_lock, flags);
-		return;
-	}
-
-	zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED, id, ref);
+	atomic_set_mask(mask, &zfcp_sdev->status);
 }
 }
 
 
 /**
 /**
- * zfcp_erp_adapter_access_changed - Process change in adapter ACT
- * @adapter: Adapter where the Access Control Table (ACT) changed
- * @id: Id for debug trace
- * @ref: Reference for debug trace
+ * zfcp_erp_clear_lun_status - clear lun status bits
+ * @sdev: SCSi device / lun to clear the status bits
+ * @mask: status bits to change
  */
  */
-void zfcp_erp_adapter_access_changed(struct zfcp_adapter *adapter, char *id,
-				     void *ref)
+void zfcp_erp_clear_lun_status(struct scsi_device *sdev, u32 mask)
 {
 {
-	unsigned long flags;
-	struct zfcp_port *port;
+	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
 
 
-	if (adapter->connection_features & FSF_FEATURE_NPIV_MODE)
-		return;
+	atomic_clear_mask(mask, &zfcp_sdev->status);
 
 
-	read_lock_irqsave(&adapter->port_list_lock, flags);
-	list_for_each_entry(port, &adapter->port_list, list)
-		zfcp_erp_port_access_changed(port, id, ref);
-	read_unlock_irqrestore(&adapter->port_list_lock, flags);
+	if (mask & ZFCP_STATUS_COMMON_ERP_FAILED)
+		atomic_set(&zfcp_sdev->erp_counter, 0);
 }
 }
+

+ 33 - 30
drivers/s390/scsi/zfcp_ext.h

@@ -15,12 +15,10 @@
 #include "zfcp_fc.h"
 #include "zfcp_fc.h"
 
 
 /* zfcp_aux.c */
 /* zfcp_aux.c */
-extern struct zfcp_unit *zfcp_get_unit_by_lun(struct zfcp_port *, u64);
 extern struct zfcp_port *zfcp_get_port_by_wwpn(struct zfcp_adapter *, u64);
 extern struct zfcp_port *zfcp_get_port_by_wwpn(struct zfcp_adapter *, u64);
 extern struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *);
 extern struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *);
 extern struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *, u64, u32,
 extern struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *, u64, u32,
 					   u32);
 					   u32);
-extern struct zfcp_unit *zfcp_unit_enqueue(struct zfcp_port *, u64);
 extern void zfcp_sg_free_table(struct scatterlist *, int);
 extern void zfcp_sg_free_table(struct scatterlist *, int);
 extern int zfcp_sg_setup_table(struct scatterlist *, int);
 extern int zfcp_sg_setup_table(struct scatterlist *, int);
 extern void zfcp_device_unregister(struct device *,
 extern void zfcp_device_unregister(struct device *,
@@ -36,6 +34,14 @@ extern void zfcp_ccw_adapter_put(struct zfcp_adapter *);
 
 
 /* zfcp_cfdc.c */
 /* zfcp_cfdc.c */
 extern struct miscdevice zfcp_cfdc_misc;
 extern struct miscdevice zfcp_cfdc_misc;
+extern void zfcp_cfdc_port_denied(struct zfcp_port *, union fsf_status_qual *);
+extern void zfcp_cfdc_lun_denied(struct scsi_device *, union fsf_status_qual *);
+extern void zfcp_cfdc_lun_shrng_vltn(struct scsi_device *,
+				     union fsf_status_qual *);
+extern int zfcp_cfdc_open_lun_eval(struct scsi_device *,
+				   struct fsf_qtcb_bottom_support *);
+extern void zfcp_cfdc_adapter_access_changed(struct zfcp_adapter *);
+
 
 
 /* zfcp_dbf.c */
 /* zfcp_dbf.c */
 extern int zfcp_dbf_adapter_register(struct zfcp_adapter *);
 extern int zfcp_dbf_adapter_register(struct zfcp_adapter *);
@@ -44,10 +50,10 @@ extern void zfcp_dbf_rec_thread(char *, struct zfcp_dbf *);
 extern void zfcp_dbf_rec_thread_lock(char *, struct zfcp_dbf *);
 extern void zfcp_dbf_rec_thread_lock(char *, struct zfcp_dbf *);
 extern void zfcp_dbf_rec_adapter(char *, void *, struct zfcp_dbf *);
 extern void zfcp_dbf_rec_adapter(char *, void *, struct zfcp_dbf *);
 extern void zfcp_dbf_rec_port(char *, void *, struct zfcp_port *);
 extern void zfcp_dbf_rec_port(char *, void *, struct zfcp_port *);
-extern void zfcp_dbf_rec_unit(char *, void *, struct zfcp_unit *);
+extern void zfcp_dbf_rec_lun(char *, void *, struct scsi_device *);
 extern void zfcp_dbf_rec_trigger(char *, void *, u8, u8, void *,
 extern void zfcp_dbf_rec_trigger(char *, void *, u8, u8, void *,
 				 struct zfcp_adapter *, struct zfcp_port *,
 				 struct zfcp_adapter *, struct zfcp_port *,
-				 struct zfcp_unit *);
+				 struct scsi_device *);
 extern void zfcp_dbf_rec_action(char *, struct zfcp_erp_action *);
 extern void zfcp_dbf_rec_action(char *, struct zfcp_erp_action *);
 extern void _zfcp_dbf_hba_fsf_response(const char *, int, struct zfcp_fsf_req *,
 extern void _zfcp_dbf_hba_fsf_response(const char *, int, struct zfcp_fsf_req *,
 				       struct zfcp_dbf *);
 				       struct zfcp_dbf *);
@@ -65,34 +71,26 @@ extern void _zfcp_dbf_scsi(const char *, const char *, int, struct zfcp_dbf *,
 			   unsigned long);
 			   unsigned long);
 
 
 /* zfcp_erp.c */
 /* zfcp_erp.c */
-extern void zfcp_erp_modify_adapter_status(struct zfcp_adapter *, char *,
-					   void *, u32, int);
+extern void zfcp_erp_set_adapter_status(struct zfcp_adapter *, u32);
+extern void zfcp_erp_clear_adapter_status(struct zfcp_adapter *, u32);
 extern void zfcp_erp_adapter_reopen(struct zfcp_adapter *, int, char *, void *);
 extern void zfcp_erp_adapter_reopen(struct zfcp_adapter *, int, char *, void *);
 extern void zfcp_erp_adapter_shutdown(struct zfcp_adapter *, int, char *,
 extern void zfcp_erp_adapter_shutdown(struct zfcp_adapter *, int, char *,
 				      void *);
 				      void *);
-extern void zfcp_erp_adapter_failed(struct zfcp_adapter *, char *, void *);
-extern void zfcp_erp_modify_port_status(struct zfcp_port *, char *, void *, u32,
-					int);
+extern void zfcp_erp_set_port_status(struct zfcp_port *, u32);
+extern void zfcp_erp_clear_port_status(struct zfcp_port *, u32);
 extern int  zfcp_erp_port_reopen(struct zfcp_port *, int, char *, void *);
 extern int  zfcp_erp_port_reopen(struct zfcp_port *, int, char *, void *);
 extern void zfcp_erp_port_shutdown(struct zfcp_port *, int, char *, void *);
 extern void zfcp_erp_port_shutdown(struct zfcp_port *, int, char *, void *);
 extern void zfcp_erp_port_forced_reopen(struct zfcp_port *, int, char *,
 extern void zfcp_erp_port_forced_reopen(struct zfcp_port *, int, char *,
 					void *);
 					void *);
-extern void zfcp_erp_port_failed(struct zfcp_port *, char *, void *);
-extern void zfcp_erp_modify_unit_status(struct zfcp_unit *, char *, void *, u32,
-					int);
-extern void zfcp_erp_unit_reopen(struct zfcp_unit *, int, char *, void *);
-extern void zfcp_erp_unit_shutdown(struct zfcp_unit *, int, char *, void *);
-extern void zfcp_erp_unit_failed(struct zfcp_unit *, char *, void *);
+extern void zfcp_erp_set_lun_status(struct scsi_device *, u32);
+extern void zfcp_erp_clear_lun_status(struct scsi_device *, u32);
+extern void zfcp_erp_lun_reopen(struct scsi_device *, int, char *, void *);
+extern void zfcp_erp_lun_shutdown(struct scsi_device *, int, char *, void *);
+extern void zfcp_erp_lun_shutdown_wait(struct scsi_device *, char *);
 extern int  zfcp_erp_thread_setup(struct zfcp_adapter *);
 extern int  zfcp_erp_thread_setup(struct zfcp_adapter *);
 extern void zfcp_erp_thread_kill(struct zfcp_adapter *);
 extern void zfcp_erp_thread_kill(struct zfcp_adapter *);
 extern void zfcp_erp_wait(struct zfcp_adapter *);
 extern void zfcp_erp_wait(struct zfcp_adapter *);
 extern void zfcp_erp_notify(struct zfcp_erp_action *, unsigned long);
 extern void zfcp_erp_notify(struct zfcp_erp_action *, unsigned long);
-extern void zfcp_erp_port_boxed(struct zfcp_port *, char *, void *);
-extern void zfcp_erp_unit_boxed(struct zfcp_unit *, char *, void *);
-extern void zfcp_erp_port_access_denied(struct zfcp_port *, char *, void *);
-extern void zfcp_erp_unit_access_denied(struct zfcp_unit *, char *, void *);
-extern void zfcp_erp_adapter_access_changed(struct zfcp_adapter *, char *,
-					    void *);
 extern void zfcp_erp_timeout_handler(unsigned long);
 extern void zfcp_erp_timeout_handler(unsigned long);
 
 
 /* zfcp_fc.c */
 /* zfcp_fc.c */
@@ -118,8 +116,8 @@ extern int zfcp_fsf_open_wka_port(struct zfcp_fc_wka_port *);
 extern int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *);
 extern int zfcp_fsf_close_wka_port(struct zfcp_fc_wka_port *);
 extern int zfcp_fsf_close_port(struct zfcp_erp_action *);
 extern int zfcp_fsf_close_port(struct zfcp_erp_action *);
 extern int zfcp_fsf_close_physical_port(struct zfcp_erp_action *);
 extern int zfcp_fsf_close_physical_port(struct zfcp_erp_action *);
-extern int zfcp_fsf_open_unit(struct zfcp_erp_action *);
-extern int zfcp_fsf_close_unit(struct zfcp_erp_action *);
+extern int zfcp_fsf_open_lun(struct zfcp_erp_action *);
+extern int zfcp_fsf_close_lun(struct zfcp_erp_action *);
 extern int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *);
 extern int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *);
 extern int zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *,
 extern int zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *,
 					      struct fsf_qtcb_bottom_config *);
 					      struct fsf_qtcb_bottom_config *);
@@ -135,12 +133,10 @@ extern int zfcp_fsf_send_ct(struct zfcp_fc_wka_port *, struct zfcp_fsf_ct_els *,
 			    mempool_t *, unsigned int);
 			    mempool_t *, unsigned int);
 extern int zfcp_fsf_send_els(struct zfcp_adapter *, u32,
 extern int zfcp_fsf_send_els(struct zfcp_adapter *, u32,
 			     struct zfcp_fsf_ct_els *, unsigned int);
 			     struct zfcp_fsf_ct_els *, unsigned int);
-extern int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *,
-					  struct scsi_cmnd *);
+extern int zfcp_fsf_fcp_cmnd(struct scsi_cmnd *);
 extern void zfcp_fsf_req_free(struct zfcp_fsf_req *);
 extern void zfcp_fsf_req_free(struct zfcp_fsf_req *);
-extern struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_unit *, u8);
-extern struct zfcp_fsf_req *zfcp_fsf_abort_fcp_command(unsigned long,
-						       struct zfcp_unit *);
+extern struct zfcp_fsf_req *zfcp_fsf_fcp_task_mgmt(struct scsi_cmnd *, u8);
+extern struct zfcp_fsf_req *zfcp_fsf_abort_fcp_cmnd(struct scsi_cmnd *);
 extern void zfcp_fsf_reqid_check(struct zfcp_qdio *, int);
 extern void zfcp_fsf_reqid_check(struct zfcp_qdio *, int);
 
 
 /* zfcp_qdio.c */
 /* zfcp_qdio.c */
@@ -163,8 +159,6 @@ extern void zfcp_scsi_rport_work(struct work_struct *);
 extern void zfcp_scsi_schedule_rport_register(struct zfcp_port *);
 extern void zfcp_scsi_schedule_rport_register(struct zfcp_port *);
 extern void zfcp_scsi_schedule_rport_block(struct zfcp_port *);
 extern void zfcp_scsi_schedule_rport_block(struct zfcp_port *);
 extern void zfcp_scsi_schedule_rports_block(struct zfcp_adapter *);
 extern void zfcp_scsi_schedule_rports_block(struct zfcp_adapter *);
-extern void zfcp_scsi_scan(struct zfcp_unit *);
-extern void zfcp_scsi_scan_work(struct work_struct *);
 extern void zfcp_scsi_set_prot(struct zfcp_adapter *);
 extern void zfcp_scsi_set_prot(struct zfcp_adapter *);
 extern void zfcp_scsi_dif_sense_error(struct scsi_cmnd *, int);
 extern void zfcp_scsi_dif_sense_error(struct scsi_cmnd *, int);
 
 
@@ -175,4 +169,13 @@ extern struct attribute_group zfcp_sysfs_port_attrs;
 extern struct device_attribute *zfcp_sysfs_sdev_attrs[];
 extern struct device_attribute *zfcp_sysfs_sdev_attrs[];
 extern struct device_attribute *zfcp_sysfs_shost_attrs[];
 extern struct device_attribute *zfcp_sysfs_shost_attrs[];
 
 
+/* zfcp_unit.c */
+extern int zfcp_unit_add(struct zfcp_port *, u64);
+extern int zfcp_unit_remove(struct zfcp_port *, u64);
+extern struct zfcp_unit *zfcp_unit_find(struct zfcp_port *, u64);
+extern struct scsi_device *zfcp_unit_sdev(struct zfcp_unit *unit);
+extern void zfcp_unit_scsi_scan(struct zfcp_unit *);
+extern void zfcp_unit_queue_scsi_scan(struct zfcp_port *);
+extern unsigned int zfcp_unit_sdev_status(struct zfcp_unit *);
+
 #endif	/* ZFCP_EXT_H */
 #endif	/* ZFCP_EXT_H */

+ 1 - 1
drivers/s390/scsi/zfcp_fc.c

@@ -365,7 +365,7 @@ void zfcp_fc_port_did_lookup(struct work_struct *work)
 	}
 	}
 
 
 	if (!port->d_id) {
 	if (!port->d_id) {
-		zfcp_erp_port_failed(port, "fcgpn_2", NULL);
+		zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_ERP_FAILED);
 		goto out;
 		goto out;
 	}
 	}
 
 

Файлын зөрүү хэтэрхий том тул дарагдсан байна
+ 223 - 319
drivers/s390/scsi/zfcp_fsf.c


+ 10 - 8
drivers/s390/scsi/zfcp_qdio.c

@@ -60,13 +60,11 @@ static inline void zfcp_qdio_account(struct zfcp_qdio *qdio)
 	unsigned long long now, span;
 	unsigned long long now, span;
 	int used;
 	int used;
 
 
-	spin_lock(&qdio->stat_lock);
 	now = get_clock_monotonic();
 	now = get_clock_monotonic();
 	span = (now - qdio->req_q_time) >> 12;
 	span = (now - qdio->req_q_time) >> 12;
 	used = QDIO_MAX_BUFFERS_PER_Q - atomic_read(&qdio->req_q_free);
 	used = QDIO_MAX_BUFFERS_PER_Q - atomic_read(&qdio->req_q_free);
 	qdio->req_q_util += used * span;
 	qdio->req_q_util += used * span;
 	qdio->req_q_time = now;
 	qdio->req_q_time = now;
-	spin_unlock(&qdio->stat_lock);
 }
 }
 
 
 static void zfcp_qdio_int_req(struct ccw_device *cdev, unsigned int qdio_err,
 static void zfcp_qdio_int_req(struct ccw_device *cdev, unsigned int qdio_err,
@@ -84,7 +82,9 @@ static void zfcp_qdio_int_req(struct ccw_device *cdev, unsigned int qdio_err,
 	/* cleanup all SBALs being program-owned now */
 	/* cleanup all SBALs being program-owned now */
 	zfcp_qdio_zero_sbals(qdio->req_q, idx, count);
 	zfcp_qdio_zero_sbals(qdio->req_q, idx, count);
 
 
+	spin_lock_irq(&qdio->stat_lock);
 	zfcp_qdio_account(qdio);
 	zfcp_qdio_account(qdio);
+	spin_unlock_irq(&qdio->stat_lock);
 	atomic_add(count, &qdio->req_q_free);
 	atomic_add(count, &qdio->req_q_free);
 	wake_up(&qdio->req_q_wq);
 	wake_up(&qdio->req_q_wq);
 }
 }
@@ -201,11 +201,11 @@ int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
 
 
 static int zfcp_qdio_sbal_check(struct zfcp_qdio *qdio)
 static int zfcp_qdio_sbal_check(struct zfcp_qdio *qdio)
 {
 {
-	spin_lock_bh(&qdio->req_q_lock);
+	spin_lock_irq(&qdio->req_q_lock);
 	if (atomic_read(&qdio->req_q_free) ||
 	if (atomic_read(&qdio->req_q_free) ||
 	    !(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
 	    !(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
 		return 1;
 		return 1;
-	spin_unlock_bh(&qdio->req_q_lock);
+	spin_unlock_irq(&qdio->req_q_lock);
 	return 0;
 	return 0;
 }
 }
 
 
@@ -223,7 +223,7 @@ int zfcp_qdio_sbal_get(struct zfcp_qdio *qdio)
 {
 {
 	long ret;
 	long ret;
 
 
-	spin_unlock_bh(&qdio->req_q_lock);
+	spin_unlock_irq(&qdio->req_q_lock);
 	ret = wait_event_interruptible_timeout(qdio->req_q_wq,
 	ret = wait_event_interruptible_timeout(qdio->req_q_wq,
 			       zfcp_qdio_sbal_check(qdio), 5 * HZ);
 			       zfcp_qdio_sbal_check(qdio), 5 * HZ);
 
 
@@ -239,7 +239,7 @@ int zfcp_qdio_sbal_get(struct zfcp_qdio *qdio)
 		zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdsbg_1", NULL);
 		zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdsbg_1", NULL);
 	}
 	}
 
 
-	spin_lock_bh(&qdio->req_q_lock);
+	spin_lock_irq(&qdio->req_q_lock);
 	return -EIO;
 	return -EIO;
 }
 }
 
 
@@ -254,7 +254,9 @@ int zfcp_qdio_send(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
 	int retval;
 	int retval;
 	u8 sbal_number = q_req->sbal_number;
 	u8 sbal_number = q_req->sbal_number;
 
 
+	spin_lock(&qdio->stat_lock);
 	zfcp_qdio_account(qdio);
 	zfcp_qdio_account(qdio);
+	spin_unlock(&qdio->stat_lock);
 
 
 	retval = do_QDIO(qdio->adapter->ccw_device, QDIO_FLAG_SYNC_OUTPUT, 0,
 	retval = do_QDIO(qdio->adapter->ccw_device, QDIO_FLAG_SYNC_OUTPUT, 0,
 			 q_req->sbal_first, sbal_number);
 			 q_req->sbal_first, sbal_number);
@@ -328,9 +330,9 @@ void zfcp_qdio_close(struct zfcp_qdio *qdio)
 		return;
 		return;
 
 
 	/* clear QDIOUP flag, thus do_QDIO is not called during qdio_shutdown */
 	/* clear QDIOUP flag, thus do_QDIO is not called during qdio_shutdown */
-	spin_lock_bh(&qdio->req_q_lock);
+	spin_lock_irq(&qdio->req_q_lock);
 	atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status);
 	atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status);
-	spin_unlock_bh(&qdio->req_q_lock);
+	spin_unlock_irq(&qdio->req_q_lock);
 
 
 	wake_up(&qdio->req_q_wq);
 	wake_up(&qdio->req_q_wq);
 
 

+ 55 - 103
drivers/s390/scsi/zfcp_scsi.c

@@ -49,11 +49,12 @@ static int zfcp_scsi_change_queue_depth(struct scsi_device *sdev, int depth,
 	return sdev->queue_depth;
 	return sdev->queue_depth;
 }
 }
 
 
-static void zfcp_scsi_slave_destroy(struct scsi_device *sdpnt)
+static void zfcp_scsi_slave_destroy(struct scsi_device *sdev)
 {
 {
-	struct zfcp_unit *unit = (struct zfcp_unit *) sdpnt->hostdata;
-	unit->device = NULL;
-	put_device(&unit->dev);
+	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
+
+	zfcp_erp_lun_shutdown_wait(sdev, "scssd_1");
+	put_device(&zfcp_sdev->port->dev);
 }
 }
 
 
 static int zfcp_scsi_slave_configure(struct scsi_device *sdp)
 static int zfcp_scsi_slave_configure(struct scsi_device *sdp)
@@ -78,23 +79,16 @@ static void zfcp_scsi_command_fail(struct scsi_cmnd *scpnt, int result)
 static int zfcp_scsi_queuecommand(struct scsi_cmnd *scpnt,
 static int zfcp_scsi_queuecommand(struct scsi_cmnd *scpnt,
 				  void (*done) (struct scsi_cmnd *))
 				  void (*done) (struct scsi_cmnd *))
 {
 {
-	struct zfcp_unit *unit;
-	struct zfcp_adapter *adapter;
-	int    status, scsi_result, ret;
+	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scpnt->device);
+	struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
 	struct fc_rport *rport = starget_to_rport(scsi_target(scpnt->device));
 	struct fc_rport *rport = starget_to_rport(scsi_target(scpnt->device));
+	int    status, scsi_result, ret;
 
 
 	/* reset the status for this request */
 	/* reset the status for this request */
 	scpnt->result = 0;
 	scpnt->result = 0;
 	scpnt->host_scribble = NULL;
 	scpnt->host_scribble = NULL;
 	scpnt->scsi_done = done;
 	scpnt->scsi_done = done;
 
 
-	/*
-	 * figure out adapter and target device
-	 * (stored there by zfcp_scsi_slave_alloc)
-	 */
-	adapter = (struct zfcp_adapter *) scpnt->device->host->hostdata[0];
-	unit = scpnt->device->hostdata;
-
 	scsi_result = fc_remote_port_chkready(rport);
 	scsi_result = fc_remote_port_chkready(rport);
 	if (unlikely(scsi_result)) {
 	if (unlikely(scsi_result)) {
 		scpnt->result = scsi_result;
 		scpnt->result = scsi_result;
@@ -103,11 +97,11 @@ static int zfcp_scsi_queuecommand(struct scsi_cmnd *scpnt,
 		return 0;
 		return 0;
 	}
 	}
 
 
-	status = atomic_read(&unit->status);
+	status = atomic_read(&zfcp_sdev->status);
 	if (unlikely(status & ZFCP_STATUS_COMMON_ERP_FAILED) &&
 	if (unlikely(status & ZFCP_STATUS_COMMON_ERP_FAILED) &&
-		     !(atomic_read(&unit->port->status) &
+		     !(atomic_read(&zfcp_sdev->port->status) &
 		       ZFCP_STATUS_COMMON_ERP_FAILED)) {
 		       ZFCP_STATUS_COMMON_ERP_FAILED)) {
-		/* only unit access denied, but port is good
+		/* only LUN access denied, but port is good
 		 * not covered by FC transport, have to fail here */
 		 * not covered by FC transport, have to fail here */
 		zfcp_scsi_command_fail(scpnt, DID_ERROR);
 		zfcp_scsi_command_fail(scpnt, DID_ERROR);
 		return 0;
 		return 0;
@@ -115,8 +109,8 @@ static int zfcp_scsi_queuecommand(struct scsi_cmnd *scpnt,
 
 
 	if (unlikely(!(status & ZFCP_STATUS_COMMON_UNBLOCKED))) {
 	if (unlikely(!(status & ZFCP_STATUS_COMMON_UNBLOCKED))) {
 		/* This could be either
 		/* This could be either
-		 * open unit pending: this is temporary, will result in
-		 * 	open unit or ERP_FAILED, so retry command
+		 * open LUN pending: this is temporary, will result in
+		 *	open LUN or ERP_FAILED, so retry command
 		 * call to rport_delete pending: mimic retry from
 		 * call to rport_delete pending: mimic retry from
 		 * 	fc_remote_port_chkready until rport is BLOCKED
 		 * 	fc_remote_port_chkready until rport is BLOCKED
 		 */
 		 */
@@ -124,7 +118,7 @@ static int zfcp_scsi_queuecommand(struct scsi_cmnd *scpnt,
 		return 0;
 		return 0;
 	}
 	}
 
 
-	ret = zfcp_fsf_send_fcp_command_task(unit, scpnt);
+	ret = zfcp_fsf_fcp_cmnd(scpnt);
 	if (unlikely(ret == -EBUSY))
 	if (unlikely(ret == -EBUSY))
 		return SCSI_MLQUEUE_DEVICE_BUSY;
 		return SCSI_MLQUEUE_DEVICE_BUSY;
 	else if (unlikely(ret < 0))
 	else if (unlikely(ret < 0))
@@ -133,45 +127,42 @@ static int zfcp_scsi_queuecommand(struct scsi_cmnd *scpnt,
 	return ret;
 	return ret;
 }
 }
 
 
-static struct zfcp_unit *zfcp_unit_lookup(struct zfcp_adapter *adapter,
-					  unsigned int id, u64 lun)
+static int zfcp_scsi_slave_alloc(struct scsi_device *sdev)
 {
 {
-	unsigned long flags;
+	struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
+	struct zfcp_adapter *adapter =
+		(struct zfcp_adapter *) sdev->host->hostdata[0];
+	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
 	struct zfcp_port *port;
 	struct zfcp_port *port;
-	struct zfcp_unit *unit = NULL;
+	struct zfcp_unit *unit;
 
 
-	read_lock_irqsave(&adapter->port_list_lock, flags);
-	list_for_each_entry(port, &adapter->port_list, list) {
-		if (!port->rport || (id != port->rport->scsi_target_id))
-			continue;
-		unit = zfcp_get_unit_by_lun(port, lun);
-		if (unit)
-			break;
-	}
-	read_unlock_irqrestore(&adapter->port_list_lock, flags);
+	port = zfcp_get_port_by_wwpn(adapter, rport->port_name);
+	if (!port)
+		return -ENXIO;
 
 
-	return unit;
-}
+	unit = zfcp_unit_find(port, zfcp_scsi_dev_lun(sdev));
+	if (unit)
+		put_device(&unit->dev);
 
 
-static int zfcp_scsi_slave_alloc(struct scsi_device *sdp)
-{
-	struct zfcp_adapter *adapter;
-	struct zfcp_unit *unit;
-	u64 lun;
+	if (!unit && !(adapter->connection_features & FSF_FEATURE_NPIV_MODE)) {
+		put_device(&port->dev);
+		return -ENXIO;
+	}
 
 
-	adapter = (struct zfcp_adapter *) sdp->host->hostdata[0];
-	if (!adapter)
-		goto out;
+	zfcp_sdev->port = port;
+	zfcp_sdev->latencies.write.channel.min = 0xFFFFFFFF;
+	zfcp_sdev->latencies.write.fabric.min = 0xFFFFFFFF;
+	zfcp_sdev->latencies.read.channel.min = 0xFFFFFFFF;
+	zfcp_sdev->latencies.read.fabric.min = 0xFFFFFFFF;
+	zfcp_sdev->latencies.cmd.channel.min = 0xFFFFFFFF;
+	zfcp_sdev->latencies.cmd.fabric.min = 0xFFFFFFFF;
+	spin_lock_init(&zfcp_sdev->latencies.lock);
 
 
-	int_to_scsilun(sdp->lun, (struct scsi_lun *)&lun);
-	unit = zfcp_unit_lookup(adapter, sdp->id, lun);
-	if (unit) {
-		sdp->hostdata = unit;
-		unit->device = sdp;
-		return 0;
-	}
-out:
-	return -ENXIO;
+	zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_RUNNING);
+	zfcp_erp_lun_reopen(sdev, 0, "scsla_1", NULL);
+	zfcp_erp_wait(port->adapter);
+
+	return 0;
 }
 }
 
 
 static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
 static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
@@ -179,7 +170,6 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
 	struct Scsi_Host *scsi_host = scpnt->device->host;
 	struct Scsi_Host *scsi_host = scpnt->device->host;
 	struct zfcp_adapter *adapter =
 	struct zfcp_adapter *adapter =
 		(struct zfcp_adapter *) scsi_host->hostdata[0];
 		(struct zfcp_adapter *) scsi_host->hostdata[0];
-	struct zfcp_unit *unit = scpnt->device->hostdata;
 	struct zfcp_fsf_req *old_req, *abrt_req;
 	struct zfcp_fsf_req *old_req, *abrt_req;
 	unsigned long flags;
 	unsigned long flags;
 	unsigned long old_reqid = (unsigned long) scpnt->host_scribble;
 	unsigned long old_reqid = (unsigned long) scpnt->host_scribble;
@@ -203,7 +193,7 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
 	write_unlock_irqrestore(&adapter->abort_lock, flags);
 	write_unlock_irqrestore(&adapter->abort_lock, flags);
 
 
 	while (retry--) {
 	while (retry--) {
-		abrt_req = zfcp_fsf_abort_fcp_command(old_reqid, unit);
+		abrt_req = zfcp_fsf_abort_fcp_cmnd(scpnt);
 		if (abrt_req)
 		if (abrt_req)
 			break;
 			break;
 
 
@@ -238,14 +228,14 @@ static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *scpnt)
 
 
 static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags)
 static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags)
 {
 {
-	struct zfcp_unit *unit = scpnt->device->hostdata;
-	struct zfcp_adapter *adapter = unit->port->adapter;
+	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scpnt->device);
+	struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
 	struct zfcp_fsf_req *fsf_req = NULL;
 	struct zfcp_fsf_req *fsf_req = NULL;
 	int retval = SUCCESS, ret;
 	int retval = SUCCESS, ret;
 	int retry = 3;
 	int retry = 3;
 
 
 	while (retry--) {
 	while (retry--) {
-		fsf_req = zfcp_fsf_send_fcp_ctm(unit, tm_flags);
+		fsf_req = zfcp_fsf_fcp_task_mgmt(scpnt, tm_flags);
 		if (fsf_req)
 		if (fsf_req)
 			break;
 			break;
 
 
@@ -256,7 +246,7 @@ static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags)
 
 
 		if (!(atomic_read(&adapter->status) &
 		if (!(atomic_read(&adapter->status) &
 		      ZFCP_STATUS_COMMON_RUNNING)) {
 		      ZFCP_STATUS_COMMON_RUNNING)) {
-			zfcp_dbf_scsi_devreset("nres", tm_flags, unit, scpnt);
+			zfcp_dbf_scsi_devreset("nres", scpnt, tm_flags);
 			return SUCCESS;
 			return SUCCESS;
 		}
 		}
 	}
 	}
@@ -266,10 +256,10 @@ static int zfcp_task_mgmt_function(struct scsi_cmnd *scpnt, u8 tm_flags)
 	wait_for_completion(&fsf_req->completion);
 	wait_for_completion(&fsf_req->completion);
 
 
 	if (fsf_req->status & ZFCP_STATUS_FSFREQ_TMFUNCFAILED) {
 	if (fsf_req->status & ZFCP_STATUS_FSFREQ_TMFUNCFAILED) {
-		zfcp_dbf_scsi_devreset("fail", tm_flags, unit, scpnt);
+		zfcp_dbf_scsi_devreset("fail", scpnt, tm_flags);
 		retval = FAILED;
 		retval = FAILED;
 	} else
 	} else
-		zfcp_dbf_scsi_devreset("okay", tm_flags, unit, scpnt);
+		zfcp_dbf_scsi_devreset("okay", scpnt, tm_flags);
 
 
 	zfcp_fsf_req_free(fsf_req);
 	zfcp_fsf_req_free(fsf_req);
 	return retval;
 	return retval;
@@ -287,8 +277,8 @@ static int zfcp_scsi_eh_target_reset_handler(struct scsi_cmnd *scpnt)
 
 
 static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt)
 static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt)
 {
 {
-	struct zfcp_unit *unit = scpnt->device->hostdata;
-	struct zfcp_adapter *adapter = unit->port->adapter;
+	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scpnt->device);
+	struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;
 	int ret;
 	int ret;
 
 
 	zfcp_erp_adapter_reopen(adapter, 0, "schrh_1", scpnt);
 	zfcp_erp_adapter_reopen(adapter, 0, "schrh_1", scpnt);
@@ -319,8 +309,8 @@ int zfcp_adapter_scsi_register(struct zfcp_adapter *adapter)
 	}
 	}
 
 
 	/* tell the SCSI stack some characteristics of this adapter */
 	/* tell the SCSI stack some characteristics of this adapter */
-	adapter->scsi_host->max_id = 1;
-	adapter->scsi_host->max_lun = 1;
+	adapter->scsi_host->max_id = 511;
+	adapter->scsi_host->max_lun = 0xFFFFFFFF;
 	adapter->scsi_host->max_channel = 0;
 	adapter->scsi_host->max_channel = 0;
 	adapter->scsi_host->unique_id = dev_id.devno;
 	adapter->scsi_host->unique_id = dev_id.devno;
 	adapter->scsi_host->max_cmd_len = 16; /* in struct fcp_cmnd */
 	adapter->scsi_host->max_cmd_len = 16; /* in struct fcp_cmnd */
@@ -534,20 +524,6 @@ static void zfcp_scsi_terminate_rport_io(struct fc_rport *rport)
 	}
 	}
 }
 }
 
 
-static void zfcp_scsi_queue_unit_register(struct zfcp_port *port)
-{
-	struct zfcp_unit *unit;
-
-	read_lock_irq(&port->unit_list_lock);
-	list_for_each_entry(unit, &port->unit_list, list) {
-		get_device(&unit->dev);
-		if (scsi_queue_work(port->adapter->scsi_host,
-				    &unit->scsi_work) <= 0)
-			put_device(&unit->dev);
-	}
-	read_unlock_irq(&port->unit_list_lock);
-}
-
 static void zfcp_scsi_rport_register(struct zfcp_port *port)
 static void zfcp_scsi_rport_register(struct zfcp_port *port)
 {
 {
 	struct fc_rport_identifiers ids;
 	struct fc_rport_identifiers ids;
@@ -574,7 +550,7 @@ static void zfcp_scsi_rport_register(struct zfcp_port *port)
 	port->rport = rport;
 	port->rport = rport;
 	port->starget_id = rport->scsi_target_id;
 	port->starget_id = rport->scsi_target_id;
 
 
-	zfcp_scsi_queue_unit_register(port);
+	zfcp_unit_queue_scsi_scan(port);
 }
 }
 
 
 static void zfcp_scsi_rport_block(struct zfcp_port *port)
 static void zfcp_scsi_rport_block(struct zfcp_port *port)
@@ -637,29 +613,6 @@ void zfcp_scsi_rport_work(struct work_struct *work)
 	put_device(&port->dev);
 	put_device(&port->dev);
 }
 }
 
 
-/**
- * zfcp_scsi_scan - Register LUN with SCSI midlayer
- * @unit: The LUN/unit to register
- */
-void zfcp_scsi_scan(struct zfcp_unit *unit)
-{
-	struct fc_rport *rport = unit->port->rport;
-
-	if (rport && rport->port_state == FC_PORTSTATE_ONLINE)
-		scsi_scan_target(&rport->dev, 0, rport->scsi_target_id,
-				 scsilun_to_int((struct scsi_lun *)
-						&unit->fcp_lun), 0);
-}
-
-void zfcp_scsi_scan_work(struct work_struct *work)
-{
-	struct zfcp_unit *unit = container_of(work, struct zfcp_unit,
-					      scsi_work);
-
-	zfcp_scsi_scan(unit);
-	put_device(&unit->dev);
-}
-
 /**
 /**
  * zfcp_scsi_set_prot - Configure DIF/DIX support in scsi_host
  * zfcp_scsi_set_prot - Configure DIF/DIX support in scsi_host
  * @adapter: The adapter where to configure DIF/DIX for the SCSI host
  * @adapter: The adapter where to configure DIF/DIX for the SCSI host
@@ -735,7 +688,6 @@ struct fc_function_template zfcp_transport_functions = {
 	.show_host_port_type = 1,
 	.show_host_port_type = 1,
 	.show_host_speed = 1,
 	.show_host_speed = 1,
 	.show_host_port_id = 1,
 	.show_host_port_id = 1,
-	.disable_target_scan = 1,
 	.dd_bsg_size = sizeof(struct zfcp_fsf_ct_els),
 	.dd_bsg_size = sizeof(struct zfcp_fsf_ct_els),
 };
 };
 
 

+ 111 - 110
drivers/s390/scsi/zfcp_sysfs.c

@@ -68,63 +68,96 @@ ZFCP_DEFINE_ATTR(zfcp_port, port, access_denied, "%d\n",
 		  ZFCP_STATUS_COMMON_ACCESS_DENIED) != 0);
 		  ZFCP_STATUS_COMMON_ACCESS_DENIED) != 0);
 
 
 ZFCP_DEFINE_ATTR(zfcp_unit, unit, status, "0x%08x\n",
 ZFCP_DEFINE_ATTR(zfcp_unit, unit, status, "0x%08x\n",
-		 atomic_read(&unit->status));
+		 zfcp_unit_sdev_status(unit));
 ZFCP_DEFINE_ATTR(zfcp_unit, unit, in_recovery, "%d\n",
 ZFCP_DEFINE_ATTR(zfcp_unit, unit, in_recovery, "%d\n",
-		 (atomic_read(&unit->status) &
+		 (zfcp_unit_sdev_status(unit) &
 		  ZFCP_STATUS_COMMON_ERP_INUSE) != 0);
 		  ZFCP_STATUS_COMMON_ERP_INUSE) != 0);
 ZFCP_DEFINE_ATTR(zfcp_unit, unit, access_denied, "%d\n",
 ZFCP_DEFINE_ATTR(zfcp_unit, unit, access_denied, "%d\n",
-		 (atomic_read(&unit->status) &
+		 (zfcp_unit_sdev_status(unit) &
 		  ZFCP_STATUS_COMMON_ACCESS_DENIED) != 0);
 		  ZFCP_STATUS_COMMON_ACCESS_DENIED) != 0);
 ZFCP_DEFINE_ATTR(zfcp_unit, unit, access_shared, "%d\n",
 ZFCP_DEFINE_ATTR(zfcp_unit, unit, access_shared, "%d\n",
-		 (atomic_read(&unit->status) &
-		  ZFCP_STATUS_UNIT_SHARED) != 0);
+		 (zfcp_unit_sdev_status(unit) &
+		  ZFCP_STATUS_LUN_SHARED) != 0);
 ZFCP_DEFINE_ATTR(zfcp_unit, unit, access_readonly, "%d\n",
 ZFCP_DEFINE_ATTR(zfcp_unit, unit, access_readonly, "%d\n",
-		 (atomic_read(&unit->status) &
-		  ZFCP_STATUS_UNIT_READONLY) != 0);
+		 (zfcp_unit_sdev_status(unit) &
+		  ZFCP_STATUS_LUN_READONLY) != 0);
 
 
-#define ZFCP_SYSFS_FAILED(_feat_def, _feat, _adapter, _mod_id, _reopen_id)     \
-static ssize_t zfcp_sysfs_##_feat##_failed_show(struct device *dev,	       \
-						struct device_attribute *attr, \
-						char *buf)		       \
-{									       \
-	struct _feat_def *_feat = container_of(dev, struct _feat_def, dev);    \
-									       \
-	if (atomic_read(&_feat->status) & ZFCP_STATUS_COMMON_ERP_FAILED)       \
-		return sprintf(buf, "1\n");				       \
-	else								       \
-		return sprintf(buf, "0\n");				       \
-}									       \
-static ssize_t zfcp_sysfs_##_feat##_failed_store(struct device *dev,	       \
-						 struct device_attribute *attr,\
-						 const char *buf, size_t count)\
-{									       \
-	struct _feat_def *_feat = container_of(dev, struct _feat_def, dev);    \
-	unsigned long val;						       \
-	int retval = 0;							       \
-									       \
-	if (!(_feat && get_device(&_feat->dev)))			       \
-		return -EBUSY;						       \
-									       \
-	if (strict_strtoul(buf, 0, &val) || val != 0) {			       \
-		retval = -EINVAL;					       \
-		goto out;						       \
-	}								       \
-									       \
-	zfcp_erp_modify_##_feat##_status(_feat, _mod_id, NULL,		       \
-					 ZFCP_STATUS_COMMON_RUNNING, ZFCP_SET);\
-	zfcp_erp_##_feat##_reopen(_feat, ZFCP_STATUS_COMMON_ERP_FAILED,	       \
-				  _reopen_id, NULL);			       \
-	zfcp_erp_wait(_adapter);					       \
-out:									       \
-	put_device(&_feat->dev);					       \
-	return retval ? retval : (ssize_t) count;			       \
-}									       \
-static ZFCP_DEV_ATTR(_feat, failed, S_IWUSR | S_IRUGO,			       \
-		     zfcp_sysfs_##_feat##_failed_show,			       \
-		     zfcp_sysfs_##_feat##_failed_store);
+static ssize_t zfcp_sysfs_port_failed_show(struct device *dev,
+					   struct device_attribute *attr,
+					   char *buf)
+{
+	struct zfcp_port *port = container_of(dev, struct zfcp_port, dev);
+
+	if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_FAILED)
+		return sprintf(buf, "1\n");
+
+	return sprintf(buf, "0\n");
+}
+
+static ssize_t zfcp_sysfs_port_failed_store(struct device *dev,
+					    struct device_attribute *attr,
+					    const char *buf, size_t count)
+{
+	struct zfcp_port *port = container_of(dev, struct zfcp_port, dev);
+	unsigned long val;
+
+	if (strict_strtoul(buf, 0, &val) || val != 0)
+		return -EINVAL;
+
+	zfcp_erp_set_port_status(port, ZFCP_STATUS_COMMON_RUNNING);
+	zfcp_erp_port_reopen(port, ZFCP_STATUS_COMMON_ERP_FAILED, "sypfai2",
+			     NULL);
+	zfcp_erp_wait(port->adapter);
 
 
-ZFCP_SYSFS_FAILED(zfcp_port, port, port->adapter, "sypfai1", "sypfai2");
-ZFCP_SYSFS_FAILED(zfcp_unit, unit, unit->port->adapter, "syufai1", "syufai2");
+	return count;
+}
+static ZFCP_DEV_ATTR(port, failed, S_IWUSR | S_IRUGO,
+		     zfcp_sysfs_port_failed_show,
+		     zfcp_sysfs_port_failed_store);
+
+static ssize_t zfcp_sysfs_unit_failed_show(struct device *dev,
+					   struct device_attribute *attr,
+					   char *buf)
+{
+	struct zfcp_unit *unit = container_of(dev, struct zfcp_unit, dev);
+	struct scsi_device *sdev;
+	unsigned int status, failed = 1;
+
+	sdev = zfcp_unit_sdev(unit);
+	if (sdev) {
+		status = atomic_read(&sdev_to_zfcp(sdev)->status);
+		failed = status & ZFCP_STATUS_COMMON_ERP_FAILED ? 1 : 0;
+		scsi_device_put(sdev);
+	}
+
+	return sprintf(buf, "%d\n", failed);
+}
+
+static ssize_t zfcp_sysfs_unit_failed_store(struct device *dev,
+					    struct device_attribute *attr,
+					    const char *buf, size_t count)
+{
+	struct zfcp_unit *unit = container_of(dev, struct zfcp_unit, dev);
+	unsigned long val;
+	struct scsi_device *sdev;
+
+	if (strict_strtoul(buf, 0, &val) || val != 0)
+		return -EINVAL;
+
+	sdev = zfcp_unit_sdev(unit);
+	if (sdev) {
+		zfcp_erp_set_lun_status(sdev, ZFCP_STATUS_COMMON_RUNNING);
+		zfcp_erp_lun_reopen(sdev, ZFCP_STATUS_COMMON_ERP_FAILED,
+				    "syufai2", NULL);
+		zfcp_erp_wait(unit->port->adapter);
+	} else
+		zfcp_unit_scsi_scan(unit);
+
+	return count;
+}
+static ZFCP_DEV_ATTR(unit, failed, S_IWUSR | S_IRUGO,
+		     zfcp_sysfs_unit_failed_show,
+		     zfcp_sysfs_unit_failed_store);
 
 static ssize_t zfcp_sysfs_adapter_failed_show(struct device *dev,
 					      struct device_attribute *attr,
@@ -163,8 +196,7 @@ static ssize_t zfcp_sysfs_adapter_failed_store(struct device *dev,
 		goto out;
 	}

-	zfcp_erp_modify_adapter_status(adapter, "syafai1", NULL,
-				       ZFCP_STATUS_COMMON_RUNNING, ZFCP_SET);
+	zfcp_erp_set_adapter_status(adapter, ZFCP_STATUS_COMMON_RUNNING);
 	zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
 				"syafai2", NULL);
 	zfcp_erp_wait(adapter);
@@ -257,28 +289,15 @@ static ssize_t zfcp_sysfs_unit_add_store(struct device *dev,
 					 const char *buf, size_t count)
 {
 	struct zfcp_port *port = container_of(dev, struct zfcp_port, dev);
-	struct zfcp_unit *unit;
 	u64 fcp_lun;
-	int retval = -EINVAL;
-
-	if (!(port && get_device(&port->dev)))
-		return -EBUSY;

 	if (strict_strtoull(buf, 0, (unsigned long long *) &fcp_lun))
-		goto out;
+		return -EINVAL;

-	unit = zfcp_unit_enqueue(port, fcp_lun);
-	if (IS_ERR(unit))
-		goto out;
-	else
-		retval = 0;
+	if (zfcp_unit_add(port, fcp_lun))
+		return -EINVAL;

-	zfcp_erp_unit_reopen(unit, 0, "syuas_1", NULL);
-	zfcp_erp_wait(unit->port->adapter);
-	zfcp_scsi_scan(unit);
-out:
-	put_device(&port->dev);
-	return retval ? retval : (ssize_t) count;
+	return count;
 }
 static DEVICE_ATTR(unit_add, S_IWUSR, NULL, zfcp_sysfs_unit_add_store);
 
@@ -287,42 +306,15 @@ static ssize_t zfcp_sysfs_unit_remove_store(struct device *dev,
 					    const char *buf, size_t count)
 {
 	struct zfcp_port *port = container_of(dev, struct zfcp_port, dev);
-	struct zfcp_unit *unit;
 	u64 fcp_lun;
-	int retval = -EINVAL;
-	struct scsi_device *sdev;
-
-	if (!(port && get_device(&port->dev)))
-		return -EBUSY;

 	if (strict_strtoull(buf, 0, (unsigned long long *) &fcp_lun))
-		goto out;
+		return -EINVAL;

-	unit = zfcp_get_unit_by_lun(port, fcp_lun);
-	if (!unit)
-		goto out;
-	else
-		retval = 0;
-
-	sdev = scsi_device_lookup(port->adapter->scsi_host, 0,
-				  port->starget_id,
-				  scsilun_to_int((struct scsi_lun *)&fcp_lun));
-	if (sdev) {
-		scsi_remove_device(sdev);
-		scsi_device_put(sdev);
-	}
-
-	write_lock_irq(&port->unit_list_lock);
-	list_del(&unit->list);
-	write_unlock_irq(&port->unit_list_lock);
-
-	put_device(&unit->dev);
+	if (zfcp_unit_remove(port, fcp_lun))
+		return -EINVAL;

-	zfcp_erp_unit_shutdown(unit, 0, "syurs_1", NULL);
-	zfcp_device_unregister(&unit->dev, &zfcp_sysfs_unit_attrs);
-out:
-	put_device(&port->dev);
-	return retval ? retval : (ssize_t) count;
+	return count;
 }
 static DEVICE_ATTR(unit_remove, S_IWUSR, NULL, zfcp_sysfs_unit_remove_store);
 
@@ -363,9 +355,9 @@ zfcp_sysfs_unit_##_name##_latency_show(struct device *dev,		\
 				       struct device_attribute *attr,	\
 				       char *buf) {			\
 	struct scsi_device *sdev = to_scsi_device(dev);			\
-	struct zfcp_unit *unit = sdev->hostdata;			\
-	struct zfcp_latencies *lat = &unit->latencies;			\
-	struct zfcp_adapter *adapter = unit->port->adapter;		\
+	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);		\
+	struct zfcp_latencies *lat = &zfcp_sdev->latencies;		\
+	struct zfcp_adapter *adapter = zfcp_sdev->port->adapter;	\
 	unsigned long long fsum, fmin, fmax, csum, cmin, cmax, cc;	\
 									\
 	spin_lock_bh(&lat->lock);					\
@@ -394,8 +386,8 @@ zfcp_sysfs_unit_##_name##_latency_store(struct device *dev,		\
 					const char *buf, size_t count)	\
 {									\
 	struct scsi_device *sdev = to_scsi_device(dev);			\
-	struct zfcp_unit *unit = sdev->hostdata;			\
-	struct zfcp_latencies *lat = &unit->latencies;			\
+	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);		\
+	struct zfcp_latencies *lat = &zfcp_sdev->latencies;		\
 	unsigned long flags;						\
 									\
 	spin_lock_irqsave(&lat->lock, flags);				\
@@ -423,19 +415,28 @@ static ssize_t zfcp_sysfs_scsi_##_name##_show(struct device *dev,	\
 					      struct device_attribute *attr,\
 					      char *buf)                 \
 {                                                                        \
-	struct scsi_device *sdev  = to_scsi_device(dev);		 \
-	struct zfcp_unit *unit = sdev->hostdata;			 \
+	struct scsi_device *sdev = to_scsi_device(dev);			 \
+	struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);		 \
+	struct zfcp_port *port = zfcp_sdev->port;			 \
 									 \
 	return sprintf(buf, _format, _value);                            \
 }                                                                        \
 static DEVICE_ATTR(_name, S_IRUGO, zfcp_sysfs_scsi_##_name##_show, NULL);

 ZFCP_DEFINE_SCSI_ATTR(hba_id, "%s\n",
-		      dev_name(&unit->port->adapter->ccw_device->dev));
+		      dev_name(&port->adapter->ccw_device->dev));
 ZFCP_DEFINE_SCSI_ATTR(wwpn, "0x%016llx\n",
-		      (unsigned long long) unit->port->wwpn);
-ZFCP_DEFINE_SCSI_ATTR(fcp_lun, "0x%016llx\n",
-		      (unsigned long long) unit->fcp_lun);
+		      (unsigned long long) port->wwpn);
+
+static ssize_t zfcp_sysfs_scsi_fcp_lun_show(struct device *dev,
+					    struct device_attribute *attr,
+					    char *buf)
+{
+	struct scsi_device *sdev = to_scsi_device(dev);
+
+	return sprintf(buf, "0x%016llx\n", zfcp_scsi_dev_lun(sdev));
+}
+static DEVICE_ATTR(fcp_lun, S_IRUGO, zfcp_sysfs_scsi_fcp_lun_show, NULL);
 
 struct device_attribute *zfcp_sysfs_sdev_attrs[] = {
 	&dev_attr_fcp_lun,

+ 244 - 0
drivers/s390/scsi/zfcp_unit.c

@@ -0,0 +1,244 @@
+/*
+ * zfcp device driver
+ *
+ * Tracking of manually configured LUNs and helper functions to
+ * register the LUNs with the SCSI midlayer.
+ *
+ * Copyright IBM Corporation 2010
+ */
+
+#include "zfcp_def.h"
+#include "zfcp_ext.h"
+
+/**
+ * zfcp_unit_scsi_scan - Register LUN with SCSI midlayer
+ * @unit: The zfcp LUN/unit to register
+ *
+ * When the SCSI midlayer is not allowed to automatically scan and
+ * attach SCSI devices, zfcp has to register the single devices with
+ * the SCSI midlayer.
+ */
+void zfcp_unit_scsi_scan(struct zfcp_unit *unit)
+{
+	struct fc_rport *rport = unit->port->rport;
+	unsigned int lun;
+
+	lun = scsilun_to_int((struct scsi_lun *) &unit->fcp_lun);
+
+	if (rport && rport->port_state == FC_PORTSTATE_ONLINE)
+		scsi_scan_target(&rport->dev, 0, rport->scsi_target_id, lun, 1);
+}
+
+static void zfcp_unit_scsi_scan_work(struct work_struct *work)
+{
+	struct zfcp_unit *unit = container_of(work, struct zfcp_unit,
+					      scsi_work);
+
+	zfcp_unit_scsi_scan(unit);
+	put_device(&unit->dev);
+}
+
+/**
+ * zfcp_unit_queue_scsi_scan - Register configured units on port
+ * @port: The zfcp_port where to register units
+ *
+ * After opening a port, all units configured on this port have to be
+ * registered with the SCSI midlayer. This function should be called
+ * after calling fc_remote_port_add, so that the fc_rport is already
+ * ONLINE and the call to scsi_scan_target runs the same way as the
+ * call in the FC transport class.
+ */
+void zfcp_unit_queue_scsi_scan(struct zfcp_port *port)
+{
+	struct zfcp_unit *unit;
+
+	read_lock_irq(&port->unit_list_lock);
+	list_for_each_entry(unit, &port->unit_list, list) {
+		get_device(&unit->dev);
+		if (scsi_queue_work(port->adapter->scsi_host,
+				    &unit->scsi_work) <= 0)
+			put_device(&unit->dev);
+	}
+	read_unlock_irq(&port->unit_list_lock);
+}
+
+static struct zfcp_unit *_zfcp_unit_find(struct zfcp_port *port, u64 fcp_lun)
+{
+	struct zfcp_unit *unit;
+
+	list_for_each_entry(unit, &port->unit_list, list)
+		if (unit->fcp_lun == fcp_lun) {
+			get_device(&unit->dev);
+			return unit;
+		}
+
+	return NULL;
+}
+
+/**
+ * zfcp_unit_find - Find and return zfcp_unit with specified FCP LUN
+ * @port: zfcp_port where to look for the unit
+ * @fcp_lun: 64 Bit FCP LUN used to identify the zfcp_unit
+ *
+ * If zfcp_unit is found, a reference is acquired that has to be
+ * released later.
+ *
+ * Returns: Pointer to the zfcp_unit, or NULL if there is no zfcp_unit
+ *          with the specified FCP LUN.
+ */
+struct zfcp_unit *zfcp_unit_find(struct zfcp_port *port, u64 fcp_lun)
+{
+	struct zfcp_unit *unit;
+
+	read_lock_irq(&port->unit_list_lock);
+	unit = _zfcp_unit_find(port, fcp_lun);
+	read_unlock_irq(&port->unit_list_lock);
+	return unit;
+}
+
+/**
+ * zfcp_unit_release - Drop reference to zfcp_port and free memory of zfcp_unit.
+ * @dev: pointer to device in zfcp_unit
+ */
+static void zfcp_unit_release(struct device *dev)
+{
+	struct zfcp_unit *unit = container_of(dev, struct zfcp_unit, dev);
+
+	put_device(&unit->port->dev);
+	kfree(unit);
+}
+
+/**
+ * zfcp_unit_enqueue - enqueue unit to unit list of a port.
+ * @port: pointer to port where unit is added
+ * @fcp_lun: FCP LUN of unit to be enqueued
+ * Returns: 0 success
+ *
+ * Sets up some unit internal structures and creates sysfs entry.
+ */
+int zfcp_unit_add(struct zfcp_port *port, u64 fcp_lun)
+{
+	struct zfcp_unit *unit;
+
+	unit = zfcp_unit_find(port, fcp_lun);
+	if (unit) {
+		put_device(&unit->dev);
+		return -EEXIST;
+	}
+
+	unit = kzalloc(sizeof(struct zfcp_unit), GFP_KERNEL);
+	if (!unit)
+		return -ENOMEM;
+
+	unit->port = port;
+	unit->fcp_lun = fcp_lun;
+	unit->dev.parent = &port->dev;
+	unit->dev.release = zfcp_unit_release;
+	INIT_WORK(&unit->scsi_work, zfcp_unit_scsi_scan_work);
+
+	if (dev_set_name(&unit->dev, "0x%016llx",
+			 (unsigned long long) fcp_lun)) {
+		kfree(unit);
+		return -ENOMEM;
+	}
+
+	if (device_register(&unit->dev)) {
+		put_device(&unit->dev);
+		return -ENOMEM;
+	}
+
+	if (sysfs_create_group(&unit->dev.kobj, &zfcp_sysfs_unit_attrs)) {
+		device_unregister(&unit->dev);
+		return -EINVAL;
+	}
+
+	get_device(&port->dev);
+
+	write_lock_irq(&port->unit_list_lock);
+	list_add_tail(&unit->list, &port->unit_list);
+	write_unlock_irq(&port->unit_list_lock);
+
+	zfcp_unit_scsi_scan(unit);
+
+	return 0;
+}
+
+/**
+ * zfcp_unit_sdev - Return SCSI device for zfcp_unit
+ * @unit: The zfcp_unit where to get the SCSI device for
+ *
+ * Returns: scsi_device pointer on success, NULL if there is no SCSI
+ *          device for this zfcp_unit
+ *
+ * On success, the caller also holds a reference to the SCSI device
+ * that must be released with scsi_device_put.
+ */
+struct scsi_device *zfcp_unit_sdev(struct zfcp_unit *unit)
+{
+	struct Scsi_Host *shost;
+	struct zfcp_port *port;
+	unsigned int lun;
+
+	lun = scsilun_to_int((struct scsi_lun *) &unit->fcp_lun);
+	port = unit->port;
+	shost = port->adapter->scsi_host;
+	return scsi_device_lookup(shost, 0, port->starget_id, lun);
+}
+
+/**
+ * zfcp_unit_sdev_status - Return zfcp LUN status for SCSI device
+ * @unit: The unit to lookup the SCSI device for
+ *
+ * Returns the zfcp LUN status field of the SCSI device if the SCSI device
+ * for the zfcp_unit exists, 0 otherwise.
+ */
+unsigned int zfcp_unit_sdev_status(struct zfcp_unit *unit)
+{
+	unsigned int status = 0;
+	struct scsi_device *sdev;
+	struct zfcp_scsi_dev *zfcp_sdev;
+
+	sdev = zfcp_unit_sdev(unit);
+	if (sdev) {
+		zfcp_sdev = sdev_to_zfcp(sdev);
+		status = atomic_read(&zfcp_sdev->status);
+		scsi_device_put(sdev);
+	}
+
+	return status;
+}
+
+/**
+ * zfcp_unit_remove - Remove entry from list of configured units
+ * @port: The port where to remove the unit from the configuration
+ * @fcp_lun: The 64 bit LUN of the unit to remove
+ *
+ * Returns: -EINVAL if a unit with the specified LUN does not exist,
+ *          0 on success.
+ */
+int zfcp_unit_remove(struct zfcp_port *port, u64 fcp_lun)
+{
+	struct zfcp_unit *unit;
+	struct scsi_device *sdev;
+
+	write_lock_irq(&port->unit_list_lock);
+	unit = _zfcp_unit_find(port, fcp_lun);
+	if (unit)
+		list_del(&unit->list);
+	write_unlock_irq(&port->unit_list_lock);
+
+	if (!unit)
+		return -EINVAL;
+
+	sdev = zfcp_unit_sdev(unit);
+	if (sdev) {
+		scsi_remove_device(sdev);
+		scsi_device_put(sdev);
+	}
+
+	put_device(&unit->dev);
+
+	zfcp_device_unregister(&unit->dev, &zfcp_sysfs_unit_attrs);
+
+	return 0;
+}

+ 3 - 2
drivers/scsi/Kconfig

@@ -316,7 +316,8 @@ config SCSI_ISCSI_ATTRS
 
 config SCSI_SAS_ATTRS
 	tristate "SAS Transport Attributes"
-	depends on SCSI && BLK_DEV_BSG
+	depends on SCSI
+	select BLK_DEV_BSG
 	help
 	  If you wish to export transport-specific information about
 	  each attached SAS device to sysfs, say Y.
@@ -378,7 +379,7 @@ config ISCSI_BOOT_SYSFS
 	  via sysfs to userspace. If you wish to export this information,
 	  say Y. Otherwise, say N.

-source "drivers/scsi/cxgb3i/Kconfig"
+source "drivers/scsi/cxgbi/Kconfig"
 source "drivers/scsi/bnx2i/Kconfig"
 source "drivers/scsi/be2iscsi/Kconfig"
 

+ 2 - 1
drivers/scsi/Makefile

@@ -133,7 +133,8 @@ obj-$(CONFIG_SCSI_HPTIOP)	+= hptiop.o
 obj-$(CONFIG_SCSI_STEX)		+= stex.o
 obj-$(CONFIG_SCSI_MVSAS)	+= mvsas/
 obj-$(CONFIG_PS3_ROM)		+= ps3rom.o
-obj-$(CONFIG_SCSI_CXGB3_ISCSI)	+= libiscsi.o libiscsi_tcp.o cxgb3i/
+obj-$(CONFIG_SCSI_CXGB3_ISCSI)	+= libiscsi.o libiscsi_tcp.o cxgbi/
+obj-$(CONFIG_SCSI_CXGB4_ISCSI)	+= libiscsi.o libiscsi_tcp.o cxgbi/
 obj-$(CONFIG_SCSI_BNX2_ISCSI)	+= libiscsi.o bnx2i/
 obj-$(CONFIG_BE2ISCSI)		+= libiscsi.o be2iscsi/
 obj-$(CONFIG_SCSI_PMCRAID)	+= pmcraid.o

+ 1 - 1
drivers/scsi/aacraid/commctrl.c

@@ -190,7 +190,7 @@ static int open_getadapter_fib(struct aac_dev * dev, void __user *arg)
 		/*
 		 *	Initialize the mutex used to wait for the next AIF.
 		 */
-		init_MUTEX_LOCKED(&fibctx->wait_sem);
+		sema_init(&fibctx->wait_sem, 0);
 		fibctx->wait = 0;
 		/*
 		 *	Initialize the fibs and set the count of fibs on

+ 1 - 1
drivers/scsi/aacraid/commsup.c

@@ -124,7 +124,7 @@ int aac_fib_setup(struct aac_dev * dev)
 		fibptr->hw_fib_va = hw_fib;
 		fibptr->data = (void *) fibptr->hw_fib_va->data;
 		fibptr->next = fibptr+1;	/* Forward chain the fibs */
-		init_MUTEX_LOCKED(&fibptr->event_wait);
+		sema_init(&fibptr->event_wait, 0);
 		spin_lock_init(&fibptr->event_lock);
 		hw_fib->header.XferState = cpu_to_le32(0xffffffff);
 		hw_fib->header.SenderSize = cpu_to_le16(dev->max_fib_size);

+ 2 - 2
drivers/scsi/arcmsr/arcmsr_hba.c

@@ -878,8 +878,8 @@ static void arcmsr_report_ccb_state(struct AdapterControlBlock *acb,
 	if (!error) {
 		if (acb->devstate[id][lun] == ARECA_RAID_GONE)
 			acb->devstate[id][lun] = ARECA_RAID_GOOD;
-			ccb->pcmd->result = DID_OK << 16;
-			arcmsr_ccb_complete(ccb);
+		ccb->pcmd->result = DID_OK << 16;
+		arcmsr_ccb_complete(ccb);
 	}else{
 		switch (ccb->arcmsr_cdb.DeviceStatus) {
 		case ARCMSR_DEV_SELECT_TIMEOUT: {

+ 1 - 1
drivers/scsi/be2iscsi/be_cmds.c

@@ -335,7 +335,7 @@ static int be_mbox_db_ready_wait(struct be_ctrl_info *ctrl)
 		if (ready)
 			break;

-		if (cnt > 6000000) {
+		if (cnt > 12000000) {
 			dev_err(&ctrl->pdev->dev, "mbox_db poll timed out\n");
 			return -EBUSY;
 		}

+ 0 - 3
drivers/scsi/be2iscsi/be_iscsi.c

@@ -522,7 +522,6 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
 	if (beiscsi_ep->ep_cid > (phba->fw_config.iscsi_cid_start +
 				  phba->params.cxns_per_ctrl * 2)) {
 		SE_DEBUG(DBG_LVL_1, "Failed in allocate iscsi cid\n");
-		beiscsi_put_cid(phba, beiscsi_ep->ep_cid);
 		goto free_ep;
 	}
 
@@ -559,7 +558,6 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
 		SE_DEBUG(DBG_LVL_1, "mgmt_open_connection Failed"
 				    " status = %d extd_status = %d\n",
 				    status, extd_status);
-		beiscsi_put_cid(phba, beiscsi_ep->ep_cid);
 		free_mcc_tag(&phba->ctrl, tag);
 		pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
 			    nonemb_cmd.va, nonemb_cmd.dma);
@@ -574,7 +572,6 @@ static int beiscsi_open_conn(struct iscsi_endpoint *ep,
 		beiscsi_ep->cid_vld = 1;
 		SE_DEBUG(DBG_LVL_8, "mgmt_open_connection Success\n");
 	}
-	beiscsi_put_cid(phba, beiscsi_ep->ep_cid);
 	pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
 			    nonemb_cmd.va, nonemb_cmd.dma);
 	return 0;

+ 1 - 1
drivers/scsi/be2iscsi/be_main.c

@@ -2040,7 +2040,7 @@ hwi_write_sgl(struct iscsi_wrb *pwrb, struct scatterlist *sg,
 	      unsigned int num_sg, struct beiscsi_io_task *io_task)
 {
 	struct iscsi_sge *psgl;
-	unsigned short sg_len, index;
+	unsigned int sg_len, index;
 	unsigned int sge_len = 0;
 	unsigned long long addr;
 	struct scatterlist *l_sg;

+ 5 - 12
drivers/scsi/bfa/Makefile

@@ -1,15 +1,8 @@
 obj-$(CONFIG_SCSI_BFA_FC) := bfa.o
 
-bfa-y := bfad.o bfad_intr.o bfad_os.o bfad_im.o bfad_attr.o bfad_fwimg.o
-bfa-y += bfad_debugfs.o
-bfa-y += bfa_core.o bfa_ioc.o bfa_ioc_ct.o bfa_ioc_cb.o bfa_iocfc.o bfa_fcxp.o
-bfa-y += bfa_lps.o bfa_hw_cb.o bfa_hw_ct.o bfa_intr.o bfa_timer.o bfa_rport.o
-bfa-y += bfa_fcport.o bfa_port.o bfa_uf.o bfa_sgpg.o bfa_module.o bfa_ioim.o
-bfa-y += bfa_itnim.o bfa_fcpim.o bfa_tskim.o bfa_log.o bfa_log_module.o
-bfa-y += bfa_csdebug.o bfa_sm.o plog.o
+bfa-y := bfad.o bfad_im.o bfad_attr.o bfad_debugfs.o
+bfa-y += bfa_ioc.o bfa_ioc_cb.o bfa_ioc_ct.o bfa_hw_cb.o bfa_hw_ct.o
+bfa-y += bfa_fcs.o bfa_fcs_lport.o bfa_fcs_rport.o bfa_fcs_fcpim.o bfa_fcbuild.o
+bfa-y += bfa_port.o bfa_fcpim.o bfa_core.o bfa_drv.o bfa_svc.o
 
-bfa-y += fcbuild.o fabric.o fcpim.o vfapi.o fcptm.o bfa_fcs.o bfa_fcs_port.o
-bfa-y += bfa_fcs_uf.o bfa_fcs_lport.o fab.o fdmi.o ms.o ns.o scn.o loop.o
-bfa-y += lport_api.o n2n.o rport.o rport_api.o rport_ftrs.o vport.o
-
-ccflags-y := -I$(obj) -I$(obj)/include -I$(obj)/include/cna -DBFA_PERF_BUILD
+ccflags-y := -DBFA_PERF_BUILD

+ 438 - 0
drivers/scsi/bfa/bfa.h

@@ -0,0 +1,438 @@
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ *
+ * Linux driver for Brocade Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+#ifndef __BFA_H__
+#define __BFA_H__
+
+#include "bfa_os_inc.h"
+#include "bfa_cs.h"
+#include "bfa_plog.h"
+#include "bfa_defs_svc.h"
+#include "bfi.h"
+#include "bfa_ioc.h"
+
+struct bfa_s;
+
+typedef void (*bfa_isr_func_t) (struct bfa_s *bfa, struct bfi_msg_s *m);
+typedef void    (*bfa_cb_cbfn_t) (void *cbarg, bfa_boolean_t complete);
+
+/**
+ * Interrupt message handlers
+ */
+void bfa_isr_unhandled(struct bfa_s *bfa, struct bfi_msg_s *m);
+void bfa_isr_bind(enum bfi_mclass mc, bfa_isr_func_t isr_func);
+
+/**
+ * Request and response queue related defines
+ */
+#define BFA_REQQ_NELEMS_MIN	(4)
+#define BFA_RSPQ_NELEMS_MIN	(4)
+
+#define bfa_reqq_pi(__bfa, __reqq)	((__bfa)->iocfc.req_cq_pi[__reqq])
+#define bfa_reqq_ci(__bfa, __reqq)					\
+	(*(u32 *)((__bfa)->iocfc.req_cq_shadow_ci[__reqq].kva))
+
+#define bfa_reqq_full(__bfa, __reqq)				\
+	(((bfa_reqq_pi(__bfa, __reqq) + 1) &			\
+	  ((__bfa)->iocfc.cfg.drvcfg.num_reqq_elems - 1)) ==	\
+	 bfa_reqq_ci(__bfa, __reqq))
+
+#define bfa_reqq_next(__bfa, __reqq)					\
+	(bfa_reqq_full(__bfa, __reqq) ? NULL :				\
+	 ((void *)((struct bfi_msg_s *)((__bfa)->iocfc.req_cq_ba[__reqq].kva) \
+		   + bfa_reqq_pi((__bfa), (__reqq)))))
+
+#define bfa_reqq_produce(__bfa, __reqq)	do {				\
+		(__bfa)->iocfc.req_cq_pi[__reqq]++;			\
+		(__bfa)->iocfc.req_cq_pi[__reqq] &=			\
+			((__bfa)->iocfc.cfg.drvcfg.num_reqq_elems - 1);      \
+		bfa_reg_write((__bfa)->iocfc.bfa_regs.cpe_q_pi[__reqq],	\
+			      (__bfa)->iocfc.req_cq_pi[__reqq]);      \
+		mmiowb();      \
+	} while (0)
+
+#define bfa_rspq_pi(__bfa, __rspq)					\
+	(*(u32 *)((__bfa)->iocfc.rsp_cq_shadow_pi[__rspq].kva))
+
+#define bfa_rspq_ci(__bfa, __rspq)	((__bfa)->iocfc.rsp_cq_ci[__rspq])
+#define bfa_rspq_elem(__bfa, __rspq, __ci)				\
+	(&((struct bfi_msg_s *)((__bfa)->iocfc.rsp_cq_ba[__rspq].kva))[__ci])
+
+#define CQ_INCR(__index, __size) do {			\
+	(__index)++;					\
+	(__index) &= ((__size) - 1);			\
+} while (0)
+
+/**
+ * Queue element to wait for room in request queue. FIFO order is
+ * maintained when fullfilling requests.
+ */
+struct bfa_reqq_wait_s {
+	struct list_head	qe;
+	void		(*qresume) (void *cbarg);
+	void		*cbarg;
+};
+
+/**
+ * Circular queue usage assignments
+ */
+enum {
+	BFA_REQQ_IOC	= 0,	/*  all low-priority IOC msgs	*/
+	BFA_REQQ_FCXP	= 0,	/*  all FCXP messages		*/
+	BFA_REQQ_LPS	= 0,	/*  all lport service msgs	*/
+	BFA_REQQ_PORT	= 0,	/*  all port messages		*/
+	BFA_REQQ_FLASH	= 0,	/*  for flash module		*/
+	BFA_REQQ_DIAG	= 0,	/*  for diag module		*/
+	BFA_REQQ_RPORT	= 0,	/*  all port messages		*/
+	BFA_REQQ_SBOOT	= 0,	/*  all san boot messages	*/
+	BFA_REQQ_QOS_LO	= 1,	/*  all low priority IO	*/
+	BFA_REQQ_QOS_MD	= 2,	/*  all medium priority IO	*/
+	BFA_REQQ_QOS_HI	= 3,	/*  all high priority IO	*/
+};
+
+static inline void
+bfa_reqq_winit(struct bfa_reqq_wait_s *wqe, void (*qresume) (void *cbarg),
+	       void *cbarg)
+{
+	wqe->qresume = qresume;
+	wqe->cbarg = cbarg;
+}
+
+#define bfa_reqq(__bfa, __reqq)	(&(__bfa)->reqq_waitq[__reqq])
+
+/**
+ * static inline void
+ * bfa_reqq_wait(struct bfa_s *bfa, int reqq, struct bfa_reqq_wait_s *wqe)
+ */
+#define bfa_reqq_wait(__bfa, __reqq, __wqe) do {			\
+									\
+		struct list_head *waitq = bfa_reqq(__bfa, __reqq);      \
+									\
+		bfa_assert(((__reqq) < BFI_IOC_MAX_CQS));      \
+		bfa_assert((__wqe)->qresume && (__wqe)->cbarg);      \
+									\
+		list_add_tail(&(__wqe)->qe, waitq);      \
+	} while (0)
+
+#define bfa_reqq_wcancel(__wqe)	list_del(&(__wqe)->qe)
+
+
+/**
+ * Generic BFA callback element.
+ */
+struct bfa_cb_qe_s {
+	struct list_head         qe;
+	bfa_cb_cbfn_t  cbfn;
+	bfa_boolean_t   once;
+	u32		rsvd;
+	void           *cbarg;
+};
+
+#define bfa_cb_queue(__bfa, __hcb_qe, __cbfn, __cbarg) do {	\
+		(__hcb_qe)->cbfn  = (__cbfn);      \
+		(__hcb_qe)->cbarg = (__cbarg);      \
+		list_add_tail(&(__hcb_qe)->qe, &(__bfa)->comp_q);      \
+	} while (0)
+
+#define bfa_cb_dequeue(__hcb_qe)	list_del(&(__hcb_qe)->qe)
+
+#define bfa_cb_queue_once(__bfa, __hcb_qe, __cbfn, __cbarg) do {	\
+		(__hcb_qe)->cbfn  = (__cbfn);      \
+		(__hcb_qe)->cbarg = (__cbarg);      \
+		if (!(__hcb_qe)->once) {      \
+			list_add_tail(&(__hcb_qe)->qe, &(__bfa)->comp_q);      \
+			(__hcb_qe)->once = BFA_TRUE;			\
+		}							\
+	} while (0)
+
+#define bfa_cb_queue_done(__hcb_qe) do {	\
+		(__hcb_qe)->once = BFA_FALSE;	\
+	} while (0)
+
+
+/**
+ * PCI devices supported by the current BFA
+ */
+struct bfa_pciid_s {
+	u16	device_id;
+	u16	vendor_id;
+};
+
+extern char     bfa_version[];
+
+/**
+ * BFA memory resources
+ */
+enum bfa_mem_type {
+	BFA_MEM_TYPE_KVA = 1,	/*  Kernel Virtual Memory *(non-dma-able) */
+	BFA_MEM_TYPE_DMA = 2,	/*  DMA-able memory */
+	BFA_MEM_TYPE_MAX = BFA_MEM_TYPE_DMA,
+};
+
+struct bfa_mem_elem_s {
+	enum bfa_mem_type mem_type;	/* see enum bfa_mem_type */
+	u32	mem_len;	/*  Total Length in Bytes	*/
+	u8		*kva;		/*  kernel virtual address	*/
+	u64	dma;		/*  dma address if DMA memory	*/
+	u8		*kva_curp;	/*  kva allocation cursor	*/
+	u64	dma_curp;	/*  dma allocation cursor	*/
+};
+
+struct bfa_meminfo_s {
+	struct bfa_mem_elem_s meminfo[BFA_MEM_TYPE_MAX];
+};
+#define bfa_meminfo_kva(_m)				\
+	((_m)->meminfo[BFA_MEM_TYPE_KVA - 1].kva_curp)
+#define bfa_meminfo_dma_virt(_m)			\
+	((_m)->meminfo[BFA_MEM_TYPE_DMA - 1].kva_curp)
+#define bfa_meminfo_dma_phys(_m)			\
+	((_m)->meminfo[BFA_MEM_TYPE_DMA - 1].dma_curp)
+
+struct bfa_iocfc_regs_s {
+	bfa_os_addr_t   intr_status;
+	bfa_os_addr_t   intr_mask;
+	bfa_os_addr_t   cpe_q_pi[BFI_IOC_MAX_CQS];
+	bfa_os_addr_t   cpe_q_ci[BFI_IOC_MAX_CQS];
+	bfa_os_addr_t   cpe_q_depth[BFI_IOC_MAX_CQS];
+	bfa_os_addr_t   cpe_q_ctrl[BFI_IOC_MAX_CQS];
+	bfa_os_addr_t   rme_q_ci[BFI_IOC_MAX_CQS];
+	bfa_os_addr_t   rme_q_pi[BFI_IOC_MAX_CQS];
+	bfa_os_addr_t   rme_q_depth[BFI_IOC_MAX_CQS];
+	bfa_os_addr_t   rme_q_ctrl[BFI_IOC_MAX_CQS];
+};
+
+/**
+ * MSIX vector handlers
+ */
+#define BFA_MSIX_MAX_VECTORS	22
+typedef void (*bfa_msix_handler_t)(struct bfa_s *bfa, int vec);
+struct bfa_msix_s {
+	int	nvecs;
+	bfa_msix_handler_t handler[BFA_MSIX_MAX_VECTORS];
+};
+
+/**
+ * Chip specific interfaces
+ */
+struct bfa_hwif_s {
+	void (*hw_reginit)(struct bfa_s *bfa);
+	void (*hw_reqq_ack)(struct bfa_s *bfa, int reqq);
+	void (*hw_rspq_ack)(struct bfa_s *bfa, int rspq);
+	void (*hw_msix_init)(struct bfa_s *bfa, int nvecs);
+	void (*hw_msix_install)(struct bfa_s *bfa);
+	void (*hw_msix_uninstall)(struct bfa_s *bfa);
+	void (*hw_isr_mode_set)(struct bfa_s *bfa, bfa_boolean_t msix);
+	void (*hw_msix_getvecs)(struct bfa_s *bfa, u32 *vecmap,
+				u32 *nvecs, u32 *maxvec);
+	void (*hw_msix_get_rme_range) (struct bfa_s *bfa, u32 *start,
+				       u32 *end);
+};
+typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
+
+struct bfa_iocfc_s {
+	struct bfa_s		*bfa;
+	struct bfa_iocfc_cfg_s	cfg;
+	int			action;
+	u32		req_cq_pi[BFI_IOC_MAX_CQS];
+	u32		rsp_cq_ci[BFI_IOC_MAX_CQS];
+	struct bfa_cb_qe_s	init_hcb_qe;
+	struct bfa_cb_qe_s	stop_hcb_qe;
+	struct bfa_cb_qe_s	dis_hcb_qe;
+	struct bfa_cb_qe_s	stats_hcb_qe;
+	bfa_boolean_t		cfgdone;
+
+	struct bfa_dma_s	cfg_info;
+	struct bfi_iocfc_cfg_s *cfginfo;
+	struct bfa_dma_s	cfgrsp_dma;
+	struct bfi_iocfc_cfgrsp_s *cfgrsp;
+	struct bfi_iocfc_cfg_reply_s *cfg_reply;
+	struct bfa_dma_s	req_cq_ba[BFI_IOC_MAX_CQS];
+	struct bfa_dma_s	req_cq_shadow_ci[BFI_IOC_MAX_CQS];
+	struct bfa_dma_s	rsp_cq_ba[BFI_IOC_MAX_CQS];
+	struct bfa_dma_s	rsp_cq_shadow_pi[BFI_IOC_MAX_CQS];
+	struct bfa_iocfc_regs_s	bfa_regs;	/*  BFA device registers */
+	struct bfa_hwif_s	hwif;
+	bfa_cb_iocfc_t		updateq_cbfn; /*  bios callback function */
+	void			*updateq_cbarg;	/*  bios callback arg */
+	u32	intr_mask;
+};
+
+#define bfa_lpuid(__bfa)						\
+	bfa_ioc_portid(&(__bfa)->ioc)
+#define bfa_msix_init(__bfa, __nvecs)					\
+	((__bfa)->iocfc.hwif.hw_msix_init(__bfa, __nvecs))
+#define bfa_msix_install(__bfa)						\
+	((__bfa)->iocfc.hwif.hw_msix_install(__bfa))
+#define bfa_msix_uninstall(__bfa)					\
+	((__bfa)->iocfc.hwif.hw_msix_uninstall(__bfa))
+#define bfa_isr_mode_set(__bfa, __msix)					\
+	((__bfa)->iocfc.hwif.hw_isr_mode_set(__bfa, __msix))
+#define bfa_msix_getvecs(__bfa, __vecmap, __nvecs, __maxvec)		\
+	((__bfa)->iocfc.hwif.hw_msix_getvecs(__bfa, __vecmap,		\
+					__nvecs, __maxvec))
+#define bfa_msix_get_rme_range(__bfa, __start, __end)			\
+	((__bfa)->iocfc.hwif.hw_msix_get_rme_range(__bfa, __start, __end))
+#define bfa_msix(__bfa, __vec)						\
+	((__bfa)->msix.handler[__vec](__bfa, __vec))
+
+/*
+ * FC specific IOC functions.
+ */
+void bfa_iocfc_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
+		       u32 *dm_len);
+void bfa_iocfc_attach(struct bfa_s *bfa, void *bfad,
+		      struct bfa_iocfc_cfg_s *cfg,
+		      struct bfa_meminfo_s *meminfo,
+		      struct bfa_pcidev_s *pcidev);
+void bfa_iocfc_detach(struct bfa_s *bfa);
+void bfa_iocfc_init(struct bfa_s *bfa);
+void bfa_iocfc_start(struct bfa_s *bfa);
+void bfa_iocfc_stop(struct bfa_s *bfa);
+void bfa_iocfc_isr(void *bfa, struct bfi_mbmsg_s *msg);
+void bfa_iocfc_set_snsbase(struct bfa_s *bfa, u64 snsbase_pa);
+bfa_boolean_t bfa_iocfc_is_operational(struct bfa_s *bfa);
+void bfa_iocfc_reset_queues(struct bfa_s *bfa);
+
+void bfa_msix_all(struct bfa_s *bfa, int vec);
+void bfa_msix_reqq(struct bfa_s *bfa, int vec);
+void bfa_msix_rspq(struct bfa_s *bfa, int vec);
+void bfa_msix_lpu_err(struct bfa_s *bfa, int vec);
+
+void bfa_hwcb_reginit(struct bfa_s *bfa);
+void bfa_hwcb_reqq_ack(struct bfa_s *bfa, int rspq);
+void bfa_hwcb_rspq_ack(struct bfa_s *bfa, int rspq);
+void bfa_hwcb_msix_init(struct bfa_s *bfa, int nvecs);
+void bfa_hwcb_msix_install(struct bfa_s *bfa);
+void bfa_hwcb_msix_uninstall(struct bfa_s *bfa);
+void bfa_hwcb_isr_mode_set(struct bfa_s *bfa, bfa_boolean_t msix);
+void bfa_hwcb_msix_getvecs(struct bfa_s *bfa, u32 *vecmap, u32 *nvecs,
+			   u32 *maxvec);
+void bfa_hwcb_msix_get_rme_range(struct bfa_s *bfa, u32 *start,
+				 u32 *end);
+void bfa_hwct_reginit(struct bfa_s *bfa);
+void bfa_hwct_reqq_ack(struct bfa_s *bfa, int rspq);
+void bfa_hwct_rspq_ack(struct bfa_s *bfa, int rspq);
+void bfa_hwct_msix_init(struct bfa_s *bfa, int nvecs);
+void bfa_hwct_msix_install(struct bfa_s *bfa);
+void bfa_hwct_msix_uninstall(struct bfa_s *bfa);
+void bfa_hwct_isr_mode_set(struct bfa_s *bfa, bfa_boolean_t msix);
+void bfa_hwct_msix_getvecs(struct bfa_s *bfa, u32 *vecmap, u32 *nvecs,
+			   u32 *maxvec);
+void bfa_hwct_msix_get_rme_range(struct bfa_s *bfa, u32 *start,
+				 u32 *end);
+void bfa_com_port_attach(struct bfa_s *bfa, struct bfa_meminfo_s *mi);
+void bfa_iocfc_get_bootwwns(struct bfa_s *bfa, u8 *nwwns, wwn_t *wwns);
+wwn_t bfa_iocfc_get_pwwn(struct bfa_s *bfa);
+wwn_t bfa_iocfc_get_nwwn(struct bfa_s *bfa);
+void bfa_iocfc_get_pbc_boot_cfg(struct bfa_s *bfa,
+				struct bfa_boot_pbc_s *pbcfg);
+int bfa_iocfc_get_pbc_vports(struct bfa_s *bfa,
+				struct bfi_pbc_vport_s *pbc_vport);
+
+
+/**
+ *----------------------------------------------------------------------
+ *		BFA public interfaces
+ *----------------------------------------------------------------------
+ */
+#define bfa_stats(_mod, _stats)	((_mod)->stats._stats++)
+#define bfa_ioc_get_stats(__bfa, __ioc_stats)		\
+	bfa_ioc_fetch_stats(&(__bfa)->ioc, __ioc_stats)
+#define bfa_ioc_clear_stats(__bfa)		\
+	bfa_ioc_clr_stats(&(__bfa)->ioc)
+#define bfa_get_nports(__bfa)			\
+	bfa_ioc_get_nports(&(__bfa)->ioc)
+#define bfa_get_adapter_manufacturer(__bfa, __manufacturer)		\
+	bfa_ioc_get_adapter_manufacturer(&(__bfa)->ioc, __manufacturer)
+#define bfa_get_adapter_model(__bfa, __model)			\
+	bfa_ioc_get_adapter_model(&(__bfa)->ioc, __model)
+#define bfa_get_adapter_serial_num(__bfa, __serial_num)			\
+	bfa_ioc_get_adapter_serial_num(&(__bfa)->ioc, __serial_num)
+#define bfa_get_adapter_fw_ver(__bfa, __fw_ver)			\
+	bfa_ioc_get_adapter_fw_ver(&(__bfa)->ioc, __fw_ver)
+#define bfa_get_adapter_optrom_ver(__bfa, __optrom_ver)			\
+	bfa_ioc_get_adapter_optrom_ver(&(__bfa)->ioc, __optrom_ver)
+#define bfa_get_pci_chip_rev(__bfa, __chip_rev)			\
+	bfa_ioc_get_pci_chip_rev(&(__bfa)->ioc, __chip_rev)
+#define bfa_get_ioc_state(__bfa)		\
+	bfa_ioc_get_state(&(__bfa)->ioc)
+#define bfa_get_type(__bfa)			\
+	bfa_ioc_get_type(&(__bfa)->ioc)
+#define bfa_get_mac(__bfa)			\
+	bfa_ioc_get_mac(&(__bfa)->ioc)
+#define bfa_get_mfg_mac(__bfa)			\
+	bfa_ioc_get_mfg_mac(&(__bfa)->ioc)
+#define bfa_get_fw_clock_res(__bfa)		\
+	((__bfa)->iocfc.cfgrsp->fwcfg.fw_tick_res)
+
+void bfa_get_pciids(struct bfa_pciid_s **pciids, int *npciids);
+void bfa_cfg_get_default(struct bfa_iocfc_cfg_s *cfg);
+void bfa_cfg_get_min(struct bfa_iocfc_cfg_s *cfg);
+void bfa_cfg_get_meminfo(struct bfa_iocfc_cfg_s *cfg,
+			 struct bfa_meminfo_s *meminfo);
+void bfa_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
+		struct bfa_meminfo_s *meminfo,
+		struct bfa_pcidev_s *pcidev);
+void bfa_init_trc(struct bfa_s *bfa, struct bfa_trc_mod_s *trcmod);
+void bfa_init_plog(struct bfa_s *bfa, struct bfa_plog_s *plog);
+void bfa_detach(struct bfa_s *bfa);
+void bfa_init(struct bfa_s *bfa);
+void bfa_start(struct bfa_s *bfa);
+void bfa_stop(struct bfa_s *bfa);
+void bfa_attach_fcs(struct bfa_s *bfa);
+void bfa_cb_init(void *bfad, bfa_status_t status);
+void bfa_cb_updateq(void *bfad, bfa_status_t status);
+
+bfa_boolean_t bfa_intx(struct bfa_s *bfa);
+void bfa_intx_disable(struct bfa_s *bfa);
+void bfa_intx_enable(struct bfa_s *bfa);
+void bfa_isr_enable(struct bfa_s *bfa);
+void bfa_isr_disable(struct bfa_s *bfa);
+
+void bfa_comp_deq(struct bfa_s *bfa, struct list_head *comp_q);
+void bfa_comp_process(struct bfa_s *bfa, struct list_head *comp_q);
+void bfa_comp_free(struct bfa_s *bfa, struct list_head *comp_q);
+
+typedef void (*bfa_cb_ioc_t) (void *cbarg, enum bfa_status status);
+void bfa_iocfc_get_attr(struct bfa_s *bfa, struct bfa_iocfc_attr_s *attr);
+void bfa_get_attr(struct bfa_s *bfa, struct bfa_ioc_attr_s *ioc_attr);
+
+void bfa_adapter_get_attr(struct bfa_s *bfa,
+			  struct bfa_adapter_attr_s *ad_attr);
+u64 bfa_adapter_get_id(struct bfa_s *bfa);
+
+bfa_status_t bfa_iocfc_israttr_set(struct bfa_s *bfa,
+				   struct bfa_iocfc_intr_attr_s *attr);
+
+void bfa_iocfc_enable(struct bfa_s *bfa);
+void bfa_iocfc_disable(struct bfa_s *bfa);
+void bfa_chip_reset(struct bfa_s *bfa);
+void bfa_timer_tick(struct bfa_s *bfa);
+#define bfa_timer_start(_bfa, _timer, _timercb, _arg, _timeout)		\
+	bfa_timer_begin(&(_bfa)->timer_mod, _timer, _timercb, _arg, _timeout)
+
+/*
+ * BFA debug API functions
+ */
+bfa_status_t bfa_debug_fwtrc(struct bfa_s *bfa, void *trcdata, int *trclen);
+bfa_status_t bfa_debug_fwsave(struct bfa_s *bfa, void *trcdata, int *trclen);
+bfa_status_t bfa_debug_fwcore(struct bfa_s *bfa, void *buf,
+			      u32 *offset, int *buflen);
+void bfa_debug_fwsave_clear(struct bfa_s *bfa);
+bfa_status_t bfa_fw_stats_get(struct bfa_s *bfa, void *data);
+bfa_status_t bfa_fw_stats_clear(struct bfa_s *bfa);
+
+#endif /* __BFA_H__ */

+ 0 - 57
drivers/scsi/bfa/bfa_callback_priv.h

@@ -1,57 +0,0 @@
-/*
- * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
- * All rights reserved
- * www.brocade.com
- *
- * Linux driver for Brocade Fibre Channel Host Bus Adapter.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License (GPL) Version 2 as
- * published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- */
-
-#ifndef __BFA_CALLBACK_PRIV_H__
-#define __BFA_CALLBACK_PRIV_H__
-
-#include <cs/bfa_q.h>
-
-typedef void    (*bfa_cb_cbfn_t) (void *cbarg, bfa_boolean_t complete);
-
-/**
- * Generic BFA callback element.
- */
-struct bfa_cb_qe_s {
-	struct list_head         qe;
-	bfa_cb_cbfn_t  cbfn;
-	bfa_boolean_t   once;
-	u32		rsvd;
-	void           *cbarg;
-};
-
-#define bfa_cb_queue(__bfa, __hcb_qe, __cbfn, __cbarg) do {		\
-	(__hcb_qe)->cbfn  = (__cbfn);      \
-	(__hcb_qe)->cbarg = (__cbarg);      \
-	list_add_tail(&(__hcb_qe)->qe, &(__bfa)->comp_q);      \
-} while (0)
-
-#define bfa_cb_dequeue(__hcb_qe)	list_del(&(__hcb_qe)->qe)
-
-#define bfa_cb_queue_once(__bfa, __hcb_qe, __cbfn, __cbarg) do {	\
-	(__hcb_qe)->cbfn  = (__cbfn);      \
-	(__hcb_qe)->cbarg = (__cbarg);      \
-	if (!(__hcb_qe)->once) {      \
-		list_add_tail((__hcb_qe), &(__bfa)->comp_q);      \
-		(__hcb_qe)->once = BFA_TRUE;				\
-	}								\
-} while (0)
-
-#define bfa_cb_queue_done(__hcb_qe) do {				\
-	(__hcb_qe)->once = BFA_FALSE;					\
-} while (0)
-
-#endif /* __BFA_CALLBACK_PRIV_H__ */

+ 9 - 21
drivers/scsi/bfa/bfa_cb_ioim_macros.h → drivers/scsi/bfa/bfa_cb_ioim.h

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
  * All rights reserved
  * www.brocade.com
  *
@@ -15,37 +15,25 @@
  * General Public License for more details.
  */
 
-/**
- *  bfa_cb_ioim_macros.h BFA IOIM driver interface macros.
- */
-
-#ifndef __BFA_HCB_IOIM_MACROS_H__
-#define __BFA_HCB_IOIM_MACROS_H__
-
-#include <bfa_os_inc.h>
-/*
- * #include <linux/dma-mapping.h>
- *
- * #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include
- * <scsi/scsi_device.h> #include <scsi/scsi_host.h>
- */
-#include "bfad_im_compat.h"
+#ifndef __BFA_HCB_IOIM_H__
+#define __BFA_HCB_IOIM_H__
 
+#include "bfa_os_inc.h"
 /*
  * task attribute values in FCP-2 FCP_CMND IU
  */
 #define SIMPLE_Q    0
 #define HEAD_OF_Q   1
 #define ORDERED_Q   2
-#define ACA_Q       4
+#define ACA_Q	    4
 #define UNTAGGED    5

 static inline lun_t
 bfad_int_to_lun(u32 luno)
 {
 	union {
-		u16        scsi_lun[4];
-		lun_t           bfa_lun;
+		u16	scsi_lun[4];
+		lun_t		bfa_lun;
 	} lun;

 	lun.bfa_lun     = 0;
@@ -141,7 +129,7 @@ static inline u8
 bfa_cb_ioim_get_taskattr(struct bfad_ioim_s *dio)
 {
 	struct scsi_cmnd *cmnd = (struct scsi_cmnd *)dio;
-	u8         task_attr = UNTAGGED;
+	u8	task_attr = UNTAGGED;

 	if (cmnd->device->tagged_supported) {
 		switch (cmnd->tag) {
@@ -178,4 +166,4 @@ bfa_cb_ioim_get_cdblen(struct bfad_ioim_s *dio)
  */
 #define bfa_cb_ioim_get_reqq(__dio) BFA_FALSE
 
-#endif /* __BFA_HCB_IOIM_MACROS_H__ */
+#endif /* __BFA_HCB_IOIM_H__ */

+ 0 - 492
drivers/scsi/bfa/bfa_cee.c

@@ -1,492 +0,0 @@
-/*
- * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
- * All rights reserved
- * www.brocade.com
- *
- * Linux driver for Brocade Fibre Channel Host Bus Adapter.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License (GPL) Version 2 as
- * published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- */
-
-#include <defs/bfa_defs_cee.h>
-#include <cs/bfa_trc.h>
-#include <cs/bfa_log.h>
-#include <cs/bfa_debug.h>
-#include <cee/bfa_cee.h>
-#include <bfi/bfi_cee.h>
-#include <bfi/bfi.h>
-#include <bfa_ioc.h>
-#include <cna/bfa_cna_trcmod.h>
-
-BFA_TRC_FILE(CNA, CEE);
-
-#define bfa_ioc_portid(__ioc) ((__ioc)->port_id)
-#define bfa_lpuid(__arg) bfa_ioc_portid(&(__arg)->ioc)
-
-static void     bfa_cee_format_lldp_cfg(struct bfa_cee_lldp_cfg_s *lldp_cfg);
-static void     bfa_cee_format_dcbcx_stats(struct bfa_cee_dcbx_stats_s
-					   *dcbcx_stats);
-static void     bfa_cee_format_lldp_stats(struct bfa_cee_lldp_stats_s
-					  *lldp_stats);
-static void     bfa_cee_format_cfg_stats(struct bfa_cee_cfg_stats_s *cfg_stats);
-static void     bfa_cee_format_cee_cfg(void *buffer);
-static void     bfa_cee_format_cee_stats(void *buffer);
-
-static void
-bfa_cee_format_cee_stats(void *buffer)
-{
-	struct bfa_cee_stats_s *cee_stats = buffer;
-	bfa_cee_format_dcbcx_stats(&cee_stats->dcbx_stats);
-	bfa_cee_format_lldp_stats(&cee_stats->lldp_stats);
-	bfa_cee_format_cfg_stats(&cee_stats->cfg_stats);
-}
-
-static void
-bfa_cee_format_cee_cfg(void *buffer)
-{
-	struct bfa_cee_attr_s *cee_cfg = buffer;
-	bfa_cee_format_lldp_cfg(&cee_cfg->lldp_remote);
-}
-
-static void
-bfa_cee_format_dcbcx_stats(struct bfa_cee_dcbx_stats_s *dcbcx_stats)
-{
-	dcbcx_stats->subtlvs_unrecognized =
-		bfa_os_ntohl(dcbcx_stats->subtlvs_unrecognized);
-	dcbcx_stats->negotiation_failed =
-		bfa_os_ntohl(dcbcx_stats->negotiation_failed);
-	dcbcx_stats->remote_cfg_changed =
-		bfa_os_ntohl(dcbcx_stats->remote_cfg_changed);
-	dcbcx_stats->tlvs_received = bfa_os_ntohl(dcbcx_stats->tlvs_received);
-	dcbcx_stats->tlvs_invalid = bfa_os_ntohl(dcbcx_stats->tlvs_invalid);
-	dcbcx_stats->seqno = bfa_os_ntohl(dcbcx_stats->seqno);
-	dcbcx_stats->ackno = bfa_os_ntohl(dcbcx_stats->ackno);
-	dcbcx_stats->recvd_seqno = bfa_os_ntohl(dcbcx_stats->recvd_seqno);
-	dcbcx_stats->recvd_ackno = bfa_os_ntohl(dcbcx_stats->recvd_ackno);
-}
-
-static void
-bfa_cee_format_lldp_stats(struct bfa_cee_lldp_stats_s *lldp_stats)
-{
-	lldp_stats->frames_transmitted =
-		bfa_os_ntohl(lldp_stats->frames_transmitted);
-	lldp_stats->frames_aged_out = bfa_os_ntohl(lldp_stats->frames_aged_out);
-	lldp_stats->frames_discarded =
-		bfa_os_ntohl(lldp_stats->frames_discarded);
-	lldp_stats->frames_in_error = bfa_os_ntohl(lldp_stats->frames_in_error);
-	lldp_stats->frames_rcvd = bfa_os_ntohl(lldp_stats->frames_rcvd);
-	lldp_stats->tlvs_discarded = bfa_os_ntohl(lldp_stats->tlvs_discarded);
-	lldp_stats->tlvs_unrecognized =
-		bfa_os_ntohl(lldp_stats->tlvs_unrecognized);
-}
-
-static void
-bfa_cee_format_cfg_stats(struct bfa_cee_cfg_stats_s *cfg_stats)
-{
-	cfg_stats->cee_status_down = bfa_os_ntohl(cfg_stats->cee_status_down);
-	cfg_stats->cee_status_up = bfa_os_ntohl(cfg_stats->cee_status_up);
-	cfg_stats->cee_hw_cfg_changed =
-		bfa_os_ntohl(cfg_stats->cee_hw_cfg_changed);
-	cfg_stats->recvd_invalid_cfg =
-		bfa_os_ntohl(cfg_stats->recvd_invalid_cfg);
-}
-
-static void
-bfa_cee_format_lldp_cfg(struct bfa_cee_lldp_cfg_s *lldp_cfg)
-{
-	lldp_cfg->time_to_interval = bfa_os_ntohs(lldp_cfg->time_to_interval);
-	lldp_cfg->enabled_system_cap =
-		bfa_os_ntohs(lldp_cfg->enabled_system_cap);
-}
-
-/**
- * bfa_cee_attr_meminfo()
- *
- *
- * @param[in] void
- *
- * @return Size of DMA region
- */
-static          u32
-bfa_cee_attr_meminfo(void)
-{
-	return BFA_ROUNDUP(sizeof(struct bfa_cee_attr_s), BFA_DMA_ALIGN_SZ);
-}
-
-/**
- * bfa_cee_stats_meminfo()
- *
- *
- * @param[in] void
- *
- * @return Size of DMA region
- */
-static          u32
-bfa_cee_stats_meminfo(void)
-{
-	return BFA_ROUNDUP(sizeof(struct bfa_cee_stats_s), BFA_DMA_ALIGN_SZ);
-}
-
-/**
- * bfa_cee_get_attr_isr()
- *
- *
- * @param[in] cee - Pointer to the CEE module
- *            status - Return status from the f/w
- *
- * @return void
- */
-static void
-bfa_cee_get_attr_isr(struct bfa_cee_s *cee, bfa_status_t status)
-{
-	cee->get_attr_status = status;
-	bfa_trc(cee, 0);
-	if (status == BFA_STATUS_OK) {
-		bfa_trc(cee, 0);
-		/*
-		 * The requested data has been copied to the DMA area, *process
-		 * it.
-		 */
-		memcpy(cee->attr, cee->attr_dma.kva,
-		       sizeof(struct bfa_cee_attr_s));
-		bfa_cee_format_cee_cfg(cee->attr);
-	}
-	cee->get_attr_pending = BFA_FALSE;
-	if (cee->cbfn.get_attr_cbfn) {
-		bfa_trc(cee, 0);
-		cee->cbfn.get_attr_cbfn(cee->cbfn.get_attr_cbarg, status);
-	}
-	bfa_trc(cee, 0);
-}
-
-/**
- * bfa_cee_get_attr_isr()
- *
- *
- * @param[in] cee - Pointer to the CEE module
- *            status - Return status from the f/w
- *
- * @return void
- */
-static void
-bfa_cee_get_stats_isr(struct bfa_cee_s *cee, bfa_status_t status)
-{
-	cee->get_stats_status = status;
-	bfa_trc(cee, 0);
-	if (status == BFA_STATUS_OK) {
-		bfa_trc(cee, 0);
-		/*
-		 * The requested data has been copied to the DMA area, process
-		 * it.
-		 */
-		memcpy(cee->stats, cee->stats_dma.kva,
-		       sizeof(struct bfa_cee_stats_s));
-		bfa_cee_format_cee_stats(cee->stats);
-	}
-	cee->get_stats_pending = BFA_FALSE;
-	bfa_trc(cee, 0);
-	if (cee->cbfn.get_stats_cbfn) {
-		bfa_trc(cee, 0);
-		cee->cbfn.get_stats_cbfn(cee->cbfn.get_stats_cbarg, status);
-	}
-	bfa_trc(cee, 0);
-}
-
-/**
- * bfa_cee_get_attr_isr()
- *
- *
- * @param[in] cee - Pointer to the CEE module
- *            status - Return status from the f/w
- *
- * @return void
- */
-static void
-bfa_cee_reset_stats_isr(struct bfa_cee_s *cee, bfa_status_t status)
-{
-	cee->reset_stats_status = status;
-	cee->reset_stats_pending = BFA_FALSE;
-	if (cee->cbfn.reset_stats_cbfn)
-		cee->cbfn.reset_stats_cbfn(cee->cbfn.reset_stats_cbarg, status);
-}
-
-/**
- * bfa_cee_meminfo()
- *
- *
- * @param[in] void
- *
- * @return Size of DMA region
- */
-u32
-bfa_cee_meminfo(void)
-{
-	return bfa_cee_attr_meminfo() + bfa_cee_stats_meminfo();
-}
-
-/**
- * bfa_cee_mem_claim()
- *
- *
- * @param[in] cee CEE module pointer
- * 	      dma_kva Kernel Virtual Address of CEE DMA Memory
- * 	      dma_pa  Physical Address of CEE DMA Memory
- *
- * @return void
- */
-void
-bfa_cee_mem_claim(struct bfa_cee_s *cee, u8 *dma_kva, u64 dma_pa)
-{
-	cee->attr_dma.kva = dma_kva;
-	cee->attr_dma.pa = dma_pa;
-	cee->stats_dma.kva = dma_kva + bfa_cee_attr_meminfo();
-	cee->stats_dma.pa = dma_pa + bfa_cee_attr_meminfo();
-	cee->attr = (struct bfa_cee_attr_s *)dma_kva;
-	cee->stats =
-		(struct bfa_cee_stats_s *)(dma_kva + bfa_cee_attr_meminfo());
-}
-
-/**
- * bfa_cee_get_attr()
- *
- *   Send the request to the f/w to fetch CEE attributes.
- *
- * @param[in] Pointer to the CEE module data structure.
- *
- * @return Status
- */
-
-bfa_status_t
-bfa_cee_get_attr(struct bfa_cee_s *cee, struct bfa_cee_attr_s *attr,
-		 bfa_cee_get_attr_cbfn_t cbfn, void *cbarg)
-{
-	struct bfi_cee_get_req_s *cmd;
-
-	bfa_assert((cee != NULL) && (cee->ioc != NULL));
-	bfa_trc(cee, 0);
-	if (!bfa_ioc_is_operational(cee->ioc)) {
-		bfa_trc(cee, 0);
-		return BFA_STATUS_IOC_FAILURE;
-	}
-	if (cee->get_attr_pending == BFA_TRUE) {
-		bfa_trc(cee, 0);
-		return BFA_STATUS_DEVBUSY;
-	}
-	cee->get_attr_pending = BFA_TRUE;
-	cmd = (struct bfi_cee_get_req_s *)cee->get_cfg_mb.msg;
-	cee->attr = attr;
-	cee->cbfn.get_attr_cbfn = cbfn;
-	cee->cbfn.get_attr_cbarg = cbarg;
-	bfi_h2i_set(cmd->mh, BFI_MC_CEE, BFI_CEE_H2I_GET_CFG_REQ,
-		    bfa_ioc_portid(cee->ioc));
-	bfa_dma_be_addr_set(cmd->dma_addr, cee->attr_dma.pa);
-	bfa_ioc_mbox_queue(cee->ioc, &cee->get_cfg_mb);
-	bfa_trc(cee, 0);
-
-	return BFA_STATUS_OK;
-}
-
-/**
- * bfa_cee_get_stats()
- *
- *   Send the request to the f/w to fetch CEE statistics.
- *
- * @param[in] Pointer to the CEE module data structure.
- *
- * @return Status
- */
-
-bfa_status_t
-bfa_cee_get_stats(struct bfa_cee_s *cee, struct bfa_cee_stats_s *stats,
-		  bfa_cee_get_stats_cbfn_t cbfn, void *cbarg)
-{
-	struct bfi_cee_get_req_s *cmd;
-
-	bfa_assert((cee != NULL) && (cee->ioc != NULL));
-
-	if (!bfa_ioc_is_operational(cee->ioc)) {
-		bfa_trc(cee, 0);
-		return BFA_STATUS_IOC_FAILURE;
-	}
-	if (cee->get_stats_pending == BFA_TRUE) {
-		bfa_trc(cee, 0);
-		return BFA_STATUS_DEVBUSY;
-	}
-	cee->get_stats_pending = BFA_TRUE;
-	cmd = (struct bfi_cee_get_req_s *)cee->get_stats_mb.msg;
-	cee->stats = stats;
-	cee->cbfn.get_stats_cbfn = cbfn;
-	cee->cbfn.get_stats_cbarg = cbarg;
-	bfi_h2i_set(cmd->mh, BFI_MC_CEE, BFI_CEE_H2I_GET_STATS_REQ,
-		    bfa_ioc_portid(cee->ioc));
-	bfa_dma_be_addr_set(cmd->dma_addr, cee->stats_dma.pa);
-	bfa_ioc_mbox_queue(cee->ioc, &cee->get_stats_mb);
-	bfa_trc(cee, 0);
-
-	return BFA_STATUS_OK;
-}
-
-/**
- * bfa_cee_reset_stats()
- *
- *
- * @param[in] Pointer to the CEE module data structure.
- *
- * @return Status
- */
-
-bfa_status_t
-bfa_cee_reset_stats(struct bfa_cee_s *cee, bfa_cee_reset_stats_cbfn_t cbfn,
-		    void *cbarg)
-{
-	struct bfi_cee_reset_stats_s *cmd;
-
-	bfa_assert((cee != NULL) && (cee->ioc != NULL));
-	if (!bfa_ioc_is_operational(cee->ioc)) {
-		bfa_trc(cee, 0);
-		return BFA_STATUS_IOC_FAILURE;
-	}
-	if (cee->reset_stats_pending == BFA_TRUE) {
-		bfa_trc(cee, 0);
-		return BFA_STATUS_DEVBUSY;
-	}
-	cee->reset_stats_pending = BFA_TRUE;
-	cmd = (struct bfi_cee_reset_stats_s *)cee->reset_stats_mb.msg;
-	cee->cbfn.reset_stats_cbfn = cbfn;
-	cee->cbfn.reset_stats_cbarg = cbarg;
-	bfi_h2i_set(cmd->mh, BFI_MC_CEE, BFI_CEE_H2I_RESET_STATS,
-		    bfa_ioc_portid(cee->ioc));
-	bfa_ioc_mbox_queue(cee->ioc, &cee->reset_stats_mb);
-	bfa_trc(cee, 0);
-	return BFA_STATUS_OK;
-}
-
-/**
- * bfa_cee_isrs()
- *
- *
- * @param[in] Pointer to the CEE module data structure.
- *
- * @return void
- */
-
-void
-bfa_cee_isr(void *cbarg, struct bfi_mbmsg_s *m)
-{
-	union bfi_cee_i2h_msg_u *msg;
-	struct bfi_cee_get_rsp_s *get_rsp;
-	struct bfa_cee_s *cee = (struct bfa_cee_s *)cbarg;
-	msg = (union bfi_cee_i2h_msg_u *)m;
-	get_rsp = (struct bfi_cee_get_rsp_s *)m;
-	bfa_trc(cee, msg->mh.msg_id);
-	switch (msg->mh.msg_id) {
-	case BFI_CEE_I2H_GET_CFG_RSP:
-		bfa_trc(cee, get_rsp->cmd_status);
-		bfa_cee_get_attr_isr(cee, get_rsp->cmd_status);
-		break;
-	case BFI_CEE_I2H_GET_STATS_RSP:
-		bfa_cee_get_stats_isr(cee, get_rsp->cmd_status);
-		break;
-	case BFI_CEE_I2H_RESET_STATS_RSP:
-		bfa_cee_reset_stats_isr(cee, get_rsp->cmd_status);
-		break;
-	default:
-		bfa_assert(0);
-	}
-}
-
-/**
- * bfa_cee_hbfail()
- *
- *
- * @param[in] Pointer to the CEE module data structure.
- *
- * @return void
- */
-
-void
-bfa_cee_hbfail(void *arg)
-{
-	struct bfa_cee_s *cee;
-	cee = (struct bfa_cee_s *)arg;
-
-	if (cee->get_attr_pending == BFA_TRUE) {
-		cee->get_attr_status = BFA_STATUS_FAILED;
-		cee->get_attr_pending = BFA_FALSE;
-		if (cee->cbfn.get_attr_cbfn) {
-			cee->cbfn.get_attr_cbfn(cee->cbfn.get_attr_cbarg,
-						BFA_STATUS_FAILED);
-		}
-	}
-	if (cee->get_stats_pending == BFA_TRUE) {
-		cee->get_stats_status = BFA_STATUS_FAILED;
-		cee->get_stats_pending = BFA_FALSE;
-		if (cee->cbfn.get_stats_cbfn) {
-			cee->cbfn.get_stats_cbfn(cee->cbfn.get_stats_cbarg,
-						 BFA_STATUS_FAILED);
-		}
-	}
-	if (cee->reset_stats_pending == BFA_TRUE) {
-		cee->reset_stats_status = BFA_STATUS_FAILED;
-		cee->reset_stats_pending = BFA_FALSE;
-		if (cee->cbfn.reset_stats_cbfn) {
-			cee->cbfn.reset_stats_cbfn(cee->cbfn.reset_stats_cbarg,
-						   BFA_STATUS_FAILED);
-		}
-	}
-}
-
-/**
- * bfa_cee_attach()
- *
- *
- * @param[in] cee - Pointer to the CEE module data structure
- *            ioc - Pointer to the ioc module data structure
- *            dev - Pointer to the device driver module data structure
- *                  The device driver specific mbox ISR functions have
- *                  this pointer as one of the parameters.
- *            trcmod -
- *            logmod -
- *
- * @return void
- */
-void
-bfa_cee_attach(struct bfa_cee_s *cee, struct bfa_ioc_s *ioc, void *dev,
-	       struct bfa_trc_mod_s *trcmod, struct bfa_log_mod_s *logmod)
-{
-	bfa_assert(cee != NULL);
-	cee->dev = dev;
-	cee->trcmod = trcmod;
-	cee->logmod = logmod;
-	cee->ioc = ioc;
-
-	bfa_ioc_mbox_regisr(cee->ioc, BFI_MC_CEE, bfa_cee_isr, cee);
-	bfa_ioc_hbfail_init(&cee->hbfail, bfa_cee_hbfail, cee);
-	bfa_ioc_hbfail_register(cee->ioc, &cee->hbfail);
-	bfa_trc(cee, 0);
-}
-
-/**
- * bfa_cee_detach()
- *
- *
- * @param[in] cee - Pointer to the CEE module data structure
- *
- * @return void
- */
-void
-bfa_cee_detach(struct bfa_cee_s *cee)
-{
-	/*
-	 * For now, just check if there is some ioctl pending and mark that as
-	 * failed?
-	 */
-	/* bfa_cee_hbfail(cee); */
-}


+ 985 - 20
drivers/scsi/bfa/bfa_core.c

File diff suppressed because it is too large

+ 364 - 0
drivers/scsi/bfa/bfa_cs.h

@@ -0,0 +1,364 @@
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ *
+ * Linux driver for Brocade Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+
+/**
+ *  bfa_cs.h BFA common services
+ */
+
+#ifndef __BFA_CS_H__
+#define __BFA_CS_H__
+
+#include "bfa_os_inc.h"
+
+/**
+ * BFA TRC
+ */
+
+#ifndef BFA_TRC_MAX
+#define BFA_TRC_MAX	(4 * 1024)
+#endif
+
+#ifndef BFA_TRC_TS
+#define BFA_TRC_TS(_trcm)	((_trcm)->ticks++)
+#endif
+
+struct bfa_trc_s {
+#ifdef __BIGENDIAN
+	u16	fileno;
+	u16	line;
+#else
+	u16	line;
+	u16	fileno;
+#endif
+	u32	timestamp;
+	union {
+		struct {
+			u32	rsvd;
+			u32	u32;
+		} u32;
+		u64	u64;
+	} data;
+};
+
+struct bfa_trc_mod_s {
+	u32	head;
+	u32	tail;
+	u32	ntrc;
+	u32	stopped;
+	u32	ticks;
+	u32	rsvd[3];
+	struct bfa_trc_s trc[BFA_TRC_MAX];
+};
+
+enum {
+	BFA_TRC_HAL  = 1,	/*  BFA modules */
+	BFA_TRC_FCS  = 2,	/*  BFA FCS modules */
+	BFA_TRC_LDRV = 3,	/*  Linux driver modules */
+	BFA_TRC_CNA  = 4,	/*  Common modules */
+};
+#define BFA_TRC_MOD_SH	10
+#define BFA_TRC_MOD(__mod)	((BFA_TRC_ ## __mod) << BFA_TRC_MOD_SH)
+
+/**
+ * Define a new tracing file (module). Module should match one defined above.
+ */
+#define BFA_TRC_FILE(__mod, __submod)					\
+	static int __trc_fileno = ((BFA_TRC_ ## __mod ## _ ## __submod) | \
+						 BFA_TRC_MOD(__mod))
+
+
+#define bfa_trc32(_trcp, _data)	\
+	__bfa_trc32((_trcp)->trcmod, __trc_fileno, __LINE__, (u32)_data)
+#define bfa_trc(_trcp, _data)	\
+	__bfa_trc((_trcp)->trcmod, __trc_fileno, __LINE__, (u64)_data)
+
+static inline void
+bfa_trc_init(struct bfa_trc_mod_s *trcm)
+{
+	trcm->head = trcm->tail = trcm->stopped = 0;
+	trcm->ntrc = BFA_TRC_MAX;
+}
+
+static inline void
+bfa_trc_stop(struct bfa_trc_mod_s *trcm)
+{
+	trcm->stopped = 1;
+}
+
+#ifdef FWTRC
+extern void dc_flush(void *data);
+#else
+#define dc_flush(data)
+#endif
+
+
+static inline void
+__bfa_trc(struct bfa_trc_mod_s *trcm, int fileno, int line, u64 data)
+{
+	int		tail = trcm->tail;
+	struct bfa_trc_s	*trc = &trcm->trc[tail];
+
+	if (trcm->stopped)
+		return;
+
+	trc->fileno = (u16) fileno;
+	trc->line = (u16) line;
+	trc->data.u64 = data;
+	trc->timestamp = BFA_TRC_TS(trcm);
+	dc_flush(trc);
+
+	trcm->tail = (trcm->tail + 1) & (BFA_TRC_MAX - 1);
+	if (trcm->tail == trcm->head)
+		trcm->head = (trcm->head + 1) & (BFA_TRC_MAX - 1);
+	dc_flush(trcm);
+}
+
+
+static inline void
+__bfa_trc32(struct bfa_trc_mod_s *trcm, int fileno, int line, u32 data)
+{
+	int		tail = trcm->tail;
+	struct bfa_trc_s *trc = &trcm->trc[tail];
+
+	if (trcm->stopped)
+		return;
+
+	trc->fileno = (u16) fileno;
+	trc->line = (u16) line;
+	trc->data.u32.u32 = data;
+	trc->timestamp = BFA_TRC_TS(trcm);
+	dc_flush(trc);
+
+	trcm->tail = (trcm->tail + 1) & (BFA_TRC_MAX - 1);
+	if (trcm->tail == trcm->head)
+		trcm->head = (trcm->head + 1) & (BFA_TRC_MAX - 1);
+	dc_flush(trcm);
+}
+
+#ifndef BFA_PERF_BUILD
+#define bfa_trc_fp(_trcp, _data)	bfa_trc(_trcp, _data)
+#else
+#define bfa_trc_fp(_trcp, _data)
+#endif
+
+/**
+ * @ BFA LOG interfaces
+ */
+#define bfa_assert(__cond)	do {					\
+	if (!(__cond)) {						\
+		printk(KERN_ERR "assert(%s) failed at %s:%d\n",		\
+		#__cond, __FILE__, __LINE__);				\
+	}								\
+} while (0)
+
+#define bfa_sm_fault(__mod, __event)	do {				\
+	bfa_trc(__mod, (((u32)0xDEAD << 16) | __event));		\
+	printk(KERN_ERR	"Assertion failure: %s:%d: %d",			\
+		__FILE__, __LINE__, (__event));				\
+} while (0)
+
+#ifndef BFA_PERF_BUILD
+#define bfa_assert_fp(__cond)	bfa_assert(__cond)
+#else
+#define bfa_assert_fp(__cond)
+#endif
+
+/* BFA queue definitions */
+#define bfa_q_first(_q) ((void *)(((struct list_head *) (_q))->next))
+#define bfa_q_next(_qe) (((struct list_head *) (_qe))->next)
+#define bfa_q_prev(_qe) (((struct list_head *) (_qe))->prev)
+
+/*
+ * bfa_q_qe_init - to initialize a queue element
+ */
+#define bfa_q_qe_init(_qe) {				\
+	bfa_q_next(_qe) = (struct list_head *) NULL;	\
+	bfa_q_prev(_qe) = (struct list_head *) NULL;	\
+}
+
+/*
+ * bfa_q_deq - dequeue an element from head of the queue
+ */
+#define bfa_q_deq(_q, _qe) {						\
+	if (!list_empty(_q)) {						\
+		(*((struct list_head **) (_qe))) = bfa_q_next(_q);	\
+		bfa_q_prev(bfa_q_next(*((struct list_head **) _qe))) =	\
+				(struct list_head *) (_q);		\
+		bfa_q_next(_q) = bfa_q_next(*((struct list_head **) _qe));\
+		BFA_Q_DBG_INIT(*((struct list_head **) _qe));		\
+	} else {							\
+		*((struct list_head **) (_qe)) = (struct list_head *) NULL;\
+	}								\
+}
+
+/*
+ * bfa_q_deq_tail - dequeue an element from tail of the queue
+ */
+#define bfa_q_deq_tail(_q, _qe) {					\
+	if (!list_empty(_q)) {						\
+		*((struct list_head **) (_qe)) = bfa_q_prev(_q);	\
+		bfa_q_next(bfa_q_prev(*((struct list_head **) _qe))) =	\
+			(struct list_head *) (_q);			\
+		bfa_q_prev(_q) = bfa_q_prev(*(struct list_head **) _qe);\
+		BFA_Q_DBG_INIT(*((struct list_head **) _qe));		\
+	} else {							\
+		*((struct list_head **) (_qe)) = (struct list_head *) NULL;\
+	}								\
+}
+
+static inline int
+bfa_q_is_on_q_func(struct list_head *q, struct list_head *qe)
+{
+	struct list_head        *tqe;
+
+	tqe = bfa_q_next(q);
+	while (tqe != q) {
+		if (tqe == qe)
+			return 1;
+		tqe = bfa_q_next(tqe);
+		if (tqe == NULL)
+			break;
+	}
+	return 0;
+}
+
+/*
+ * #ifdef BFA_DEBUG (Using bfa_assert to check for debug_build is not
+ * consistent across modules)
+ */
+#ifndef BFA_PERF_BUILD
+#define BFA_Q_DBG_INIT(_qe) bfa_q_qe_init(_qe)
+#else
+#define BFA_Q_DBG_INIT(_qe)
+#endif
+
+#define bfa_q_is_on_q(_q, _qe)      \
+	bfa_q_is_on_q_func(_q, (struct list_head *)(_qe))
+
+/**
+ * @ BFA state machine interfaces
+ */
+
+typedef void (*bfa_sm_t)(void *sm, int event);
+
+/**
+ * oc - object class eg. bfa_ioc
+ * st - state, eg. reset
+ * otype - object type, eg. struct bfa_ioc_s
+ * etype - event type, eg. enum ioc_event
+ */
+#define bfa_sm_state_decl(oc, st, otype, etype)		\
+	static void oc ## _sm_ ## st(otype * fsm, etype event)
+
+#define bfa_sm_set_state(_sm, _state)	((_sm)->sm = (bfa_sm_t)(_state))
+#define bfa_sm_send_event(_sm, _event)	((_sm)->sm((_sm), (_event)))
+#define bfa_sm_get_state(_sm)		((_sm)->sm)
+#define bfa_sm_cmp_state(_sm, _state)	((_sm)->sm == (bfa_sm_t)(_state))
+
+/**
+ * For converting from state machine function to state encoding.
+ */
+struct bfa_sm_table_s {
+	bfa_sm_t	sm;	/*  state machine function	*/
+	int		state;	/*  state machine encoding	*/
+	char		*name;	/*  state name for display	*/
+};
+#define BFA_SM(_sm)	((bfa_sm_t)(_sm))
+
+/**
+ * State machine with entry actions.
+ */
+typedef void (*bfa_fsm_t)(void *fsm, int event);
+
+/**
+ * oc - object class eg. bfa_ioc
+ * st - state, eg. reset
+ * otype - object type, eg. struct bfa_ioc_s
+ * etype - event type, eg. enum ioc_event
+ */
+#define bfa_fsm_state_decl(oc, st, otype, etype)		\
+	static void oc ## _sm_ ## st(otype * fsm, etype event);      \
+	static void oc ## _sm_ ## st ## _entry(otype * fsm)
+
+#define bfa_fsm_set_state(_fsm, _state) do {	\
+	(_fsm)->fsm = (bfa_fsm_t)(_state);      \
+	_state ## _entry(_fsm);      \
+} while (0)
+
+#define bfa_fsm_send_event(_fsm, _event)	((_fsm)->fsm((_fsm), (_event)))
+#define bfa_fsm_get_state(_fsm)			((_fsm)->fsm)
+#define bfa_fsm_cmp_state(_fsm, _state)		\
+	((_fsm)->fsm == (bfa_fsm_t)(_state))
+
+static inline int
+bfa_sm_to_state(struct bfa_sm_table_s *smt, bfa_sm_t sm)
+{
+	int	i = 0;
+
+	while (smt[i].sm && smt[i].sm != sm)
+		i++;
+	return smt[i].state;
+}
+
+/**
+ * @ Generic wait counter.
+ */
+
+typedef void (*bfa_wc_resume_t) (void *cbarg);
+
+struct bfa_wc_s {
+	bfa_wc_resume_t wc_resume;
+	void		*wc_cbarg;
+	int		wc_count;
+};
+
+static inline void
+bfa_wc_up(struct bfa_wc_s *wc)
+{
+	wc->wc_count++;
+}
+
+static inline void
+bfa_wc_down(struct bfa_wc_s *wc)
+{
+	wc->wc_count--;
+	if (wc->wc_count == 0)
+		wc->wc_resume(wc->wc_cbarg);
+}
+
+/**
+ * Initialize a waiting counter.
+ */
+static inline void
+bfa_wc_init(struct bfa_wc_s *wc, bfa_wc_resume_t wc_resume, void *wc_cbarg)
+{
+	wc->wc_resume = wc_resume;
+	wc->wc_cbarg = wc_cbarg;
+	wc->wc_count = 0;
+	bfa_wc_up(wc);
+}
+
+/**
+ * Wait for counter to reach zero
+ */
+static inline void
+bfa_wc_wait(struct bfa_wc_s *wc)
+{
+	bfa_wc_down(wc);
+}
+
+#endif /* __BFA_CS_H__ */
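
The new bfa_cs.h above collects the driver's common services: a circular trace buffer (__bfa_trc/__bfa_trc32), list/queue helper macros, the bfa_sm/bfa_fsm state-machine helpers, and a generic wait counter (bfa_wc). The sketch below is illustrative only and not part of the commit: it restates the small wait-counter API as a standalone C program to show the intended usage pattern, where bfa_wc_init() takes one extra hold that bfa_wc_wait() releases, so the resume callback fires only after every sub-operation has called bfa_wc_down(). The demo callback all_done and the sub-operation loops are hypothetical.

/*
 * Minimal, self-contained sketch of the bfa_wc wait-counter pattern.
 * The struct and helpers are restated here so the example builds
 * without the driver headers.
 */
#include <stdio.h>

typedef void (*bfa_wc_resume_t)(void *cbarg);

struct bfa_wc_s {
	bfa_wc_resume_t	wc_resume;	/* called when wc_count reaches 0 */
	void		*wc_cbarg;	/* opaque argument for the callback */
	int		wc_count;	/* number of outstanding operations */
};

static void bfa_wc_up(struct bfa_wc_s *wc)
{
	wc->wc_count++;
}

static void bfa_wc_down(struct bfa_wc_s *wc)
{
	wc->wc_count--;
	if (wc->wc_count == 0)
		wc->wc_resume(wc->wc_cbarg);
}

static void bfa_wc_init(struct bfa_wc_s *wc, bfa_wc_resume_t resume, void *cbarg)
{
	wc->wc_resume = resume;
	wc->wc_cbarg  = cbarg;
	wc->wc_count  = 0;
	bfa_wc_up(wc);			/* extra hold, released by bfa_wc_wait() */
}

static void bfa_wc_wait(struct bfa_wc_s *wc)
{
	bfa_wc_down(wc);
}

static void all_done(void *cbarg)	/* hypothetical resume callback */
{
	printf("%s: every sub-operation completed\n", (char *)cbarg);
}

int main(void)
{
	struct bfa_wc_s wc;
	int i;

	bfa_wc_init(&wc, all_done, "module stop");

	for (i = 0; i < 3; i++)
		bfa_wc_up(&wc);		/* one hold per asynchronous sub-operation */

	for (i = 0; i < 3; i++)
		bfa_wc_down(&wc);	/* normally called from each completion path */

	bfa_wc_wait(&wc);		/* drops the initial hold; fires all_done() */
	return 0;
}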

+ 0 - 58
drivers/scsi/bfa/bfa_csdebug.c

@@ -1,58 +0,0 @@
-/*
- * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
- * All rights reserved
- * www.brocade.com
- *
- * Linux driver for Brocade Fibre Channel Host Bus Adapter.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License (GPL) Version 2 as
- * published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- */
-
-#include <cs/bfa_debug.h>
-#include <bfa_os_inc.h>
-#include <cs/bfa_q.h>
-#include <log/bfa_log_hal.h>
-
-/**
- *  cs_debug_api
- */
-
-
-void
-bfa_panic(int line, char *file, char *panicstr)
-{
-	bfa_log(NULL, BFA_LOG_HAL_ASSERT, file, line, panicstr);
-	bfa_os_panic();
-}
-
-void
-bfa_sm_panic(struct bfa_log_mod_s *logm, int line, char *file, int event)
-{
-	bfa_log(logm, BFA_LOG_HAL_SM_ASSERT, file, line, event);
-	bfa_os_panic();
-}
-
-int
-bfa_q_is_on_q_func(struct list_head *q, struct list_head *qe)
-{
-	struct list_head        *tqe;
-
-	tqe = bfa_q_next(q);
-	while (tqe != q) {
-		if (tqe == qe)
-			return 1;
-		tqe = bfa_q_next(tqe);
-		if (tqe == NULL)
-			break;
-	}
-	return 0;
-}
-
-

+ 466 - 0
drivers/scsi/bfa/bfa_defs.h

@@ -0,0 +1,466 @@
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ *
+ * Linux driver for Brocade Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef __BFA_DEFS_H__
+#define __BFA_DEFS_H__
+
+#include "bfa_fc.h"
+#include "bfa_os_inc.h"
+
+#define BFA_MFG_SERIALNUM_SIZE                  11
+#define STRSZ(_n)                               (((_n) + 4) & ~3)
+
+/**
+ * Manufacturing card type
+ */
+enum {
+	BFA_MFG_TYPE_CB_MAX  = 825,      /*  Crossbow card type max     */
+	BFA_MFG_TYPE_FC8P2   = 825,      /*  8G 2port FC card           */
+	BFA_MFG_TYPE_FC8P1   = 815,      /*  8G 1port FC card           */
+	BFA_MFG_TYPE_FC4P2   = 425,      /*  4G 2port FC card           */
+	BFA_MFG_TYPE_FC4P1   = 415,      /*  4G 1port FC card           */
+	BFA_MFG_TYPE_CNA10P2 = 1020,     /*  10G 2port CNA card */
+	BFA_MFG_TYPE_CNA10P1 = 1010,     /*  10G 1port CNA card */
+	BFA_MFG_TYPE_JAYHAWK = 804,      /*  Jayhawk mezz card          */
+	BFA_MFG_TYPE_WANCHESE = 1007,    /*  Wanchese mezz card */
+	BFA_MFG_TYPE_ASTRA    = 807,     /*  Astra mezz card            */
+	BFA_MFG_TYPE_LIGHTNING_P0 = 902, /*  Lightning mezz card - old  */
+	BFA_MFG_TYPE_LIGHTNING = 1741,   /*  Lightning mezz card        */
+	BFA_MFG_TYPE_INVALID = 0,        /*  Invalid card type          */
+};
+
+#pragma pack(1)
+
+/**
+ * Check if Mezz card
+ */
+#define bfa_mfg_is_mezz(type) (( \
+	(type) == BFA_MFG_TYPE_JAYHAWK || \
+	(type) == BFA_MFG_TYPE_WANCHESE || \
+	(type) == BFA_MFG_TYPE_ASTRA || \
+	(type) == BFA_MFG_TYPE_LIGHTNING_P0 || \
+	(type) == BFA_MFG_TYPE_LIGHTNING))
+
+/**
+ * Check if the card has the old wwn/mac handling
+ */
+#define bfa_mfg_is_old_wwn_mac_model(type) (( \
+	(type) == BFA_MFG_TYPE_FC8P2 || \
+	(type) == BFA_MFG_TYPE_FC8P1 || \
+	(type) == BFA_MFG_TYPE_FC4P2 || \
+	(type) == BFA_MFG_TYPE_FC4P1 || \
+	(type) == BFA_MFG_TYPE_CNA10P2 || \
+	(type) == BFA_MFG_TYPE_CNA10P1 || \
+	(type) == BFA_MFG_TYPE_JAYHAWK || \
+	(type) == BFA_MFG_TYPE_WANCHESE))
+
+#define bfa_mfg_increment_wwn_mac(m, i)                         \
+do {                                                            \
+	u32 t = ((u32)(m)[0] << 16) | ((u32)(m)[1] << 8) | \
+		(u32)(m)[2];  \
+	t += (i);      \
+	(m)[0] = (t >> 16) & 0xFF;                              \
+	(m)[1] = (t >> 8) & 0xFF;                               \
+	(m)[2] = t & 0xFF;                                      \
+} while (0)
+
+/**
+ * VPD data length
+ */
+#define BFA_MFG_VPD_LEN                 512
+
+/**
+ * VPD vendor tag
+ */
+enum {
+	BFA_MFG_VPD_UNKNOWN     = 0,     /*  vendor unknown             */
+	BFA_MFG_VPD_IBM         = 1,     /*  vendor IBM                 */
+	BFA_MFG_VPD_HP          = 2,     /*  vendor HP                  */
+	BFA_MFG_VPD_DELL        = 3,     /*  vendor DELL                */
+	BFA_MFG_VPD_PCI_IBM     = 0x08,  /*  PCI VPD IBM                */
+	BFA_MFG_VPD_PCI_HP      = 0x10,  /*  PCI VPD HP         */
+	BFA_MFG_VPD_PCI_DELL    = 0x20,  /*  PCI VPD DELL               */
+	BFA_MFG_VPD_PCI_BRCD    = 0xf8,  /*  PCI VPD Brocade            */
+};
+
+/**
+ * All numerical fields are in big-endian format.
+ */
+struct bfa_mfg_vpd_s {
+	u8              version;        /*  vpd data version */
+	u8              vpd_sig[3];     /*  characters 'V', 'P', 'D' */
+	u8              chksum;         /*  u8 checksum */
+	u8              vendor;         /*  vendor */
+	u8      len;            /*  vpd data length excluding header */
+	u8      rsv;
+	u8              data[BFA_MFG_VPD_LEN];  /*  vpd data */
+};
+
+#pragma pack()
+
+/**
+ * Status return values
+ */
+enum bfa_status {
+	BFA_STATUS_OK		= 0,	/*  Success */
+	BFA_STATUS_FAILED	= 1,	/*  Operation failed */
+	BFA_STATUS_EINVAL	= 2,	/*  Invalid params Check input
+					 *  parameters */
+	BFA_STATUS_ENOMEM	= 3,	/*  Out of resources */
+	BFA_STATUS_ETIMER	= 5,	/*  Timer expired - Retry, if persists,
+					 *  contact support */
+	BFA_STATUS_EPROTOCOL	= 6,	/*  Protocol error */
+	BFA_STATUS_DEVBUSY	= 13,	/*  Device busy - Retry operation */
+	BFA_STATUS_UNKNOWN_LWWN = 18,	/*  LPORT PWWN not found */
+	BFA_STATUS_UNKNOWN_RWWN = 19,	/*  RPORT PWWN not found */
+	BFA_STATUS_VPORT_EXISTS = 21,	/*  VPORT already exists */
+	BFA_STATUS_VPORT_MAX	= 22,	/*  Reached max VPORT supported limit */
+	BFA_STATUS_UNSUPP_SPEED	= 23,	/*  Invalid Speed Check speed setting */
+	BFA_STATUS_INVLD_DFSZ	= 24,	/*  Invalid Max data field size */
+	BFA_STATUS_FABRIC_RJT	= 29,	/*  Reject from attached fabric */
+	BFA_STATUS_VPORT_WWN_BP	= 46,	/*  WWN is same as base port's WWN */
+	BFA_STATUS_NO_FCPIM_NEXUS = 52,	/* No FCP Nexus exists with the rport */
+	BFA_STATUS_IOC_FAILURE	= 56,	/* IOC failure - Retry, if persists
+					 * contact support */
+	BFA_STATUS_INVALID_WWN	= 57,	/*  Invalid WWN */
+	BFA_STATUS_DIAG_BUSY	= 71,	/*  diag busy */
+	BFA_STATUS_ENOFSAVE	= 78,	/*  No saved firmware trace */
+	BFA_STATUS_IOC_DISABLED = 82,   /* IOC is already disabled */
+	BFA_STATUS_INVALID_MAC  = 134, /*  Invalid MAC address */
+	BFA_STATUS_PBC		= 154, /*  Operation not allowed for pre-boot
+					*  configuration */
+	BFA_STATUS_TRUNK_ENABLED = 164, /* Trunk is already enabled on
+					 * this adapter */
+	BFA_STATUS_TRUNK_DISABLED  = 165, /* Trunking is disabled on
+					   * the adapter */
+	BFA_STATUS_IOPROFILE_OFF = 175, /* IO profile OFF */
+	BFA_STATUS_MAX_VAL		/* Unknown error code */
+};
+#define bfa_status_t enum bfa_status
+
+enum bfa_eproto_status {
+	BFA_EPROTO_BAD_ACCEPT = 0,
+	BFA_EPROTO_UNKNOWN_RSP = 1
+};
+#define bfa_eproto_status_t enum bfa_eproto_status
+
+enum bfa_boolean {
+	BFA_FALSE = 0,
+	BFA_TRUE  = 1
+};
+#define bfa_boolean_t enum bfa_boolean
+
+#define BFA_STRING_32	32
+#define BFA_VERSION_LEN 64
+
+/**
+ * ---------------------- adapter definitions ------------
+ */
+
+/**
+ * BFA adapter level attributes.
+ */
+enum {
+	BFA_ADAPTER_SERIAL_NUM_LEN = STRSZ(BFA_MFG_SERIALNUM_SIZE),
+					/*
+					 *!< adapter serial num length
+					 */
+	BFA_ADAPTER_MODEL_NAME_LEN  = 16,  /*  model name length */
+	BFA_ADAPTER_MODEL_DESCR_LEN = 128, /*  model description length */
+	BFA_ADAPTER_MFG_NAME_LEN    = 8,   /*  manufacturer name length */
+	BFA_ADAPTER_SYM_NAME_LEN    = 64,  /*  adapter symbolic name length */
+	BFA_ADAPTER_OS_TYPE_LEN	    = 64,  /*  adapter os type length */
+};
+
+struct bfa_adapter_attr_s {
+	char		manufacturer[BFA_ADAPTER_MFG_NAME_LEN];
+	char		serial_num[BFA_ADAPTER_SERIAL_NUM_LEN];
+	u32	card_type;
+	char		model[BFA_ADAPTER_MODEL_NAME_LEN];
+	char		model_descr[BFA_ADAPTER_MODEL_DESCR_LEN];
+	wwn_t		pwwn;
+	char		node_symname[FC_SYMNAME_MAX];
+	char		hw_ver[BFA_VERSION_LEN];
+	char		fw_ver[BFA_VERSION_LEN];
+	char		optrom_ver[BFA_VERSION_LEN];
+	char		os_type[BFA_ADAPTER_OS_TYPE_LEN];
+	struct bfa_mfg_vpd_s	vpd;
+	struct mac_s	mac;
+
+	u8		nports;
+	u8		max_speed;
+	u8		prototype;
+	char	        asic_rev;
+
+	u8		pcie_gen;
+	u8		pcie_lanes_orig;
+	u8		pcie_lanes;
+	u8	        cna_capable;
+
+	u8		is_mezz;
+	u8		trunk_capable;
+};
+
+/**
+ * ---------------------- IOC definitions ------------
+ */
+
+enum {
+	BFA_IOC_DRIVER_LEN	= 16,
+	BFA_IOC_CHIP_REV_LEN	= 8,
+};
+
+/**
+ * Driver and firmware versions.
+ */
+struct bfa_ioc_driver_attr_s {
+	char		driver[BFA_IOC_DRIVER_LEN];	/*  driver name */
+	char		driver_ver[BFA_VERSION_LEN];	/*  driver version */
+	char		fw_ver[BFA_VERSION_LEN];	/*  firmware version */
+	char		bios_ver[BFA_VERSION_LEN];	/*  bios version */
+	char		efi_ver[BFA_VERSION_LEN];	/*  EFI version */
+	char		ob_ver[BFA_VERSION_LEN];	/*  openboot version */
+};
+
+/**
+ * IOC PCI device attributes
+ */
+struct bfa_ioc_pci_attr_s {
+	u16	vendor_id;	/*  PCI vendor ID */
+	u16	device_id;	/*  PCI device ID */
+	u16	ssid;		/*  subsystem ID */
+	u16	ssvid;		/*  subsystem vendor ID */
+	u32	pcifn;		/*  PCI device function */
+	u32	rsvd;		/* padding */
+	char		chip_rev[BFA_IOC_CHIP_REV_LEN];	 /*  chip revision */
+};
+
+/**
+ * IOC states
+ */
+enum bfa_ioc_state {
+	BFA_IOC_UNINIT		= 1,	/*  IOC is in uninit state */
+	BFA_IOC_RESET		= 2,	/*  IOC is in reset state */
+	BFA_IOC_SEMWAIT		= 3,	/*  Waiting for IOC h/w semaphore */
+	BFA_IOC_HWINIT		= 4,	/*  IOC h/w is being initialized */
+	BFA_IOC_GETATTR		= 5,	/*  IOC is being configured */
+	BFA_IOC_OPERATIONAL	= 6,	/*  IOC is operational */
+	BFA_IOC_INITFAIL	= 7,	/*  IOC hardware failure */
+	BFA_IOC_FAIL		= 8,	/*  IOC heart-beat failure */
+	BFA_IOC_DISABLING	= 9,	/*  IOC is being disabled */
+	BFA_IOC_DISABLED	= 10,	/*  IOC is disabled */
+	BFA_IOC_FWMISMATCH	= 11,	/*  IOC f/w different from drivers */
+	BFA_IOC_ENABLING	= 12,	/*  IOC is being enabled */
+};
+
+/**
+ * IOC firmware stats
+ */
+struct bfa_fw_ioc_stats_s {
+	u32	enable_reqs;
+	u32	disable_reqs;
+	u32	get_attr_reqs;
+	u32	dbg_sync;
+	u32	dbg_dump;
+	u32	unknown_reqs;
+};
+
+/**
+ * IOC driver stats
+ */
+struct bfa_ioc_drv_stats_s {
+	u32	ioc_isrs;
+	u32	ioc_enables;
+	u32	ioc_disables;
+	u32	ioc_hbfails;
+	u32	ioc_boots;
+	u32	stats_tmos;
+	u32	hb_count;
+	u32	disable_reqs;
+	u32	enable_reqs;
+	u32	disable_replies;
+	u32	enable_replies;
+};
+
+/**
+ * IOC statistics
+ */
+struct bfa_ioc_stats_s {
+	struct bfa_ioc_drv_stats_s	drv_stats; /*  driver IOC stats */
+	struct bfa_fw_ioc_stats_s	fw_stats;  /*  firmware IOC stats */
+};
+
+enum bfa_ioc_type_e {
+	BFA_IOC_TYPE_FC		= 1,
+	BFA_IOC_TYPE_FCoE	= 2,
+	BFA_IOC_TYPE_LL		= 3,
+};
+
+/**
+ * IOC attributes returned in queries
+ */
+struct bfa_ioc_attr_s {
+	enum bfa_ioc_type_e		ioc_type;
+	enum bfa_ioc_state		state;		/*  IOC state      */
+	struct bfa_adapter_attr_s	adapter_attr;	/*  HBA attributes */
+	struct bfa_ioc_driver_attr_s	driver_attr;	/*  driver attr    */
+	struct bfa_ioc_pci_attr_s	pci_attr;
+	u8				port_id;	/*  port number    */
+	u8				rsvd[7];	/*  64bit align    */
+};
+
+/**
+ * ---------------------- mfg definitions ------------
+ */
+
+/**
+ * Checksum size
+ */
+#define BFA_MFG_CHKSUM_SIZE			16
+
+#define BFA_MFG_PARTNUM_SIZE			14
+#define BFA_MFG_SUPPLIER_ID_SIZE		10
+#define BFA_MFG_SUPPLIER_PARTNUM_SIZE		20
+#define BFA_MFG_SUPPLIER_SERIALNUM_SIZE		20
+#define BFA_MFG_SUPPLIER_REVISION_SIZE		4
+
+#pragma pack(1)
+
+/**
+ * All numerical fields are in big-endian format.
+ */
+struct bfa_mfg_block_s {
+	u8		version;	/*  manufacturing block version */
+	u8		mfg_sig[3];	/*  characters 'M', 'F', 'G' */
+	u16	mfgsize;	/*  mfg block size */
+	u16	u16_chksum;	/*  old u16 checksum */
+	char		brcd_serialnum[STRSZ(BFA_MFG_SERIALNUM_SIZE)];
+	char		brcd_partnum[STRSZ(BFA_MFG_PARTNUM_SIZE)];
+	u8		mfg_day;	/*  manufacturing day */
+	u8		mfg_month;	/*  manufacturing month */
+	u16	mfg_year;	/*  manufacturing year */
+	wwn_t		mfg_wwn;	/*  wwn base for this adapter */
+	u8		num_wwn;	/*  number of wwns assigned */
+	u8		mfg_speeds;	/*  speeds allowed for this adapter */
+	u8		rsv[2];
+	char		supplier_id[STRSZ(BFA_MFG_SUPPLIER_ID_SIZE)];
+	char		supplier_partnum[STRSZ(BFA_MFG_SUPPLIER_PARTNUM_SIZE)];
+	char
+		supplier_serialnum[STRSZ(BFA_MFG_SUPPLIER_SERIALNUM_SIZE)];
+	char
+		supplier_revision[STRSZ(BFA_MFG_SUPPLIER_REVISION_SIZE)];
+	mac_t		mfg_mac;	/*  mac address */
+	u8		num_mac;	/*  number of mac addresses */
+	u8		rsv2;
+	u32	mfg_type;	/*  card type */
+	u8		rsv3[108];
+	u8		md5_chksum[BFA_MFG_CHKSUM_SIZE]; /*  md5 checksum */
+};
+
+#pragma pack()
+
+/**
+ * ---------------------- pci definitions ------------
+ */
+
+/**
+ * PCI device and vendor ID information
+ */
+enum {
+	BFA_PCI_VENDOR_ID_BROCADE	= 0x1657,
+	BFA_PCI_DEVICE_ID_FC_8G2P	= 0x13,
+	BFA_PCI_DEVICE_ID_FC_8G1P	= 0x17,
+	BFA_PCI_DEVICE_ID_CT		= 0x14,
+	BFA_PCI_DEVICE_ID_CT_FC		= 0x21,
+};
+
+#define bfa_asic_id_ct(devid)			\
+	((devid) == BFA_PCI_DEVICE_ID_CT ||	\
+	 (devid) == BFA_PCI_DEVICE_ID_CT_FC)
+
+/**
+ * PCI sub-system device and vendor ID information
+ */
+enum {
+	BFA_PCI_FCOE_SSDEVICE_ID	= 0x14,
+};
+
+/**
+ * Maximum number of device address ranges mapped through different BAR(s)
+ */
+#define BFA_PCI_ACCESS_RANGES 1
+
+/*
+ *	Port speed settings. Each specific speed is a bit field. Use multiple
+ *	bits to specify speeds to be selected for auto-negotiation.
+ */
+enum bfa_port_speed {
+	BFA_PORT_SPEED_UNKNOWN = 0,
+	BFA_PORT_SPEED_1GBPS	= 1,
+	BFA_PORT_SPEED_2GBPS	= 2,
+	BFA_PORT_SPEED_4GBPS	= 4,
+	BFA_PORT_SPEED_8GBPS	= 8,
+	BFA_PORT_SPEED_10GBPS	= 10,
+	BFA_PORT_SPEED_16GBPS	= 16,
+	BFA_PORT_SPEED_AUTO =
+		(BFA_PORT_SPEED_1GBPS | BFA_PORT_SPEED_2GBPS |
+		 BFA_PORT_SPEED_4GBPS | BFA_PORT_SPEED_8GBPS),
+};
+#define bfa_port_speed_t enum bfa_port_speed
+
+enum {
+	BFA_BOOT_BOOTLUN_MAX = 4,       /*  maximum boot lun per IOC */
+	BFA_PREBOOT_BOOTLUN_MAX = 8,    /*  maximum preboot lun per IOC */
+};
+
+#define BOOT_CFG_REV1   1
+#define BOOT_CFG_VLAN   1
+
+/**
+ *      Boot options setting. Boot options setting determines from where
+ *      to get the boot lun information
+ */
+enum bfa_boot_bootopt {
+	BFA_BOOT_AUTO_DISCOVER  = 0, /*  Boot from blun provided by fabric */
+	BFA_BOOT_STORED_BLUN = 1, /*  Boot from bluns stored in flash */
+	BFA_BOOT_FIRST_LUN      = 2, /*  Boot from first discovered blun */
+	BFA_BOOT_PBC    = 3, /*  Boot from pbc configured blun  */
+};
+
+#pragma pack(1)
+/**
+ * Boot lun information.
+ */
+struct bfa_boot_bootlun_s {
+	wwn_t   pwwn;   /*  port wwn of target */
+	lun_t   lun;    /*  64-bit lun */
+};
+#pragma pack()
+
+/**
+ * BOOT boot configuration
+ */
+struct bfa_boot_pbc_s {
+	u8              enable;         /*  enable/disable SAN boot */
+	u8              speed;          /*  boot speed settings */
+	u8              topology;       /*  boot topology setting */
+	u8              rsvd1;
+	u32     nbluns;         /*  number of boot luns */
+	struct bfa_boot_bootlun_s pblun[BFA_PREBOOT_BOOTLUN_MAX];
+};
+
+#endif /* __BFA_DEFS_H__ */
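
Illustrative sketch, not part of the commit: bfa_mfg_increment_wwn_mac() in bfa_defs.h above derives per-function addresses by treating the low three bytes of the manufacturing base MAC as a 24-bit big-endian counter and adding an offset. The standalone program below restates that arithmetic as a function; mfg_increment_mac is a hypothetical name and the base address used is made up.

#include <stdio.h>
#include <stdint.h>

/*
 * Hypothetical restatement of the bfa_mfg_increment_wwn_mac() arithmetic:
 * the low three MAC bytes form a 24-bit big-endian counter.
 */
static void mfg_increment_mac(uint8_t m[3], unsigned int i)
{
	uint32_t t = ((uint32_t)m[0] << 16) | ((uint32_t)m[1] << 8) | m[2];

	t += i;
	m[0] = (t >> 16) & 0xFF;	/* the masks keep the result within 24 bits */
	m[1] = (t >> 8) & 0xFF;
	m[2] = t & 0xFF;
}

int main(void)
{
	/* low three bytes of a made-up base MAC xx:xx:xx:01:02:ff */
	uint8_t low[3] = { 0x01, 0x02, 0xff };

	mfg_increment_mac(low, 1);	/* e.g. the second function gets base + 1 */
	printf("%02x:%02x:%02x\n", low[0], low[1], low[2]);	/* prints 01:03:00 */
	return 0;
}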

+ 457 - 0
drivers/scsi/bfa/bfa_defs_fcs.h

@@ -0,0 +1,457 @@
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ *
+ * Linux driver for Brocade Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef __BFA_DEFS_FCS_H__
+#define __BFA_DEFS_FCS_H__
+
+#include "bfa_fc.h"
+#include "bfa_defs_svc.h"
+
+/**
+ * VF states
+ */
+enum bfa_vf_state {
+	BFA_VF_UNINIT    = 0,	/*  fabric is not yet initialized */
+	BFA_VF_LINK_DOWN = 1,	/*  link is down */
+	BFA_VF_FLOGI     = 2,	/*  flogi is in progress */
+	BFA_VF_AUTH      = 3,	/*  authentication in progress */
+	BFA_VF_NOFABRIC  = 4,	/*  fabric is not present */
+	BFA_VF_ONLINE    = 5,	/*  login to fabric is complete */
+	BFA_VF_EVFP      = 6,	/*  EVFP is in progress */
+	BFA_VF_ISOLATED  = 7,	/*  port isolated due to vf_id mismatch */
+};
+
+/**
+ * VF statistics
+ */
+struct bfa_vf_stats_s {
+	u32	flogi_sent;	/*  Num FLOGIs sent */
+	u32	flogi_rsp_err;	/*  FLOGI response errors */
+	u32	flogi_acc_err;	/*  FLOGI accept errors */
+	u32	flogi_accepts;	/*  FLOGI accepts received */
+	u32	flogi_rejects;	/*  FLOGI rejects received */
+	u32	flogi_unknown_rsp; /*  Unknown responses for FLOGI */
+	u32	flogi_alloc_wait; /*  Allocation waits prior to sending FLOGI */
+	u32	flogi_rcvd;	/*  FLOGIs received */
+	u32	flogi_rejected;	/*  Incoming FLOGIs rejected */
+	u32	fabric_onlines;	/*  Internal fabric online notification sent
+				 *  to other modules */
+	u32	fabric_offlines; /* Internal fabric offline notification sent
+				  * to other modules */
+	u32	resvd; /*  padding for 64 bit alignment */
+};
+
+/**
+ * VF attributes returned in queries
+ */
+struct bfa_vf_attr_s {
+	enum bfa_vf_state  state;		/*  VF state */
+	u32        rsvd;
+	wwn_t           fabric_name;	/*  fabric name */
+};
+
+#define BFA_FCS_MAX_LPORTS 256
+#define BFA_FCS_FABRIC_IPADDR_SZ  16
+
+/**
+ * symbolic names for base port/virtual port
+ */
+#define BFA_SYMNAME_MAXLEN	128	/* 128 bytes */
+struct bfa_lport_symname_s {
+	char	    symname[BFA_SYMNAME_MAXLEN];
+};
+
+/**
+* Roles of FCS port:
+ *     - FCP IM and FCP TM roles cannot be enabled together for a FCS port
+ *     - Create multiple ports if both IM and TM functions required.
+ *     - At least one role must be specified.
+ */
+enum bfa_lport_role {
+	BFA_LPORT_ROLE_FCP_IM	= 0x01,	/*  FCP initiator role */
+	BFA_LPORT_ROLE_FCP_MAX	= BFA_LPORT_ROLE_FCP_IM,
+};
+
+/**
+ * FCS port configuration.
+ */
+struct bfa_lport_cfg_s {
+    wwn_t	       pwwn;       /*  port wwn */
+    wwn_t	       nwwn;       /*  node wwn */
+    struct bfa_lport_symname_s  sym_name;   /*  vm port symbolic name */
+	bfa_boolean_t       preboot_vp;  /*  vport created from PBC */
+    enum bfa_lport_role     roles;      /*  FCS port roles */
+    u8	     tag[16];	/*  opaque tag from application */
+};
+
+/**
+ * FCS port states
+ */
+enum bfa_lport_state {
+	BFA_LPORT_UNINIT  = 0,	/*  PORT is not yet initialized */
+	BFA_LPORT_FDISC   = 1,	/*  FDISC is in progress */
+	BFA_LPORT_ONLINE  = 2,	/*  login to fabric is complete */
+	BFA_LPORT_OFFLINE = 3,	/*  No login to fabric */
+};
+
+/**
+ * FCS port type.
+ */
+enum bfa_lport_type {
+	BFA_LPORT_TYPE_PHYSICAL = 0,
+	BFA_LPORT_TYPE_VIRTUAL,
+};
+
+/**
+ * FCS port offline reason.
+ */
+enum bfa_lport_offline_reason {
+	BFA_LPORT_OFFLINE_UNKNOWN = 0,
+	BFA_LPORT_OFFLINE_LINKDOWN,
+	BFA_LPORT_OFFLINE_FAB_UNSUPPORTED,	/*  NPIV not supported by the
+	 *    fabric */
+	BFA_LPORT_OFFLINE_FAB_NORESOURCES,
+	BFA_LPORT_OFFLINE_FAB_LOGOUT,
+};
+
+/**
+ * FCS lport info.
+ */
+struct bfa_lport_info_s {
+	u8	 port_type;	/* bfa_lport_type_t : physical or
+	 * virtual */
+	u8	 port_state;	/* one of bfa_lport_state values */
+	u8	 offline_reason;	/* one of bfa_lport_offline_reason_t
+	 * values */
+	wwn_t	   port_wwn;
+	wwn_t	   node_wwn;
+
+	/*
+	 * following 4 fields are valid for Physical Ports only
+	 */
+	u32	max_vports_supp;	/* Max supported vports */
+	u32	num_vports_inuse;	/* Num of in use vports */
+	u32	max_rports_supp;	/* Max supported rports */
+	u32	num_rports_inuse;	/* Num of discovered rports */
+
+};
+
+/**
+ * FCS port statistics
+ */
+struct bfa_lport_stats_s {
+	u32	ns_plogi_sent;
+	u32	ns_plogi_rsp_err;
+	u32	ns_plogi_acc_err;
+	u32	ns_plogi_accepts;
+	u32	ns_rejects;	/* NS command rejects */
+	u32	ns_plogi_unknown_rsp;
+	u32	ns_plogi_alloc_wait;
+
+	u32	ns_retries;	/* NS command retries */
+	u32	ns_timeouts;	/* NS command timeouts */
+
+	u32	ns_rspnid_sent;
+	u32	ns_rspnid_accepts;
+	u32	ns_rspnid_rsp_err;
+	u32	ns_rspnid_rejects;
+	u32	ns_rspnid_alloc_wait;
+
+	u32	ns_rftid_sent;
+	u32	ns_rftid_accepts;
+	u32	ns_rftid_rsp_err;
+	u32	ns_rftid_rejects;
+	u32	ns_rftid_alloc_wait;
+
+	u32	ns_rffid_sent;
+	u32	ns_rffid_accepts;
+	u32	ns_rffid_rsp_err;
+	u32	ns_rffid_rejects;
+	u32	ns_rffid_alloc_wait;
+
+	u32	ns_gidft_sent;
+	u32	ns_gidft_accepts;
+	u32	ns_gidft_rsp_err;
+	u32	ns_gidft_rejects;
+	u32	ns_gidft_unknown_rsp;
+	u32	ns_gidft_alloc_wait;
+
+	/*
+	 * Mgmt Server stats
+	 */
+	u32	ms_retries;	/* MS command retries */
+	u32	ms_timeouts;	/* MS command timeouts */
+	u32	ms_plogi_sent;
+	u32	ms_plogi_rsp_err;
+	u32	ms_plogi_acc_err;
+	u32	ms_plogi_accepts;
+	u32	ms_rejects;	/* MS command rejects */
+	u32	ms_plogi_unknown_rsp;
+	u32	ms_plogi_alloc_wait;
+
+	u32	num_rscn;	/* Num of RSCN received */
+	u32	num_portid_rscn;/* Num portid format RSCN
+	* received */
+
+	u32	uf_recvs;	/* Unsolicited recv frames	*/
+	u32	uf_recv_drops;	/* Dropped received frames	*/
+
+	u32	plogi_rcvd;	/* Received plogi	*/
+	u32	prli_rcvd;	/* Received prli	*/
+	u32	adisc_rcvd;	/* Received adisc	*/
+	u32	prlo_rcvd;	/* Received prlo	*/
+	u32	logo_rcvd;	/* Received logo	*/
+	u32	rpsc_rcvd;	/* Received rpsc	*/
+	u32	un_handled_els_rcvd;	/* Received unhandled ELS	*/
+	u32	rport_plogi_timeouts; /* Rport plogi retry timeout count */
+	u32	rport_del_max_plogi_retry; /* Deleted rport
+					    * (max retry of plogi) */
+};
+
+/**
+ * BFA port attribute returned in queries
+ */
+struct bfa_lport_attr_s {
+	enum bfa_lport_state state;	/*  port state */
+	u32	 pid;	/*  port ID */
+	struct bfa_lport_cfg_s   port_cfg;	/*  port configuration */
+	enum bfa_port_type port_type;	/*  current topology */
+	u32	 loopback;	/*  cable is externally looped back */
+	wwn_t	fabric_name; /*  attached switch's nwwn */
+	u8	fabric_ip_addr[BFA_FCS_FABRIC_IPADDR_SZ]; /*  attached
+	* fabric's ip addr */
+	mac_t	   fpma_mac;	/*  Lport's FPMA Mac address */
+	u16	authfail;	/*  auth failed state */
+};
+
+
+/**
+ * VPORT states
+ */
+enum bfa_vport_state {
+	BFA_FCS_VPORT_UNINIT		= 0,
+	BFA_FCS_VPORT_CREATED		= 1,
+	BFA_FCS_VPORT_OFFLINE		= 1,
+	BFA_FCS_VPORT_FDISC_SEND	= 2,
+	BFA_FCS_VPORT_FDISC		= 3,
+	BFA_FCS_VPORT_FDISC_RETRY	= 4,
+	BFA_FCS_VPORT_ONLINE		= 5,
+	BFA_FCS_VPORT_DELETING		= 6,
+	BFA_FCS_VPORT_CLEANUP		= 6,
+	BFA_FCS_VPORT_LOGO_SEND		= 7,
+	BFA_FCS_VPORT_LOGO		= 8,
+	BFA_FCS_VPORT_ERROR		= 9,
+	BFA_FCS_VPORT_MAX_STATE,
+};
+
+/**
+ * vport statistics
+ */
+struct bfa_vport_stats_s {
+	struct bfa_lport_stats_s port_stats;	/*  base class (port) stats */
+	/*
+	 * TODO - remove
+	 */
+
+	u32        fdisc_sent;	/*  num fdisc sent */
+	u32        fdisc_accepts;	/*  fdisc accepts */
+	u32        fdisc_retries;	/*  fdisc retries */
+	u32        fdisc_timeouts;	/*  fdisc timeouts */
+	u32        fdisc_rsp_err;	/*  fdisc response error */
+	u32        fdisc_acc_bad;	/*  bad fdisc accepts */
+	u32        fdisc_rejects;	/*  fdisc rejects */
+	u32        fdisc_unknown_rsp;
+	/*
+	 *!< fdisc rsp unknown error
+	 */
+	u32        fdisc_alloc_wait;/*  fdisc req (fcxp)alloc wait */
+
+	u32        logo_alloc_wait;/*  logo req (fcxp) alloc wait */
+	u32        logo_sent;	/*  logo sent */
+	u32        logo_accepts;	/*  logo accepts */
+	u32        logo_rejects;	/*  logo rejects */
+	u32        logo_rsp_err;	/*  logo rsp errors */
+	u32        logo_unknown_rsp;
+			/*  logo rsp unknown errors */
+
+	u32        fab_no_npiv;	/*  fabric does not support npiv */
+
+	u32        fab_offline;	/*  offline events from fab SM */
+	u32        fab_online;	/*  online events from fab SM */
+	u32        fab_cleanup;	/*  cleanup request from fab SM */
+	u32        rsvd;
+};
+
+/**
+ * BFA vport attribute returned in queries
+ */
+struct bfa_vport_attr_s {
+	struct bfa_lport_attr_s   port_attr; /*  base class (port) attributes */
+	enum bfa_vport_state vport_state; /*  vport state */
+	u32          rsvd;
+};
+
+/**
+ * FCS remote port states
+ */
+enum bfa_rport_state {
+	BFA_RPORT_UNINIT	= 0,	/*  PORT is not yet initialized */
+	BFA_RPORT_OFFLINE	= 1,	/*  rport is offline */
+	BFA_RPORT_PLOGI		= 2,	/*  PLOGI to rport is in progress */
+	BFA_RPORT_ONLINE	= 3,	/*  login to rport is complete */
+	BFA_RPORT_PLOGI_RETRY	= 4,	/*  retrying login to rport */
+	BFA_RPORT_NSQUERY	= 5,	/*  nameserver query */
+	BFA_RPORT_ADISC		= 6,	/*  ADISC authentication */
+	BFA_RPORT_LOGO		= 7,	/*  logging out with rport */
+	BFA_RPORT_LOGORCV	= 8,	/*  handling LOGO from rport */
+	BFA_RPORT_NSDISC	= 9,	/*  re-discover rport */
+};
+
+/**
+ *  Rport Scsi Function : Initiator/Target.
+ */
+enum bfa_rport_function {
+	BFA_RPORT_INITIATOR	= 0x01,	/*  SCSI Initiator	*/
+	BFA_RPORT_TARGET	= 0x02,	/*  SCSI Target	*/
+};
+
+/**
+ * port/node symbolic names for rport
+ */
+#define BFA_RPORT_SYMNAME_MAXLEN	255
+struct bfa_rport_symname_s {
+	char            symname[BFA_RPORT_SYMNAME_MAXLEN];
+};
+
+/**
+ * FCS remote port statistics
+ */
+struct bfa_rport_stats_s {
+	u32        offlines;           /*  remote port offline count  */
+	u32        onlines;            /*  remote port online count   */
+	u32        rscns;              /*  RSCN affecting rport       */
+	u32        plogis;		    /*  plogis sent                */
+	u32        plogi_accs;	    /*  plogi accepts              */
+	u32        plogi_timeouts;	    /*  plogi timeouts             */
+	u32        plogi_rejects;	    /*  rcvd plogi rejects         */
+	u32        plogi_failed;	    /*  local failure              */
+	u32        plogi_rcvd;	    /*  plogis rcvd                */
+	u32        prli_rcvd;          /*  inbound PRLIs              */
+	u32        adisc_rcvd;         /*  ADISCs received            */
+	u32        adisc_rejects;      /*  recvd  ADISC rejects       */
+	u32        adisc_sent;         /*  ADISC requests sent        */
+	u32        adisc_accs;         /*  ADISC accepted by rport    */
+	u32        adisc_failed;       /*  ADISC failed (no response) */
+	u32        adisc_rejected;     /*  ADISC rejected by us    */
+	u32        logos;              /*  logos sent                 */
+	u32        logo_accs;          /*  LOGO accepts from rport    */
+	u32        logo_failed;        /*  LOGO failures              */
+	u32        logo_rejected;      /*  LOGO rejects from rport    */
+	u32        logo_rcvd;          /*  LOGO from remote port      */
+
+	u32        rpsc_rcvd;         /*  RPSC received            */
+	u32        rpsc_rejects;      /*  recvd  RPSC rejects       */
+	u32        rpsc_sent;         /*  RPSC requests sent        */
+	u32        rpsc_accs;         /*  RPSC accepted by rport    */
+	u32        rpsc_failed;       /*  RPSC failed (no response) */
+	u32        rpsc_rejected;     /*  RPSC rejected by us    */
+
+	u32	rjt_insuff_res;	/*  LS RJT with insuff resources */
+	struct bfa_rport_hal_stats_s	hal_stats;  /*  BFA rport stats    */
+};
+
+/**
+ * FCS remote port attributes returned in queries
+ */
+struct bfa_rport_attr_s {
+	wwn_t		nwwn;	/*  node wwn */
+	wwn_t		pwwn;	/*  port wwn */
+	enum fc_cos cos_supported;	/*  supported class of services */
+	u32		pid;	/*  port ID */
+	u32		df_sz;	/*  Max payload size */
+	enum bfa_rport_state	state;	/*  Rport State machine state */
+	enum fc_cos	fc_cos;	/*  FC classes of services */
+	bfa_boolean_t	cisc;	/*  CISC capable device */
+	struct bfa_rport_symname_s symname; /*  Symbolic Name */
+	enum bfa_rport_function	scsi_function; /*  Initiator/Target */
+	struct bfa_rport_qos_attr_s qos_attr; /*  qos attributes  */
+	enum bfa_port_speed curr_speed;   /*  operating speed got from
+					    * RPSC ELS. UNKNOWN, if RPSC
+					    * is not supported */
+	bfa_boolean_t	trl_enforced;	/*  TRL enforced ? TRUE/FALSE */
+	enum bfa_port_speed	assigned_speed;	/* Speed assigned by the user.
+						 * will be used if RPSC is not
+						 * supported by the rport */
+};
+
+struct bfa_rport_remote_link_stats_s {
+	u32 lfc; /*  Link Failure Count */
+	u32 lsyc; /*  Loss of Synchronization Count */
+	u32 lsic; /*  Loss of Signal Count */
+	u32 pspec; /*  Primitive Sequence Protocol Error Count */
+	u32 itwc; /*  Invalid Transmission Word Count */
+	u32 icc; /*  Invalid CRC Count */
+};
+
+
+#define BFA_MAX_IO_INDEX 7
+#define BFA_NO_IO_INDEX 9
+
+/**
+ * FCS itnim states
+ */
+enum bfa_itnim_state {
+	BFA_ITNIM_OFFLINE	= 0,	/*  offline */
+	BFA_ITNIM_PRLI_SEND	= 1,	/*  prli send */
+	BFA_ITNIM_PRLI_SENT	= 2,	/*  prli sent */
+	BFA_ITNIM_PRLI_RETRY	= 3,	/*  prli retry */
+	BFA_ITNIM_HCB_ONLINE	= 4,	/*  online callback */
+	BFA_ITNIM_ONLINE	= 5,	/*  online */
+	BFA_ITNIM_HCB_OFFLINE	= 6,	/*  offline callback */
+	BFA_ITNIM_INITIATIOR	= 7,	/*  initiator */
+};
+
+/**
+ * FCS remote port statistics
+ */
+struct bfa_itnim_stats_s {
+	u32        onlines;	/*  num rport online */
+	u32        offlines;	/*  num rport offline */
+	u32        prli_sent;	/*  num prli sent out */
+	u32        fcxp_alloc_wait;/*  num fcxp alloc waits */
+	u32        prli_rsp_err;	/*  num prli rsp errors */
+	u32        prli_rsp_acc;	/*  num prli rsp accepts */
+	u32        initiator;	/*  rport is an initiator */
+	u32        prli_rsp_parse_err;	/*  prli rsp parsing errors */
+	u32        prli_rsp_rjt;	/*  num prli rsp rejects */
+	u32        timeout;	/*  num timeouts detected */
+	u32        sler;		/*  num sler notification from BFA */
+	u32	rsvd;		/* padding for 64 bit alignment */
+};
+
+/**
+ * FCS itnim attributes returned in queries
+ */
+struct bfa_itnim_attr_s {
+	enum bfa_itnim_state state; /*  FCS itnim state        */
+	u8 retry;		/*  data retransmission support */
+	u8	task_retry_id;  /*  task retry ident support   */
+	u8 rec_support;    /*  REC supported              */
+	u8 conf_comp;      /*  confirmed completion supp  */
+};
+
+#endif /* __BFA_DEFS_FCS_H__ */
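
Illustrative sketch, not part of the commit: the enums in bfa_defs_fcs.h (for example enum bfa_rport_state) are reported back through query structures such as struct bfa_rport_attr_s, and a management or debug consumer would typically map them to display strings. The standalone program below shows one plausible way to do that; rport_state_name and the string table are hypothetical, and only the enum values are restated from the header.

#include <stdio.h>

/* Values restated from enum bfa_rport_state in bfa_defs_fcs.h */
enum bfa_rport_state {
	BFA_RPORT_UNINIT	= 0,	/* rport not yet initialized */
	BFA_RPORT_OFFLINE	= 1,	/* rport is offline */
	BFA_RPORT_PLOGI		= 2,	/* PLOGI in progress */
	BFA_RPORT_ONLINE	= 3,	/* login complete */
	BFA_RPORT_PLOGI_RETRY	= 4,	/* retrying login */
	BFA_RPORT_NSQUERY	= 5,	/* nameserver query */
	BFA_RPORT_ADISC		= 6,	/* ADISC authentication */
	BFA_RPORT_LOGO		= 7,	/* logging out */
	BFA_RPORT_LOGORCV	= 8,	/* handling LOGO from rport */
	BFA_RPORT_NSDISC	= 9,	/* re-discovering rport */
};

/* Hypothetical helper: map a queried state to a printable name. */
static const char *rport_state_name(enum bfa_rport_state state)
{
	static const char * const names[] = {
		"uninit", "offline", "plogi", "online", "plogi-retry",
		"ns-query", "adisc", "logo", "logo-rcvd", "ns-disc",
	};

	if ((unsigned int)state >= sizeof(names) / sizeof(names[0]))
		return "unknown";
	return names[state];
}

int main(void)
{
	printf("rport state: %s\n", rport_state_name(BFA_RPORT_ONLINE));
	return 0;
}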

+ 1081 - 0
drivers/scsi/bfa/bfa_defs_svc.h

@@ -0,0 +1,1081 @@
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ *
+ * Linux driver for Brocade Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef __BFA_DEFS_SVC_H__
+#define __BFA_DEFS_SVC_H__
+
+#include "bfa_defs.h"
+#include "bfa_fc.h"
+#include "bfi.h"
+
+#define BFA_IOCFC_INTR_DELAY	1125
+#define BFA_IOCFC_INTR_LATENCY	225
+#define BFA_IOCFCOE_INTR_DELAY	25
+#define BFA_IOCFCOE_INTR_LATENCY 5
+
+/**
+ * Interrupt coalescing configuration.
+ */
+#pragma pack(1)
+struct bfa_iocfc_intr_attr_s {
+	u8		coalesce;	/*  enable/disable coalescing */
+	u8		rsvd[3];
+	u16	latency;	/*  latency in microseconds   */
+	u16	delay;		/*  delay in microseconds     */
+};
+
+/**
+ * IOC firmware configuration
+ */
+struct bfa_iocfc_fwcfg_s {
+	u16        num_fabrics;	/*  number of fabrics		*/
+	u16        num_lports;	/*  number of local lports	*/
+	u16        num_rports;	/*  number of remote ports	*/
+	u16        num_ioim_reqs;	/*  number of IO reqs		*/
+	u16        num_tskim_reqs;	/*  task management requests	*/
+	u16        num_iotm_reqs;	/*  number of TM IO reqs	*/
+	u16        num_tsktm_reqs;	/*  TM task management requests*/
+	u16        num_fcxp_reqs;	/*  unassisted FC exchanges	*/
+	u16        num_uf_bufs;	/*  unsolicited recv buffers	*/
+	u8		num_cqs;
+	u8		fw_tick_res;	/*  FW clock resolution in ms */
+	u8		rsvd[4];
+};
+#pragma pack()
+
+struct bfa_iocfc_drvcfg_s {
+	u16        num_reqq_elems;	/*  number of req queue elements */
+	u16        num_rspq_elems;	/*  number of rsp queue elements */
+	u16        num_sgpgs;	/*  number of total SG pages	  */
+	u16        num_sboot_tgts;	/*  number of SAN boot targets	  */
+	u16        num_sboot_luns;	/*  number of SAN boot luns	  */
+	u16	    ioc_recover;	/*  IOC recovery mode		  */
+	u16	    min_cfg;	/*  minimum configuration	  */
+	u16        path_tov;	/*  device path timeout	  */
+	bfa_boolean_t   delay_comp; /*  delay completion of
+							failed inflight IOs */
+	u32		rsvd;
+};
+
+/**
+ * IOC configuration
+ */
+struct bfa_iocfc_cfg_s {
+	struct bfa_iocfc_fwcfg_s	fwcfg;	/*  firmware side config */
+	struct bfa_iocfc_drvcfg_s	drvcfg;	/*  driver side config	  */
+};
+
+/**
+ * IOC firmware IO stats
+ */
+struct bfa_fw_io_stats_s {
+	u32	host_abort;		/*  IO aborted by host driver*/
+	u32	host_cleanup;		/*  IO clean up by host driver */
+
+	u32	fw_io_timeout;		/*  IOs timedout */
+	u32	fw_frm_parse;		/*  frame parsed by f/w */
+	u32	fw_frm_data;		/*  fcp_data frame parsed by f/w */
+	u32	fw_frm_rsp;		/*  fcp_rsp frame parsed by f/w */
+	u32	fw_frm_xfer_rdy;	/*  xfer_rdy frame parsed by f/w */
+	u32	fw_frm_bls_acc;		/*  BLS ACC  frame parsed by f/w */
+	u32	fw_frm_tgt_abort;	/*  target ABTS parsed by f/w */
+	u32	fw_frm_unknown;		/*  unknown parsed by f/w */
+	u32	fw_data_dma;		/*  f/w DMA'ed the data frame */
+	u32	fw_frm_drop;		/*  f/w drop the frame */
+
+	u32	rec_timeout;		/*  FW rec timed out */
+	u32	error_rec;			/*  FW sending rec on
+							* an error condition*/
+	u32	wait_for_si;		/*  FW wait for SI */
+	u32	rec_rsp_inval;		/*  REC rsp invalid */
+	u32	seqr_io_abort;		/*  target does not know cmd so abort */
+	u32	seqr_io_retry;		/*  SEQR failed so retry IO */
+
+	u32	itn_cisc_upd_rsp;	/*  ITN cisc updated on fcp_rsp */
+	u32	itn_cisc_upd_data;	/*  ITN cisc updated on fcp_data */
+	u32	itn_cisc_upd_xfer_rdy;	/*  ITN cisc updated on fcp_data */
+
+	u32	fcp_data_lost;		/*  fcp data lost */
+
+	u32	ro_set_in_xfer_rdy;	/*  Target set RO in Xfer_rdy frame */
+	u32	xfer_rdy_ooo_err;	/*  Out of order Xfer_rdy received */
+	u32	xfer_rdy_unknown_err;	/*  unknown error in xfer_rdy frame */
+
+	u32	io_abort_timeout;	/*  ABTS timedout  */
+	u32	sler_initiated;		/*  SLER initiated */
+
+	u32	unexp_fcp_rsp;		/*  fcp response in wrong state */
+
+	u32	fcp_rsp_under_run;	/*  fcp rsp IO underrun */
+	u32        fcp_rsp_under_run_wr;   /*  fcp rsp IO underrun for write */
+	u32	fcp_rsp_under_run_err;	/*  fcp rsp IO underrun error */
+	u32        fcp_rsp_resid_inval;    /*  invalid residue */
+	u32	fcp_rsp_over_run;	/*  fcp rsp IO overrun */
+	u32	fcp_rsp_over_run_err;	/*  fcp rsp IO overrun error */
+	u32	fcp_rsp_proto_err;	/*  protocol error in fcp rsp */
+	u32	fcp_rsp_sense_err;	/*  error in sense info in fcp rsp */
+	u32	fcp_conf_req;		/*  FCP conf requested */
+
+	u32	tgt_aborted_io;		/*  target initiated abort */
+
+	u32	ioh_edtov_timeout_event;/*  IOH edtov timer popped */
+	u32	ioh_fcp_rsp_excp_event;	/*  IOH FCP_RSP exception */
+	u32	ioh_fcp_conf_event;	/*  IOH FCP_CONF */
+	u32	ioh_mult_frm_rsp_event;	/*  IOH multi_frame FCP_RSP */
+	u32	ioh_hit_class2_event;	/*  IOH hit class2 */
+	u32	ioh_miss_other_event;	/*  IOH miss other */
+	u32	ioh_seq_cnt_err_event;	/*  IOH seq cnt error */
+	u32	ioh_len_err_event;		/*  IOH len error - fcp_dl !=
+							* bytes xfered */
+	u32	ioh_seq_len_err_event;	/*  IOH seq len error */
+	u32	ioh_data_oor_event;	/*  Data out of range */
+	u32	ioh_ro_ooo_event;	/*  Relative offset out of range */
+	u32	ioh_cpu_owned_event;	/*  IOH hit -iost owned by f/w */
+	u32	ioh_unexp_frame_event;	/*  unexpected frame received
+						 *   count */
+	u32	ioh_err_int;		/*  IOH error int during data-phase
+						 *   for scsi write
+						 */
+};
+
+/**
+ * IOC port firmware stats
+ */
+
+struct bfa_fw_port_fpg_stats_s {
+    u32    intr_evt;
+    u32    intr;
+    u32    intr_excess;
+    u32    intr_cause0;
+    u32    intr_other;
+    u32    intr_other_ign;
+    u32    sig_lost;
+    u32    sig_regained;
+    u32    sync_lost;
+    u32    sync_to;
+    u32    sync_regained;
+    u32    div2_overflow;
+    u32    div2_underflow;
+    u32    efifo_overflow;
+    u32    efifo_underflow;
+    u32    idle_rx;
+    u32    lrr_rx;
+    u32    lr_rx;
+    u32    ols_rx;
+    u32    nos_rx;
+    u32    lip_rx;
+    u32    arbf0_rx;
+    u32    arb_rx;
+    u32    mrk_rx;
+    u32    const_mrk_rx;
+    u32    prim_unknown;
+};
+
+
+struct bfa_fw_port_lksm_stats_s {
+    u32    hwsm_success;       /*  hwsm state machine success          */
+    u32    hwsm_fails;         /*  hwsm fails                          */
+    u32    hwsm_wdtov;         /*  hwsm timed out                      */
+    u32    swsm_success;       /*  swsm success                        */
+    u32    swsm_fails;         /*  swsm fails                          */
+    u32    swsm_wdtov;         /*  swsm timed out                      */
+    u32    busybufs;           /*  link init failed due to busybuf     */
+    u32    buf_waits;          /*  bufwait state entries               */
+    u32    link_fails;         /*  link failures                       */
+    u32    psp_errors;         /*  primitive sequence protocol errors  */
+    u32    lr_unexp;           /*  No. of times LR rx-ed unexpectedly  */
+    u32    lrr_unexp;          /*  No. of times LRR rx-ed unexpectedly */
+    u32    lr_tx;              /*  No. of times LR tx started          */
+    u32    lrr_tx;             /*  No. of times LRR tx started         */
+    u32    ols_tx;             /*  No. of times OLS tx started         */
+    u32    nos_tx;             /*  No. of times NOS tx started         */
+    u32    hwsm_lrr_rx;        /*  No. of times LRR rx-ed by HWSM      */
+    u32    hwsm_lr_rx;         /*  No. of times LR rx-ed by HWSM      */
+};
+
+struct bfa_fw_port_snsm_stats_s {
+    u32    hwsm_success;       /*  Successful hwsm terminations        */
+    u32    hwsm_fails;         /*  hwsm fail count                     */
+    u32    hwsm_wdtov;         /*  hwsm timed out                      */
+    u32    swsm_success;       /*  swsm success                        */
+    u32    swsm_wdtov;         /*  swsm timed out                      */
+    u32    error_resets;       /*  error resets initiated by upsm      */
+    u32    sync_lost;          /*  Sync loss count                     */
+    u32    sig_lost;           /*  Signal loss count                   */
+};
+
+struct bfa_fw_port_physm_stats_s {
+    u32    module_inserts;     /*  Module insert count                 */
+    u32    module_xtracts;     /*  Module extracts count               */
+    u32    module_invalids;    /*  Invalid module inserted count       */
+    u32    module_read_ign;    /*  Module validation status ignored    */
+    u32    laser_faults;       /*  Laser fault count                   */
+    u32    rsvd;
+};
+
+struct bfa_fw_fip_stats_s {
+    u32    vlan_req;           /*  vlan discovery requests             */
+    u32    vlan_notify;        /*  vlan notifications                  */
+    u32    vlan_err;           /*  vlan response error                 */
+    u32    vlan_timeouts;      /*  vlan discovery timeouts             */
+    u32    vlan_invalids;      /*  invalid vlan in discovery advert.   */
+    u32    disc_req;           /*  Discovery solicit requests          */
+    u32    disc_rsp;           /*  Discovery solicit response          */
+    u32    disc_err;           /*  Discovery advt. parse errors        */
+    u32    disc_unsol;         /*  Discovery unsolicited               */
+    u32    disc_timeouts;      /*  Discovery timeouts                  */
+    u32    disc_fcf_unavail;   /*  Discovery FCF Not Avail.            */
+    u32    linksvc_unsupp;     /*  Unsupported link service req        */
+    u32    linksvc_err;        /*  Parse error in link service req     */
+    u32    logo_req;           /*  FIP logos received                  */
+    u32    clrvlink_req;       /*  Clear virtual link req              */
+    u32    op_unsupp;          /*  Unsupported FIP operation           */
+    u32    untagged;           /*  Untagged frames (ignored)           */
+    u32    invalid_version;    /*  Invalid FIP version                 */
+};
+
+struct bfa_fw_lps_stats_s {
+    u32    mac_invalids;       /*  Invalid mac assigned                */
+    u32    rsvd;
+};
+
+struct bfa_fw_fcoe_stats_s {
+    u32    cee_linkups;        /*  CEE link up count                   */
+    u32    cee_linkdns;        /*  CEE link down count                 */
+    u32    fip_linkups;        /*  FIP link up count                   */
+    u32    fip_linkdns;        /*  FIP link down count                 */
+    u32    fip_fails;          /*  FIP fail count                      */
+    u32    mac_invalids;       /*  Invalid mac assigned                */
+};
+
+/**
+ * IOC firmware FCoE port stats
+ */
+struct bfa_fw_fcoe_port_stats_s {
+    struct bfa_fw_fcoe_stats_s  fcoe_stats;
+    struct bfa_fw_fip_stats_s   fip_stats;
+};
+
+/**
+ * IOC firmware FC uport stats
+ */
+struct bfa_fw_fc_uport_stats_s {
+	struct bfa_fw_port_snsm_stats_s		snsm_stats;
+	struct bfa_fw_port_lksm_stats_s		lksm_stats;
+};
+
+/**
+ * IOC firmware FC port stats
+ */
+union bfa_fw_fc_port_stats_s {
+	struct bfa_fw_fc_uport_stats_s	fc_stats;
+	struct bfa_fw_fcoe_port_stats_s	fcoe_stats;
+};
+
+/**
+ * IOC firmware port stats
+ */
+struct bfa_fw_port_stats_s {
+	struct bfa_fw_port_fpg_stats_s		fpg_stats;
+	struct bfa_fw_port_physm_stats_s	physm_stats;
+	union  bfa_fw_fc_port_stats_s		fc_port;
+};
+
+/**
+ * fcxchg module statistics
+ */
+struct bfa_fw_fcxchg_stats_s {
+	u32	ua_tag_inv;
+	u32	ua_state_inv;
+};
+
+struct bfa_fw_lpsm_stats_s {
+	u32	cls_rx;
+	u32	cls_tx;
+};
+
+/**
+ *  Trunk statistics
+ */
+struct bfa_fw_trunk_stats_s {
+	u32 emt_recvd;		/*  Trunk EMT received		*/
+	u32 emt_accepted;		/*  Trunk EMT Accepted		*/
+	u32 emt_rejected;		/*  Trunk EMT rejected		*/
+	u32 etp_recvd;		/*  Trunk ETP received		*/
+	u32 etp_accepted;		/*  Trunk ETP Accepted		*/
+	u32 etp_rejected;		/*  Trunk ETP rejected		*/
+	u32 lr_recvd;		/*  Trunk LR received		*/
+	u32 rsvd;			/*  padding for 64 bit alignment */
+};
+
+struct bfa_fw_advsm_stats_s {
+	u32 flogi_sent;		/*  Flogi sent			*/
+	u32 flogi_acc_recvd;	/*  Flogi Acc received		*/
+	u32 flogi_rjt_recvd;	/*  Flogi rejects received	*/
+	u32 flogi_retries;		/*  Flogi retries		*/
+
+	u32 elp_recvd;		/*  ELP received		*/
+	u32 elp_accepted;		/*  ELP Accepted		*/
+	u32 elp_rejected;		/*  ELP rejected		*/
+	u32 elp_dropped;		/*  ELP dropped		*/
+};
+
+/**
+ * IOCFC firmware stats
+ */
+struct bfa_fw_iocfc_stats_s {
+	u32	cfg_reqs;	/*  cfg request */
+	u32	updq_reqs;	/*  update queue request */
+	u32	ic_reqs;	/*  interrupt coalesce reqs */
+	u32	unknown_reqs;
+	u32	set_intr_reqs;	/*  set interrupt reqs */
+};
+
+/**
+ * IOC attributes returned in queries
+ */
+struct bfa_iocfc_attr_s {
+	struct bfa_iocfc_cfg_s		config;		/*  IOCFC config   */
+	struct bfa_iocfc_intr_attr_s	intr_attr;	/*  interrupt attr */
+};
+
+/**
+ * Eth_sndrcv mod stats
+ */
+struct bfa_fw_eth_sndrcv_stats_s {
+	u32	crc_err;
+	u32	rsvd;		/*  64bit align    */
+};
+
+/**
+ * CT MAC mod stats
+ */
+struct bfa_fw_mac_mod_stats_s {
+	u32	mac_on;		/*  MAC got turned-on */
+	u32	link_up;	/*  link-up */
+	u32	signal_off;	/*  lost signal */
+	u32	dfe_on;		/*  DFE on */
+	u32	mac_reset;	/*  # of MAC reset to bring lnk up */
+	u32	pcs_reset;	/*  # of PCS reset to bring lnk up */
+	u32	loopback;	/*  MAC got into serdes loopback */
+	u32	lb_mac_reset;
+			/*  # of MAC reset to bring link up in loopback */
+	u32	lb_pcs_reset;
+			/*  # of PCS reset to bring link up in loopback */
+	u32	rsvd;		/*  64bit align    */
+};
+
+/**
+ * CT MOD stats
+ */
+struct bfa_fw_ct_mod_stats_s {
+	u32	rxa_rds_undrun;	/*  RxA RDS underrun */
+	u32	rad_bpc_ovfl;	/*  RAD BPC overflow */
+	u32	rad_rlb_bpc_ovfl; /*  RAD RLB BPC overflow */
+	u32	bpc_fcs_err;	/*  BPC FCS_ERR */
+	u32	txa_tso_hdr;	/*  TxA TSO header too long */
+	u32	rsvd;		/*  64bit align    */
+};
+
+/**
+ * IOC firmware stats
+ */
+struct bfa_fw_stats_s {
+	struct bfa_fw_ioc_stats_s	ioc_stats;
+	struct bfa_fw_iocfc_stats_s	iocfc_stats;
+	struct bfa_fw_io_stats_s	io_stats;
+	struct bfa_fw_port_stats_s	port_stats;
+	struct bfa_fw_fcxchg_stats_s	fcxchg_stats;
+	struct bfa_fw_lpsm_stats_s	lpsm_stats;
+	struct bfa_fw_lps_stats_s	lps_stats;
+	struct bfa_fw_trunk_stats_s	trunk_stats;
+	struct bfa_fw_advsm_stats_s	advsm_stats;
+	struct bfa_fw_mac_mod_stats_s	macmod_stats;
+	struct bfa_fw_ct_mod_stats_s	ctmod_stats;
+	struct bfa_fw_eth_sndrcv_stats_s	ethsndrcv_stats;
+};
+
+#define BFA_IOCFC_PATHTOV_MAX	60
+#define BFA_IOCFC_QDEPTH_MAX	2000
+
+/**
+ * QoS states
+ */
+enum bfa_qos_state {
+	BFA_QOS_ONLINE = 1,		/*  QoS is online */
+	BFA_QOS_OFFLINE = 2,		/*  QoS is offline */
+};
+
+/**
+ * QoS  Priority levels.
+ */
+enum bfa_qos_priority {
+	BFA_QOS_UNKNOWN = 0,
+	BFA_QOS_HIGH  = 1,	/*  QoS Priority Level High */
+	BFA_QOS_MED  =  2,	/*  QoS Priority Level Medium */
+	BFA_QOS_LOW  =  3,	/*  QoS Priority Level Low */
+};
+
+/**
+ * QoS  bandwidth allocation for each priority level
+ */
+enum bfa_qos_bw_alloc {
+	BFA_QOS_BW_HIGH  = 60,	/*  bandwidth allocation for High */
+	BFA_QOS_BW_MED  =  30,	/*  bandwidth allocation for Medium */
+	BFA_QOS_BW_LOW  =  10,	/*  bandwidth allocation for Low */
+};
+#pragma pack(1)
+/**
+ * QoS attribute returned in QoS Query
+ */
+struct bfa_qos_attr_s {
+	u8		state;		/*  QoS current state */
+	u8		rsvd[3];
+	u32  total_bb_cr;		/*  Total BB Credits */
+};
+
+/**
+ * These fields should be displayed only from the CLI.
+ * There will be a separate BFAL API (get_qos_vc_attr ?)
+ * to retrieve this.
+ *
+ */
+#define  BFA_QOS_MAX_VC  16
+
+struct bfa_qos_vc_info_s {
+	u8 vc_credit;
+	u8 borrow_credit;
+	u8 priority;
+	u8 resvd;
+};
+
+struct bfa_qos_vc_attr_s {
+	u16  total_vc_count;                    /*  Total VC Count */
+	u16  shared_credit;
+	u32  elp_opmode_flags;
+	struct bfa_qos_vc_info_s vc_info[BFA_QOS_MAX_VC];  /*   as many as
+							    * total_vc_count */
+};
+
+/**
+ * QoS statistics
+ */
+struct bfa_qos_stats_s {
+	u32	flogi_sent;		/*  QoS Flogi sent */
+	u32	flogi_acc_recvd;	/*  QoS Flogi Acc received */
+	u32	flogi_rjt_recvd; /*  QoS Flogi rejects received */
+	u32	flogi_retries;		/*  QoS Flogi retries */
+
+	u32	elp_recvd;		/*  QoS ELP received */
+	u32	elp_accepted;		/*  QoS ELP Accepted */
+	u32	elp_rejected;       /*  QoS ELP rejected */
+	u32	elp_dropped;        /*  QoS ELP dropped  */
+
+	u32	qos_rscn_recvd;     /*  QoS RSCN received */
+	u32	rsvd;		    /* padding for 64 bit alignment */
+};
+
+/**
+ * FCoE statistics
+ */
+struct bfa_fcoe_stats_s {
+	u64	secs_reset;	/*  Seconds since stats reset	     */
+	u64	cee_linkups;	/*  CEE link up		     */
+	u64	cee_linkdns;	/*  CEE link down		     */
+	u64	fip_linkups;	/*  FIP link up		     */
+	u64	fip_linkdns;	/*  FIP link down		     */
+	u64	fip_fails;	/*  FIP failures		     */
+	u64	mac_invalids;	/*  Invalid mac assignments	     */
+	u64	vlan_req;	/*  Vlan requests		     */
+	u64	vlan_notify;	/*  Vlan notifications		     */
+	u64	vlan_err;	/*  Vlan notification errors	     */
+	u64	vlan_timeouts;	/*  Vlan request timeouts	     */
+	u64	vlan_invalids;	/*  Vlan invalids		     */
+	u64	disc_req;	/*  Discovery requests		     */
+	u64	disc_rsp;	/*  Discovery responses	     */
+	u64	disc_err;	/*  Discovery error frames	     */
+	u64	disc_unsol;	/*  Discovery unsolicited	     */
+	u64	disc_timeouts;	/*  Discovery timeouts		     */
+	u64	disc_fcf_unavail; /*  Discovery FCF not avail	     */
+	u64	linksvc_unsupp;	/*  FIP link service req unsupp.    */
+	u64	linksvc_err;	/*  FIP link service req errors     */
+	u64	logo_req;	/*  FIP logos received		     */
+	u64	clrvlink_req;	/*  Clear virtual link requests     */
+	u64	op_unsupp;	/*  FIP operation unsupp.	     */
+	u64	untagged;	/*  FIP untagged frames	     */
+	u64	txf_ucast;	/*  Tx FCoE unicast frames	     */
+	u64	txf_ucast_vlan;	/*  Tx FCoE unicast vlan frames     */
+	u64	txf_ucast_octets; /*  Tx FCoE unicast octets	     */
+	u64	txf_mcast;	/*  Tx FCoE multicast frames	     */
+	u64	txf_mcast_vlan;	/*  Tx FCoE multicast vlan frames   */
+	u64	txf_mcast_octets; /*  Tx FCoE multicast octets	     */
+	u64	txf_bcast;	/*  Tx FCoE broadcast frames	     */
+	u64	txf_bcast_vlan;	/*  Tx FCoE broadcast vlan frames   */
+	u64	txf_bcast_octets; /*  Tx FCoE broadcast octets	     */
+	u64	txf_timeout;	/*  Tx timeouts		     */
+	u64	txf_parity_errors; /*  Transmit parity err	     */
+	u64	txf_fid_parity_errors; /*  Transmit FID parity err  */
+	u64	rxf_ucast_octets; /*  Rx FCoE unicast octets	     */
+	u64	rxf_ucast;	/*  Rx FCoE unicast frames	     */
+	u64	rxf_ucast_vlan;	/*  Rx FCoE unicast vlan frames     */
+	u64	rxf_mcast_octets; /*  Rx FCoE multicast octets	     */
+	u64	rxf_mcast;	/*  Rx FCoE multicast frames	     */
+	u64	rxf_mcast_vlan;	/*  Rx FCoE multicast vlan frames   */
+	u64	rxf_bcast_octets; /*  Rx FCoE broadcast octets	     */
+	u64	rxf_bcast;	/*  Rx FCoE broadcast frames	     */
+	u64	rxf_bcast_vlan;	/*  Rx FCoE broadcast vlan frames   */
+};
+
+/**
+ * QoS or FCoE stats (fcport stats excluding physical FC port stats)
+ */
+union bfa_fcport_stats_u {
+	struct bfa_qos_stats_s	fcqos;
+	struct bfa_fcoe_stats_s	fcoe;
+};
+#pragma pack()
+
+struct bfa_fcpim_del_itn_stats_s {
+	u32	del_itn_iocomp_aborted;	   /* Aborted IO requests	      */
+	u32	del_itn_iocomp_timedout;   /* IO timeouts		      */
+	u32	del_itn_iocom_sqer_needed; /* IO retry for SQ error recovery  */
+	u32	del_itn_iocom_res_free;    /* Delayed freeing of IO resources */
+	u32	del_itn_iocom_hostabrts;   /* Host IO abort requests	      */
+	u32	del_itn_total_ios;	   /* Total IO count		      */
+	u32	del_io_iocdowns;	   /* IO cleaned-up due to IOC down   */
+	u32	del_tm_iocdowns;	   /* TM cleaned-up due to IOC down   */
+};
+
+struct bfa_itnim_iostats_s {
+
+	u32	total_ios;		/*  Total IO Requests		*/
+	u32	input_reqs;		/*  Data in-bound requests	*/
+	u32	output_reqs;		/*  Data out-bound requests	*/
+	u32	io_comps;		/*  Total IO Completions	*/
+	u32	wr_throughput;		/*  Write data transferred in bytes */
+	u32	rd_throughput;		/*  Read data transferred in bytes  */
+
+	u32	iocomp_ok;		/*  Slowpath IO completions	*/
+	u32	iocomp_underrun;	/*  IO underrun		*/
+	u32	iocomp_overrun;		/*  IO overrun			*/
+	u32	qwait;			/*  IO Request-Q wait		*/
+	u32	qresumes;		/*  IO Request-Q wait done	*/
+	u32	no_iotags;		/*  No free IO tag		*/
+	u32	iocomp_timedout;	/*  IO timeouts		*/
+	u32	iocom_nexus_abort;	/*  IO failure due to target offline */
+	u32	iocom_proto_err;	/*  IO protocol errors		*/
+	u32	iocom_dif_err;		/*  IO SBC-3 protection errors	*/
+
+	u32	iocom_sqer_needed;	/*  fcp-2 error recovery failed	*/
+	u32	iocom_res_free;		/*  Delayed freeing of IO tag	*/
+
+
+	u32	io_aborts;		/*  Host IO abort requests	*/
+	u32	iocom_hostabrts;	/*  Host IO abort completions	*/
+	u32	io_cleanups;		/*  IO clean-up requests	*/
+	u32	path_tov_expired;	/*  IO path tov expired	*/
+	u32	iocomp_aborted;		/*  IO abort completions	*/
+	u32	io_iocdowns;		/*  IO cleaned-up due to IOC down */
+	u32	iocom_utags;		/*  IO comp with unknown tags	*/
+
+	u32	io_tmaborts;		/*  Abort request due to TM command */
+	u32	tm_io_comps;		/* Abort completion due to TM command */
+
+	u32	creates;		/*  IT Nexus create requests	*/
+	u32	fw_create;		/*  IT Nexus FW create requests	*/
+	u32	create_comps;		/*  IT Nexus FW create completions */
+	u32	onlines;		/*  IT Nexus onlines		*/
+	u32	offlines;		/*  IT Nexus offlines		*/
+	u32	fw_delete;		/*  IT Nexus FW delete requests	*/
+	u32	delete_comps;		/*  IT Nexus FW delete completions */
+	u32	deletes;		/*  IT Nexus delete requests	   */
+	u32	sler_events;		/*  SLER events		*/
+	u32	ioc_disabled;		/*  Num IOC disables		*/
+	u32	cleanup_comps;		/*  IT Nexus cleanup completions    */
+
+	u32	tm_cmnds;		/*  TM Requests		*/
+	u32	tm_fw_rsps;		/*  TM Completions		*/
+	u32	tm_success;		/*  TM initiated IO cleanup success */
+	u32	tm_failures;		/*  TM initiated IO cleanup failure */
+	u32	no_tskims;		/*  No free TM tag		*/
+	u32	tm_qwait;		/*  TM Request-Q wait		*/
+	u32	tm_qresumes;		/*  TM Request-Q wait done	*/
+
+	u32	tm_iocdowns;		/*  TM cleaned-up due to IOC down   */
+	u32	tm_cleanups;		/*  TM cleanup requests	*/
+	u32	tm_cleanup_comps;	/*  TM cleanup completions	*/
+};
+
+/* Modify char *port_stt[] in bfal_port.c if a new state is added */
+enum bfa_port_states {
+	BFA_PORT_ST_UNINIT		= 1,
+	BFA_PORT_ST_ENABLING_QWAIT	= 2,
+	BFA_PORT_ST_ENABLING		= 3,
+	BFA_PORT_ST_LINKDOWN		= 4,
+	BFA_PORT_ST_LINKUP		= 5,
+	BFA_PORT_ST_DISABLING_QWAIT	= 6,
+	BFA_PORT_ST_DISABLING		= 7,
+	BFA_PORT_ST_DISABLED		= 8,
+	BFA_PORT_ST_STOPPED		= 9,
+	BFA_PORT_ST_IOCDOWN		= 10,
+	BFA_PORT_ST_IOCDIS		= 11,
+	BFA_PORT_ST_FWMISMATCH		= 12,
+	BFA_PORT_ST_PREBOOT_DISABLED	= 13,
+	BFA_PORT_ST_TOGGLING_QWAIT	= 14,
+	BFA_PORT_ST_MAX_STATE,
+};
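
The comment above this enum asks that the driver's port_stt[] name table be kept in sync whenever a state is added. A minimal sketch of such a lookup, with illustrative names and a bounds check (not the driver's actual table):

/* Hypothetical state-name table kept in sync with enum bfa_port_states. */
static const char *port_state_names[] = {
	[BFA_PORT_ST_UNINIT]		= "uninit",
	[BFA_PORT_ST_ENABLING_QWAIT]	= "enabling_qwait",
	[BFA_PORT_ST_ENABLING]		= "enabling",
	[BFA_PORT_ST_LINKDOWN]		= "linkdown",
	[BFA_PORT_ST_LINKUP]		= "linkup",
	[BFA_PORT_ST_DISABLING_QWAIT]	= "disabling_qwait",
	[BFA_PORT_ST_DISABLING]		= "disabling",
	[BFA_PORT_ST_DISABLED]		= "disabled",
	[BFA_PORT_ST_STOPPED]		= "stopped",
	[BFA_PORT_ST_IOCDOWN]		= "iocdown",
	[BFA_PORT_ST_IOCDIS]		= "iocdis",
	[BFA_PORT_ST_FWMISMATCH]	= "fwmismatch",
	[BFA_PORT_ST_PREBOOT_DISABLED]	= "preboot_disabled",
	[BFA_PORT_ST_TOGGLING_QWAIT]	= "toggling_qwait",
};

static const char *port_state_name(enum bfa_port_states state)
{
	/* States start at 1; anything outside the range reads as unknown. */
	if (state < BFA_PORT_ST_UNINIT || state >= BFA_PORT_ST_MAX_STATE)
		return "unknown";
	return port_state_names[state];
}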
+
+/**
+ *	Port operational type (in sync with SNIA port type).
+ */
+enum bfa_port_type {
+	BFA_PORT_TYPE_UNKNOWN	= 1,	/*  port type is unknown */
+	BFA_PORT_TYPE_NPORT	= 5,	/*  P2P with switched fabric */
+	BFA_PORT_TYPE_NLPORT	= 6,	/*  public loop */
+	BFA_PORT_TYPE_LPORT	= 20,	/*  private loop */
+	BFA_PORT_TYPE_P2P	= 21,	/*  P2P with no switched fabric */
+	BFA_PORT_TYPE_VPORT	= 22,	/*  NPIV - virtual port */
+};
+
+/**
+ *	Port topology setting. A port's topology and fabric login status
+ *	determine its operational type.
+ */
+enum bfa_port_topology {
+	BFA_PORT_TOPOLOGY_NONE = 0,	/*  No valid topology */
+	BFA_PORT_TOPOLOGY_P2P  = 1,	/*  P2P only */
+	BFA_PORT_TOPOLOGY_LOOP = 2,	/*  LOOP topology */
+	BFA_PORT_TOPOLOGY_AUTO = 3,	/*  auto topology selection */
+};
+
+/**
+ *	Physical port loopback types.
+ */
+enum bfa_port_opmode {
+	BFA_PORT_OPMODE_NORMAL   = 0x00, /*  normal non-loopback mode */
+	BFA_PORT_OPMODE_LB_INT   = 0x01, /*  internal loop back */
+	BFA_PORT_OPMODE_LB_SLW   = 0x02, /*  serial link wrapback (serdes) */
+	BFA_PORT_OPMODE_LB_EXT   = 0x04, /*  external loop back (serdes) */
+	BFA_PORT_OPMODE_LB_CBL   = 0x08, /*  cabled loop back */
+	BFA_PORT_OPMODE_LB_NLINT = 0x20, /*  NL_Port internal loopback */
+};
+
+#define BFA_PORT_OPMODE_LB_HARD(_mode)			\
+	((_mode == BFA_PORT_OPMODE_LB_INT) ||		\
+	(_mode == BFA_PORT_OPMODE_LB_SLW) ||		\
+	(_mode == BFA_PORT_OPMODE_LB_EXT))
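
BFA_PORT_OPMODE_LB_HARD() groups the internal, serdes and external loopback modes as "hard" loopback. A hedged usage sketch (the function name and return convention are illustrative only):

/* Sketch: refuse to enable QoS while the port runs in a hard loopback mode. */
static int port_qos_allowed(enum bfa_port_opmode opmode)
{
	if (BFA_PORT_OPMODE_LB_HARD(opmode))
		return 0;	/* internal/serdes/external loopback: no QoS */
	return 1;
}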
+
+/**
+ *	Port link state
+ */
+enum bfa_port_linkstate {
+	BFA_PORT_LINKUP		= 1,	/*  Physical port/Trunk link up */
+	BFA_PORT_LINKDOWN	= 2,	/*  Physical port/Trunk link down */
+};
+
+/**
+ *	Port link state reason code
+ */
+enum bfa_port_linkstate_rsn {
+	BFA_PORT_LINKSTATE_RSN_NONE		= 0,
+	BFA_PORT_LINKSTATE_RSN_DISABLED		= 1,
+	BFA_PORT_LINKSTATE_RSN_RX_NOS		= 2,
+	BFA_PORT_LINKSTATE_RSN_RX_OLS		= 3,
+	BFA_PORT_LINKSTATE_RSN_RX_LIP		= 4,
+	BFA_PORT_LINKSTATE_RSN_RX_LIPF7		= 5,
+	BFA_PORT_LINKSTATE_RSN_SFP_REMOVED	= 6,
+	BFA_PORT_LINKSTATE_RSN_PORT_FAULT	= 7,
+	BFA_PORT_LINKSTATE_RSN_RX_LOS		= 8,
+	BFA_PORT_LINKSTATE_RSN_LOCAL_FAULT	= 9,
+	BFA_PORT_LINKSTATE_RSN_REMOTE_FAULT	= 10,
+	BFA_PORT_LINKSTATE_RSN_TIMEOUT		= 11,
+
+
+
+	/* CEE related reason codes/errors */
+	CEE_LLDP_INFO_AGED_OUT			= 20,
+	CEE_LLDP_SHUTDOWN_TLV_RCVD		= 21,
+	CEE_PEER_NOT_ADVERTISE_DCBX		= 22,
+	CEE_PEER_NOT_ADVERTISE_PG		= 23,
+	CEE_PEER_NOT_ADVERTISE_PFC		= 24,
+	CEE_PEER_NOT_ADVERTISE_FCOE		= 25,
+	CEE_PG_NOT_COMPATIBLE			= 26,
+	CEE_PFC_NOT_COMPATIBLE			= 27,
+	CEE_FCOE_NOT_COMPATIBLE			= 28,
+	CEE_BAD_PG_RCVD				= 29,
+	CEE_BAD_BW_RCVD				= 30,
+	CEE_BAD_PFC_RCVD			= 31,
+	CEE_BAD_APP_PRI_RCVD			= 32,
+	CEE_FCOE_PRI_PFC_OFF			= 33,
+	CEE_DUP_CONTROL_TLV_RCVD		= 34,
+	CEE_DUP_FEAT_TLV_RCVD			= 35,
+	CEE_APPLY_NEW_CFG			= 36, /* reason, not error */
+	CEE_PROTOCOL_INIT			= 37, /* reason, not error */
+	CEE_PHY_LINK_DOWN			= 38,
+	CEE_LLS_FCOE_ABSENT			= 39,
+	CEE_LLS_FCOE_DOWN			= 40,
+	CEE_ISCSI_NOT_COMPATIBLE		= 41,
+	CEE_ISCSI_PRI_PFC_OFF			= 42,
+	CEE_ISCSI_PRI_OVERLAP_FCOE_PRI		= 43
+};
+#pragma pack(1)
+/**
+ *      Physical port configuration
+ */
+struct bfa_port_cfg_s {
+	u8	 topology;	/*  bfa_port_topology		*/
+	u8	 speed;		/*  enum bfa_port_speed	*/
+	u8	 trunked;	/*  trunked or not		*/
+	u8	 qos_enabled;	/*  qos enabled or not		*/
+	u8	 cfg_hardalpa;	/*  is hard alpa configured	*/
+	u8	 hardalpa;	/*  configured hard alpa	*/
+	u16 maxfrsize;	/*  maximum frame size		*/
+	u8	 rx_bbcredit;	/*  receive buffer credits	*/
+	u8	 tx_bbcredit;	/*  transmit buffer credits	*/
+	u8	 ratelimit;	/*  ratelimit enabled or not	*/
+	u8	 trl_def_speed;	/*  ratelimit default speed	*/
+	u16 path_tov;	/*  device path timeout	*/
+	u16 q_depth;	/*  SCSI Queue depth		*/
+};
+#pragma pack()
+
+/**
+ *	Port attribute values.
+ */
+struct bfa_port_attr_s {
+	/*
+	 * Static fields
+	 */
+	wwn_t	   nwwn;		/*  node wwn */
+	wwn_t	   pwwn;		/*  port wwn */
+	wwn_t	   factorynwwn;	/*  factory node wwn */
+	wwn_t	   factorypwwn;	/*  factory port wwn */
+	enum fc_cos	cos_supported;	/*  supported class of services */
+	u32	rsvd;
+	struct fc_symname_s	port_symname;	/*  port symbolic name */
+	enum bfa_port_speed speed_supported; /*  supported speeds */
+	bfa_boolean_t   pbind_enabled;
+
+	/*
+	 * Configured values
+	 */
+	struct bfa_port_cfg_s pport_cfg;	/*  pport cfg */
+
+	/*
+	 * Dynamic field - info from BFA
+	 */
+	enum bfa_port_states	port_state;	/*  current port state */
+	enum bfa_port_speed	speed;		/*  current speed */
+	enum bfa_port_topology	topology;	/*  current topology */
+	bfa_boolean_t		beacon;		/*  current beacon status */
+	bfa_boolean_t		link_e2e_beacon; /*  link beacon is on */
+	bfa_boolean_t		plog_enabled;	/*  portlog is enabled */
+
+	/*
+	 * Dynamic field - info from FCS
+	 */
+	u32		pid;		/*  port ID */
+	enum bfa_port_type	port_type;	/*  current topology */
+	u32		loopback;	/*  external loopback */
+	u32		authfail;	/*  auth fail state */
+	bfa_boolean_t		io_profile;	/*  get it from fcpim mod */
+	u8			pad[4];		/*  for 64-bit alignment */
+
+	/* FCoE specific  */
+	u16		fcoe_vlan;
+	u8			rsvd1[6];
+};
+
+/**
+ *	      Port FCP mappings.
+ */
+struct bfa_port_fcpmap_s {
+	char		osdevname[256];
+	u32	bus;
+	u32	target;
+	u32	oslun;
+	u32	fcid;
+	wwn_t	   nwwn;
+	wwn_t	   pwwn;
+	u64	fcplun;
+	char		luid[256];
+};
+
+/**
+ *	      Port RNID info.
+ */
+struct bfa_port_rnid_s {
+	wwn_t	     wwn;
+	u32	  unittype;
+	u32	  portid;
+	u32	  attached_nodes_num;
+	u16	  ip_version;
+	u16	  udp_port;
+	u8	   ipaddr[16];
+	u16	  rsvd;
+	u16	  topologydiscoveryflags;
+};
+
+#pragma pack(1)
+struct bfa_fcport_fcf_s {
+	wwn_t	   name;	   /*  FCF name		 */
+	wwn_t	   fabric_name;    /*  Fabric Name	      */
+	u8		fipenabled;	/*  FIP enabled or not */
+	u8		fipfailed;	/*  FIP failed or not	*/
+	u8		resv[2];
+	u8	 pri;	    /*  FCF priority	     */
+	u8	 version;	/*  FIP version used	 */
+	u8	 available;      /*  Available  for  login    */
+	u8	 fka_disabled;   /*  FKA is disabled	  */
+	u8	 maxsz_verified; /*  FCoE max size verified   */
+	u8	 fc_map[3];      /*  FC map		   */
+	u16	vlan;	   /*  FCoE vlan tag/priority   */
+	u32	fka_adv_per;    /*  FIP  ka advert. period   */
+	mac_t	   mac;	    /*  FCF mac		  */
+};
+
+/**
+ *	Trunk states for BCU/BFAL
+ */
+enum bfa_trunk_state {
+	BFA_TRUNK_DISABLED	= 0,	/*  Trunk is not configured	*/
+	BFA_TRUNK_ONLINE	= 1,	/*  Trunk is online		*/
+	BFA_TRUNK_OFFLINE	= 2,	/*  Trunk is offline		*/
+};
+
+/**
+ *	VC attributes for trunked link
+ */
+struct bfa_trunk_vc_attr_s {
+	u32 bb_credit;
+	u32 elp_opmode_flags;
+	u32 req_credit;
+	u16 vc_credits[8];
+};
+
+/**
+ *	Link state information
+ */
+struct bfa_port_link_s {
+	u8	 linkstate;	/*  Link state bfa_port_linkstate */
+	u8	 linkstate_rsn;	/*  bfa_port_linkstate_rsn_t */
+	u8	 topology;	/*  P2P/LOOP bfa_port_topology */
+	u8	 speed;		/*  Link speed (1/2/4/8 G) */
+	u32	linkstate_opt;  /*  Linkstate optional data (debug) */
+	u8	 trunked;	/*  Trunked or not (1 or 0) */
+	u8	 resvd[3];
+	struct bfa_qos_attr_s  qos_attr;   /* QoS Attributes */
+	union {
+		struct bfa_qos_vc_attr_s qos_vc_attr;  /*  VC info from ELP */
+		struct bfa_trunk_vc_attr_s trunk_vc_attr;
+		struct bfa_fcport_fcf_s fcf; /*  FCF information (for FCoE) */
+	} vc_fcf;
+};
+#pragma pack()
+
+enum bfa_trunk_link_fctl {
+	BFA_TRUNK_LINK_FCTL_NORMAL,
+	BFA_TRUNK_LINK_FCTL_VC,
+	BFA_TRUNK_LINK_FCTL_VC_QOS,
+};
+
+enum bfa_trunk_link_state {
+	BFA_TRUNK_LINK_STATE_UP = 1,		/* link part of trunk */
+	BFA_TRUNK_LINK_STATE_DN_LINKDN = 2,	/* physical link down */
+	BFA_TRUNK_LINK_STATE_DN_GRP_MIS = 3,	/* trunk group different */
+	BFA_TRUNK_LINK_STATE_DN_SPD_MIS = 4,	/* speed mismatch */
+	BFA_TRUNK_LINK_STATE_DN_MODE_MIS = 5,	/* remote port not trunked */
+};
+
+#define BFA_TRUNK_MAX_PORTS	2
+struct bfa_trunk_link_attr_s {
+	wwn_t    trunk_wwn;
+	enum bfa_trunk_link_fctl fctl;
+	enum bfa_trunk_link_state link_state;
+	enum bfa_port_speed	speed;
+	u32 deskew;
+};
+
+struct bfa_trunk_attr_s {
+	enum bfa_trunk_state	state;
+	enum bfa_port_speed	speed;
+	u32		port_id;
+	u32		rsvd;
+	struct bfa_trunk_link_attr_s link_attr[BFA_TRUNK_MAX_PORTS];
+};
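
A trunk attribute carries up to BFA_TRUNK_MAX_PORTS member links, each with its own state. A small sketch that counts the members actually carrying traffic (how the attribute is obtained from the driver is outside this header):

/* Sketch: number of trunk member links that are currently up. */
static int trunk_active_links(const struct bfa_trunk_attr_s *attr)
{
	int i, up = 0;

	if (attr->state != BFA_TRUNK_ONLINE)
		return 0;
	for (i = 0; i < BFA_TRUNK_MAX_PORTS; i++)
		if (attr->link_attr[i].link_state == BFA_TRUNK_LINK_STATE_UP)
			up++;
	return up;
}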
+
+struct bfa_rport_hal_stats_s {
+	u32        sm_un_cr;	    /*  uninit: create events      */
+	u32        sm_un_unexp;	    /*  uninit: exception events   */
+	u32        sm_cr_on;	    /*  created: online events     */
+	u32        sm_cr_del;	    /*  created: delete events     */
+	u32        sm_cr_hwf;	    /*  created: IOC down          */
+	u32        sm_cr_unexp;	    /*  created: exception events  */
+	u32        sm_fwc_rsp;	    /*  fw create: f/w responses   */
+	u32        sm_fwc_del;	    /*  fw create: delete events   */
+	u32        sm_fwc_off;	    /*  fw create: offline events  */
+	u32        sm_fwc_hwf;	    /*  fw create: IOC down        */
+	u32        sm_fwc_unexp;	    /*  fw create: exception events*/
+	u32        sm_on_off;	    /*  online: offline events     */
+	u32        sm_on_del;	    /*  online: delete events      */
+	u32        sm_on_hwf;	    /*  online: IOC down events    */
+	u32        sm_on_unexp;	    /*  online: exception events   */
+	u32        sm_fwd_rsp;	    /*  fw delete: fw responses    */
+	u32        sm_fwd_del;	    /*  fw delete: delete events   */
+	u32        sm_fwd_hwf;	    /*  fw delete: IOC down events */
+	u32        sm_fwd_unexp;	    /*  fw delete: exception events*/
+	u32        sm_off_del;	    /*  offline: delete events     */
+	u32        sm_off_on;	    /*  offline: online events     */
+	u32        sm_off_hwf;	    /*  offline: IOC down events   */
+	u32        sm_off_unexp;	    /*  offline: exception events  */
+	u32        sm_del_fwrsp;	    /*  delete: fw responses       */
+	u32        sm_del_hwf;	    /*  delete: IOC down events    */
+	u32        sm_del_unexp;	    /*  delete: exception events   */
+	u32        sm_delp_fwrsp;	    /*  delete pend: fw responses  */
+	u32        sm_delp_hwf;	    /*  delete pend: IOC downs     */
+	u32        sm_delp_unexp;	    /*  delete pend: exceptions    */
+	u32        sm_offp_fwrsp;	    /*  off-pending: fw responses  */
+	u32        sm_offp_del;	    /*  off-pending: deletes       */
+	u32        sm_offp_hwf;	    /*  off-pending: IOC downs     */
+	u32        sm_offp_unexp;	    /*  off-pending: exceptions    */
+	u32        sm_iocd_off;	    /*  IOC down: offline events   */
+	u32        sm_iocd_del;	    /*  IOC down: delete events    */
+	u32        sm_iocd_on;	    /*  IOC down: online events    */
+	u32        sm_iocd_unexp;	    /*  IOC down: exceptions       */
+	u32        rsvd;
+};
+#pragma pack(1)
+/**
+ *  Rport's QoS attributes
+ */
+struct bfa_rport_qos_attr_s {
+	u8			qos_priority;  /*  rport's QoS priority   */
+	u8			rsvd[3];
+	u32	       qos_flow_id;	  /*  QoS flow Id	 */
+};
+#pragma pack()
+
+#define BFA_IOBUCKET_MAX 14
+
+struct bfa_itnim_latency_s {
+	u32 min[BFA_IOBUCKET_MAX];
+	u32 max[BFA_IOBUCKET_MAX];
+	u32 count[BFA_IOBUCKET_MAX];
+	u32 avg[BFA_IOBUCKET_MAX];
+};
+
+struct bfa_itnim_ioprofile_s {
+	u32 clock_res_mul;
+	u32 clock_res_div;
+	u32 index;
+	u32 io_profile_start_time;	/*  IO profile start time	*/
+	u32 iocomps[BFA_IOBUCKET_MAX];	/*  IO completed	*/
+	struct bfa_itnim_latency_s io_latency;
+};
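
The I/O profile buckets report latency in firmware clock units, and clock_res_mul/clock_res_div appear intended for converting those units to wall-clock time. A sketch under that assumption (the exact unit semantics are defined by the firmware, not by this header; 32-bit overflow is not handled):

/* Sketch: average latency of one bucket, scaled by the clock resolution. */
static u32 ioprofile_avg_latency(const struct bfa_itnim_ioprofile_s *prof,
				 int bucket)
{
	if (bucket < 0 || bucket >= BFA_IOBUCKET_MAX || !prof->clock_res_div)
		return 0;
	return prof->io_latency.avg[bucket] *
		prof->clock_res_mul / prof->clock_res_div;
}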
+
+/**
+ * FC physical port statistics.
+ */
+struct bfa_port_fc_stats_s {
+	u64     secs_reset;     /*  Seconds since stats reset */
+	u64     tx_frames;      /*  Tx frames                   */
+	u64     tx_words;       /*  Tx words                    */
+	u64     tx_lip;         /*  Tx LIP                      */
+	u64     tx_nos;         /*  Tx NOS                      */
+	u64     tx_ols;         /*  Tx OLS                      */
+	u64     tx_lr;          /*  Tx LR                       */
+	u64     tx_lrr;         /*  Tx LRR                      */
+	u64     rx_frames;      /*  Rx frames                   */
+	u64     rx_words;       /*  Rx words                    */
+	u64     lip_count;      /*  Rx LIP                      */
+	u64     nos_count;      /*  Rx NOS                      */
+	u64     ols_count;      /*  Rx OLS                      */
+	u64     lr_count;       /*  Rx LR                       */
+	u64     lrr_count;      /*  Rx LRR                      */
+	u64     invalid_crcs;   /*  Rx CRC err frames           */
+	u64     invalid_crc_gd_eof; /*  Rx CRC err good EOF frames */
+	u64     undersized_frm; /*  Rx undersized frames        */
+	u64     oversized_frm;  /*  Rx oversized frames */
+	u64     bad_eof_frm;    /*  Rx frames with bad EOF      */
+	u64     error_frames;   /*  Errored frames              */
+	u64     dropped_frames; /*  Dropped frames              */
+	u64     link_failures;  /*  Link Failure (LF) count     */
+	u64     loss_of_syncs;  /*  Loss of sync count          */
+	u64     loss_of_signals; /*  Loss of signal count       */
+	u64     primseq_errs;   /*  Primitive sequence protocol err. */
+	u64     bad_os_count;   /*  Invalid ordered sets        */
+	u64     err_enc_out;    /*  Encoding err nonframe_8b10b */
+	u64     err_enc;        /*  Encoding err frame_8b10b    */
+};
+
+/**
+ * Eth Physical Port statistics.
+ */
+struct bfa_port_eth_stats_s {
+	u64     secs_reset;     /*  Seconds since stats reset */
+	u64     frame_64;       /*  Frames 64 bytes             */
+	u64     frame_65_127;   /*  Frames 65-127 bytes */
+	u64     frame_128_255;  /*  Frames 128-255 bytes        */
+	u64     frame_256_511;  /*  Frames 256-511 bytes        */
+	u64     frame_512_1023; /*  Frames 512-1023 bytes       */
+	u64     frame_1024_1518; /*  Frames 1024-1518 bytes     */
+	u64     frame_1519_1522; /*  Frames 1519-1522 bytes     */
+	u64     tx_bytes;       /*  Tx bytes                    */
+	u64     tx_packets;      /*  Tx packets         */
+	u64     tx_mcast_packets; /*  Tx multicast packets      */
+	u64     tx_bcast_packets; /*  Tx broadcast packets      */
+	u64     tx_control_frame; /*  Tx control frame          */
+	u64     tx_drop;        /*  Tx drops                    */
+	u64     tx_jabber;      /*  Tx jabber                   */
+	u64     tx_fcs_error;   /*  Tx FCS errors               */
+	u64     tx_fragments;   /*  Tx fragments                */
+	u64     rx_bytes;       /*  Rx bytes                    */
+	u64     rx_packets;     /*  Rx packets                  */
+	u64     rx_mcast_packets; /*  Rx multicast packets      */
+	u64     rx_bcast_packets; /*  Rx broadcast packets      */
+	u64     rx_control_frames; /*  Rx control frames        */
+	u64     rx_unknown_opcode; /*  Rx unknown opcode        */
+	u64     rx_drop;        /*  Rx drops                    */
+	u64     rx_jabber;      /*  Rx jabber                   */
+	u64     rx_fcs_error;   /*  Rx FCS errors               */
+	u64     rx_alignment_error; /*  Rx alignment errors     */
+	u64     rx_frame_length_error; /*  Rx frame len errors  */
+	u64     rx_code_error;  /*  Rx code errors              */
+	u64     rx_fragments;   /*  Rx fragments                */
+	u64     rx_pause;       /*  Rx pause                    */
+	u64     rx_zero_pause;  /*  Rx zero pause               */
+	u64     tx_pause;       /*  Tx pause                    */
+	u64     tx_zero_pause;  /*  Tx zero pause               */
+	u64     rx_fcoe_pause;  /*  Rx FCoE pause               */
+	u64     rx_fcoe_zero_pause; /*  Rx FCoE zero pause      */
+	u64     tx_fcoe_pause;  /*  Tx FCoE pause               */
+	u64     tx_fcoe_zero_pause; /*  Tx FCoE zero pause      */
+	u64     rx_iscsi_pause; /*  Rx iSCSI pause              */
+	u64     rx_iscsi_zero_pause; /*  Rx iSCSI zero pause    */
+	u64     tx_iscsi_pause; /*  Tx iSCSI pause              */
+	u64     tx_iscsi_zero_pause; /*  Tx iSCSI zero pause    */
+};
+
+/**
+ *              Port statistics.
+ */
+union bfa_port_stats_u {
+	struct bfa_port_fc_stats_s      fc;
+	struct bfa_port_eth_stats_s     eth;
+};
+
+#endif /* __BFA_DEFS_SVC_H__ */

+ 29 - 12
drivers/scsi/bfa/bfa_module.c → drivers/scsi/bfa/bfa_drv.c

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
  * All rights reserved
  * www.brocade.com
  *
@@ -14,10 +14,8 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  * General Public License for more details.
  */
-#include <bfa.h>
-#include <defs/bfa_defs_pci.h>
-#include <cs/bfa_debug.h>
-#include <bfa_iocfc.h>
+
+#include "bfa_modules.h"
 
 /**
  * BFA module list terminated by NULL
@@ -30,9 +28,6 @@ struct bfa_module_s *hal_mods[] = {
 	&hal_mod_uf,
 	&hal_mod_rport,
 	&hal_mod_fcpim,
-#ifdef BFA_CFG_PBIND
-	&hal_mod_pbind,
-#endif
 	NULL
 };
 
@@ -74,17 +69,39 @@ bfa_isr_func_t  bfa_isrs[BFI_MC_MAX] = {
 	bfa_isr_unhandled,	/* --------- */
 };
 
+
 /**
  * Message handlers for mailbox command classes
  */
 bfa_ioc_mbox_mcfunc_t  bfa_mbox_isrs[BFI_MC_MAX] = {
 	NULL,
-	NULL,			/* BFI_MC_IOC	*/
-	NULL,			/* BFI_MC_DIAG	*/
+	NULL,			/* BFI_MC_IOC   */
+	NULL,			/* BFI_MC_DIAG  */
 	NULL,		/* BFI_MC_FLASH */
-	NULL,			/* BFI_MC_CEE	*/
-	NULL,			/* BFI_MC_PORT	*/
+	NULL,			/* BFI_MC_CEE   */
+	NULL,			/* BFI_MC_PORT  */
 	bfa_iocfc_isr,		/* BFI_MC_IOCFC */
 	NULL,
 };
 
+
+
+void
+bfa_com_port_attach(struct bfa_s *bfa, struct bfa_meminfo_s *mi)
+{
+	struct bfa_port_s	*port = &bfa->modules.port;
+	u32		dm_len;
+	u8			*dm_kva;
+	u64		dm_pa;
+
+	dm_len = bfa_port_meminfo();
+	dm_kva = bfa_meminfo_dma_virt(mi);
+	dm_pa  = bfa_meminfo_dma_phys(mi);
+
+	memset(port, 0, sizeof(struct bfa_port_s));
+	bfa_port_attach(port, &bfa->ioc, bfa, bfa->trcmod);
+	bfa_port_mem_claim(port, dm_kva, dm_pa);
+
+	bfa_meminfo_dma_virt(mi) = dm_kva + dm_len;
+	bfa_meminfo_dma_phys(mi) = dm_pa + dm_len;
+}

+ 908 - 103
drivers/scsi/bfa/include/protocol/fc.h → drivers/scsi/bfa/bfa_fc.h

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
  * All rights reserved
  * www.brocade.com
  *
@@ -15,13 +15,50 @@
  * General Public License for more details.
  */
 
-#ifndef __FC_H__
-#define __FC_H__
+#ifndef __BFA_FC_H__
+#define __BFA_FC_H__
 
-#include <protocol/types.h>
+#include "bfa_os_inc.h"
+
+typedef u64 wwn_t;
+typedef u64 lun_t;
+
+#define WWN_NULL	(0)
+#define FC_SYMNAME_MAX	256	/*  max name server symbolic name size */
+#define FC_ALPA_MAX	128
 
 #pragma pack(1)
 
+#define MAC_ADDRLEN	(6)
+struct mac_s { u8 mac[MAC_ADDRLEN]; };
+#define mac_t struct mac_s
+
+/*
+ * generic SCSI cdb definition
+ */
+#define SCSI_MAX_CDBLEN     16
+struct scsi_cdb_s {
+	u8         scsi_cdb[SCSI_MAX_CDBLEN];
+};
+#define scsi_cdb_t struct scsi_cdb_s
+
+/* ------------------------------------------------------------
+ * SCSI status byte values
+ * ------------------------------------------------------------
+ */
+#define SCSI_STATUS_GOOD                   0x00
+#define SCSI_STATUS_CHECK_CONDITION        0x02
+#define SCSI_STATUS_CONDITION_MET          0x04
+#define SCSI_STATUS_BUSY                   0x08
+#define SCSI_STATUS_INTERMEDIATE           0x10
+#define SCSI_STATUS_ICM                    0x14 /* intermediate condition met */
+#define SCSI_STATUS_RESERVATION_CONFLICT   0x18
+#define SCSI_STATUS_COMMAND_TERMINATED     0x22
+#define SCSI_STATUS_QUEUE_FULL             0x28
+#define SCSI_STATUS_ACA_ACTIVE             0x30
+
+#define SCSI_MAX_ALLOC_LEN      0xFF    /* maximum allocation length */
+
 /*
 /*
  * Fibre Channel Header Structure (FCHS) definition
  * Fibre Channel Header Structure (FCHS) definition
  */
  */
@@ -51,9 +88,9 @@ struct fchs_s {
 	u32        ro;		/* relative offset */
 	u32        ro;		/* relative offset */
 };
 };
 
 
-#define FC_SOF_LEN      4
-#define FC_EOF_LEN      4
-#define FC_CRC_LEN      4
+#define FC_SOF_LEN		4
+#define FC_EOF_LEN		4
+#define FC_CRC_LEN		4
 
 
 /*
 /*
  * Fibre Channel BB_E Header Structure
  * Fibre Channel BB_E Header Structure
@@ -140,10 +177,12 @@ enum {
 	FC_TYPE_FC_FSS		= 0x22,	/* Fabric Switch Services */
 	FC_TYPE_FC_FSS		= 0x22,	/* Fabric Switch Services */
 	FC_TYPE_FC_AL		= 0x23,	/* FC-AL */
 	FC_TYPE_FC_AL		= 0x23,	/* FC-AL */
 	FC_TYPE_FC_SNMP		= 0x24,	/* FC-SNMP */
 	FC_TYPE_FC_SNMP		= 0x24,	/* FC-SNMP */
+	FC_TYPE_FC_SPINFAB	= 0xEE,	/* SPINFAB */
+	FC_TYPE_FC_DIAG		= 0xEF,	/* DIAG */
 	FC_TYPE_MAX		= 256,	/* 256 FC-4 types */
 	FC_TYPE_MAX		= 256,	/* 256 FC-4 types */
 };
 };
 
 
-struct fc_fc4types_s{
+struct fc_fc4types_s {
 	u8         bits[FC_TYPE_MAX / 8];
 	u8         bits[FC_TYPE_MAX / 8];
 };
 };
 
 
@@ -168,7 +207,7 @@ enum {
  */
  */
 enum {
 enum {
 	FC_MIN_WELL_KNOWN_ADDR		= 0xFFFFF0,
 	FC_MIN_WELL_KNOWN_ADDR		= 0xFFFFF0,
-	FC_DOMAIN_CONTROLLER_MASK 	= 0xFFFC00,
+	FC_DOMAIN_CONTROLLER_MASK	= 0xFFFC00,
 	FC_ALIAS_SERVER			= 0xFFFFF8,
 	FC_ALIAS_SERVER			= 0xFFFFF8,
 	FC_MGMT_SERVER			= 0xFFFFFA,
 	FC_MGMT_SERVER			= 0xFFFFFA,
 	FC_TIME_SERVER			= 0xFFFFFB,
 	FC_TIME_SERVER			= 0xFFFFFB,
@@ -201,7 +240,7 @@ enum {
 /*
 /*
  * generic ELS command
  * generic ELS command
  */
  */
-struct fc_els_cmd_s{
+struct fc_els_cmd_s {
 	u32        els_code:8;	/* ELS Command Code */
 	u32        els_code:8;	/* ELS Command Code */
 	u32        reserved:24;
 	u32        reserved:24;
 };
 };
@@ -233,6 +272,8 @@ enum {
 	FC_ELS_PDISC = 0x50,	/* Discover N_Port Parameters. */
 	FC_ELS_PDISC = 0x50,	/* Discover N_Port Parameters. */
 	FC_ELS_FDISC = 0x51,	/* Discover F_Port Parameters. */
 	FC_ELS_FDISC = 0x51,	/* Discover F_Port Parameters. */
 	FC_ELS_ADISC = 0x52,	/* Discover Address. */
 	FC_ELS_ADISC = 0x52,	/* Discover Address. */
+	FC_ELS_FARP_REQ = 0x54,	/* FARP Request. */
+	FC_ELS_FARP_REP = 0x55,	/* FARP Reply. */
 	FC_ELS_FAN = 0x60,	/* Fabric Address Notification */
 	FC_ELS_FAN = 0x60,	/* Fabric Address Notification */
 	FC_ELS_RSCN = 0x61,	/* Reg State Change Notification */
 	FC_ELS_RSCN = 0x61,	/* Reg State Change Notification */
 	FC_ELS_SCR = 0x62,	/* State Change Registration. */
 	FC_ELS_SCR = 0x62,	/* State Change Registration. */
@@ -272,7 +313,7 @@ enum {
  * N_Port PLOGI Common Service Parameters.
  * N_Port PLOGI Common Service Parameters.
  * FC-PH-x. Figure-76. pg. 308.
  * FC-PH-x. Figure-76. pg. 308.
  */
  */
-struct fc_plogi_csp_s{
+struct fc_plogi_csp_s {
 	u8         verhi;	/* FC-PH high version */
 	u8         verhi;	/* FC-PH high version */
 	u8         verlo;	/* FC-PH low version */
 	u8         verlo;	/* FC-PH low version */
 	u16        bbcred;	/* BB_Credit */
 	u16        bbcred;	/* BB_Credit */
@@ -326,7 +367,7 @@ struct fc_plogi_csp_s{
  * N_Port PLOGI Class Specific Parameters.
  * N_Port PLOGI Class Specific Parameters.
  * FC-PH-x. Figure 78. pg. 318.
  * FC-PH-x. Figure 78. pg. 318.
  */
  */
-struct fc_plogi_clp_s{
+struct fc_plogi_clp_s {
 #ifdef __BIGENDIAN
 #ifdef __BIGENDIAN
 	u32        class_valid:1;
 	u32        class_valid:1;
 	u32        intermix:1;	/* class intermix supported if set =1.
 	u32        intermix:1;	/* class intermix supported if set =1.
@@ -361,29 +402,29 @@ struct fc_plogi_clp_s{
 	u32        reserved8:16;
 	u32        reserved8:16;
 };
 };
 
 
-#define FLOGI_VVL_BRCD    0x42524344 /* ASCII value for each character in
-				      * string "BRCD" */
+/* ASCII value for each character in string "BRCD" */
+#define FLOGI_VVL_BRCD    0x42524344
 
 
 /*
 /*
  * PLOGI els command and reply payload
  * PLOGI els command and reply payload
  */
  */
-struct fc_logi_s{
+struct fc_logi_s {
 	struct fc_els_cmd_s els_cmd;	/* ELS command code */
 	struct fc_els_cmd_s els_cmd;	/* ELS command code */
-	struct fc_plogi_csp_s  csp;		/* common service params */
+	struct fc_plogi_csp_s csp;		/* common service params */
 	wwn_t           port_name;
 	wwn_t           port_name;
 	wwn_t           node_name;
 	wwn_t           node_name;
-	struct fc_plogi_clp_s  class1;		/* class 1 service parameters */
-	struct fc_plogi_clp_s  class2;		/* class 2 service parameters */
-	struct fc_plogi_clp_s  class3;		/* class 3 service parameters */
-	struct fc_plogi_clp_s  class4;		/* class 4 service parameters */
+	struct fc_plogi_clp_s class1;		/* class 1 service parameters */
+	struct fc_plogi_clp_s class2;		/* class 2 service parameters */
+	struct fc_plogi_clp_s class3;		/* class 3 service parameters */
+	struct fc_plogi_clp_s class4;		/* class 4 service parameters */
 	u8         vvl[16];	/* vendor version level */
 	u8         vvl[16];	/* vendor version level */
 };
 };
 
 
 /*
 /*
  * LOGO els command payload
  * LOGO els command payload
  */
  */
-struct fc_logo_s{
-	struct fc_els_cmd_s    els_cmd;	/* ELS command code */
+struct fc_logo_s {
+	struct fc_els_cmd_s els_cmd;	/* ELS command code */
 	u32        res1:8;
 	u32        res1:8;
 	u32        nport_id:24;	/* N_Port identifier of source */
 	u32        nport_id:24;	/* N_Port identifier of source */
 	wwn_t           orig_port_name;	/* Port name of the LOGO originator */
 	wwn_t           orig_port_name;	/* Port name of the LOGO originator */
@@ -393,7 +434,7 @@ struct fc_logo_s{
  * ADISC els command payload
  * ADISC els command payload
  */
  */
 struct fc_adisc_s {
 struct fc_adisc_s {
-	struct fc_els_cmd_s    els_cmd;	/* ELS command code */
+	struct fc_els_cmd_s els_cmd;	/* ELS command code */
 	u32        res1:8;
 	u32        res1:8;
 	u32        orig_HA:24;	/* originator hard address */
 	u32        orig_HA:24;	/* originator hard address */
 	wwn_t           orig_port_name;	/* originator port name */
 	wwn_t           orig_port_name;	/* originator port name */
@@ -405,7 +446,7 @@ struct fc_adisc_s {
 /*
 /*
  * Exchange status block
  * Exchange status block
  */
  */
-struct fc_exch_status_blk_s{
+struct fc_exch_status_blk_s {
 	u32        oxid:16;
 	u32        oxid:16;
 	u32        rxid:16;
 	u32        rxid:16;
 	u32        res1:8;
 	u32        res1:8;
@@ -423,7 +464,7 @@ struct fc_exch_status_blk_s{
  * RES els command payload
  * RES els command payload
  */
  */
 struct fc_res_s {
 struct fc_res_s {
-	struct fc_els_cmd_s    els_cmd;	/* ELS command code */
+	struct fc_els_cmd_s els_cmd;	/* ELS command code */
 	u32        res1:8;
 	u32        res1:8;
 	u32        nport_id:24;	/* N_Port identifier of source */
 	u32        nport_id:24;	/* N_Port identifier of source */
 	u32        oxid:16;
 	u32        oxid:16;
@@ -434,16 +475,16 @@ struct fc_res_s {
 /*
 /*
  * RES els accept payload
  * RES els accept payload
  */
  */
-struct fc_res_acc_s{
-	struct fc_els_cmd_s els_cmd;	/* ELS command code */
-	struct fc_exch_status_blk_s fc_exch_blk; /* Exchange status block */
+struct fc_res_acc_s {
+	struct fc_els_cmd_s		els_cmd;	/* ELS command code */
+	struct fc_exch_status_blk_s	fc_exch_blk; /* Exchange status block */
 };
 };
 
 
 /*
 /*
  * REC els command payload
  * REC els command payload
  */
  */
 struct fc_rec_s {
 struct fc_rec_s {
-	struct fc_els_cmd_s    els_cmd;	/* ELS command code */
+	struct fc_els_cmd_s els_cmd;	/* ELS command code */
 	u32        res1:8;
 	u32        res1:8;
 	u32        nport_id:24;	/* N_Port identifier of source */
 	u32        nport_id:24;	/* N_Port identifier of source */
 	u32        oxid:16;
 	u32        oxid:16;
@@ -451,9 +492,9 @@ struct fc_rec_s {
 };
 };
 
 
 #define FC_REC_ESB_OWN_RSP	0x80000000	/* responder owns */
 #define FC_REC_ESB_OWN_RSP	0x80000000	/* responder owns */
-#define FC_REC_ESB_SI		0x40000000	/* SI is owned 	*/
+#define FC_REC_ESB_SI		0x40000000	/* SI is owned	*/
 #define FC_REC_ESB_COMP		0x20000000	/* exchange is complete	*/
 #define FC_REC_ESB_COMP		0x20000000	/* exchange is complete	*/
-#define FC_REC_ESB_ENDCOND_ABN	0x10000000	/* abnormal ending 	*/
+#define FC_REC_ESB_ENDCOND_ABN	0x10000000	/* abnormal ending	*/
 #define FC_REC_ESB_RQACT	0x04000000	/* recovery qual active	*/
 #define FC_REC_ESB_RQACT	0x04000000	/* recovery qual active	*/
 #define FC_REC_ESB_ERRP_MSK	0x03000000
 #define FC_REC_ESB_ERRP_MSK	0x03000000
 #define FC_REC_ESB_OXID_INV	0x00800000	/* invalid OXID		*/
 #define FC_REC_ESB_OXID_INV	0x00800000	/* invalid OXID		*/
@@ -464,7 +505,7 @@ struct fc_rec_s {
  * REC els accept payload
  * REC els accept payload
  */
  */
 struct fc_rec_acc_s {
 struct fc_rec_acc_s {
-	struct fc_els_cmd_s    els_cmd;	/* ELS command code */
+	struct fc_els_cmd_s els_cmd;	/* ELS command code */
 	u32        oxid:16;
 	u32        oxid:16;
 	u32        rxid:16;
 	u32        rxid:16;
 	u32        res1:8;
 	u32        res1:8;
@@ -479,7 +520,7 @@ struct fc_rec_acc_s {
  * RSI els payload
  * RSI els payload
  */
  */
 struct fc_rsi_s {
 struct fc_rsi_s {
-	struct fc_els_cmd_s    els_cmd;
+	struct fc_els_cmd_s els_cmd;
 	u32        res1:8;
 	u32        res1:8;
 	u32        orig_sid:24;
 	u32        orig_sid:24;
 	u32        oxid:16;
 	u32        oxid:16;
@@ -490,7 +531,7 @@ struct fc_rsi_s {
  * structure for PRLI paramater pages, both request & response
  * structure for PRLI paramater pages, both request & response
  * see FC-PH-X table 113 & 115 for explanation also FCP table 8
  * see FC-PH-X table 113 & 115 for explanation also FCP table 8
  */
  */
-struct fc_prli_params_s{
+struct fc_prli_params_s {
 	u32        reserved:16;
 	u32        reserved:16;
 #ifdef __BIGENDIAN
 #ifdef __BIGENDIAN
 	u32        reserved1:5;
 	u32        reserved1:5;
@@ -531,7 +572,7 @@ enum {
 	FC_PRLI_ACC_PREDEF_IMG = 0x5,	/* predefined image - no prli needed */
 	FC_PRLI_ACC_PREDEF_IMG = 0x5,	/* predefined image - no prli needed */
 };
 };
 
 
-struct fc_prli_params_page_s{
+struct fc_prli_params_page_s {
 	u32        type:8;
 	u32        type:8;
 	u32        codext:8;
 	u32        codext:8;
 #ifdef __BIGENDIAN
 #ifdef __BIGENDIAN
@@ -551,13 +592,13 @@ struct fc_prli_params_page_s{
 
 
 	u32        origprocas;
 	u32        origprocas;
 	u32        rspprocas;
 	u32        rspprocas;
-	struct fc_prli_params_s  servparams;
+	struct fc_prli_params_s servparams;
 };
 };
 
 
 /*
 /*
  * PRLI request and accept payload, FC-PH-X tables 112 & 114
  * PRLI request and accept payload, FC-PH-X tables 112 & 114
  */
  */
-struct fc_prli_s{
+struct fc_prli_s {
 	u32        command:8;
 	u32        command:8;
 	u32        pglen:8;
 	u32        pglen:8;
 	u32        pagebytes:16;
 	u32        pagebytes:16;
@@ -567,7 +608,7 @@ struct fc_prli_s{
 /*
 /*
  * PRLO logout params page
  * PRLO logout params page
  */
  */
-struct fc_prlo_params_page_s{
+struct fc_prlo_params_page_s {
 	u32        type:8;
 	u32        type:8;
 	u32        type_ext:8;
 	u32        type_ext:8;
 #ifdef __BIGENDIAN
 #ifdef __BIGENDIAN
@@ -592,17 +633,17 @@ struct fc_prlo_params_page_s{
 /*
 /*
  * PRLO els command payload
  * PRLO els command payload
  */
  */
-struct fc_prlo_s{
-	u32        	command:8;
-	u32        	page_len:8;
-	u32        	payload_len:16;
-	struct fc_prlo_params_page_s 	prlo_params[1];
+struct fc_prlo_s {
+	u32	command:8;
+	u32	page_len:8;
+	u32	payload_len:16;
+	struct fc_prlo_params_page_s	prlo_params[1];
 };
 };
 
 
 /*
 /*
  * PRLO Logout response parameter page
  * PRLO Logout response parameter page
  */
  */
-struct fc_prlo_acc_params_page_s{
+struct fc_prlo_acc_params_page_s {
 	u32        type:8;
 	u32        type:8;
 	u32        type_ext:8;
 	u32        type_ext:8;
 
 
@@ -628,7 +669,7 @@ struct fc_prlo_acc_params_page_s{
 /*
 /*
  * PRLO els command ACC payload
  * PRLO els command ACC payload
  */
  */
-struct fc_prlo_acc_s{
+struct fc_prlo_acc_s {
 	u32        command:8;
 	u32        command:8;
 	u32        page_len:8;
 	u32        page_len:8;
 	u32        payload_len:16;
 	u32        payload_len:16;
@@ -650,7 +691,7 @@ enum {
 	FC_VU_SCR_REG_FUNC_FABRIC_NAME_CHANGE = 0x01
 	FC_VU_SCR_REG_FUNC_FABRIC_NAME_CHANGE = 0x01
 };
 };
 
 
-struct fc_scr_s{
+struct fc_scr_s {
 	u32 command:8;
 	u32 command:8;
 	u32 res:24;
 	u32 res:24;
 	u32 vu_reg_func:8; /* Vendor Unique Registrations */
 	u32 vu_reg_func:8; /* Vendor Unique Registrations */
@@ -674,7 +715,7 @@ enum {
  * LS_RJT els reply payload
  * LS_RJT els reply payload
  */
  */
 struct fc_ls_rjt_s {
 struct fc_ls_rjt_s {
-	struct fc_els_cmd_s    els_cmd;		/* ELS command code */
+	struct fc_els_cmd_s els_cmd;		/* ELS command code */
 	u32        res1:8;
 	u32        res1:8;
 	u32        reason_code:8;		/* Reason code for reject */
 	u32        reason_code:8;		/* Reason code for reject */
 	u32        reason_code_expl:8;	/* Reason code explanation */
 	u32        reason_code_expl:8;	/* Reason code explanation */
@@ -722,8 +763,8 @@ enum {
 /*
 /*
  * RRQ els command payload
  * RRQ els command payload
  */
  */
-struct fc_rrq_s{
-	struct fc_els_cmd_s    els_cmd;	/* ELS command code */
+struct fc_rrq_s {
+	struct fc_els_cmd_s els_cmd;	/* ELS command code */
 	u32        res1:8;
 	u32        res1:8;
 	u32        s_id:24;	/* exchange originator S_ID */
 	u32        s_id:24;	/* exchange originator S_ID */
 
 
@@ -736,7 +777,7 @@ struct fc_rrq_s{
 /*
 /*
  * ABTS BA_ACC reply payload
  * ABTS BA_ACC reply payload
  */
  */
-struct fc_ba_acc_s{
+struct fc_ba_acc_s {
 	u32        seq_id_valid:8;	/* set to 0x00 for Abort Exchange */
 	u32        seq_id_valid:8;	/* set to 0x00 for Abort Exchange */
 	u32        seq_id:8;	/* invalid for Abort Exchange */
 	u32        seq_id:8;	/* invalid for Abort Exchange */
 	u32        res2:16;
 	u32        res2:16;
@@ -749,7 +790,7 @@ struct fc_ba_acc_s{
 /*
 /*
  * ABTS BA_RJT reject payload
  * ABTS BA_RJT reject payload
  */
  */
-struct fc_ba_rjt_s{
+struct fc_ba_rjt_s {
 	u32        res1:8;		/* Reserved */
 	u32        res1:8;		/* Reserved */
 	u32        reason_code:8;	/* reason code for reject */
 	u32        reason_code:8;	/* reason code for reject */
 	u32        reason_expl:8;	/* reason code explanation */
 	u32        reason_expl:8;	/* reason code explanation */
@@ -759,9 +800,9 @@ struct fc_ba_rjt_s{
 /*
 /*
  * TPRLO logout parameter page
  * TPRLO logout parameter page
  */
  */
-struct fc_tprlo_params_page_s{
-	u32        type:8;
-	u32        type_ext:8;
+struct fc_tprlo_params_page_s {
+	u32        type:8;
+	u32        type_ext:8;
 
 
 #ifdef __BIGENDIAN
 #ifdef __BIGENDIAN
 	u32        opa_valid:1;
 	u32        opa_valid:1;
@@ -787,7 +828,7 @@ struct fc_tprlo_params_page_s{
 /*
 /*
  * TPRLO ELS command payload
  * TPRLO ELS command payload
  */
  */
-struct fc_tprlo_s{
+struct fc_tprlo_s {
 	u32        command:8;
 	u32        command:8;
 	u32        page_len:8;
 	u32        page_len:8;
 	u32        payload_len:16;
 	u32        payload_len:16;
@@ -795,7 +836,7 @@ struct fc_tprlo_s{
 	struct fc_tprlo_params_page_s tprlo_params[1];
 	struct fc_tprlo_params_page_s tprlo_params[1];
 };
 };
 
 
-enum fc_tprlo_type{
+enum fc_tprlo_type {
 	FC_GLOBAL_LOGO = 1,
 	FC_GLOBAL_LOGO = 1,
 	FC_TPR_LOGO
 	FC_TPR_LOGO
 };
 };
@@ -803,7 +844,7 @@ enum fc_tprlo_type{
 /*
 /*
  * TPRLO els command ACC payload
  * TPRLO els command ACC payload
  */
  */
-struct fc_tprlo_acc_s{
+struct fc_tprlo_acc_s {
 	u32	command:8;
 	u32	command:8;
 	u32	page_len:8;
 	u32	page_len:8;
 	u32	payload_len:16;
 	u32	payload_len:16;
@@ -815,21 +856,21 @@ struct fc_tprlo_acc_s{
  */
  */
 #define FC_RSCN_PGLEN	0x4
 #define FC_RSCN_PGLEN	0x4
 
 
-enum fc_rscn_format{
+enum fc_rscn_format {
 	FC_RSCN_FORMAT_PORTID	= 0x0,
 	FC_RSCN_FORMAT_PORTID	= 0x0,
 	FC_RSCN_FORMAT_AREA	= 0x1,
 	FC_RSCN_FORMAT_AREA	= 0x1,
 	FC_RSCN_FORMAT_DOMAIN	= 0x2,
 	FC_RSCN_FORMAT_DOMAIN	= 0x2,
 	FC_RSCN_FORMAT_FABRIC	= 0x3,
 	FC_RSCN_FORMAT_FABRIC	= 0x3,
 };
 };
 
 
-struct fc_rscn_event_s{
+struct fc_rscn_event_s {
 	u32        format:2;
 	u32        format:2;
 	u32        qualifier:4;
 	u32        qualifier:4;
 	u32        resvd:2;
 	u32        resvd:2;
 	u32        portid:24;
 	u32        portid:24;
 };
 };
 
 
-struct fc_rscn_pl_s{
+struct fc_rscn_pl_s {
 	u8         command;
 	u8         command;
 	u8         pagelen;
 	u8         pagelen;
 	u16        payldlen;
 	u16        payldlen;
@@ -840,18 +881,18 @@ struct fc_rscn_pl_s{
  * ECHO els command req payload
  * ECHO els command req payload
  */
  */
 struct fc_echo_s {
 struct fc_echo_s {
-	struct fc_els_cmd_s    els_cmd;
+	struct fc_els_cmd_s els_cmd;
 };
 };
 
 
 /*
 /*
  * RNID els command
  * RNID els command
  */
  */
 
 
-#define RNID_NODEID_DATA_FORMAT_COMMON    		 0x00
-#define RNID_NODEID_DATA_FORMAT_FCP3        		 0x08
-#define RNID_NODEID_DATA_FORMAT_DISCOVERY     		0xDF
+#define RNID_NODEID_DATA_FORMAT_COMMON			0x00
+#define RNID_NODEID_DATA_FORMAT_FCP3			0x08
+#define RNID_NODEID_DATA_FORMAT_DISCOVERY		0xDF
 
 
-#define RNID_ASSOCIATED_TYPE_UNKNOWN                    0x00000001
+#define RNID_ASSOCIATED_TYPE_UNKNOWN			0x00000001
 #define RNID_ASSOCIATED_TYPE_OTHER                      0x00000002
 #define RNID_ASSOCIATED_TYPE_OTHER                      0x00000002
 #define RNID_ASSOCIATED_TYPE_HUB                        0x00000003
 #define RNID_ASSOCIATED_TYPE_HUB                        0x00000003
 #define RNID_ASSOCIATED_TYPE_SWITCH                     0x00000004
 #define RNID_ASSOCIATED_TYPE_SWITCH                     0x00000004
@@ -868,8 +909,8 @@ struct fc_echo_s {
 /*
 /*
  * RNID els command payload
  * RNID els command payload
  */
  */
-struct fc_rnid_cmd_s{
-	struct fc_els_cmd_s    els_cmd;
+struct fc_rnid_cmd_s {
+	struct fc_els_cmd_s els_cmd;
 	u32        node_id_data_format:8;
 	u32        node_id_data_format:8;
 	u32        reserved:24;
 	u32        reserved:24;
 };
 };
@@ -878,12 +919,12 @@ struct fc_rnid_cmd_s{
  * RNID els response payload
  * RNID els response payload
  */
  */
 
 
-struct fc_rnid_common_id_data_s{
+struct fc_rnid_common_id_data_s {
 	wwn_t           port_name;
 	wwn_t           port_name;
 	wwn_t           node_name;
 	wwn_t           node_name;
 };
 };
 
 
-struct fc_rnid_general_topology_data_s{
+struct fc_rnid_general_topology_data_s {
 	u32        vendor_unique[4];
 	u32        vendor_unique[4];
 	u32        asso_type;
 	u32        asso_type;
 	u32        phy_port_num;
 	u32        phy_port_num;
@@ -896,8 +937,8 @@ struct fc_rnid_general_topology_data_s{
 	u32        vendor_specific:16;
 	u32        vendor_specific:16;
 };
 };
 
 
-struct fc_rnid_acc_s{
-	struct fc_els_cmd_s    els_cmd;
+struct fc_rnid_acc_s {
+	struct fc_els_cmd_s els_cmd;
 	u32        node_id_data_format:8;
 	u32        node_id_data_format:8;
 	u32        common_id_data_length:8;
 	u32        common_id_data_length:8;
 	u32        reserved:8;
 	u32        reserved:8;
@@ -920,7 +961,7 @@ struct fc_rnid_acc_s{
 #define RNID_ASSOCIATED_TYPE_VIRTUALIZATION_DEVICE      0x00000003
 #define RNID_ASSOCIATED_TYPE_VIRTUALIZATION_DEVICE      0x00000003
 #define RNID_ASSOCIATED_TYPE_MULTI_FUNCTION_DEVICE      0x000000FF
 #define RNID_ASSOCIATED_TYPE_MULTI_FUNCTION_DEVICE      0x000000FF
 
 
-enum fc_rpsc_speed_cap{
+enum fc_rpsc_speed_cap {
 	RPSC_SPEED_CAP_1G = 0x8000,
 	RPSC_SPEED_CAP_1G = 0x8000,
 	RPSC_SPEED_CAP_2G = 0x4000,
 	RPSC_SPEED_CAP_2G = 0x4000,
 	RPSC_SPEED_CAP_4G = 0x2000,
 	RPSC_SPEED_CAP_4G = 0x2000,
@@ -931,7 +972,7 @@ enum fc_rpsc_speed_cap{
 	RPSC_SPEED_CAP_UNKNOWN = 0x0001,
 	RPSC_SPEED_CAP_UNKNOWN = 0x0001,
 };
 };
 
 
-enum fc_rpsc_op_speed_s{
+enum fc_rpsc_op_speed {
 	RPSC_OP_SPEED_1G = 0x8000,
 	RPSC_OP_SPEED_1G = 0x8000,
 	RPSC_OP_SPEED_2G = 0x4000,
 	RPSC_OP_SPEED_2G = 0x4000,
 	RPSC_OP_SPEED_4G = 0x2000,
 	RPSC_OP_SPEED_4G = 0x2000,
@@ -942,24 +983,24 @@ enum fc_rpsc_op_speed_s{
 	RPSC_OP_SPEED_NOT_EST = 0x0001,	/*! speed not established */
 	RPSC_OP_SPEED_NOT_EST = 0x0001,	/*! speed not established */
 };
 };
 
 
-struct fc_rpsc_speed_info_s{
-	u16        port_speed_cap;	/*! see fc_rpsc_speed_cap_t */
-	u16        port_op_speed;	/*! see fc_rpsc_op_speed_t */
+struct fc_rpsc_speed_info_s {
+	u16        port_speed_cap;	/*! see enum fc_rpsc_speed_cap */
+	u16        port_op_speed;	/*! see enum fc_rpsc_op_speed */
 };
 };
 
 
-enum link_e2e_beacon_subcmd{
+enum link_e2e_beacon_subcmd {
 	LINK_E2E_BEACON_ON = 1,
 	LINK_E2E_BEACON_ON = 1,
 	LINK_E2E_BEACON_OFF = 2
 	LINK_E2E_BEACON_OFF = 2
 };
 };
 
 
-enum beacon_type{
+enum beacon_type {
 	BEACON_TYPE_NORMAL	= 1,	/*! Normal Beaconing. Green */
 	BEACON_TYPE_NORMAL	= 1,	/*! Normal Beaconing. Green */
 	BEACON_TYPE_WARN	= 2,	/*! Warning Beaconing. Yellow/Amber */
 	BEACON_TYPE_WARN	= 2,	/*! Warning Beaconing. Yellow/Amber */
 	BEACON_TYPE_CRITICAL	= 3	/*! Critical Beaconing. Red */
 	BEACON_TYPE_CRITICAL	= 3	/*! Critical Beaconing. Red */
 };
 };
 
 
 struct link_e2e_beacon_param_s {
 struct link_e2e_beacon_param_s {
-	u8         beacon_type;	/* Beacon Type. See beacon_type_t */
+	u8         beacon_type;	/* Beacon Type. See enum beacon_type */
 	u8         beacon_frequency;
 	u8         beacon_frequency;
 					/* Beacon frequency. Number of blinks
 					/* Beacon frequency. Number of blinks
 					 * per 10 seconds
 					 * per 10 seconds
@@ -978,12 +1019,13 @@ struct link_e2e_beacon_param_s {
 };
 };
 
 
 /*
 /*
- * Link E2E beacon request/good response format. For LS_RJTs use fc_ls_rjt_t
+ * Link E2E beacon request/good response format.
+ * For LS_RJTs use struct fc_ls_rjt_s
  */
  */
-struct link_e2e_beacon_req_s{
+struct link_e2e_beacon_req_s {
 	u32        ls_code;	/*! FC_ELS_E2E_LBEACON in requests *
 	u32        ls_code;	/*! FC_ELS_E2E_LBEACON in requests *
 					 *or FC_ELS_ACC in good replies */
 					 *or FC_ELS_ACC in good replies */
-	u32        ls_sub_cmd;	/*! See link_e2e_beacon_subcmd_t */
+	u32        ls_sub_cmd;	/*! See enum link_e2e_beacon_subcmd */
 	struct link_e2e_beacon_param_s beacon_parm;
 	struct link_e2e_beacon_param_s beacon_parm;
 };
 };
 
 
@@ -992,14 +1034,14 @@ struct link_e2e_beacon_req_s{
  * all the ports within that domain (TODO - I don't think FOS implements
  * all the ports within that domain (TODO - I don't think FOS implements
  * this...).
  * this...).
  */
  */
-struct fc_rpsc_cmd_s{
-	struct fc_els_cmd_s    els_cmd;
+struct fc_rpsc_cmd_s {
+	struct fc_els_cmd_s els_cmd;
 };
 };
 
 
 /*
 /*
  * RPSC Acc
  * RPSC Acc
  */
  */
-struct fc_rpsc_acc_s{
+struct fc_rpsc_acc_s {
 	u32        command:8;
 	u32        command:8;
 	u32        rsvd:8;
 	u32        rsvd:8;
 	u32        num_entries:16;
 	u32        num_entries:16;
@@ -1012,51 +1054,50 @@ struct fc_rpsc_acc_s{
  */
  */
 #define FC_BRCD_TOKEN    0x42524344
 #define FC_BRCD_TOKEN    0x42524344
 
 
-struct fc_rpsc2_cmd_s{
-	struct fc_els_cmd_s    els_cmd;
-	u32       	token;
-	u16     	resvd;
-	u16     	num_pids;       /* Number of pids in the request */
+struct fc_rpsc2_cmd_s {
+	struct fc_els_cmd_s els_cmd;
+	u32	token;
+	u16	resvd;
+	u16	num_pids;	/* Number of pids in the request */
 	struct  {
 	struct  {
 		u32	rsvd1:8;
 		u32	rsvd1:8;
-		u32	pid:24;	/* port identifier */
+		u32	pid:24;		/* port identifier */
 	} pid_list[1];
 	} pid_list[1];
 };
 };
 
 
-enum fc_rpsc2_port_type{
+enum fc_rpsc2_port_type {
 	RPSC2_PORT_TYPE_UNKNOWN = 0,
 	RPSC2_PORT_TYPE_UNKNOWN = 0,
 	RPSC2_PORT_TYPE_NPORT   = 1,
 	RPSC2_PORT_TYPE_NPORT   = 1,
 	RPSC2_PORT_TYPE_NLPORT  = 2,
 	RPSC2_PORT_TYPE_NLPORT  = 2,
 	RPSC2_PORT_TYPE_NPIV_PORT  = 0x5f,
 	RPSC2_PORT_TYPE_NPIV_PORT  = 0x5f,
 	RPSC2_PORT_TYPE_NPORT_TRUNK  = 0x6f,
 	RPSC2_PORT_TYPE_NPORT_TRUNK  = 0x6f,
 };
 };
-
 /*
 /*
  * RPSC2 portInfo entry structure
  * RPSC2 portInfo entry structure
  */
  */
-struct fc_rpsc2_port_info_s{
+struct fc_rpsc2_port_info_s {
     u32    pid;        /* PID */
     u32    pid;        /* PID */
     u16    resvd1;
     u16    resvd1;
     u16    index;      /* port number / index */
     u16    index;      /* port number / index */
     u8     resvd2;
     u8     resvd2;
-    u8    	type;        /* port type N/NL/... */
+    u8	   type;	/* port type N/NL/... */
     u16    speed;      /* port Operating Speed */
     u16    speed;      /* port Operating Speed */
 };
 };
 
 
 /*
 /*
  * RPSC2 Accept payload
  * RPSC2 Accept payload
  */
  */
-struct fc_rpsc2_acc_s{
+struct fc_rpsc2_acc_s {
 	u8        els_cmd;
 	u8        els_cmd;
 	u8        resvd;
 	u8        resvd;
-	u16       num_pids;  /* Number of pids in the request */
-	struct fc_rpsc2_port_info_s  port_info[1];    /* port information */
+    u16       num_pids;  /* Number of pids in the request */
+    struct fc_rpsc2_port_info_s port_info[1];    /* port information */
 };
 };
 
 
 /**
 /**
  * bit fields so that multiple classes can be specified
  * bit fields so that multiple classes can be specified
  */
  */
-enum fc_cos{
+enum fc_cos {
 	FC_CLASS_2	= 0x04,
 	FC_CLASS_2	= 0x04,
 	FC_CLASS_3	= 0x08,
 	FC_CLASS_3	= 0x08,
 	FC_CLASS_2_3	= 0x0C,
 	FC_CLASS_2_3	= 0x0C,
@@ -1065,11 +1106,11 @@ enum fc_cos{
 /*
 /*
  * symbolic name
  * symbolic name
  */
  */
-struct fc_symname_s{
+struct fc_symname_s {
 	u8         symname[FC_SYMNAME_MAX];
 	u8         symname[FC_SYMNAME_MAX];
 };
 };
 
 
-struct fc_alpabm_s{
+struct fc_alpabm_s {
 	u8         alpa_bm[FC_ALPA_MAX / 8];
 	u8         alpa_bm[FC_ALPA_MAX / 8];
 };
 };
 
 
@@ -1094,7 +1135,7 @@ struct fc_alpabm_s{
  * Virtual Fabric Tagging header format
  * Virtual Fabric Tagging header format
  * @caution This is defined only in BIG ENDIAN format.
  * @caution This is defined only in BIG ENDIAN format.
  */
  */
-struct fc_vft_s{
+struct fc_vft_s {
 	u32        r_ctl:8;
 	u32        r_ctl:8;
 	u32        ver:2;
 	u32        ver:2;
 	u32        type:4;
 	u32        type:4;
@@ -1106,6 +1147,770 @@ struct fc_vft_s{
 	u32        res_c:24;
 };
 
-#pragma pack()
+/*
+ * FCP
+ */
+enum {
+	FCP_RJT		= 0x01000000,	/* SRR reject */
+	FCP_SRR_ACCEPT	= 0x02000000,	/* SRR accept */
+	FCP_SRR		= 0x14000000,	/* Sequence Retransmission Request */
+};
+
+/*
+ * SRR FC-4 LS payload
+ */
+struct fc_srr_s {
+	u32	ls_cmd;
+	u32        ox_id:16;	/* ox-id */
+	u32        rx_id:16;	/* rx-id */
+	u32        ro;		/* relative offset */
+	u32        r_ctl:8;		/* R_CTL for I.U. */
+	u32        res:24;
+};
+
+
+/*
+ * FCP_CMND definitions
+ */
+#define FCP_CMND_CDB_LEN    16
+#define FCP_CMND_LUN_LEN    8
+
+struct fcp_cmnd_s {
+	lun_t           lun;		/* 64-bit LU number */
+	u8         crn;		/* command reference number */
+#ifdef __BIGENDIAN
+	u8         resvd:1,
+			priority:4,	/* FCP-3: SAM-3 priority */
+			taskattr:3;	/* scsi task attribute */
+#else
+	u8         taskattr:3,	/* scsi task attribute */
+			priority:4,	/* FCP-3: SAM-3 priority */
+			resvd:1;
+#endif
+	u8         tm_flags;	/* task management flags */
+#ifdef __BIGENDIAN
+	u8         addl_cdb_len:6,	/* additional CDB length words */
+			iodir:2;	/* read/write FCP_DATA IUs */
+#else
+	u8         iodir:2,	/* read/write FCP_DATA IUs */
+			addl_cdb_len:6;	/* additional CDB length */
+#endif
+	scsi_cdb_t      cdb;
+
+	/*
+	 * !!! additional cdb bytes follows here!!!
+	 */
+	u32        fcp_dl;	/* bytes to be transferred */
+};
+
+#define fcp_cmnd_cdb_len(_cmnd) ((_cmnd)->addl_cdb_len * 4 + FCP_CMND_CDB_LEN)
+#define fcp_cmnd_fcpdl(_cmnd)	((&(_cmnd)->fcp_dl)[(_cmnd)->addl_cdb_len])
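
Because addl_cdb_len extra CDB words may sit between cdb and fcp_dl, fcp_dl cannot be read at its declared offset; fcp_cmnd_fcpdl() indexes past the additional words. A short sketch using both accessors (fields are assumed already converted from wire big-endian to CPU order):

/* Sketch: effective CDB length and requested data length of an FCP_CMND IU. */
static void fcp_cmnd_sizes(const struct fcp_cmnd_s *cmnd,
			   u32 *cdb_len, u32 *data_len)
{
	*cdb_len  = fcp_cmnd_cdb_len(cmnd);	/* 16 + additional CDB bytes */
	*data_len = fcp_cmnd_fcpdl(cmnd);	/* fcp_dl, shifted past the CDB */
}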
 
+/*
+ * struct fcp_cmnd_s .iodir field values
+ */
+enum fcp_iodir {
+	FCP_IODIR_NONE	= 0,
+	FCP_IODIR_WRITE = 1,
+	FCP_IODIR_READ	= 2,
+	FCP_IODIR_RW	= 3,
+};
+
+/*
+ * Task attribute field
+ */
+enum {
+	FCP_TASK_ATTR_SIMPLE	= 0,
+	FCP_TASK_ATTR_HOQ	= 1,
+	FCP_TASK_ATTR_ORDERED	= 2,
+	FCP_TASK_ATTR_ACA	= 4,
+	FCP_TASK_ATTR_UNTAGGED	= 5,	/* obsolete in FCP-3 */
+};
+
+/*
+ * Task management flags field - only one bit shall be set
+ */
+enum fcp_tm_cmnd {
+	FCP_TM_ABORT_TASK_SET	= BIT(1),
+	FCP_TM_CLEAR_TASK_SET	= BIT(2),
+	FCP_TM_LUN_RESET	= BIT(4),
+	FCP_TM_TARGET_RESET	= BIT(5),	/* obsolete in FCP-3 */
+	FCP_TM_CLEAR_ACA	= BIT(6),
+};
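
Exactly one task-management bit may be set per FCP_CMND, and a TM request carries no CDB and no data phase. A minimal sketch for a LUN reset (the SAM-encoded 64-bit LUN is assumed to be prepared by the caller):

/* Sketch: initialize an FCP_CMND IU as a LUN reset task-management request. */
static void fcp_build_lun_reset(struct fcp_cmnd_s *cmnd, lun_t lun)
{
	memset(cmnd, 0, sizeof(*cmnd));
	cmnd->lun      = lun;			/* caller supplies SAM LUN encoding */
	cmnd->taskattr = FCP_TASK_ATTR_SIMPLE;
	cmnd->tm_flags = FCP_TM_LUN_RESET;	/* exactly one TM bit set */
	cmnd->iodir    = FCP_IODIR_NONE;	/* no FCP_DATA IUs expected */
}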
+
+/*
+ * FCP_XFER_RDY IU defines
+ */
+struct fcp_xfer_rdy_s {
+	u32        data_ro;
+	u32        burst_len;
+	u32        reserved;
+};
+
+/*
+ * FCP_RSP residue flags
+ */
+enum fcp_residue {
+	FCP_NO_RESIDUE = 0,	/* no residue */
+	FCP_RESID_OVER = 1,	/* more data left that was not sent */
+	FCP_RESID_UNDER = 2,	/* less data than requested */
+};
+
+enum {
+	FCP_RSPINFO_GOOD = 0,
+	FCP_RSPINFO_DATALEN_MISMATCH = 1,
+	FCP_RSPINFO_CMND_INVALID = 2,
+	FCP_RSPINFO_ROLEN_MISMATCH = 3,
+	FCP_RSPINFO_TM_NOT_SUPP = 4,
+	FCP_RSPINFO_TM_FAILED = 5,
+};
+
+struct fcp_rspinfo_s {
+	u32        res0:24;
+	u32        rsp_code:8;	/* response code (as above) */
+	u32        res1;
+};
+
+struct fcp_resp_s {
+	u32        reserved[2];	/* 2 words reserved */
+	u16        reserved2;
+#ifdef __BIGENDIAN
+	u8         reserved3:3;
+	u8         fcp_conf_req:1;	/* FCP_CONF is requested */
+	u8         resid_flags:2;	/* underflow/overflow */
+	u8         sns_len_valid:1;/* sense len is valid */
+	u8         rsp_len_valid:1;/* response len is valid */
+#else
+	u8         rsp_len_valid:1;/* response len is valid */
+	u8         sns_len_valid:1;/* sense len is valid */
+	u8         resid_flags:2;	/* underflow/overflow */
+	u8         fcp_conf_req:1;	/* FCP_CONF is requested */
+	u8         reserved3:3;
 #endif
+	u8         scsi_status;	/* one byte SCSI status */
+	u32        residue;	/* residual data bytes */
+	u32        sns_len;	/* length of sense info */
+	u32        rsp_len;	/* length of response info */
+};
+
+#define fcp_snslen(__fcprsp)	((__fcprsp)->sns_len_valid ?		\
+					(__fcprsp)->sns_len : 0)
+#define fcp_rsplen(__fcprsp)	((__fcprsp)->rsp_len_valid ?		\
+					(__fcprsp)->rsp_len : 0)
+#define fcp_rspinfo(__fcprsp)	((struct fcp_rspinfo_s *)((__fcprsp) + 1))
+#define fcp_snsinfo(__fcprsp)	(((u8 *)fcp_rspinfo(__fcprsp)) +	\
+						fcp_rsplen(__fcprsp))
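
Response info and sense data are laid out back to back after the fixed fcp_resp_s header, which is exactly what these four accessors encode. A sketch that extracts the SCSI status and sense buffer (length fields assumed already in CPU order):

/* Sketch: pull SCSI status and sense data out of a received FCP_RSP. */
static int fcp_rsp_get_sense(const struct fcp_resp_s *rsp, u8 *scsi_status,
			     const u8 **sense, u32 *sense_len)
{
	if (rsp->rsp_len_valid &&
	    fcp_rspinfo(rsp)->rsp_code != FCP_RSPINFO_GOOD)
		return -1;			/* transport-level failure */

	*scsi_status = rsp->scsi_status;
	*sense       = fcp_snsinfo(rsp);	/* follows the response info */
	*sense_len   = fcp_snslen(rsp);		/* 0 when sns_len_valid is clear */
	return 0;
}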
+
+struct fcp_cmnd_fr_s {
+	struct fchs_s fchs;
+	struct fcp_cmnd_s fcp;
+};
+
+/*
+ * CT
+ */
+struct ct_hdr_s {
+	u32	rev_id:8;	/* Revision of the CT */
+	u32	in_id:24;	/* Initiator Id */
+	u32	gs_type:8;	/* Generic service Type */
+	u32	gs_sub_type:8;	/* Generic service sub type */
+	u32	options:8;	/* options */
+	u32	rsvrd:8;	/* reserved */
+	u32	cmd_rsp_code:16;/* ct command/response code */
+	u32	max_res_size:16;/* maximum/residual size */
+	u32	frag_id:8;	/* fragment ID */
+	u32	reason_code:8;	/* reason code */
+	u32	exp_code:8;	/* explanation code */
+	u32	vendor_unq:8;	/* vendor unique */
+};
+
+/*
+ * defines for the Revision
+ */
+enum {
+	CT_GS3_REVISION = 0x01,
+};
+
+/*
+ * defines for gs_type
+ */
+enum {
+	CT_GSTYPE_KEYSERVICE	= 0xF7,
+	CT_GSTYPE_ALIASSERVICE	= 0xF8,
+	CT_GSTYPE_MGMTSERVICE	= 0xFA,
+	CT_GSTYPE_TIMESERVICE	= 0xFB,
+	CT_GSTYPE_DIRSERVICE	= 0xFC,
+};
+
+/*
+ * defines for gs_sub_type for gs type directory service
+ */
+enum {
+	CT_GSSUBTYPE_NAMESERVER = 0x02,
+};
+
+/*
+ * defines for gs_sub_type for gs type management service
+ */
+enum {
+	CT_GSSUBTYPE_CFGSERVER	= 0x01,
+	CT_GSSUBTYPE_UNZONED_NS = 0x02,
+	CT_GSSUBTYPE_ZONESERVER = 0x03,
+	CT_GSSUBTYPE_LOCKSERVER = 0x04,
+	CT_GSSUBTYPE_HBA_MGMTSERVER = 0x10,	/* for FDMI */
+};
+
+/*
+ * defines for CT response code field
+ */
+enum {
+	CT_RSP_REJECT = 0x8001,
+	CT_RSP_ACCEPT = 0x8002,
+};
+
+/*
+ * definitions for CT reason code
+ */
+enum {
+	CT_RSN_INV_CMD		= 0x01,
+	CT_RSN_INV_VER		= 0x02,
+	CT_RSN_LOGIC_ERR	= 0x03,
+	CT_RSN_INV_SIZE		= 0x04,
+	CT_RSN_LOGICAL_BUSY	= 0x05,
+	CT_RSN_PROTO_ERR	= 0x07,
+	CT_RSN_UNABLE_TO_PERF	= 0x09,
+	CT_RSN_NOT_SUPP			= 0x0B,
+	CT_RSN_SERVER_NOT_AVBL  = 0x0D,
+	CT_RSN_SESSION_COULD_NOT_BE_ESTBD = 0x0E,
+	CT_RSN_VENDOR_SPECIFIC  = 0xFF,
+};
+
+/*
+ * definitions for explanations code for Name server
+ */
+enum {
+	CT_NS_EXP_NOADDITIONAL	= 0x00,
+	CT_NS_EXP_ID_NOT_REG	= 0x01,
+	CT_NS_EXP_PN_NOT_REG	= 0x02,
+	CT_NS_EXP_NN_NOT_REG	= 0x03,
+	CT_NS_EXP_CS_NOT_REG	= 0x04,
+	CT_NS_EXP_IPN_NOT_REG	= 0x05,
+	CT_NS_EXP_IPA_NOT_REG	= 0x06,
+	CT_NS_EXP_FT_NOT_REG	= 0x07,
+	CT_NS_EXP_SPN_NOT_REG	= 0x08,
+	CT_NS_EXP_SNN_NOT_REG	= 0x09,
+	CT_NS_EXP_PT_NOT_REG	= 0x0A,
+	CT_NS_EXP_IPP_NOT_REG	= 0x0B,
+	CT_NS_EXP_FPN_NOT_REG	= 0x0C,
+	CT_NS_EXP_HA_NOT_REG	= 0x0D,
+	CT_NS_EXP_FD_NOT_REG	= 0x0E,
+	CT_NS_EXP_FF_NOT_REG	= 0x0F,
+	CT_NS_EXP_ACCESSDENIED	= 0x10,
+	CT_NS_EXP_UNACCEPTABLE_ID = 0x11,
+	CT_NS_EXP_DATABASEEMPTY		= 0x12,
+	CT_NS_EXP_NOT_REG_IN_SCOPE	= 0x13,
+	CT_NS_EXP_DOM_ID_NOT_PRESENT	= 0x14,
+	CT_NS_EXP_PORT_NUM_NOT_PRESENT	= 0x15,
+	CT_NS_EXP_NO_DEVICE_ATTACHED	= 0x16
+};
+
+/*
+ * definitions for the explanation code for all servers
+ */
+enum {
+	CT_EXP_AUTH_EXCEPTION			= 0xF1,
+	CT_EXP_DB_FULL					= 0xF2,
+	CT_EXP_DB_EMPTY					= 0xF3,
+	CT_EXP_PROCESSING_REQ			= 0xF4,
+	CT_EXP_UNABLE_TO_VERIFY_CONN	= 0xF5,
+	CT_EXP_DEVICES_NOT_IN_CMN_ZONE  = 0xF6
+};
+
+/*
+ * Command codes for Name server
+ */
+enum {
+	GS_GID_PN	= 0x0121,	/* Get Id on port name */
+	GS_GPN_ID	= 0x0112,	/* Get port name on ID */
+	GS_GNN_ID	= 0x0113,	/* Get node name on ID */
+	GS_GID_FT	= 0x0171,	/* Get Id on FC4 type */
+	GS_GSPN_ID	= 0x0118,	/* Get symbolic PN on ID */
+	GS_RFT_ID	= 0x0217,	/* Register fc4type on ID */
+	GS_RSPN_ID	= 0x0218,	/* Register symbolic PN on ID */
+	GS_RPN_ID	= 0x0212,	/* Register port name */
+	GS_RNN_ID	= 0x0213,	/* Register node name */
+	GS_RCS_ID	= 0x0214,	/* Register class of service */
+	GS_RPT_ID	= 0x021A,	/* Register port type */
+	GS_GA_NXT	= 0x0100,	/* Get all next */
+	GS_RFF_ID	= 0x021F,	/* Register FC4 Feature		*/
+};
+
+struct fcgs_id_req_s {
+	u32 rsvd:8;
+	u32 dap:24; /* port identifier */
+};
+#define fcgs_gpnid_req_t struct fcgs_id_req_s
+#define fcgs_gnnid_req_t struct fcgs_id_req_s
+#define fcgs_gspnid_req_t struct fcgs_id_req_s
+
+struct fcgs_gidpn_req_s {
+	wwn_t	port_name;	/* port wwn */
+};
+
+struct fcgs_gidpn_resp_s {
+	u32	rsvd:8;
+	u32	dap:24;	/* port identifier */
+};
+
+/**
+ * RFT_ID
+ */
+struct fcgs_rftid_req_s {
+	u32	rsvd:8;
+	u32	dap:24;		/* port identifier */
+	u32	fc4_type[8];	/* fc4 types */
+};
+
+/**
+ * RFF_ID : Register FC4 features.
+ */
+
+#define FC_GS_FCP_FC4_FEATURE_INITIATOR  0x02
+#define FC_GS_FCP_FC4_FEATURE_TARGET	 0x01
+
+struct fcgs_rffid_req_s {
+    u32    rsvd:8;
+    u32    dap:24;		/* port identifier	*/
+    u32    rsvd1:16;
+    u32    fc4ftr_bits:8;		/* fc4 feature bits	*/
+    u32    fc4_type:8;		/* corresponding FC4 Type */
+};
+
+/**
+ * GID_FT Request
+ */
+struct fcgs_gidft_req_s {
+	u8	reserved;
+	u8	domain_id;	/* domain, 0 - all fabric */
+	u8	area_id;	/* area, 0 - whole domain */
+	u8	fc4_type;	/* FC_TYPE_FCP for SCSI devices */
+};		/* GID_FT Request */
+
+/**
+ * GID_FT Response
+ */
+struct fcgs_gidft_resp_s {
+	u8		last:1;	/* last port identifier flag */
+	u8		reserved:7;
+	u32	pid:24;	/* port identifier */
+};		/* GID_FT Response */
+
+/**
+ * RSPN_ID
+ */
+struct fcgs_rspnid_req_s {
+	u32	rsvd:8;
+	u32	dap:24;		/* port identifier */
+	u8		spn_len;	/* symbolic port name length */
+	u8		spn[256];	/* symbolic port name */
+};
+
+/**
+ * RPN_ID
+ */
+struct fcgs_rpnid_req_s {
+	u32	rsvd:8;
+	u32	port_id:24;
+	wwn_t		port_name;
+};
+
+/**
+ * RNN_ID
+ */
+struct fcgs_rnnid_req_s {
+	u32	rsvd:8;
+	u32	port_id:24;
+	wwn_t		node_name;
+};
+
+/**
+ * RCS_ID
+ */
+struct fcgs_rcsid_req_s {
+	u32	rsvd:8;
+	u32	port_id:24;
+	u32	cos;
+};
+
+/**
+ * RPT_ID
+ */
+struct fcgs_rptid_req_s {
+	u32	rsvd:8;
+	u32	port_id:24;
+	u32	port_type:8;
+	u32	rsvd1:24;
+};
+
+/**
+ * GA_NXT Request
+ */
+struct fcgs_ganxt_req_s {
+	u32	rsvd:8;
+	u32	port_id:24;
+};
+
+/**
+ * GA_NXT Response
+ */
+struct fcgs_ganxt_rsp_s {
+	u32	port_type:8;	/* Port Type */
+	u32	port_id:24;	/* Port Identifier */
+	wwn_t		port_name;	/* Port Name */
+	u8		spn_len;	/* Length of Symbolic Port Name */
+	char		spn[255];	/* Symbolic Port Name */
+	wwn_t		node_name;	/* Node Name */
+	u8		snn_len;	/* Length of Symbolic Node Name */
+	char		snn[255];	/* Symbolic Node Name */
+	u8		ipa[8];		/* Initial Process Associator */
+	u8		ip[16];		/* IP Address */
+	u32	cos;		/* Class of Service */
+	u32	fc4types[8];	/* FC-4 TYPEs */
+	wwn_t		fabric_port_name;
+					/* Fabric Port Name */
+	u32	rsvd:8;		/* Reserved */
+	u32	hard_addr:24;	/* Hard Address */
+};
+
+/*
+ * Fabric Config Server
+ */
+
+/*
+ * Command codes for Fabric Configuration Server
+ */
+enum {
+	GS_FC_GFN_CMD	= 0x0114,	/* GS FC Get Fabric Name  */
+	GS_FC_GMAL_CMD	= 0x0116,	/* GS FC GMAL  */
+	GS_FC_TRACE_CMD	= 0x0400,	/* GS FC Trace Route */
+	GS_FC_PING_CMD	= 0x0401,	/* GS FC Ping */
+};
+
+/*
+ * Source or Destination Port Tags.
+ */
+enum {
+	GS_FTRACE_TAG_NPORT_ID		= 1,
+	GS_FTRACE_TAG_NPORT_NAME	= 2,
+};
+
+/*
+ * Port Value : Could be a Port ID or WWN
+ */
+union fcgs_port_val_u {
+	u32	nport_id;
+	wwn_t		nport_wwn;
+};
+
+#define GS_FTRACE_MAX_HOP_COUNT	20
+#define GS_FTRACE_REVISION	1
+
+/*
+ * Ftrace Related Structures.
+ */
+
+/*
+ * STR (Switch Trace) Reject Reason Codes. From FC-SW.
+ */
+enum {
+	GS_FTRACE_STR_CMD_COMPLETED_SUCC	= 0,
+	GS_FTRACE_STR_CMD_NOT_SUPP_IN_NEXT_SWITCH,
+	GS_FTRACE_STR_NO_RESP_FROM_NEXT_SWITCH,
+	GS_FTRACE_STR_MAX_HOP_CNT_REACHED,
+	GS_FTRACE_STR_SRC_PORT_NOT_FOUND,
+	GS_FTRACE_STR_DST_PORT_NOT_FOUND,
+	GS_FTRACE_STR_DEVICES_NOT_IN_COMMON_ZONE,
+	GS_FTRACE_STR_NO_ROUTE_BW_PORTS,
+	GS_FTRACE_STR_NO_ADDL_EXPLN,
+	GS_FTRACE_STR_FABRIC_BUSY,
+	GS_FTRACE_STR_FABRIC_BUILD_IN_PROGRESS,
+	GS_FTRACE_STR_VENDOR_SPECIFIC_ERR_START = 0xf0,
+	GS_FTRACE_STR_VENDOR_SPECIFIC_ERR_END = 0xff,
+};
+
+/*
+ * Ftrace Request
+ */
+struct fcgs_ftrace_req_s {
+	u32	revision;
+	u16	src_port_tag;	/* Source Port tag */
+	u16	src_port_len;	/* Source Port len */
+	union fcgs_port_val_u src_port_val;	/* Source Port value */
+	u16	dst_port_tag;	/* Destination Port tag */
+	u16	dst_port_len;	/* Destination Port len */
+	union fcgs_port_val_u dst_port_val;	/* Destination Port value */
+	u32	token;
+	u8		vendor_id[8];	/* T10 Vendor Identifier */
+	u8		vendor_info[8];	/* Vendor specific Info */
+	u32	max_hop_cnt;	/* Max Hop Count */
+};
+
+/*
+ * Path info structure
+ */
+struct fcgs_ftrace_path_info_s {
+	wwn_t		switch_name;		/* Switch WWN */
+	u32	domain_id;
+	wwn_t		ingress_port_name;	/* Ingress ports wwn */
+	u32	ingress_phys_port_num;	/* Ingress ports physical port
+						 * number
+						 */
+	wwn_t		egress_port_name;	/* Egress ports wwn */
+	u32	egress_phys_port_num;	/* Egress ports physical port
+						 * number
+						 */
+};
+
+/*
+ * Ftrace Acc Response
+ */
+struct fcgs_ftrace_resp_s {
+	u32	revision;
+	u32	token;
+	u8		vendor_id[8];		/* T10 Vendor Identifier */
+	u8		vendor_info[8];		/* Vendor specific Info */
+	u32	str_rej_reason_code;	/* STR Reject Reason Code */
+	u32	num_path_info_entries;	/* No. of path info entries */
+	/*
+	 * path info entry/entries.
+	 */
+	struct fcgs_ftrace_path_info_s path_info[1];
+
+};
+
+/*
+ * Fabric Config Server : FCPing
+ */
+
+/*
+ * FC Ping Request
+ */
+struct fcgs_fcping_req_s {
+	u32	revision;
+	u16	port_tag;
+	u16	port_len;	/* Port len */
+	union fcgs_port_val_u port_val;	/* Port value */
+	u32	token;
+};
+
+/*
+ * FC Ping Response
+ */
+struct fcgs_fcping_resp_s {
+	u32	token;
+};
+
+/*
+ * Command codes for zone server query.
+ */
+enum {
+	ZS_GZME = 0x0124,	/* Get zone member extended */
+};
+
+/*
+ * ZS GZME request
+ */
+#define ZS_GZME_ZNAMELEN	32
+struct zs_gzme_req_s {
+	u8	znamelen;
+	u8	rsvd[3];
+	u8	zname[ZS_GZME_ZNAMELEN];
+};
+
+enum zs_mbr_type {
+	ZS_MBR_TYPE_PWWN	= 1,
+	ZS_MBR_TYPE_DOMPORT	= 2,
+	ZS_MBR_TYPE_PORTID	= 3,
+	ZS_MBR_TYPE_NWWN	= 4,
+};
+
+struct zs_mbr_wwn_s {
+	u8	mbr_type;
+	u8	rsvd[3];
+	wwn_t	wwn;
+};
+
+struct zs_query_resp_s {
+	u32	nmbrs;	/*  number of zone members */
+	struct zs_mbr_wwn_s	mbr[1];
+};
+
+/*
+ * GMAL Command (Get (Interconnect Element) Management Address List)
+ * To retrieve the IP Address of a Switch.
+ */
+
+#define CT_GMAL_RESP_PREFIX_TELNET	 "telnet://"
+#define CT_GMAL_RESP_PREFIX_HTTP	 "http://"
+
+/*  GMAL/GFN request */
+struct fcgs_req_s {
+	wwn_t    wwn;   /* PWWN/NWWN */
+};
+
+#define fcgs_gmal_req_t struct fcgs_req_s
+#define fcgs_gfn_req_t struct fcgs_req_s
+
+/* Accept Response to GMAL */
+struct fcgs_gmal_resp_s {
+	u32	ms_len;   /* Num of entries */
+	u8	ms_ma[256];
+};
+
+struct fcgs_gmal_entry_s {
+	u8  len;
+	u8  prefix[7]; /* like "http://" */
+	u8  ip_addr[248];
+};
+
+/*
+ * FDMI
+ */
+/*
+ * FDMI Command Codes
+ */
+#define	FDMI_GRHL		0x0100
+#define	FDMI_GHAT		0x0101
+#define	FDMI_GRPL		0x0102
+#define	FDMI_GPAT		0x0110
+#define	FDMI_RHBA		0x0200
+#define	FDMI_RHAT		0x0201
+#define	FDMI_RPRT		0x0210
+#define	FDMI_RPA		0x0211
+#define	FDMI_DHBA		0x0300
+#define	FDMI_DPRT		0x0310
+
+/*
+ * FDMI reason codes
+ */
+#define	FDMI_NO_ADDITIONAL_EXP		0x00
+#define	FDMI_HBA_ALREADY_REG		0x10
+#define	FDMI_HBA_ATTRIB_NOT_REG		0x11
+#define	FDMI_HBA_ATTRIB_MULTIPLE	0x12
+#define	FDMI_HBA_ATTRIB_LENGTH_INVALID	0x13
+#define	FDMI_HBA_ATTRIB_NOT_PRESENT	0x14
+#define	FDMI_PORT_ORIG_NOT_IN_LIST	0x15
+#define	FDMI_PORT_HBA_NOT_IN_LIST	0x16
+#define	FDMI_PORT_ATTRIB_NOT_REG	0x20
+#define	FDMI_PORT_NOT_REG		0x21
+#define	FDMI_PORT_ATTRIB_MULTIPLE	0x22
+#define	FDMI_PORT_ATTRIB_LENGTH_INVALID	0x23
+#define	FDMI_PORT_ALREADY_REGISTEREED	0x24
+
+/*
+ * FDMI Transmission Speed Mask values
+ */
+#define	FDMI_TRANS_SPEED_1G		0x00000001
+#define	FDMI_TRANS_SPEED_2G		0x00000002
+#define	FDMI_TRANS_SPEED_10G		0x00000004
+#define	FDMI_TRANS_SPEED_4G		0x00000008
+#define	FDMI_TRANS_SPEED_8G		0x00000010
+#define	FDMI_TRANS_SPEED_16G		0x00000020
+#define	FDMI_TRANS_SPEED_UNKNOWN	0x00008000
+
+/*
+ * FDMI HBA attribute types
+ */
+enum fdmi_hba_attribute_type {
+	FDMI_HBA_ATTRIB_NODENAME = 1,	/* 0x0001 */
+	FDMI_HBA_ATTRIB_MANUFACTURER,	/* 0x0002 */
+	FDMI_HBA_ATTRIB_SERIALNUM,	/* 0x0003 */
+	FDMI_HBA_ATTRIB_MODEL,		/* 0x0004 */
+	FDMI_HBA_ATTRIB_MODEL_DESC,	/* 0x0005 */
+	FDMI_HBA_ATTRIB_HW_VERSION,	/* 0x0006 */
+	FDMI_HBA_ATTRIB_DRIVER_VERSION,	/* 0x0007 */
+	FDMI_HBA_ATTRIB_ROM_VERSION,	/* 0x0008 */
+	FDMI_HBA_ATTRIB_FW_VERSION,	/* 0x0009 */
+	FDMI_HBA_ATTRIB_OS_NAME,	/* 0x000A */
+	FDMI_HBA_ATTRIB_MAX_CT,		/* 0x000B */
+
+	FDMI_HBA_ATTRIB_MAX_TYPE
+};
+
+/*
+ * FDMI Port attribute types
+ */
+enum fdmi_port_attribute_type {
+	FDMI_PORT_ATTRIB_FC4_TYPES = 1,	/* 0x0001 */
+	FDMI_PORT_ATTRIB_SUPP_SPEED,	/* 0x0002 */
+	FDMI_PORT_ATTRIB_PORT_SPEED,	/* 0x0003 */
+	FDMI_PORT_ATTRIB_FRAME_SIZE,	/* 0x0004 */
+	FDMI_PORT_ATTRIB_DEV_NAME,	/* 0x0005 */
+	FDMI_PORT_ATTRIB_HOST_NAME,	/* 0x0006 */
+
+	FDMI_PORT_ATTR_MAX_TYPE
+};
+
+/*
+ * FDMI attribute
+ */
+struct fdmi_attr_s {
+	u16        type;
+	u16        len;
+	u8         value[1];
+};
+
+/*
+ * HBA Attribute Block
+ */
+struct fdmi_hba_attr_s {
+	u32        attr_count;	/* # of attributes */
+	struct fdmi_attr_s hba_attr;	/* n attributes */
+};
+
+/*
+ * Registered Port List
+ */
+struct fdmi_port_list_s {
+	u32        num_ports;	/* number Of Port Entries */
+	wwn_t           port_entry;	/* one or more */
+};
+
+/*
+ * Port Attribute Block
+ */
+struct fdmi_port_attr_s {
+	u32        attr_count;	/* # of attributes */
+	struct fdmi_attr_s port_attr;	/* n attributes */
+};
+
+/*
+ * FDMI Register HBA Attributes
+ */
+struct fdmi_rhba_s {
+	wwn_t           hba_id;		/* HBA Identifier */
+	struct fdmi_port_list_s port_list;	/* Registered Port List */
+	struct fdmi_hba_attr_s hba_attr_blk;	/* HBA attribute block */
+};
+
+/*
+ * FDMI Register Port
+ */
+struct fdmi_rprt_s {
+	wwn_t           hba_id;		/* HBA Identifier */
+	wwn_t           port_name;	/* Port wwn */
+	struct fdmi_port_attr_s port_attr_blk;	/* Port Attr Block */
+};
+
+/*
+ * FDMI Register Port Attributes
+ */
+struct fdmi_rpa_s {
+	wwn_t           port_name;	/* port wwn */
+	struct fdmi_port_attr_s port_attr_blk;	/* Port Attr Block */
+};
+
+#pragma pack()
+
+#endif	/* __BFA_FC_H__ */

+ 127 - 166
drivers/scsi/bfa/fcbuild.c → drivers/scsi/bfa/bfa_fcbuild.c

@@ -1,5 +1,5 @@
 /*
 /*
- * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
  * All rights reserved
  * All rights reserved
  * www.brocade.com
  * www.brocade.com
  *
  *
@@ -18,25 +18,25 @@
  * fcbuild.c - FC link service frame building and parsing routines
  * fcbuild.c - FC link service frame building and parsing routines
  */
  */
 
 
-#include <bfa_os_inc.h>
-#include "fcbuild.h"
+#include "bfa_os_inc.h"
+#include "bfa_fcbuild.h"
 
 
 /*
 /*
  * static build functions
  * static build functions
  */
  */
-static void fc_els_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
-			u16 ox_id);
-static void fc_bls_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
-			u16 ox_id);
-static struct fchs_s   fc_els_req_tmpl;
-static struct fchs_s   fc_els_rsp_tmpl;
-static struct fchs_s   fc_bls_req_tmpl;
-static struct fchs_s   fc_bls_rsp_tmpl;
+static void     fc_els_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
+				 u16 ox_id);
+static void     fc_bls_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
+				 u16 ox_id);
+static struct fchs_s fc_els_req_tmpl;
+static struct fchs_s fc_els_rsp_tmpl;
+static struct fchs_s fc_bls_req_tmpl;
+static struct fchs_s fc_bls_rsp_tmpl;
 static struct fc_ba_acc_s ba_acc_tmpl;
 static struct fc_ba_acc_s ba_acc_tmpl;
 static struct fc_logi_s plogi_tmpl;
 static struct fc_logi_s plogi_tmpl;
 static struct fc_prli_s prli_tmpl;
 static struct fc_prli_s prli_tmpl;
 static struct fc_rrq_s rrq_tmpl;
 static struct fc_rrq_s rrq_tmpl;
-static struct fchs_s   fcp_fchs_tmpl;
+static struct fchs_s fcp_fchs_tmpl;
 
 
 void
 void
 fcbuild_init(void)
 fcbuild_init(void)
@@ -123,7 +123,7 @@ fcbuild_init(void)
 	rrq_tmpl.els_cmd.els_code = FC_ELS_RRQ;
 	rrq_tmpl.els_cmd.els_code = FC_ELS_RRQ;
 
 
 	/*
 	/*
-	 * fcp_fchs_tmpl
+	 * fcp_struct fchs_s mpl
 	 */
 	 */
 	fcp_fchs_tmpl.routing = FC_RTG_FC4_DEV_DATA;
 	fcp_fchs_tmpl.routing = FC_RTG_FC4_DEV_DATA;
 	fcp_fchs_tmpl.cat_info = FC_CAT_UNSOLICIT_CMD;
 	fcp_fchs_tmpl.cat_info = FC_CAT_UNSOLICIT_CMD;
@@ -135,8 +135,7 @@ fcbuild_init(void)
 }
 }
 
 
 static void
 static void
-fc_gs_fchdr_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
-			u32 ox_id)
+fc_gs_fchdr_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u32 ox_id)
 {
 {
 	bfa_os_memset(fchs, 0, sizeof(struct fchs_s));
 	bfa_os_memset(fchs, 0, sizeof(struct fchs_s));
 
 
@@ -158,8 +157,7 @@ fc_gs_fchdr_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
 }
 }
 
 
 void
 void
-fc_els_req_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
-			u16 ox_id)
+fc_els_req_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id)
 {
 {
 	bfa_os_memcpy(fchs, &fc_els_req_tmpl, sizeof(struct fchs_s));
 	bfa_os_memcpy(fchs, &fc_els_req_tmpl, sizeof(struct fchs_s));
 	fchs->d_id = (d_id);
 	fchs->d_id = (d_id);
@@ -168,8 +166,7 @@ fc_els_req_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
 }
 }
 
 
 static void
 static void
-fc_els_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
-			u16 ox_id)
+fc_els_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id)
 {
 {
 	bfa_os_memcpy(fchs, &fc_els_rsp_tmpl, sizeof(struct fchs_s));
 	bfa_os_memcpy(fchs, &fc_els_rsp_tmpl, sizeof(struct fchs_s));
 	fchs->d_id = d_id;
 	fchs->d_id = d_id;
@@ -180,8 +177,8 @@ fc_els_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
 enum fc_parse_status
 enum fc_parse_status
 fc_els_rsp_parse(struct fchs_s *fchs, int len)
 fc_els_rsp_parse(struct fchs_s *fchs, int len)
 {
 {
-	struct fc_els_cmd_s   *els_cmd = (struct fc_els_cmd_s *) (fchs + 1);
-	struct fc_ls_rjt_s    *ls_rjt = (struct fc_ls_rjt_s *) els_cmd;
+	struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1);
+	struct fc_ls_rjt_s *ls_rjt = (struct fc_ls_rjt_s *) els_cmd;
 
 
 	len = len;
 	len = len;
 
 
@@ -199,8 +196,7 @@ fc_els_rsp_parse(struct fchs_s *fchs, int len)
 }
 }
 
 
 static void
 static void
-fc_bls_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
-			u16 ox_id)
+fc_bls_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id)
 {
 {
 	bfa_os_memcpy(fchs, &fc_bls_rsp_tmpl, sizeof(struct fchs_s));
 	bfa_os_memcpy(fchs, &fc_bls_rsp_tmpl, sizeof(struct fchs_s));
 	fchs->d_id = d_id;
 	fchs->d_id = d_id;
@@ -213,7 +209,7 @@ fc_plogi_x_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id,
 		 u16 ox_id, wwn_t port_name, wwn_t node_name,
 		 u16 ox_id, wwn_t port_name, wwn_t node_name,
 		 u16 pdu_size, u8 els_code)
 		 u16 pdu_size, u8 els_code)
 {
 {
-	struct fc_logi_s     *plogi = (struct fc_logi_s *) (pld);
+	struct fc_logi_s *plogi = (struct fc_logi_s *) (pld);
 
 
 	bfa_os_memcpy(plogi, &plogi_tmpl, sizeof(struct fc_logi_s));
 	bfa_os_memcpy(plogi, &plogi_tmpl, sizeof(struct fc_logi_s));
 
 
@@ -233,12 +229,11 @@ fc_plogi_x_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id,
 
 
 u16
 u16
 fc_flogi_build(struct fchs_s *fchs, struct fc_logi_s *flogi, u32 s_id,
 fc_flogi_build(struct fchs_s *fchs, struct fc_logi_s *flogi, u32 s_id,
-		u16 ox_id, wwn_t port_name, wwn_t node_name,
-		u16 pdu_size, u8 set_npiv, u8 set_auth,
-		u16 local_bb_credits)
+		u16 ox_id, wwn_t port_name, wwn_t node_name, u16 pdu_size,
+	       u8 set_npiv, u8 set_auth, u16 local_bb_credits)
 {
 {
 	u32        d_id = bfa_os_hton3b(FC_FABRIC_PORT);
 	u32        d_id = bfa_os_hton3b(FC_FABRIC_PORT);
-	u32 	*vvl_info;
+	u32	*vvl_info;
 
 
 	bfa_os_memcpy(flogi, &plogi_tmpl, sizeof(struct fc_logi_s));
 	bfa_os_memcpy(flogi, &plogi_tmpl, sizeof(struct fc_logi_s));
 
 
@@ -292,8 +287,7 @@ fc_flogi_acc_build(struct fchs_s *fchs, struct fc_logi_s *flogi, u32 s_id,
 
 
 u16
 u16
 fc_fdisc_build(struct fchs_s *fchs, struct fc_logi_s *flogi, u32 s_id,
 fc_fdisc_build(struct fchs_s *fchs, struct fc_logi_s *flogi, u32 s_id,
-		u16 ox_id, wwn_t port_name, wwn_t node_name,
-		u16 pdu_size)
+		u16 ox_id, wwn_t port_name, wwn_t node_name, u16 pdu_size)
 {
 {
 	u32        d_id = bfa_os_hton3b(FC_FABRIC_PORT);
 	u32        d_id = bfa_os_hton3b(FC_FABRIC_PORT);
 
 
@@ -330,9 +324,9 @@ fc_plogi_acc_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id,
 enum fc_parse_status
 enum fc_parse_status
 fc_plogi_rsp_parse(struct fchs_s *fchs, int len, wwn_t port_name)
 fc_plogi_rsp_parse(struct fchs_s *fchs, int len, wwn_t port_name)
 {
 {
-	struct fc_els_cmd_s   *els_cmd = (struct fc_els_cmd_s *) (fchs + 1);
-	struct fc_logi_s     *plogi;
-	struct fc_ls_rjt_s    *ls_rjt;
+	struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1);
+	struct fc_logi_s *plogi;
+	struct fc_ls_rjt_s *ls_rjt;
 
 
 	switch (els_cmd->els_code) {
 	switch (els_cmd->els_code) {
 	case FC_ELS_LS_RJT:
 	case FC_ELS_LS_RJT:
@@ -364,7 +358,7 @@ fc_plogi_rsp_parse(struct fchs_s *fchs, int len, wwn_t port_name)
 enum fc_parse_status
 enum fc_parse_status
 fc_plogi_parse(struct fchs_s *fchs)
 fc_plogi_parse(struct fchs_s *fchs)
 {
 {
-	struct fc_logi_s     *plogi = (struct fc_logi_s *) (fchs + 1);
+	struct fc_logi_s *plogi = (struct fc_logi_s *) (fchs + 1);
 
 
 	if (plogi->class3.class_valid != 1)
 	if (plogi->class3.class_valid != 1)
 		return FC_PARSE_FAILURE;
 		return FC_PARSE_FAILURE;
@@ -381,7 +375,7 @@ u16
 fc_prli_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id,
 fc_prli_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id,
 	      u16 ox_id)
 	      u16 ox_id)
 {
 {
-	struct fc_prli_s      *prli = (struct fc_prli_s *) (pld);
+	struct fc_prli_s *prli = (struct fc_prli_s *) (pld);
 
 
 	fc_els_req_build(fchs, d_id, s_id, ox_id);
 	fc_els_req_build(fchs, d_id, s_id, ox_id);
 	bfa_os_memcpy(prli, &prli_tmpl, sizeof(struct fc_prli_s));
 	bfa_os_memcpy(prli, &prli_tmpl, sizeof(struct fc_prli_s));
@@ -398,19 +392,16 @@ fc_prli_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id,
 
 
 u16
 u16
 fc_prli_acc_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id,
 fc_prli_acc_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id,
-		  u16 ox_id, enum bfa_port_role role)
+		  u16 ox_id, enum bfa_lport_role role)
 {
 {
-	struct fc_prli_s      *prli = (struct fc_prli_s *) (pld);
+	struct fc_prli_s *prli = (struct fc_prli_s *) (pld);
 
 
 	fc_els_rsp_build(fchs, d_id, s_id, ox_id);
 	fc_els_rsp_build(fchs, d_id, s_id, ox_id);
 	bfa_os_memcpy(prli, &prli_tmpl, sizeof(struct fc_prli_s));
 	bfa_os_memcpy(prli, &prli_tmpl, sizeof(struct fc_prli_s));
 
 
 	prli->command = FC_ELS_ACC;
 	prli->command = FC_ELS_ACC;
 
 
-	if ((role & BFA_PORT_ROLE_FCP_TM) == BFA_PORT_ROLE_FCP_TM)
-		prli->parampage.servparams.target = 1;
-	else
-		prli->parampage.servparams.initiator = 1;
+	prli->parampage.servparams.initiator = 1;
 
 
 	prli->parampage.rspcode = FC_PRLI_ACC_XQTD;
 	prli->parampage.rspcode = FC_PRLI_ACC_XQTD;
 
 
@@ -452,12 +443,12 @@ fc_prli_parse(struct fc_prli_s *prli)
 }
 }
 
 
 u16
 u16
-fc_logo_build(struct fchs_s *fchs, struct fc_logo_s *logo, u32 d_id,
-			u32 s_id, u16 ox_id, wwn_t port_name)
+fc_logo_build(struct fchs_s *fchs, struct fc_logo_s *logo, u32 d_id, u32 s_id,
+	      u16 ox_id, wwn_t port_name)
 {
 {
 	fc_els_req_build(fchs, d_id, s_id, ox_id);
 	fc_els_req_build(fchs, d_id, s_id, ox_id);
 
 
-	memset(logo, '\0', sizeof(struct fc_logo_s));
+	bfa_os_memset(logo, '\0', sizeof(struct fc_logo_s));
 	logo->els_cmd.els_code = FC_ELS_LOGO;
 	logo->els_cmd.els_code = FC_ELS_LOGO;
 	logo->nport_id = (s_id);
 	logo->nport_id = (s_id);
 	logo->orig_port_name = port_name;
 	logo->orig_port_name = port_name;
@@ -470,7 +461,7 @@ fc_adisc_x_build(struct fchs_s *fchs, struct fc_adisc_s *adisc, u32 d_id,
 		 u32 s_id, u16 ox_id, wwn_t port_name,
 		 u32 s_id, u16 ox_id, wwn_t port_name,
 		 wwn_t node_name, u8 els_code)
 		 wwn_t node_name, u8 els_code)
 {
 {
-	memset(adisc, '\0', sizeof(struct fc_adisc_s));
+	bfa_os_memset(adisc, '\0', sizeof(struct fc_adisc_s));
 
 
 	adisc->els_cmd.els_code = els_code;
 	adisc->els_cmd.els_code = els_code;
 
 
@@ -489,8 +480,7 @@ fc_adisc_x_build(struct fchs_s *fchs, struct fc_adisc_s *adisc, u32 d_id,
 
 
 u16
 u16
 fc_adisc_build(struct fchs_s *fchs, struct fc_adisc_s *adisc, u32 d_id,
 fc_adisc_build(struct fchs_s *fchs, struct fc_adisc_s *adisc, u32 d_id,
-		u32 s_id, u16 ox_id, wwn_t port_name,
-		wwn_t node_name)
+		u32 s_id, u16 ox_id, wwn_t port_name, wwn_t node_name)
 {
 {
 	return fc_adisc_x_build(fchs, adisc, d_id, s_id, ox_id, port_name,
 	return fc_adisc_x_build(fchs, adisc, d_id, s_id, ox_id, port_name,
 				node_name, FC_ELS_ADISC);
 				node_name, FC_ELS_ADISC);
@@ -523,10 +513,10 @@ fc_adisc_rsp_parse(struct fc_adisc_s *adisc, int len, wwn_t port_name,
 }
 }
 
 
 enum fc_parse_status
 enum fc_parse_status
-fc_adisc_parse(struct fchs_s *fchs, void *pld, u32 host_dap,
-		 wwn_t node_name, wwn_t port_name)
+fc_adisc_parse(struct fchs_s *fchs, void *pld, u32 host_dap, wwn_t node_name,
+	       wwn_t port_name)
 {
 {
-	struct fc_adisc_s     *adisc = (struct fc_adisc_s *) pld;
+	struct fc_adisc_s *adisc = (struct fc_adisc_s *) pld;
 
 
 	if (adisc->els_cmd.els_code != FC_ELS_ACC)
 	if (adisc->els_cmd.els_code != FC_ELS_ACC)
 		return FC_PARSE_FAILURE;
 		return FC_PARSE_FAILURE;
@@ -542,13 +532,13 @@ fc_adisc_parse(struct fchs_s *fchs, void *pld, u32 host_dap,
 enum fc_parse_status
 enum fc_parse_status
 fc_pdisc_parse(struct fchs_s *fchs, wwn_t node_name, wwn_t port_name)
 fc_pdisc_parse(struct fchs_s *fchs, wwn_t node_name, wwn_t port_name)
 {
 {
-	struct fc_logi_s     *pdisc = (struct fc_logi_s *) (fchs + 1);
+	struct fc_logi_s *pdisc = (struct fc_logi_s *) (fchs + 1);
 
 
 	if (pdisc->class3.class_valid != 1)
 	if (pdisc->class3.class_valid != 1)
 		return FC_PARSE_FAILURE;
 		return FC_PARSE_FAILURE;
 
 
 	if ((bfa_os_ntohs(pdisc->class3.rxsz) <
 	if ((bfa_os_ntohs(pdisc->class3.rxsz) <
-		 (FC_MIN_PDUSZ - sizeof(struct fchs_s)))
+		(FC_MIN_PDUSZ - sizeof(struct fchs_s)))
 	    || (pdisc->class3.rxsz == 0))
 	    || (pdisc->class3.rxsz == 0))
 		return FC_PARSE_FAILURE;
 		return FC_PARSE_FAILURE;
 
 
@@ -584,8 +574,8 @@ fc_abts_rsp_parse(struct fchs_s *fchs, int len)
 }
 }
 
 
 u16
 u16
-fc_rrq_build(struct fchs_s *fchs, struct fc_rrq_s *rrq, u32 d_id,
-			 u32 s_id, u16 ox_id, u16 rrq_oxid)
+fc_rrq_build(struct fchs_s *fchs, struct fc_rrq_s *rrq, u32 d_id, u32 s_id,
+	     u16 ox_id, u16 rrq_oxid)
 {
 {
 	fc_els_req_build(fchs, d_id, s_id, ox_id);
 	fc_els_req_build(fchs, d_id, s_id, ox_id);
 
 
@@ -604,11 +594,11 @@ u16
 fc_logo_acc_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id,
 fc_logo_acc_build(struct fchs_s *fchs, void *pld, u32 d_id, u32 s_id,
 		  u16 ox_id)
 		  u16 ox_id)
 {
 {
-	struct fc_els_cmd_s   *acc = pld;
+	struct fc_els_cmd_s *acc = pld;
 
 
 	fc_els_rsp_build(fchs, d_id, s_id, ox_id);
 	fc_els_rsp_build(fchs, d_id, s_id, ox_id);
 
 
-	memset(acc, 0, sizeof(struct fc_els_cmd_s));
+	bfa_os_memset(acc, 0, sizeof(struct fc_els_cmd_s));
 	acc->els_code = FC_ELS_ACC;
 	acc->els_code = FC_ELS_ACC;
 
 
 	return sizeof(struct fc_els_cmd_s);
 	return sizeof(struct fc_els_cmd_s);
@@ -620,7 +610,7 @@ fc_ls_rjt_build(struct fchs_s *fchs, struct fc_ls_rjt_s *ls_rjt, u32 d_id,
 		u8 reason_code_expl)
 		u8 reason_code_expl)
 {
 {
 	fc_els_rsp_build(fchs, d_id, s_id, ox_id);
 	fc_els_rsp_build(fchs, d_id, s_id, ox_id);
-	memset(ls_rjt, 0, sizeof(struct fc_ls_rjt_s));
+	bfa_os_memset(ls_rjt, 0, sizeof(struct fc_ls_rjt_s));
 
 
 	ls_rjt->els_cmd.els_code = FC_ELS_LS_RJT;
 	ls_rjt->els_cmd.els_code = FC_ELS_LS_RJT;
 	ls_rjt->reason_code = reason_code;
 	ls_rjt->reason_code = reason_code;
@@ -647,11 +637,11 @@ fc_ba_acc_build(struct fchs_s *fchs, struct fc_ba_acc_s *ba_acc, u32 d_id,
 }
 }
 
 
 u16
 u16
-fc_ls_acc_build(struct fchs_s *fchs, struct fc_els_cmd_s *els_cmd,
-			u32 d_id, u32 s_id, u16 ox_id)
+fc_ls_acc_build(struct fchs_s *fchs, struct fc_els_cmd_s *els_cmd, u32 d_id,
+		u32 s_id, u16 ox_id)
 {
 {
 	fc_els_rsp_build(fchs, d_id, s_id, ox_id);
 	fc_els_rsp_build(fchs, d_id, s_id, ox_id);
-	memset(els_cmd, 0, sizeof(struct fc_els_cmd_s));
+	bfa_os_memset(els_cmd, 0, sizeof(struct fc_els_cmd_s));
 	els_cmd->els_code = FC_ELS_ACC;
 	els_cmd->els_code = FC_ELS_ACC;
 
 
 	return sizeof(struct fc_els_cmd_s);
 	return sizeof(struct fc_els_cmd_s);
@@ -661,8 +651,8 @@ int
 fc_logout_params_pages(struct fchs_s *fc_frame, u8 els_code)
 fc_logout_params_pages(struct fchs_s *fc_frame, u8 els_code)
 {
 {
 	int             num_pages = 0;
 	int             num_pages = 0;
-	struct fc_prlo_s      *prlo;
-	struct fc_tprlo_s     *tprlo;
+	struct fc_prlo_s *prlo;
+	struct fc_tprlo_s *tprlo;
 
 
 	if (els_code == FC_ELS_PRLO) {
 	if (els_code == FC_ELS_PRLO) {
 		prlo = (struct fc_prlo_s *) (fc_frame + 1);
 		prlo = (struct fc_prlo_s *) (fc_frame + 1);
@@ -676,14 +666,13 @@ fc_logout_params_pages(struct fchs_s *fc_frame, u8 els_code)
 
 
 u16
 u16
 fc_tprlo_acc_build(struct fchs_s *fchs, struct fc_tprlo_acc_s *tprlo_acc,
 fc_tprlo_acc_build(struct fchs_s *fchs, struct fc_tprlo_acc_s *tprlo_acc,
-			u32 d_id, u32 s_id, u16 ox_id,
-			int num_pages)
+		u32 d_id, u32 s_id, u16 ox_id, int num_pages)
 {
 {
 	int             page;
 	int             page;
 
 
 	fc_els_rsp_build(fchs, d_id, s_id, ox_id);
 	fc_els_rsp_build(fchs, d_id, s_id, ox_id);
 
 
-	memset(tprlo_acc, 0, (num_pages * 16) + 4);
+	bfa_os_memset(tprlo_acc, 0, (num_pages * 16) + 4);
 	tprlo_acc->command = FC_ELS_ACC;
 	tprlo_acc->command = FC_ELS_ACC;
 
 
 	tprlo_acc->page_len = 0x10;
 	tprlo_acc->page_len = 0x10;
@@ -700,15 +689,14 @@ fc_tprlo_acc_build(struct fchs_s *fchs, struct fc_tprlo_acc_s *tprlo_acc,
 }
 }
 
 
 u16
 u16
-fc_prlo_acc_build(struct fchs_s *fchs, struct fc_prlo_acc_s *prlo_acc,
-			u32 d_id, u32 s_id, u16 ox_id,
-			int num_pages)
+fc_prlo_acc_build(struct fchs_s *fchs, struct fc_prlo_acc_s *prlo_acc, u32 d_id,
+		  u32 s_id, u16 ox_id, int num_pages)
 {
 {
 	int             page;
 	int             page;
 
 
 	fc_els_rsp_build(fchs, d_id, s_id, ox_id);
 	fc_els_rsp_build(fchs, d_id, s_id, ox_id);
 
 
-	memset(prlo_acc, 0, (num_pages * 16) + 4);
+	bfa_os_memset(prlo_acc, 0, (num_pages * 16) + 4);
 	prlo_acc->command = FC_ELS_ACC;
 	prlo_acc->command = FC_ELS_ACC;
 	prlo_acc->page_len = 0x10;
 	prlo_acc->page_len = 0x10;
 	prlo_acc->payload_len = bfa_os_htons((num_pages * 16) + 4);
 	prlo_acc->payload_len = bfa_os_htons((num_pages * 16) + 4);
@@ -726,11 +714,11 @@ fc_prlo_acc_build(struct fchs_s *fchs, struct fc_prlo_acc_s *prlo_acc,
 
 
 u16
 u16
 fc_rnid_build(struct fchs_s *fchs, struct fc_rnid_cmd_s *rnid, u32 d_id,
 fc_rnid_build(struct fchs_s *fchs, struct fc_rnid_cmd_s *rnid, u32 d_id,
-			u32 s_id, u16 ox_id, u32 data_format)
+		u32 s_id, u16 ox_id, u32 data_format)
 {
 {
 	fc_els_req_build(fchs, d_id, s_id, ox_id);
 	fc_els_req_build(fchs, d_id, s_id, ox_id);
 
 
-	memset(rnid, 0, sizeof(struct fc_rnid_cmd_s));
+	bfa_os_memset(rnid, 0, sizeof(struct fc_rnid_cmd_s));
 
 
 	rnid->els_cmd.els_code = FC_ELS_RNID;
 	rnid->els_cmd.els_code = FC_ELS_RNID;
 	rnid->node_id_data_format = data_format;
 	rnid->node_id_data_format = data_format;
@@ -739,13 +727,12 @@ fc_rnid_build(struct fchs_s *fchs, struct fc_rnid_cmd_s *rnid, u32 d_id,
 }
 }
 
 
 u16
 u16
-fc_rnid_acc_build(struct fchs_s *fchs, struct fc_rnid_acc_s *rnid_acc,
-			u32 d_id, u32 s_id, u16 ox_id,
-			u32 data_format,
-			struct fc_rnid_common_id_data_s *common_id_data,
-			struct fc_rnid_general_topology_data_s *gen_topo_data)
+fc_rnid_acc_build(struct fchs_s *fchs, struct fc_rnid_acc_s *rnid_acc, u32 d_id,
+		  u32 s_id, u16 ox_id, u32 data_format,
+		  struct fc_rnid_common_id_data_s *common_id_data,
+		  struct fc_rnid_general_topology_data_s *gen_topo_data)
 {
 {
-	memset(rnid_acc, 0, sizeof(struct fc_rnid_acc_s));
+	bfa_os_memset(rnid_acc, 0, sizeof(struct fc_rnid_acc_s));
 
 
 	fc_els_rsp_build(fchs, d_id, s_id, ox_id);
 	fc_els_rsp_build(fchs, d_id, s_id, ox_id);
 
 
@@ -769,27 +756,26 @@ fc_rnid_acc_build(struct fchs_s *fchs, struct fc_rnid_acc_s *rnid_acc,
 
 
 u16
 u16
 fc_rpsc_build(struct fchs_s *fchs, struct fc_rpsc_cmd_s *rpsc, u32 d_id,
 fc_rpsc_build(struct fchs_s *fchs, struct fc_rpsc_cmd_s *rpsc, u32 d_id,
-			u32 s_id, u16 ox_id)
+		u32 s_id, u16 ox_id)
 {
 {
 	fc_els_req_build(fchs, d_id, s_id, ox_id);
 	fc_els_req_build(fchs, d_id, s_id, ox_id);
 
 
-	memset(rpsc, 0, sizeof(struct fc_rpsc_cmd_s));
+	bfa_os_memset(rpsc, 0, sizeof(struct fc_rpsc_cmd_s));
 
 
 	rpsc->els_cmd.els_code = FC_ELS_RPSC;
 	rpsc->els_cmd.els_code = FC_ELS_RPSC;
 	return sizeof(struct fc_rpsc_cmd_s);
 	return sizeof(struct fc_rpsc_cmd_s);
 }
 }
 
 
 u16
 u16
-fc_rpsc2_build(struct fchs_s *fchs, struct fc_rpsc2_cmd_s *rpsc2,
-			u32 d_id, u32 s_id, u32 *pid_list,
-			u16 npids)
+fc_rpsc2_build(struct fchs_s *fchs, struct fc_rpsc2_cmd_s *rpsc2, u32 d_id,
+		u32 s_id, u32 *pid_list, u16 npids)
 {
 {
 	u32 dctlr_id = FC_DOMAIN_CTRLR(bfa_os_hton3b(d_id));
 	u32 dctlr_id = FC_DOMAIN_CTRLR(bfa_os_hton3b(d_id));
 	int i = 0;
 	int i = 0;
 
 
 	fc_els_req_build(fchs, bfa_os_hton3b(dctlr_id), s_id, 0);
 	fc_els_req_build(fchs, bfa_os_hton3b(dctlr_id), s_id, 0);
 
 
-	memset(rpsc2, 0, sizeof(struct fc_rpsc2_cmd_s));
+	bfa_os_memset(rpsc2, 0, sizeof(struct fc_rpsc2_cmd_s));
 
 
 	rpsc2->els_cmd.els_code = FC_ELS_RPSC;
 	rpsc2->els_cmd.els_code = FC_ELS_RPSC;
 	rpsc2->token = bfa_os_htonl(FC_BRCD_TOKEN);
 	rpsc2->token = bfa_os_htonl(FC_BRCD_TOKEN);
@@ -797,16 +783,15 @@ fc_rpsc2_build(struct fchs_s *fchs, struct fc_rpsc2_cmd_s *rpsc2,
 	for (i = 0; i < npids; i++)
 	for (i = 0; i < npids; i++)
 		rpsc2->pid_list[i].pid = pid_list[i];
 		rpsc2->pid_list[i].pid = pid_list[i];
 
 
-	return sizeof(struct fc_rpsc2_cmd_s) + ((npids - 1) *
-			(sizeof(u32)));
+	return sizeof(struct fc_rpsc2_cmd_s) + ((npids - 1) * (sizeof(u32)));
 }
 }
 
 
 u16
 u16
 fc_rpsc_acc_build(struct fchs_s *fchs, struct fc_rpsc_acc_s *rpsc_acc,
 fc_rpsc_acc_build(struct fchs_s *fchs, struct fc_rpsc_acc_s *rpsc_acc,
-			u32 d_id, u32 s_id, u16 ox_id,
-			struct fc_rpsc_speed_info_s *oper_speed)
+		u32 d_id, u32 s_id, u16 ox_id,
+		  struct fc_rpsc_speed_info_s *oper_speed)
 {
 {
-	memset(rpsc_acc, 0, sizeof(struct fc_rpsc_acc_s));
+	bfa_os_memset(rpsc_acc, 0, sizeof(struct fc_rpsc_acc_s));
 
 
 	fc_els_rsp_build(fchs, d_id, s_id, ox_id);
 	fc_els_rsp_build(fchs, d_id, s_id, ox_id);
 
 
@@ -820,7 +805,6 @@ fc_rpsc_acc_build(struct fchs_s *fchs, struct fc_rpsc_acc_s *rpsc_acc,
 		bfa_os_htons(oper_speed->port_op_speed);
 		bfa_os_htons(oper_speed->port_op_speed);
 
 
 	return sizeof(struct fc_rpsc_acc_s);
 	return sizeof(struct fc_rpsc_acc_s);
-
 }
 }
 
 
 /*
 /*
@@ -831,7 +815,7 @@ fc_rpsc_acc_build(struct fchs_s *fchs, struct fc_rpsc_acc_s *rpsc_acc,
 u16
 u16
 fc_logo_rsp_parse(struct fchs_s *fchs, int len)
 fc_logo_rsp_parse(struct fchs_s *fchs, int len)
 {
 {
-	struct fc_els_cmd_s   *els_cmd = (struct fc_els_cmd_s *) (fchs + 1);
+	struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1);
 
 
 	len = len;
 	len = len;
 	if (els_cmd->els_code != FC_ELS_ACC)
 	if (els_cmd->els_code != FC_ELS_ACC)
@@ -841,11 +825,10 @@ fc_logo_rsp_parse(struct fchs_s *fchs, int len)
 }
 }
 
 
 u16
 u16
-fc_pdisc_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
-			u16 ox_id, wwn_t port_name, wwn_t node_name,
-			u16 pdu_size)
+fc_pdisc_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id,
+	       wwn_t port_name, wwn_t node_name, u16 pdu_size)
 {
 {
-	struct fc_logi_s     *pdisc = (struct fc_logi_s *) (fchs + 1);
+	struct fc_logi_s *pdisc = (struct fc_logi_s *) (fchs + 1);
 
 
 	bfa_os_memcpy(pdisc, &plogi_tmpl, sizeof(struct fc_logi_s));
 	bfa_os_memcpy(pdisc, &plogi_tmpl, sizeof(struct fc_logi_s));
 
 
@@ -862,7 +845,7 @@ fc_pdisc_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
 u16
 u16
 fc_pdisc_rsp_parse(struct fchs_s *fchs, int len, wwn_t port_name)
 fc_pdisc_rsp_parse(struct fchs_s *fchs, int len, wwn_t port_name)
 {
 {
-	struct fc_logi_s     *pdisc = (struct fc_logi_s *) (fchs + 1);
+	struct fc_logi_s *pdisc = (struct fc_logi_s *) (fchs + 1);
 
 
 	if (len < sizeof(struct fc_logi_s))
 	if (len < sizeof(struct fc_logi_s))
 		return FC_PARSE_LEN_INVAL;
 		return FC_PARSE_LEN_INVAL;
@@ -886,11 +869,11 @@ u16
 fc_prlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id,
 fc_prlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id,
 	      int num_pages)
 	      int num_pages)
 {
 {
-	struct fc_prlo_s      *prlo = (struct fc_prlo_s *) (fchs + 1);
+	struct fc_prlo_s *prlo = (struct fc_prlo_s *) (fchs + 1);
 	int             page;
 	int             page;
 
 
 	fc_els_req_build(fchs, d_id, s_id, ox_id);
 	fc_els_req_build(fchs, d_id, s_id, ox_id);
-	memset(prlo, 0, (num_pages * 16) + 4);
+	bfa_os_memset(prlo, 0, (num_pages * 16) + 4);
 	prlo->command = FC_ELS_PRLO;
 	prlo->command = FC_ELS_PRLO;
 	prlo->page_len = 0x10;
 	prlo->page_len = 0x10;
 	prlo->payload_len = bfa_os_htons((num_pages * 16) + 4);
 	prlo->payload_len = bfa_os_htons((num_pages * 16) + 4);
@@ -909,7 +892,7 @@ fc_prlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id,
 u16
 u16
 fc_prlo_rsp_parse(struct fchs_s *fchs, int len)
 fc_prlo_rsp_parse(struct fchs_s *fchs, int len)
 {
 {
-	struct fc_prlo_acc_s  *prlo = (struct fc_prlo_acc_s *) (fchs + 1);
+	struct fc_prlo_acc_s *prlo = (struct fc_prlo_acc_s *) (fchs + 1);
 	int             num_pages = 0;
 	int             num_pages = 0;
 	int             page = 0;
 	int             page = 0;
 
 
@@ -941,15 +924,14 @@ fc_prlo_rsp_parse(struct fchs_s *fchs, int len)
 }
 }
 
 
 u16
 u16
-fc_tprlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
-			u16 ox_id, int num_pages,
-			enum fc_tprlo_type tprlo_type, u32 tpr_id)
+fc_tprlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id,
+	       int num_pages, enum fc_tprlo_type tprlo_type, u32 tpr_id)
 {
 {
-	struct fc_tprlo_s     *tprlo = (struct fc_tprlo_s *) (fchs + 1);
+	struct fc_tprlo_s *tprlo = (struct fc_tprlo_s *) (fchs + 1);
 	int             page;
 	int             page;
 
 
 	fc_els_req_build(fchs, d_id, s_id, ox_id);
 	fc_els_req_build(fchs, d_id, s_id, ox_id);
-	memset(tprlo, 0, (num_pages * 16) + 4);
+	bfa_os_memset(tprlo, 0, (num_pages * 16) + 4);
 	tprlo->command = FC_ELS_TPRLO;
 	tprlo->command = FC_ELS_TPRLO;
 	tprlo->page_len = 0x10;
 	tprlo->page_len = 0x10;
 	tprlo->payload_len = bfa_os_htons((num_pages * 16) + 4);
 	tprlo->payload_len = bfa_os_htons((num_pages * 16) + 4);
@@ -1003,7 +985,7 @@ fc_tprlo_rsp_parse(struct fchs_s *fchs, int len)
 enum fc_parse_status
 enum fc_parse_status
 fc_rrq_rsp_parse(struct fchs_s *fchs, int len)
 fc_rrq_rsp_parse(struct fchs_s *fchs, int len)
 {
 {
-	struct fc_els_cmd_s   *els_cmd = (struct fc_els_cmd_s *) (fchs + 1);
+	struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1);
 
 
 	len = len;
 	len = len;
 	if (els_cmd->els_code != FC_ELS_ACC)
 	if (els_cmd->els_code != FC_ELS_ACC)
@@ -1013,11 +995,10 @@ fc_rrq_rsp_parse(struct fchs_s *fchs, int len)
 }
 }
 
 
 u16
 u16
-fc_ba_rjt_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
-			u16 ox_id, u32 reason_code,
-			u32 reason_expl)
+fc_ba_rjt_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id,
+		u32 reason_code, u32 reason_expl)
 {
 {
-	struct fc_ba_rjt_s    *ba_rjt = (struct fc_ba_rjt_s *) (fchs + 1);
+	struct fc_ba_rjt_s *ba_rjt = (struct fc_ba_rjt_s *) (fchs + 1);
 
 
 	fc_bls_rsp_build(fchs, d_id, s_id, ox_id);
 	fc_bls_rsp_build(fchs, d_id, s_id, ox_id);
 
 
@@ -1062,10 +1043,8 @@ u16
 fc_gidpn_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
 fc_gidpn_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
 	       wwn_t port_name)
 	       wwn_t port_name)
 {
 {
-
-	struct ct_hdr_s       *cthdr = (struct ct_hdr_s *) pyld;
-	struct fcgs_gidpn_req_s *gidpn =
-			(struct fcgs_gidpn_req_s *) (cthdr + 1);
+	struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
+	struct fcgs_gidpn_req_s *gidpn = (struct fcgs_gidpn_req_s *)(cthdr + 1);
 	u32        d_id = bfa_os_hton3b(FC_NAME_SERVER);
 	u32        d_id = bfa_os_hton3b(FC_NAME_SERVER);
 
 
 	fc_gs_fchdr_build(fchs, d_id, s_id, ox_id);
 	fc_gs_fchdr_build(fchs, d_id, s_id, ox_id);
@@ -1080,8 +1059,7 @@ u16
 fc_gpnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
 fc_gpnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
 	       u32 port_id)
 	       u32 port_id)
 {
 {
-
-	struct ct_hdr_s       *cthdr = (struct ct_hdr_s *) pyld;
+	struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
 	fcgs_gpnid_req_t *gpnid = (fcgs_gpnid_req_t *) (cthdr + 1);
 	fcgs_gpnid_req_t *gpnid = (fcgs_gpnid_req_t *) (cthdr + 1);
 	u32        d_id = bfa_os_hton3b(FC_NAME_SERVER);
 	u32        d_id = bfa_os_hton3b(FC_NAME_SERVER);
 
 
@@ -1097,8 +1075,7 @@ u16
 fc_gnnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
 fc_gnnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
 	       u32 port_id)
 	       u32 port_id)
 {
 {
-
-	struct ct_hdr_s       *cthdr = (struct ct_hdr_s *) pyld;
+	struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
 	fcgs_gnnid_req_t *gnnid = (fcgs_gnnid_req_t *) (cthdr + 1);
 	fcgs_gnnid_req_t *gnnid = (fcgs_gnnid_req_t *) (cthdr + 1);
 	u32        d_id = bfa_os_hton3b(FC_NAME_SERVER);
 	u32        d_id = bfa_os_hton3b(FC_NAME_SERVER);
 
 
@@ -1124,8 +1101,8 @@ fc_ct_rsp_parse(struct ct_hdr_s *cthdr)
 }
 }
 
 
 u16
 u16
-fc_scr_build(struct fchs_s *fchs, struct fc_scr_s *scr, u8 set_br_reg,
-			u32 s_id, u16 ox_id)
+fc_scr_build(struct fchs_s *fchs, struct fc_scr_s *scr,
+		u8 set_br_reg, u32 s_id, u16 ox_id)
 {
 {
 	u32        d_id = bfa_os_hton3b(FC_FABRIC_CONTROLLER);
 	u32        d_id = bfa_os_hton3b(FC_FABRIC_CONTROLLER);
 
 
@@ -1141,8 +1118,8 @@ fc_scr_build(struct fchs_s *fchs, struct fc_scr_s *scr, u8 set_br_reg,
 }
 }
 
 
 u16
 u16
-fc_rscn_build(struct fchs_s *fchs, struct fc_rscn_pl_s *rscn, u32 s_id,
-			u16 ox_id)
+fc_rscn_build(struct fchs_s *fchs, struct fc_rscn_pl_s *rscn,
+		u32 s_id, u16 ox_id)
 {
 {
 	u32        d_id = bfa_os_hton3b(FC_FABRIC_CONTROLLER);
 	u32        d_id = bfa_os_hton3b(FC_FABRIC_CONTROLLER);
 	u16        payldlen;
 	u16        payldlen;
@@ -1162,11 +1139,10 @@ fc_rscn_build(struct fchs_s *fchs, struct fc_rscn_pl_s *rscn, u32 s_id,
 
 
 u16
 u16
 fc_rftid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
 fc_rftid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
-	       enum bfa_port_role roles)
+	       enum bfa_lport_role roles)
 {
 {
-	struct ct_hdr_s       *cthdr = (struct ct_hdr_s *) pyld;
-	struct fcgs_rftid_req_s *rftid =
-			(struct fcgs_rftid_req_s *) (cthdr + 1);
+	struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
+	struct fcgs_rftid_req_s *rftid = (struct fcgs_rftid_req_s *)(cthdr + 1);
 	u32        type_value, d_id = bfa_os_hton3b(FC_NAME_SERVER);
 	u32        type_value, d_id = bfa_os_hton3b(FC_NAME_SERVER);
 	u8         index;
 	u8         index;
 
 
@@ -1182,23 +1158,15 @@ fc_rftid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
 	type_value = 1 << (FC_TYPE_FCP % 32);
 	type_value = 1 << (FC_TYPE_FCP % 32);
 	rftid->fc4_type[index] = bfa_os_htonl(type_value);
 	rftid->fc4_type[index] = bfa_os_htonl(type_value);
 
 
-	if (roles & BFA_PORT_ROLE_FCP_IPFC) {
-		index = FC_TYPE_IP >> 5;
-		type_value = 1 << (FC_TYPE_IP % 32);
-		rftid->fc4_type[index] |= bfa_os_htonl(type_value);
-	}
-
 	return sizeof(struct fcgs_rftid_req_s) + sizeof(struct ct_hdr_s);
 	return sizeof(struct fcgs_rftid_req_s) + sizeof(struct ct_hdr_s);
 }
 }
 
 
 u16
 u16
-fc_rftid_build_sol(struct fchs_s *fchs, void *pyld, u32 s_id,
-			u16 ox_id, u8 *fc4_bitmap,
-			u32 bitmap_size)
+fc_rftid_build_sol(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
+		   u8 *fc4_bitmap, u32 bitmap_size)
 {
 {
-	struct ct_hdr_s       *cthdr = (struct ct_hdr_s *) pyld;
-	struct fcgs_rftid_req_s *rftid =
-			(struct fcgs_rftid_req_s *) (cthdr + 1);
+	struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
+	struct fcgs_rftid_req_s *rftid = (struct fcgs_rftid_req_s *)(cthdr + 1);
 	u32        d_id = bfa_os_hton3b(FC_NAME_SERVER);
 	u32        d_id = bfa_os_hton3b(FC_NAME_SERVER);
 
 
 	fc_gs_fchdr_build(fchs, d_id, s_id, ox_id);
 	fc_gs_fchdr_build(fchs, d_id, s_id, ox_id);
@@ -1208,7 +1176,7 @@ fc_rftid_build_sol(struct fchs_s *fchs, void *pyld, u32 s_id,
 
 
 	rftid->dap = s_id;
 	rftid->dap = s_id;
 	bfa_os_memcpy((void *)rftid->fc4_type, (void *)fc4_bitmap,
 	bfa_os_memcpy((void *)rftid->fc4_type, (void *)fc4_bitmap,
-			(bitmap_size < 32 ? bitmap_size : 32));
+		(bitmap_size < 32 ? bitmap_size : 32));
 
 
 	return sizeof(struct fcgs_rftid_req_s) + sizeof(struct ct_hdr_s);
 	return sizeof(struct fcgs_rftid_req_s) + sizeof(struct ct_hdr_s);
 }
 }
@@ -1217,9 +1185,8 @@ u16
 fc_rffid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
 fc_rffid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
 	       u8 fc4_type, u8 fc4_ftrs)
 	       u8 fc4_type, u8 fc4_ftrs)
 {
 {
-	struct ct_hdr_s         *cthdr = (struct ct_hdr_s *) pyld;
-	struct fcgs_rffid_req_s *rffid =
-			(struct fcgs_rffid_req_s *) (cthdr + 1);
+	struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
+	struct fcgs_rffid_req_s *rffid = (struct fcgs_rffid_req_s *)(cthdr + 1);
 	u32         d_id = bfa_os_hton3b(FC_NAME_SERVER);
 	u32         d_id = bfa_os_hton3b(FC_NAME_SERVER);
 
 
 	fc_gs_fchdr_build(fchs, d_id, s_id, ox_id);
 	fc_gs_fchdr_build(fchs, d_id, s_id, ox_id);
@@ -1227,9 +1194,9 @@ fc_rffid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
 
 
 	bfa_os_memset(rffid, 0, sizeof(struct fcgs_rffid_req_s));
 	bfa_os_memset(rffid, 0, sizeof(struct fcgs_rffid_req_s));
 
 
-	rffid->dap 		 	= s_id;
+	rffid->dap	    = s_id;
 	rffid->fc4ftr_bits  = fc4_ftrs;
 	rffid->fc4ftr_bits  = fc4_ftrs;
-	rffid->fc4_type		= fc4_type;
+	rffid->fc4_type	    = fc4_type;
 
 
 	return sizeof(struct fcgs_rffid_req_s) + sizeof(struct ct_hdr_s);
 	return sizeof(struct fcgs_rffid_req_s) + sizeof(struct ct_hdr_s);
 }
 }
@@ -1239,9 +1206,9 @@ fc_rspnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
 		u8 *name)
 		u8 *name)
 {
 {
 
 
-	struct ct_hdr_s       *cthdr = (struct ct_hdr_s *) pyld;
+	struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
 	struct fcgs_rspnid_req_s *rspnid =
 	struct fcgs_rspnid_req_s *rspnid =
-			(struct fcgs_rspnid_req_s *) (cthdr + 1);
+			(struct fcgs_rspnid_req_s *)(cthdr + 1);
 	u32        d_id = bfa_os_hton3b(FC_NAME_SERVER);
 	u32        d_id = bfa_os_hton3b(FC_NAME_SERVER);
 
 
 	fc_gs_fchdr_build(fchs, d_id, s_id, ox_id);
 	fc_gs_fchdr_build(fchs, d_id, s_id, ox_id);
@@ -1257,13 +1224,11 @@ fc_rspnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
 }
 }
 
 
 u16
 u16
-fc_gid_ft_build(struct fchs_s *fchs, void *pyld, u32 s_id,
-			u8 fc4_type)
+fc_gid_ft_build(struct fchs_s *fchs, void *pyld, u32 s_id, u8 fc4_type)
 {
 {
 
 
-	struct ct_hdr_s       *cthdr = (struct ct_hdr_s *) pyld;
-	struct fcgs_gidft_req_s *gidft =
-			(struct fcgs_gidft_req_s *) (cthdr + 1);
+	struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
+	struct fcgs_gidft_req_s *gidft = (struct fcgs_gidft_req_s *)(cthdr + 1);
 	u32        d_id = bfa_os_hton3b(FC_NAME_SERVER);
 	u32        d_id = bfa_os_hton3b(FC_NAME_SERVER);
 
 
 	fc_gs_fchdr_build(fchs, d_id, s_id, 0);
 	fc_gs_fchdr_build(fchs, d_id, s_id, 0);
@@ -1282,9 +1247,8 @@ u16
 fc_rpnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id,
 fc_rpnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id,
 	       wwn_t port_name)
 	       wwn_t port_name)
 {
 {
-	struct ct_hdr_s       *cthdr = (struct ct_hdr_s *) pyld;
-	struct fcgs_rpnid_req_s *rpnid =
-			(struct fcgs_rpnid_req_s *) (cthdr + 1);
+	struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
+	struct fcgs_rpnid_req_s *rpnid = (struct fcgs_rpnid_req_s *)(cthdr + 1);
 	u32        d_id = bfa_os_hton3b(FC_NAME_SERVER);
 	u32        d_id = bfa_os_hton3b(FC_NAME_SERVER);
 
 
 	fc_gs_fchdr_build(fchs, d_id, s_id, 0);
 	fc_gs_fchdr_build(fchs, d_id, s_id, 0);
@@ -1301,9 +1265,8 @@ u16
 fc_rnnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id,
 fc_rnnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id,
 	       wwn_t node_name)
 	       wwn_t node_name)
 {
 {
-	struct ct_hdr_s       *cthdr = (struct ct_hdr_s *) pyld;
-	struct fcgs_rnnid_req_s *rnnid =
-			(struct fcgs_rnnid_req_s *) (cthdr + 1);
+	struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
+	struct fcgs_rnnid_req_s *rnnid = (struct fcgs_rnnid_req_s *)(cthdr + 1);
 	u32        d_id = bfa_os_hton3b(FC_NAME_SERVER);
 	u32        d_id = bfa_os_hton3b(FC_NAME_SERVER);
 
 
 	fc_gs_fchdr_build(fchs, d_id, s_id, 0);
 	fc_gs_fchdr_build(fchs, d_id, s_id, 0);
@@ -1320,7 +1283,7 @@ u16
 fc_rcsid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id,
 fc_rcsid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id,
 	       u32 cos)
 	       u32 cos)
 {
 {
-	struct ct_hdr_s       *cthdr = (struct ct_hdr_s *) pyld;
+	struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
 	struct fcgs_rcsid_req_s *rcsid =
 	struct fcgs_rcsid_req_s *rcsid =
 			(struct fcgs_rcsid_req_s *) (cthdr + 1);
 			(struct fcgs_rcsid_req_s *) (cthdr + 1);
 	u32        d_id = bfa_os_hton3b(FC_NAME_SERVER);
 	u32        d_id = bfa_os_hton3b(FC_NAME_SERVER);
@@ -1339,9 +1302,8 @@ u16
 fc_rptid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id,
 fc_rptid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id,
 	       u8 port_type)
 	       u8 port_type)
 {
 {
-	struct ct_hdr_s       *cthdr = (struct ct_hdr_s *) pyld;
-	struct fcgs_rptid_req_s *rptid =
-			(struct fcgs_rptid_req_s *) (cthdr + 1);
+	struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
+	struct fcgs_rptid_req_s *rptid = (struct fcgs_rptid_req_s *)(cthdr + 1);
 	u32        d_id = bfa_os_hton3b(FC_NAME_SERVER);
 	u32        d_id = bfa_os_hton3b(FC_NAME_SERVER);
 
 
 	fc_gs_fchdr_build(fchs, d_id, s_id, 0);
 	fc_gs_fchdr_build(fchs, d_id, s_id, 0);
@@ -1357,9 +1319,8 @@ fc_rptid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id,
 u16
 u16
 fc_ganxt_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id)
 fc_ganxt_build(struct fchs_s *fchs, void *pyld, u32 s_id, u32 port_id)
 {
 {
-	struct ct_hdr_s       *cthdr = (struct ct_hdr_s *) pyld;
-	struct fcgs_ganxt_req_s *ganxt =
-			(struct fcgs_ganxt_req_s *) (cthdr + 1);
+	struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
+	struct fcgs_ganxt_req_s *ganxt = (struct fcgs_ganxt_req_s *)(cthdr + 1);
 	u32        d_id = bfa_os_hton3b(FC_NAME_SERVER);
 	u32        d_id = bfa_os_hton3b(FC_NAME_SERVER);
 
 
 	fc_gs_fchdr_build(fchs, d_id, s_id, 0);
 	fc_gs_fchdr_build(fchs, d_id, s_id, 0);
@@ -1379,7 +1340,7 @@ fc_fdmi_reqhdr_build(struct fchs_s *fchs, void *pyld, u32 s_id,
 		     u16 cmd_code)
 		     u16 cmd_code)
 {
 {
 
 
-	struct ct_hdr_s       *cthdr = (struct ct_hdr_s *) pyld;
+	struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
 	u32        d_id = bfa_os_hton3b(FC_MGMT_SERVER);
 	u32        d_id = bfa_os_hton3b(FC_MGMT_SERVER);
 
 
 	fc_gs_fchdr_build(fchs, d_id, s_id, 0);
 	fc_gs_fchdr_build(fchs, d_id, s_id, 0);
@@ -1409,12 +1370,12 @@ fc_get_fc4type_bitmask(u8 fc4_type, u8 *bit_mask)
 }
 }
 
 
 /*
 /*
- * GMAL Request
+ *	GMAL Request
  */
  */
 u16
 u16
 fc_gmal_req_build(struct fchs_s *fchs, void *pyld, u32 s_id, wwn_t wwn)
 fc_gmal_req_build(struct fchs_s *fchs, void *pyld, u32 s_id, wwn_t wwn)
 {
 {
-	struct ct_hdr_s       *cthdr = (struct ct_hdr_s *) pyld;
+	struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
 	fcgs_gmal_req_t *gmal = (fcgs_gmal_req_t *) (cthdr + 1);
 	fcgs_gmal_req_t *gmal = (fcgs_gmal_req_t *) (cthdr + 1);
 	u32        d_id = bfa_os_hton3b(FC_MGMT_SERVER);
 	u32        d_id = bfa_os_hton3b(FC_MGMT_SERVER);
 
 
@@ -1434,7 +1395,7 @@ fc_gmal_req_build(struct fchs_s *fchs, void *pyld, u32 s_id, wwn_t wwn)
 u16
 u16
 fc_gfn_req_build(struct fchs_s *fchs, void *pyld, u32 s_id, wwn_t wwn)
 fc_gfn_req_build(struct fchs_s *fchs, void *pyld, u32 s_id, wwn_t wwn)
 {
 {
-	struct ct_hdr_s       *cthdr = (struct ct_hdr_s *) pyld;
+	struct ct_hdr_s *cthdr = (struct ct_hdr_s *) pyld;
 	fcgs_gfn_req_t *gfn = (fcgs_gfn_req_t *) (cthdr + 1);
 	fcgs_gfn_req_t *gfn = (fcgs_gfn_req_t *) (cthdr + 1);
 	u32        d_id = bfa_os_hton3b(FC_MGMT_SERVER);
 	u32        d_id = bfa_os_hton3b(FC_MGMT_SERVER);
 
 

+ 316 - 0
drivers/scsi/bfa/bfa_fcbuild.h

@@ -0,0 +1,316 @@
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ *
+ * Linux driver for Brocade Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+/*
+ * fcbuild.h - FC link service frame building and parsing routines
+ */
+
+#ifndef __FCBUILD_H__
+#define __FCBUILD_H__
+
+#include "bfa_os_inc.h"
+#include "bfa_fc.h"
+#include "bfa_defs_fcs.h"
+
+/*
+ * Utility Macros/functions
+ */
+
+#define wwn_is_equal(_wwn1, _wwn2)		\
+	(memcmp(&(_wwn1), &(_wwn2), sizeof(wwn_t)) == 0)
+
+#define fc_roundup(_l, _s) (((_l) + ((_s) - 1)) & ~((_s) - 1))
+
+/*
+ * Given the fc response length, this routine will return
+ * the length of the actual payload bytes following the CT header.
+ *
+ * Assumes the input response length does not include the crc, eof, etc.
+ */
+static inline   u32
+fc_get_ctresp_pyld_len(u32 resp_len)
+{
+	return resp_len - sizeof(struct ct_hdr_s);
+}
+
+/*
+ * Convert RPSC speed to bfa speed value.
+ */
+static inline  enum bfa_port_speed
+fc_rpsc_operspeed_to_bfa_speed(enum fc_rpsc_op_speed speed)
+{
+	switch (speed) {
+
+	case RPSC_OP_SPEED_1G:
+		return BFA_PORT_SPEED_1GBPS;
+
+	case RPSC_OP_SPEED_2G:
+		return BFA_PORT_SPEED_2GBPS;
+
+	case RPSC_OP_SPEED_4G:
+		return BFA_PORT_SPEED_4GBPS;
+
+	case RPSC_OP_SPEED_8G:
+		return BFA_PORT_SPEED_8GBPS;
+
+	case RPSC_OP_SPEED_10G:
+		return BFA_PORT_SPEED_10GBPS;
+
+	default:
+		return BFA_PORT_SPEED_UNKNOWN;
+	}
+}
+
+/*
+ * Convert bfa speed to RPSC speed value.
+ */
+static inline   enum fc_rpsc_op_speed
+fc_bfa_speed_to_rpsc_operspeed(enum bfa_port_speed op_speed)
+{
+	switch (op_speed) {
+
+	case BFA_PORT_SPEED_1GBPS:
+		return RPSC_OP_SPEED_1G;
+
+	case BFA_PORT_SPEED_2GBPS:
+		return RPSC_OP_SPEED_2G;
+
+	case BFA_PORT_SPEED_4GBPS:
+		return RPSC_OP_SPEED_4G;
+
+	case BFA_PORT_SPEED_8GBPS:
+		return RPSC_OP_SPEED_8G;
+
+	case BFA_PORT_SPEED_10GBPS:
+		return RPSC_OP_SPEED_10G;
+
+	default:
+		return RPSC_OP_SPEED_NOT_EST;
+	}
+}
+
+enum fc_parse_status {
+	FC_PARSE_OK = 0,
+	FC_PARSE_FAILURE = 1,
+	FC_PARSE_BUSY = 2,
+	FC_PARSE_LEN_INVAL,
+	FC_PARSE_ACC_INVAL,
+	FC_PARSE_PWWN_NOT_EQUAL,
+	FC_PARSE_NWWN_NOT_EQUAL,
+	FC_PARSE_RXSZ_INVAL,
+	FC_PARSE_NOT_FCP,
+	FC_PARSE_OPAFLAG_INVAL,
+	FC_PARSE_RPAFLAG_INVAL,
+	FC_PARSE_OPA_INVAL,
+	FC_PARSE_RPA_INVAL,
+
+};
+
+struct fc_templates_s {
+	struct fchs_s fc_els_req;
+	struct fchs_s fc_bls_req;
+	struct fc_logi_s plogi;
+	struct fc_rrq_s rrq;
+};
+
+void            fcbuild_init(void);
+
+u16        fc_flogi_build(struct fchs_s *fchs, struct fc_logi_s *flogi,
+			u32 s_id, u16 ox_id, wwn_t port_name, wwn_t node_name,
+			       u16 pdu_size, u8 set_npiv, u8 set_auth,
+			       u16 local_bb_credits);
+
+u16        fc_fdisc_build(struct fchs_s *buf, struct fc_logi_s *flogi, u32 s_id,
+			       u16 ox_id, wwn_t port_name, wwn_t node_name,
+			       u16 pdu_size);
+
+u16        fc_flogi_acc_build(struct fchs_s *fchs, struct fc_logi_s *flogi,
+				   u32 s_id, u16 ox_id,
+				   wwn_t port_name, wwn_t node_name,
+				   u16 pdu_size,
+				   u16 local_bb_credits);
+
+u16        fc_plogi_build(struct fchs_s *fchs, void *pld, u32 d_id,
+			       u32 s_id, u16 ox_id, wwn_t port_name,
+			       wwn_t node_name, u16 pdu_size);
+
+enum fc_parse_status fc_plogi_parse(struct fchs_s *fchs);
+
+u16        fc_abts_build(struct fchs_s *buf, u32 d_id, u32 s_id,
+			      u16 ox_id);
+
+enum fc_parse_status fc_abts_rsp_parse(struct fchs_s *buf, int len);
+
+u16        fc_rrq_build(struct fchs_s *buf, struct fc_rrq_s *rrq, u32 d_id,
+			     u32 s_id, u16 ox_id, u16 rrq_oxid);
+enum fc_parse_status fc_rrq_rsp_parse(struct fchs_s *buf, int len);
+
+u16        fc_rspnid_build(struct fchs_s *fchs, void *pld, u32 s_id,
+				u16 ox_id, u8 *name);
+
+u16        fc_rftid_build(struct fchs_s *fchs, void *pld, u32 s_id,
+			       u16 ox_id, enum bfa_lport_role role);
+
+u16       fc_rftid_build_sol(struct fchs_s *fchs, void *pyld, u32 s_id,
+				   u16 ox_id, u8 *fc4_bitmap,
+				   u32 bitmap_size);
+
+u16	fc_rffid_build(struct fchs_s *fchs, void *pyld, u32 s_id,
+			u16 ox_id, u8 fc4_type, u8 fc4_ftrs);
+
+u16        fc_gidpn_build(struct fchs_s *fchs, void *pyld, u32 s_id,
+			       u16 ox_id, wwn_t port_name);
+
+u16        fc_gpnid_build(struct fchs_s *fchs, void *pld, u32 s_id,
+			       u16 ox_id, u32 port_id);
+
+u16        fc_scr_build(struct fchs_s *fchs, struct fc_scr_s *scr,
+			u8 set_br_reg, u32 s_id, u16 ox_id);
+
+u16        fc_plogi_acc_build(struct fchs_s *fchs, void *pld, u32 d_id,
+				   u32 s_id, u16 ox_id,
+				   wwn_t port_name, wwn_t node_name,
+				   u16 pdu_size);
+
+u16        fc_adisc_build(struct fchs_s *fchs, struct fc_adisc_s *adisc,
+			u32 d_id, u32 s_id, u16 ox_id, wwn_t port_name,
+			       wwn_t node_name);
+
+enum fc_parse_status fc_adisc_parse(struct fchs_s *fchs, void *pld,
+			u32 host_dap, wwn_t node_name, wwn_t port_name);
+
+enum fc_parse_status fc_adisc_rsp_parse(struct fc_adisc_s *adisc, int len,
+				 wwn_t port_name, wwn_t node_name);
+
+u16        fc_adisc_acc_build(struct fchs_s *fchs, struct fc_adisc_s *adisc,
+				   u32 d_id, u32 s_id, u16 ox_id,
+				   wwn_t port_name, wwn_t node_name);
+u16        fc_ls_rjt_build(struct fchs_s *fchs, struct fc_ls_rjt_s *ls_rjt,
+				u32 d_id, u32 s_id, u16 ox_id,
+				u8 reason_code, u8 reason_code_expl);
+u16        fc_ls_acc_build(struct fchs_s *fchs, struct fc_els_cmd_s *els_cmd,
+				u32 d_id, u32 s_id, u16 ox_id);
+u16        fc_prli_build(struct fchs_s *fchs, void *pld, u32 d_id,
+			      u32 s_id, u16 ox_id);
+
+enum fc_parse_status fc_prli_rsp_parse(struct fc_prli_s *prli, int len);
+
+u16        fc_prli_acc_build(struct fchs_s *fchs, void *pld, u32 d_id,
+				  u32 s_id, u16 ox_id,
+				  enum bfa_lport_role role);
+
+u16        fc_rnid_build(struct fchs_s *fchs, struct fc_rnid_cmd_s *rnid,
+			      u32 d_id, u32 s_id, u16 ox_id,
+			      u32 data_format);
+
+u16        fc_rnid_acc_build(struct fchs_s *fchs,
+			struct fc_rnid_acc_s *rnid_acc, u32 d_id, u32 s_id,
+			u16 ox_id, u32 data_format,
+			struct fc_rnid_common_id_data_s *common_id_data,
+			struct fc_rnid_general_topology_data_s *gen_topo_data);
+
+u16	fc_rpsc2_build(struct fchs_s *fchs, struct fc_rpsc2_cmd_s *rps2c,
+			u32 d_id, u32 s_id, u32 *pid_list, u16 npids);
+u16        fc_rpsc_build(struct fchs_s *fchs, struct fc_rpsc_cmd_s *rpsc,
+			      u32 d_id, u32 s_id, u16 ox_id);
+u16        fc_rpsc_acc_build(struct fchs_s *fchs,
+			struct fc_rpsc_acc_s *rpsc_acc, u32 d_id, u32 s_id,
+			u16 ox_id, struct fc_rpsc_speed_info_s *oper_speed);
+u16        fc_gid_ft_build(struct fchs_s *fchs, void *pld, u32 s_id,
+				u8 fc4_type);
+
+u16        fc_rpnid_build(struct fchs_s *fchs, void *pyld, u32 s_id,
+			       u32 port_id, wwn_t port_name);
+
+u16        fc_rnnid_build(struct fchs_s *fchs, void *pyld, u32 s_id,
+			       u32 port_id, wwn_t node_name);
+
+u16        fc_rcsid_build(struct fchs_s *fchs, void *pyld, u32 s_id,
+			       u32 port_id, u32 cos);
+
+u16        fc_rptid_build(struct fchs_s *fchs, void *pyld, u32 s_id,
+			       u32 port_id, u8 port_type);
+
+u16        fc_ganxt_build(struct fchs_s *fchs, void *pyld, u32 s_id,
+			       u32 port_id);
+
+u16        fc_logo_build(struct fchs_s *fchs, struct fc_logo_s *logo, u32 d_id,
+			      u32 s_id, u16 ox_id, wwn_t port_name);
+
+u16        fc_logo_acc_build(struct fchs_s *fchs, void *pld, u32 d_id,
+				  u32 s_id, u16 ox_id);
+
+u16        fc_fdmi_reqhdr_build(struct fchs_s *fchs, void *pyld, u32 s_id,
+				     u16 cmd_code);
+u16	fc_gmal_req_build(struct fchs_s *fchs, void *pyld, u32 s_id, wwn_t wwn);
+u16	fc_gfn_req_build(struct fchs_s *fchs, void *pyld, u32 s_id, wwn_t wwn);
+
+void		fc_get_fc4type_bitmask(u8 fc4_type, u8 *bit_mask);
+
+void		fc_els_req_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
+					 u16 ox_id);
+
+enum fc_parse_status	fc_els_rsp_parse(struct fchs_s *fchs, int len);
+
+enum fc_parse_status	fc_plogi_rsp_parse(struct fchs_s *fchs, int len,
+					wwn_t port_name);
+
+enum fc_parse_status	fc_prli_parse(struct fc_prli_s *prli);
+
+enum fc_parse_status	fc_pdisc_parse(struct fchs_s *fchs, wwn_t node_name,
+					wwn_t port_name);
+
+u16 fc_ba_acc_build(struct fchs_s *fchs, struct fc_ba_acc_s *ba_acc, u32 d_id,
+		u32 s_id, u16 ox_id, u16 rx_id);
+
+int fc_logout_params_pages(struct fchs_s *fc_frame, u8 els_code);
+
+u16 fc_tprlo_acc_build(struct fchs_s *fchs, struct fc_tprlo_acc_s *tprlo_acc,
+		u32 d_id, u32 s_id, u16 ox_id, int num_pages);
+
+u16 fc_prlo_acc_build(struct fchs_s *fchs, struct fc_prlo_acc_s *prlo_acc,
+		u32 d_id, u32 s_id, u16 ox_id, int num_pages);
+
+u16 fc_logo_rsp_parse(struct fchs_s *fchs, int len);
+
+u16 fc_pdisc_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
+		u16 ox_id, wwn_t port_name, wwn_t node_name,
+		u16 pdu_size);
+
+u16 fc_pdisc_rsp_parse(struct fchs_s *fchs, int len, wwn_t port_name);
+
+u16 fc_prlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
+		u16 ox_id, int num_pages);
+
+u16 fc_prlo_rsp_parse(struct fchs_s *fchs, int len);
+
+u16 fc_tprlo_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
+		u16 ox_id, int num_pages, enum fc_tprlo_type tprlo_type,
+		u32 tpr_id);
+
+u16 fc_tprlo_rsp_parse(struct fchs_s *fchs, int len);
+
+u16 fc_ba_rjt_build(struct fchs_s *fchs, u32 d_id, u32 s_id,
+		u16 ox_id, u32 reason_code, u32 reason_expl);
+
+u16 fc_gnnid_build(struct fchs_s *fchs, void *pyld, u32 s_id, u16 ox_id,
+		u32 port_id);
+
+u16 fc_ct_rsp_parse(struct ct_hdr_s *cthdr);
+
+u16 fc_rscn_build(struct fchs_s *fchs, struct fc_rscn_pl_s *rscn, u32 s_id,
+		u16 ox_id);
+#endif

+ 3439 - 21
drivers/scsi/bfa/bfa_fcpim.c

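The bfa_fcpim.c changes below are dominated by three driver state machines (itnim, ioim and tskim): each state is a handler function, bfa_sm_set_state() records the current handler in the object, and bfa_sm_send_event() invokes that handler with an event code, so a transition is just a pointer assignment. As a reading aid, here is a minimal standalone sketch of that dispatch pattern only; the sm_set_state/sm_send_event names and the toy states are illustrative stand-ins, not the driver's bfa_sm_* macros.

#include <stdio.h>

struct itn;                                   /* toy object owning a state machine  */
typedef void (*itn_sm_t)(struct itn *, int);  /* a state is just a handler function */

struct itn {
	itn_sm_t sm;                          /* current state == current handler   */
};

/* stand-ins for the driver's bfa_sm_set_state()/bfa_sm_send_event() macros */
#define sm_set_state(_itn, _state)   ((_itn)->sm = (_state))
#define sm_send_event(_itn, _event)  ((_itn)->sm((_itn), (_event)))

enum { EV_CREATE = 1, EV_ONLINE = 2 };

static void sm_created(struct itn *itn, int event);

/* uninit state: only a create event is expected here */
static void
sm_uninit(struct itn *itn, int event)
{
	if (event == EV_CREATE)
		sm_set_state(itn, sm_created);   /* transition: uninit -> created */
	else
		printf("uninit: unexpected event %d\n", event);
}

static void
sm_created(struct itn *itn, int event)
{
	printf("created: handling event %d\n", event);
}

int main(void)
{
	struct itn itn = { .sm = sm_uninit };

	sm_send_event(&itn, EV_CREATE);          /* dispatched to sm_uninit()  */
	sm_send_event(&itn, EV_ONLINE);          /* dispatched to sm_created() */
	return 0;
}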
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
  * All rights reserved
  * www.brocade.com
  *
@@ -15,18 +15,291 @@
  * General Public License for more details.
  */
 
-#include <bfa.h>
-#include <log/bfa_log_hal.h>
+#include "bfa_modules.h"
+#include "bfa_cb_ioim.h"
 
 BFA_TRC_FILE(HAL, FCPIM);
 BFA_MODULE(fcpim);
 
+
+#define bfa_fcpim_add_iostats(__l, __r, __stats)	\
+	(__l->__stats += __r->__stats)
+
+
+/**
+ *  BFA ITNIM Related definitions
+ */
+static void bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim);
+
+#define BFA_ITNIM_FROM_TAG(_fcpim, _tag)                                \
+	(((_fcpim)->itnim_arr + ((_tag) & ((_fcpim)->num_itnims - 1))))
+
+#define bfa_fcpim_additn(__itnim)					\
+	list_add_tail(&(__itnim)->qe, &(__itnim)->fcpim->itnim_q)
+#define bfa_fcpim_delitn(__itnim)	do {				\
+	bfa_assert(bfa_q_is_on_q(&(__itnim)->fcpim->itnim_q, __itnim));      \
+	bfa_itnim_update_del_itn_stats(__itnim);      \
+	list_del(&(__itnim)->qe);      \
+	bfa_assert(list_empty(&(__itnim)->io_q));      \
+	bfa_assert(list_empty(&(__itnim)->io_cleanup_q));      \
+	bfa_assert(list_empty(&(__itnim)->pending_q));      \
+} while (0)
+
+#define bfa_itnim_online_cb(__itnim) do {				\
+	if ((__itnim)->bfa->fcs)					\
+		bfa_cb_itnim_online((__itnim)->ditn);      \
+	else {								\
+		bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe,	\
+		__bfa_cb_itnim_online, (__itnim));      \
+	}								\
+} while (0)
+
+#define bfa_itnim_offline_cb(__itnim) do {				\
+	if ((__itnim)->bfa->fcs)					\
+		bfa_cb_itnim_offline((__itnim)->ditn);      \
+	else {								\
+		bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe,	\
+		__bfa_cb_itnim_offline, (__itnim));      \
+	}								\
+} while (0)
+
+#define bfa_itnim_sler_cb(__itnim) do {					\
+	if ((__itnim)->bfa->fcs)					\
+		bfa_cb_itnim_sler((__itnim)->ditn);      \
+	else {								\
+		bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe,	\
+		__bfa_cb_itnim_sler, (__itnim));      \
+	}								\
+} while (0)
+
+/**
+ *  bfa_itnim_sm BFA itnim state machine
+ */
+
+
+enum bfa_itnim_event {
+	BFA_ITNIM_SM_CREATE = 1,	/*  itnim is created */
+	BFA_ITNIM_SM_ONLINE = 2,	/*  itnim is online */
+	BFA_ITNIM_SM_OFFLINE = 3,	/*  itnim is offline */
+	BFA_ITNIM_SM_FWRSP = 4,		/*  firmware response */
+	BFA_ITNIM_SM_DELETE = 5,	/*  deleting an existing itnim */
+	BFA_ITNIM_SM_CLEANUP = 6,	/*  IO cleanup completion */
+	BFA_ITNIM_SM_SLER = 7,		/*  second level error recovery */
+	BFA_ITNIM_SM_HWFAIL = 8,	/*  IOC h/w failure event */
+	BFA_ITNIM_SM_QRESUME = 9,	/*  queue space available */
+};
+
+/**
+ *  BFA IOIM related definitions
+ */
+#define bfa_ioim_move_to_comp_q(__ioim) do {				\
+	list_del(&(__ioim)->qe);					\
+	list_add_tail(&(__ioim)->qe, &(__ioim)->fcpim->ioim_comp_q);	\
+} while (0)
+
+
+#define bfa_ioim_cb_profile_comp(__fcpim, __ioim) do {			\
+	if ((__fcpim)->profile_comp)					\
+		(__fcpim)->profile_comp(__ioim);			\
+} while (0)
+
+#define bfa_ioim_cb_profile_start(__fcpim, __ioim) do {			\
+	if ((__fcpim)->profile_start)					\
+		(__fcpim)->profile_start(__ioim);			\
+} while (0)
+/**
+ *  hal_ioim_sm
+ */
+
+/**
+ * IO state machine events
+ */
+enum bfa_ioim_event {
+	BFA_IOIM_SM_START	= 1,	/*  io start request from host */
+	BFA_IOIM_SM_COMP_GOOD	= 2,	/*  io good comp, resource free */
+	BFA_IOIM_SM_COMP	= 3,	/*  io comp, resource is free */
+	BFA_IOIM_SM_COMP_UTAG	= 4,	/*  io comp, resource is free */
+	BFA_IOIM_SM_DONE	= 5,	/*  io comp, resource not free */
+	BFA_IOIM_SM_FREE	= 6,	/*  io resource is freed */
+	BFA_IOIM_SM_ABORT	= 7,	/*  abort request from scsi stack */
+	BFA_IOIM_SM_ABORT_COMP	= 8,	/*  abort from f/w */
+	BFA_IOIM_SM_ABORT_DONE	= 9,	/*  abort completion from f/w */
+	BFA_IOIM_SM_QRESUME	= 10,	/*  CQ space available to queue IO */
+	BFA_IOIM_SM_SGALLOCED	= 11,	/*  SG page allocation successful */
+	BFA_IOIM_SM_SQRETRY	= 12,	/*  sequence recovery retry */
+	BFA_IOIM_SM_HCB		= 13,	/*  bfa callback complete */
+	BFA_IOIM_SM_CLEANUP	= 14,	/*  IO cleanup from itnim */
+	BFA_IOIM_SM_TMSTART	= 15,	/*  IO cleanup from tskim */
+	BFA_IOIM_SM_TMDONE	= 16,	/*  IO cleanup from tskim */
+	BFA_IOIM_SM_HWFAIL	= 17,	/*  IOC h/w failure event */
+	BFA_IOIM_SM_IOTOV	= 18,	/*  ITN offline TOV */
+};
+
+
+/**
+ *  BFA TSKIM related definitions
+ */
+
+/**
+ * task management completion handling
+ */
+#define bfa_tskim_qcomp(__tskim, __cbfn) do {				\
+	bfa_cb_queue((__tskim)->bfa, &(__tskim)->hcb_qe, __cbfn, (__tskim));\
+	bfa_tskim_notify_comp(__tskim);      \
+} while (0)
+
+#define bfa_tskim_notify_comp(__tskim) do {				\
+	if ((__tskim)->notify)						\
+		bfa_itnim_tskdone((__tskim)->itnim);      \
+} while (0)
+
+
+enum bfa_tskim_event {
+	BFA_TSKIM_SM_START	= 1,	/*  TM command start		*/
+	BFA_TSKIM_SM_DONE	= 2,	/*  TM completion		*/
+	BFA_TSKIM_SM_QRESUME	= 3,	/*  resume after qfull		*/
+	BFA_TSKIM_SM_HWFAIL	= 5,	/*  IOC h/w failure event	*/
+	BFA_TSKIM_SM_HCB	= 6,	/*  BFA callback completion	*/
+	BFA_TSKIM_SM_IOS_DONE	= 7,	/*  IO and sub TM completions	*/
+	BFA_TSKIM_SM_CLEANUP	= 8,	/*  TM cleanup on ITN offline	*/
+	BFA_TSKIM_SM_CLEANUP_DONE = 9,	/*  TM abort completion	*/
+};
+
+/**
+ * forward declaration for BFA ITNIM functions
+ */
+static void     bfa_itnim_iocdisable_cleanup(struct bfa_itnim_s *itnim);
+static bfa_boolean_t bfa_itnim_send_fwcreate(struct bfa_itnim_s *itnim);
+static bfa_boolean_t bfa_itnim_send_fwdelete(struct bfa_itnim_s *itnim);
+static void     bfa_itnim_cleanp_comp(void *itnim_cbarg);
+static void     bfa_itnim_cleanup(struct bfa_itnim_s *itnim);
+static void     __bfa_cb_itnim_online(void *cbarg, bfa_boolean_t complete);
+static void     __bfa_cb_itnim_offline(void *cbarg, bfa_boolean_t complete);
+static void     __bfa_cb_itnim_sler(void *cbarg, bfa_boolean_t complete);
+static void     bfa_itnim_iotov_online(struct bfa_itnim_s *itnim);
+static void     bfa_itnim_iotov_cleanup(struct bfa_itnim_s *itnim);
+static void     bfa_itnim_iotov(void *itnim_arg);
+static void     bfa_itnim_iotov_start(struct bfa_itnim_s *itnim);
+static void     bfa_itnim_iotov_stop(struct bfa_itnim_s *itnim);
+static void     bfa_itnim_iotov_delete(struct bfa_itnim_s *itnim);
+
+/**
+ * forward declaration of ITNIM state machine
+ */
+static void     bfa_itnim_sm_uninit(struct bfa_itnim_s *itnim,
+					enum bfa_itnim_event event);
+static void     bfa_itnim_sm_created(struct bfa_itnim_s *itnim,
+					enum bfa_itnim_event event);
+static void     bfa_itnim_sm_fwcreate(struct bfa_itnim_s *itnim,
+					enum bfa_itnim_event event);
+static void     bfa_itnim_sm_delete_pending(struct bfa_itnim_s *itnim,
+					enum bfa_itnim_event event);
+static void     bfa_itnim_sm_online(struct bfa_itnim_s *itnim,
+					enum bfa_itnim_event event);
+static void     bfa_itnim_sm_sler(struct bfa_itnim_s *itnim,
+					enum bfa_itnim_event event);
+static void     bfa_itnim_sm_cleanup_offline(struct bfa_itnim_s *itnim,
+					enum bfa_itnim_event event);
+static void     bfa_itnim_sm_cleanup_delete(struct bfa_itnim_s *itnim,
+					enum bfa_itnim_event event);
+static void     bfa_itnim_sm_fwdelete(struct bfa_itnim_s *itnim,
+					enum bfa_itnim_event event);
+static void     bfa_itnim_sm_offline(struct bfa_itnim_s *itnim,
+					enum bfa_itnim_event event);
+static void     bfa_itnim_sm_iocdisable(struct bfa_itnim_s *itnim,
+					enum bfa_itnim_event event);
+static void     bfa_itnim_sm_deleting(struct bfa_itnim_s *itnim,
+					enum bfa_itnim_event event);
+static void     bfa_itnim_sm_fwcreate_qfull(struct bfa_itnim_s *itnim,
+					enum bfa_itnim_event event);
+static void     bfa_itnim_sm_fwdelete_qfull(struct bfa_itnim_s *itnim,
+					enum bfa_itnim_event event);
+static void     bfa_itnim_sm_deleting_qfull(struct bfa_itnim_s *itnim,
+					enum bfa_itnim_event event);
+
+/**
+ * forward declaration for BFA IOIM functions
+ */
+static bfa_boolean_t	bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim);
+static bfa_boolean_t	bfa_ioim_sge_setup(struct bfa_ioim_s *ioim);
+static void		bfa_ioim_sgpg_setup(struct bfa_ioim_s *ioim);
+static bfa_boolean_t	bfa_ioim_send_abort(struct bfa_ioim_s *ioim);
+static void		bfa_ioim_notify_cleanup(struct bfa_ioim_s *ioim);
+static void __bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete);
+static void __bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete);
+static void __bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete);
+static void __bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete);
+static void __bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete);
+static bfa_boolean_t    bfa_ioim_is_abortable(struct bfa_ioim_s *ioim);
+
+
+/**
+ * forward declaration of BFA IO state machine
+ */
+static void     bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim,
+					enum bfa_ioim_event event);
+static void     bfa_ioim_sm_sgalloc(struct bfa_ioim_s *ioim,
+					enum bfa_ioim_event event);
+static void     bfa_ioim_sm_active(struct bfa_ioim_s *ioim,
+					enum bfa_ioim_event event);
+static void     bfa_ioim_sm_abort(struct bfa_ioim_s *ioim,
+					enum bfa_ioim_event event);
+static void     bfa_ioim_sm_cleanup(struct bfa_ioim_s *ioim,
+					enum bfa_ioim_event event);
+static void     bfa_ioim_sm_qfull(struct bfa_ioim_s *ioim,
+					enum bfa_ioim_event event);
+static void     bfa_ioim_sm_abort_qfull(struct bfa_ioim_s *ioim,
+					enum bfa_ioim_event event);
+static void     bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s *ioim,
+					enum bfa_ioim_event event);
+static void     bfa_ioim_sm_hcb(struct bfa_ioim_s *ioim,
+					enum bfa_ioim_event event);
+static void     bfa_ioim_sm_hcb_free(struct bfa_ioim_s *ioim,
+					enum bfa_ioim_event event);
+static void     bfa_ioim_sm_resfree(struct bfa_ioim_s *ioim,
+					enum bfa_ioim_event event);
+static void	bfa_ioim_sm_cmnd_retry(struct bfa_ioim_s *ioim,
+					enum bfa_ioim_event event);
+
+/**
+ * forward declaration for BFA TSKIM functions
+ */
+static void     __bfa_cb_tskim_done(void *cbarg, bfa_boolean_t complete);
+static void     __bfa_cb_tskim_failed(void *cbarg, bfa_boolean_t complete);
+static bfa_boolean_t bfa_tskim_match_scope(struct bfa_tskim_s *tskim,
+					lun_t lun);
+static void     bfa_tskim_gather_ios(struct bfa_tskim_s *tskim);
+static void     bfa_tskim_cleanp_comp(void *tskim_cbarg);
+static void     bfa_tskim_cleanup_ios(struct bfa_tskim_s *tskim);
+static bfa_boolean_t bfa_tskim_send(struct bfa_tskim_s *tskim);
+static bfa_boolean_t bfa_tskim_send_abort(struct bfa_tskim_s *tskim);
+static void     bfa_tskim_iocdisable_ios(struct bfa_tskim_s *tskim);
+
+
+/**
+ * forward declaration of BFA TSKIM state machine
+ */
+static void     bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim,
+					enum bfa_tskim_event event);
+static void     bfa_tskim_sm_active(struct bfa_tskim_s *tskim,
+					enum bfa_tskim_event event);
+static void     bfa_tskim_sm_cleanup(struct bfa_tskim_s *tskim,
+					enum bfa_tskim_event event);
+static void     bfa_tskim_sm_iocleanup(struct bfa_tskim_s *tskim,
+					enum bfa_tskim_event event);
+static void     bfa_tskim_sm_qfull(struct bfa_tskim_s *tskim,
+					enum bfa_tskim_event event);
+static void     bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim,
+					enum bfa_tskim_event event);
+static void     bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim,
+					enum bfa_tskim_event event);
+
 /**
  *  hal_fcpim_mod BFA FCP Initiator Mode module
  */
 
 /**
- * 		Compute and return memory needed by FCP(im) module.
+ *	Compute and return memory needed by FCP(im) module.
  */
 static void
 bfa_fcpim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
@@ -58,7 +331,7 @@ bfa_fcpim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
 
 static void
 bfa_fcpim_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
-		     struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
+		struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
 {
 	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
 
@@ -67,12 +340,14 @@ bfa_fcpim_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
 	bfa_trc(bfa, cfg->fwcfg.num_ioim_reqs);
 	bfa_trc(bfa, cfg->fwcfg.num_tskim_reqs);
 
-	fcpim->bfa            = bfa;
-	fcpim->num_itnims     = cfg->fwcfg.num_rports;
+	fcpim->bfa		= bfa;
+	fcpim->num_itnims	= cfg->fwcfg.num_rports;
 	fcpim->num_ioim_reqs  = cfg->fwcfg.num_ioim_reqs;
 	fcpim->num_tskim_reqs = cfg->fwcfg.num_tskim_reqs;
-	fcpim->path_tov       = cfg->drvcfg.path_tov;
-	fcpim->delay_comp	  = cfg->drvcfg.delay_comp;
+	fcpim->path_tov		= cfg->drvcfg.path_tov;
+	fcpim->delay_comp	= cfg->drvcfg.delay_comp;
+	fcpim->profile_comp = NULL;
+	fcpim->profile_start = NULL;
 
 	bfa_itnim_attach(fcpim, meminfo);
 	bfa_tskim_attach(fcpim, meminfo);
@@ -103,7 +378,7 @@ bfa_fcpim_iocdisable(struct bfa_s *bfa)
 {
 	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
 	struct bfa_itnim_s *itnim;
-	struct list_head        *qe, *qen;
+	struct list_head *qe, *qen;
 
 	list_for_each_safe(qe, qen, &fcpim->itnim_q) {
 		itnim = (struct bfa_itnim_s *) qe;
@@ -111,6 +386,56 @@ bfa_fcpim_iocdisable(struct bfa_s *bfa)
 	}
 }
 
+void
+bfa_fcpim_add_stats(struct bfa_itnim_iostats_s *lstats,
+		struct bfa_itnim_iostats_s *rstats)
+{
+	bfa_fcpim_add_iostats(lstats, rstats, total_ios);
+	bfa_fcpim_add_iostats(lstats, rstats, qresumes);
+	bfa_fcpim_add_iostats(lstats, rstats, no_iotags);
+	bfa_fcpim_add_iostats(lstats, rstats, io_aborts);
+	bfa_fcpim_add_iostats(lstats, rstats, no_tskims);
+	bfa_fcpim_add_iostats(lstats, rstats, iocomp_ok);
+	bfa_fcpim_add_iostats(lstats, rstats, iocomp_underrun);
+	bfa_fcpim_add_iostats(lstats, rstats, iocomp_overrun);
+	bfa_fcpim_add_iostats(lstats, rstats, iocomp_aborted);
+	bfa_fcpim_add_iostats(lstats, rstats, iocomp_timedout);
+	bfa_fcpim_add_iostats(lstats, rstats, iocom_nexus_abort);
+	bfa_fcpim_add_iostats(lstats, rstats, iocom_proto_err);
+	bfa_fcpim_add_iostats(lstats, rstats, iocom_dif_err);
+	bfa_fcpim_add_iostats(lstats, rstats, iocom_sqer_needed);
+	bfa_fcpim_add_iostats(lstats, rstats, iocom_res_free);
+	bfa_fcpim_add_iostats(lstats, rstats, iocom_hostabrts);
+	bfa_fcpim_add_iostats(lstats, rstats, iocom_utags);
+	bfa_fcpim_add_iostats(lstats, rstats, io_cleanups);
+	bfa_fcpim_add_iostats(lstats, rstats, io_tmaborts);
+	bfa_fcpim_add_iostats(lstats, rstats, onlines);
+	bfa_fcpim_add_iostats(lstats, rstats, offlines);
+	bfa_fcpim_add_iostats(lstats, rstats, creates);
+	bfa_fcpim_add_iostats(lstats, rstats, deletes);
+	bfa_fcpim_add_iostats(lstats, rstats, create_comps);
+	bfa_fcpim_add_iostats(lstats, rstats, delete_comps);
+	bfa_fcpim_add_iostats(lstats, rstats, sler_events);
+	bfa_fcpim_add_iostats(lstats, rstats, fw_create);
+	bfa_fcpim_add_iostats(lstats, rstats, fw_delete);
+	bfa_fcpim_add_iostats(lstats, rstats, ioc_disabled);
+	bfa_fcpim_add_iostats(lstats, rstats, cleanup_comps);
+	bfa_fcpim_add_iostats(lstats, rstats, tm_cmnds);
+	bfa_fcpim_add_iostats(lstats, rstats, tm_fw_rsps);
+	bfa_fcpim_add_iostats(lstats, rstats, tm_success);
+	bfa_fcpim_add_iostats(lstats, rstats, tm_failures);
+	bfa_fcpim_add_iostats(lstats, rstats, tm_io_comps);
+	bfa_fcpim_add_iostats(lstats, rstats, tm_qresumes);
+	bfa_fcpim_add_iostats(lstats, rstats, tm_iocdowns);
+	bfa_fcpim_add_iostats(lstats, rstats, tm_cleanups);
+	bfa_fcpim_add_iostats(lstats, rstats, tm_cleanup_comps);
+	bfa_fcpim_add_iostats(lstats, rstats, io_comps);
+	bfa_fcpim_add_iostats(lstats, rstats, input_reqs);
+	bfa_fcpim_add_iostats(lstats, rstats, output_reqs);
+	bfa_fcpim_add_iostats(lstats, rstats, rd_throughput);
+	bfa_fcpim_add_iostats(lstats, rstats, wr_throughput);
+}
+
 void
 bfa_fcpim_path_tov_set(struct bfa_s *bfa, u16 path_tov)
 {
@@ -130,21 +455,113 @@ bfa_fcpim_path_tov_get(struct bfa_s *bfa)
 }
 
 bfa_status_t
-bfa_fcpim_get_modstats(struct bfa_s *bfa, struct bfa_fcpim_stats_s *modstats)
+bfa_fcpim_port_iostats(struct bfa_s *bfa, struct bfa_itnim_iostats_s *stats,
+	u8 lp_tag)
+{
+	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
+	struct list_head *qe, *qen;
+	struct bfa_itnim_s *itnim;
+
+	/* accumulate IO stats from itnim */
+	bfa_os_memset(stats, 0, sizeof(struct bfa_itnim_iostats_s));
+	list_for_each_safe(qe, qen, &fcpim->itnim_q) {
+		itnim = (struct bfa_itnim_s *) qe;
+		if (itnim->rport->rport_info.lp_tag != lp_tag)
+			continue;
+		bfa_fcpim_add_stats(stats, &(itnim->stats));
+	}
+	return BFA_STATUS_OK;
+}
+bfa_status_t
+bfa_fcpim_get_modstats(struct bfa_s *bfa, struct bfa_itnim_iostats_s *modstats)
+{
+	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
+	struct list_head *qe, *qen;
+	struct bfa_itnim_s *itnim;
+
+	/* accumulate IO stats from itnim */
+	bfa_os_memset(modstats, 0, sizeof(struct bfa_itnim_iostats_s));
+	list_for_each_safe(qe, qen, &fcpim->itnim_q) {
+		itnim = (struct bfa_itnim_s *) qe;
+		bfa_fcpim_add_stats(modstats, &(itnim->stats));
+	}
+	return BFA_STATUS_OK;
+}
+
+bfa_status_t
+bfa_fcpim_get_del_itn_stats(struct bfa_s *bfa,
+	 struct bfa_fcpim_del_itn_stats_s *modstats)
+{
+	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
+
+	*modstats = fcpim->del_itn_stats;
+
+	return BFA_STATUS_OK;
+}
+
+
+bfa_status_t
+bfa_fcpim_profile_on(struct bfa_s *bfa, u32 time)
+{
+	struct bfa_itnim_s *itnim;
+	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
+	struct list_head *qe, *qen;
+
+	/* accumulate IO stats from itnim */
+	list_for_each_safe(qe, qen, &fcpim->itnim_q) {
+		itnim = (struct bfa_itnim_s *) qe;
+		bfa_itnim_clear_stats(itnim);
+	}
+	fcpim->io_profile = BFA_TRUE;
+	fcpim->io_profile_start_time = time;
+	fcpim->profile_comp = bfa_ioim_profile_comp;
+	fcpim->profile_start = bfa_ioim_profile_start;
+
+	return BFA_STATUS_OK;
+}
+bfa_status_t
+bfa_fcpim_profile_off(struct bfa_s *bfa)
 {
 	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
+	fcpim->io_profile = BFA_FALSE;
+	fcpim->io_profile_start_time = 0;
+	fcpim->profile_comp = NULL;
+	fcpim->profile_start = NULL;
+	return BFA_STATUS_OK;
+}
 
-	*modstats = fcpim->stats;
+bfa_status_t
+bfa_fcpim_port_clear_iostats(struct bfa_s *bfa, u8 lp_tag)
+{
+	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
+	struct list_head *qe, *qen;
+	struct bfa_itnim_s *itnim;
 
+	/* clear IO stats from all active itnims */
+	list_for_each_safe(qe, qen, &fcpim->itnim_q) {
+		itnim = (struct bfa_itnim_s *) qe;
+		if (itnim->rport->rport_info.lp_tag != lp_tag)
+			continue;
+		bfa_itnim_clear_stats(itnim);
+	}
 	return BFA_STATUS_OK;
+
 }
 
 bfa_status_t
 bfa_fcpim_clr_modstats(struct bfa_s *bfa)
 {
 	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
+	struct list_head *qe, *qen;
+	struct bfa_itnim_s *itnim;
 
-	memset(&fcpim->stats, 0, sizeof(struct bfa_fcpim_stats_s));
+	/* clear IO stats from all active itnims */
+	list_for_each_safe(qe, qen, &fcpim->itnim_q) {
+		itnim = (struct bfa_itnim_s *) qe;
+		bfa_itnim_clear_stats(itnim);
+	}
+	bfa_os_memset(&fcpim->del_itn_stats, 0,
+		sizeof(struct bfa_fcpim_del_itn_stats_s));
 
 	return BFA_STATUS_OK;
 }
@@ -176,14 +593,6 @@ bfa_fcpim_update_ioredirect(struct bfa_s *bfa)
 	 * IO redirection is turned off when QoS is enabled and vice versa
 	 */
 	ioredirect = bfa_fcport_is_qos_enabled(bfa) ? BFA_FALSE : BFA_TRUE;
-
-	/*
-	 * Notify the bfad module of a possible state change in
-	 * IO redirection capability, due to a QoS state change. bfad will
-	 * check on the support for io redirection and update the
-	 * fcpim's ioredirect state accordingly.
-	 */
-	bfa_cb_ioredirect_state_change((void *)(bfa->bfad), ioredirect);
 }
 
 void
@@ -192,3 +601,3012 @@ bfa_fcpim_set_ioredirect(struct bfa_s *bfa, bfa_boolean_t state)
 	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
 	fcpim->ioredirect = state;
 }
+
+
+
+/**
+ *  BFA ITNIM module state machine functions
+ */
+
+/**
+ *	Beginning/unallocated state - no events expected.
+ */
+static void
+bfa_itnim_sm_uninit(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
+{
+	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
+	bfa_trc(itnim->bfa, event);
+
+	switch (event) {
+	case BFA_ITNIM_SM_CREATE:
+		bfa_sm_set_state(itnim, bfa_itnim_sm_created);
+		itnim->is_online = BFA_FALSE;
+		bfa_fcpim_additn(itnim);
+		break;
+
+	default:
+		bfa_sm_fault(itnim->bfa, event);
+	}
+}
+
+/**
+ *	Beginning state, only online event expected.
+ */
+static void
+bfa_itnim_sm_created(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
+{
+	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
+	bfa_trc(itnim->bfa, event);
+
+	switch (event) {
+	case BFA_ITNIM_SM_ONLINE:
+		if (bfa_itnim_send_fwcreate(itnim))
+			bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
+		else
+			bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull);
+		break;
+
+	case BFA_ITNIM_SM_DELETE:
+		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
+		bfa_fcpim_delitn(itnim);
+		break;
+
+	case BFA_ITNIM_SM_HWFAIL:
+		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
+		break;
+
+	default:
+		bfa_sm_fault(itnim->bfa, event);
+	}
+}
+
+/**
+ *	Waiting for itnim create response from firmware.
+ */
+static void
+bfa_itnim_sm_fwcreate(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
+{
+	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
+	bfa_trc(itnim->bfa, event);
+
+	switch (event) {
+	case BFA_ITNIM_SM_FWRSP:
+		bfa_sm_set_state(itnim, bfa_itnim_sm_online);
+		itnim->is_online = BFA_TRUE;
+		bfa_itnim_iotov_online(itnim);
+		bfa_itnim_online_cb(itnim);
+		break;
+
+	case BFA_ITNIM_SM_DELETE:
+		bfa_sm_set_state(itnim, bfa_itnim_sm_delete_pending);
+		break;
+
+	case BFA_ITNIM_SM_OFFLINE:
+		if (bfa_itnim_send_fwdelete(itnim))
+			bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete);
+		else
+			bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete_qfull);
+		break;
+
+	case BFA_ITNIM_SM_HWFAIL:
+		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
+		break;
+
+	default:
+		bfa_sm_fault(itnim->bfa, event);
+	}
+}
+
+static void
+bfa_itnim_sm_fwcreate_qfull(struct bfa_itnim_s *itnim,
+			enum bfa_itnim_event event)
+{
+	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
+	bfa_trc(itnim->bfa, event);
+
+	switch (event) {
+	case BFA_ITNIM_SM_QRESUME:
+		bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
+		bfa_itnim_send_fwcreate(itnim);
+		break;
+
+	case BFA_ITNIM_SM_DELETE:
+		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
+		bfa_reqq_wcancel(&itnim->reqq_wait);
+		bfa_fcpim_delitn(itnim);
+		break;
+
+	case BFA_ITNIM_SM_OFFLINE:
+		bfa_sm_set_state(itnim, bfa_itnim_sm_offline);
+		bfa_reqq_wcancel(&itnim->reqq_wait);
+		bfa_itnim_offline_cb(itnim);
+		break;
+
+	case BFA_ITNIM_SM_HWFAIL:
+		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
+		bfa_reqq_wcancel(&itnim->reqq_wait);
+		break;
+
+	default:
+		bfa_sm_fault(itnim->bfa, event);
+	}
+}
+
+/**
+ *	Waiting for itnim create response from firmware, a delete is pending.
+ */
+static void
+bfa_itnim_sm_delete_pending(struct bfa_itnim_s *itnim,
+				enum bfa_itnim_event event)
+{
+	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
+	bfa_trc(itnim->bfa, event);
+
+	switch (event) {
+	case BFA_ITNIM_SM_FWRSP:
+		if (bfa_itnim_send_fwdelete(itnim))
+			bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
+		else
+			bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull);
+		break;
+
+	case BFA_ITNIM_SM_HWFAIL:
+		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
+		bfa_fcpim_delitn(itnim);
+		break;
+
+	default:
+		bfa_sm_fault(itnim->bfa, event);
+	}
+}
+
+/**
+ *	Online state - normal parking state.
+ */
+static void
+bfa_itnim_sm_online(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
+{
+	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
+	bfa_trc(itnim->bfa, event);
+
+	switch (event) {
+	case BFA_ITNIM_SM_OFFLINE:
+		bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_offline);
+		itnim->is_online = BFA_FALSE;
+		bfa_itnim_iotov_start(itnim);
+		bfa_itnim_cleanup(itnim);
+		break;
+
+	case BFA_ITNIM_SM_DELETE:
+		bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete);
+		itnim->is_online = BFA_FALSE;
+		bfa_itnim_cleanup(itnim);
+		break;
+
+	case BFA_ITNIM_SM_SLER:
+		bfa_sm_set_state(itnim, bfa_itnim_sm_sler);
+		itnim->is_online = BFA_FALSE;
+		bfa_itnim_iotov_start(itnim);
+		bfa_itnim_sler_cb(itnim);
+		break;
+
+	case BFA_ITNIM_SM_HWFAIL:
+		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
+		itnim->is_online = BFA_FALSE;
+		bfa_itnim_iotov_start(itnim);
+		bfa_itnim_iocdisable_cleanup(itnim);
+		break;
+
+	default:
+		bfa_sm_fault(itnim->bfa, event);
+	}
+}
+
+/**
+ *	Second level error recovery need.
+ */
+static void
+bfa_itnim_sm_sler(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
+{
+	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
+	bfa_trc(itnim->bfa, event);
+
+	switch (event) {
+	case BFA_ITNIM_SM_OFFLINE:
+		bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_offline);
+		bfa_itnim_cleanup(itnim);
+		break;
+
+	case BFA_ITNIM_SM_DELETE:
+		bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete);
+		bfa_itnim_cleanup(itnim);
+		bfa_itnim_iotov_delete(itnim);
+		break;
+
+	case BFA_ITNIM_SM_HWFAIL:
+		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
+		bfa_itnim_iocdisable_cleanup(itnim);
+		break;
+
+	default:
+		bfa_sm_fault(itnim->bfa, event);
+	}
+}
+
+/**
+ *	Going offline. Waiting for active IO cleanup.
+ */
+static void
+bfa_itnim_sm_cleanup_offline(struct bfa_itnim_s *itnim,
+				 enum bfa_itnim_event event)
+{
+	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
+	bfa_trc(itnim->bfa, event);
+
+	switch (event) {
+	case BFA_ITNIM_SM_CLEANUP:
+		if (bfa_itnim_send_fwdelete(itnim))
+			bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete);
+		else
+			bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete_qfull);
+		break;
+
+	case BFA_ITNIM_SM_DELETE:
+		bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete);
+		bfa_itnim_iotov_delete(itnim);
+		break;
+
+	case BFA_ITNIM_SM_HWFAIL:
+		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
+		bfa_itnim_iocdisable_cleanup(itnim);
+		bfa_itnim_offline_cb(itnim);
+		break;
+
+	case BFA_ITNIM_SM_SLER:
+		break;
+
+	default:
+		bfa_sm_fault(itnim->bfa, event);
+	}
+}
+
+/**
+ *	Deleting itnim. Waiting for active IO cleanup.
+ */
+static void
+bfa_itnim_sm_cleanup_delete(struct bfa_itnim_s *itnim,
+				enum bfa_itnim_event event)
+{
+	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
+	bfa_trc(itnim->bfa, event);
+
+	switch (event) {
+	case BFA_ITNIM_SM_CLEANUP:
+		if (bfa_itnim_send_fwdelete(itnim))
+			bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
+		else
+			bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull);
+		break;
+
+	case BFA_ITNIM_SM_HWFAIL:
+		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
+		bfa_itnim_iocdisable_cleanup(itnim);
+		break;
+
+	default:
+		bfa_sm_fault(itnim->bfa, event);
+	}
+}
+
+/**
+ * Rport offline. Firmware itnim is being deleted - awaiting f/w response.
+ */
+static void
+bfa_itnim_sm_fwdelete(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
+{
+	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
+	bfa_trc(itnim->bfa, event);
+
+	switch (event) {
+	case BFA_ITNIM_SM_FWRSP:
+		bfa_sm_set_state(itnim, bfa_itnim_sm_offline);
+		bfa_itnim_offline_cb(itnim);
+		break;
+
+	case BFA_ITNIM_SM_DELETE:
+		bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
+		break;
+
+	case BFA_ITNIM_SM_HWFAIL:
+		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
+		bfa_itnim_offline_cb(itnim);
+		break;
+
+	default:
+		bfa_sm_fault(itnim->bfa, event);
+	}
+}
+
+static void
+bfa_itnim_sm_fwdelete_qfull(struct bfa_itnim_s *itnim,
+			enum bfa_itnim_event event)
+{
+	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
+	bfa_trc(itnim->bfa, event);
+
+	switch (event) {
+	case BFA_ITNIM_SM_QRESUME:
+		bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete);
+		bfa_itnim_send_fwdelete(itnim);
+		break;
+
+	case BFA_ITNIM_SM_DELETE:
+		bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull);
+		break;
+
+	case BFA_ITNIM_SM_HWFAIL:
+		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
+		bfa_reqq_wcancel(&itnim->reqq_wait);
+		bfa_itnim_offline_cb(itnim);
+		break;
+
+	default:
+		bfa_sm_fault(itnim->bfa, event);
+	}
+}
+
+/**
+ *	Offline state.
+ */
+static void
+bfa_itnim_sm_offline(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
+{
+	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
+	bfa_trc(itnim->bfa, event);
+
+	switch (event) {
+	case BFA_ITNIM_SM_DELETE:
+		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
+		bfa_itnim_iotov_delete(itnim);
+		bfa_fcpim_delitn(itnim);
+		break;
+
+	case BFA_ITNIM_SM_ONLINE:
+		if (bfa_itnim_send_fwcreate(itnim))
+			bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
+		else
+			bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull);
+		break;
+
+	case BFA_ITNIM_SM_HWFAIL:
+		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
+		break;
+
+	default:
+		bfa_sm_fault(itnim->bfa, event);
+	}
+}
+
+/**
+ *	IOC h/w failed state.
+ */
+static void
+bfa_itnim_sm_iocdisable(struct bfa_itnim_s *itnim,
+				enum bfa_itnim_event event)
+{
+	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
+	bfa_trc(itnim->bfa, event);
+
+	switch (event) {
+	case BFA_ITNIM_SM_DELETE:
+		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
+		bfa_itnim_iotov_delete(itnim);
+		bfa_fcpim_delitn(itnim);
+		break;
+
+	case BFA_ITNIM_SM_OFFLINE:
+		bfa_itnim_offline_cb(itnim);
+		break;
+
+	case BFA_ITNIM_SM_ONLINE:
+		if (bfa_itnim_send_fwcreate(itnim))
+			bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
+		else
+			bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull);
+		break;
+
+	case BFA_ITNIM_SM_HWFAIL:
+		break;
+
+	default:
+		bfa_sm_fault(itnim->bfa, event);
+	}
+}
+
+/**
+ *	Itnim is deleted, waiting for firmware response to delete.
+ */
+static void
+bfa_itnim_sm_deleting(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
+{
+	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
+	bfa_trc(itnim->bfa, event);
+
+	switch (event) {
+	case BFA_ITNIM_SM_FWRSP:
+	case BFA_ITNIM_SM_HWFAIL:
+		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
+		bfa_fcpim_delitn(itnim);
+		break;
+
+	default:
+		bfa_sm_fault(itnim->bfa, event);
+	}
+}
+
+static void
+bfa_itnim_sm_deleting_qfull(struct bfa_itnim_s *itnim,
+		enum bfa_itnim_event event)
+{
+	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
+	bfa_trc(itnim->bfa, event);
+
+	switch (event) {
+	case BFA_ITNIM_SM_QRESUME:
+		bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
+		bfa_itnim_send_fwdelete(itnim);
+		break;
+
+	case BFA_ITNIM_SM_HWFAIL:
+		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
+		bfa_reqq_wcancel(&itnim->reqq_wait);
+		bfa_fcpim_delitn(itnim);
+		break;
+
+	default:
+		bfa_sm_fault(itnim->bfa, event);
+	}
+}
+
+/**
+ *	Initiate cleanup of all IOs on an IOC failure.
+ */
+static void
+bfa_itnim_iocdisable_cleanup(struct bfa_itnim_s *itnim)
+{
+	struct bfa_tskim_s *tskim;
+	struct bfa_ioim_s *ioim;
+	struct list_head	*qe, *qen;
+
+	list_for_each_safe(qe, qen, &itnim->tsk_q) {
+		tskim = (struct bfa_tskim_s *) qe;
+		bfa_tskim_iocdisable(tskim);
+	}
+
+	list_for_each_safe(qe, qen, &itnim->io_q) {
+		ioim = (struct bfa_ioim_s *) qe;
+		bfa_ioim_iocdisable(ioim);
+	}
+
+	/**
+	 * For IO requests in the pending queue, we pretend an early timeout.
+	 */
+	list_for_each_safe(qe, qen, &itnim->pending_q) {
+		ioim = (struct bfa_ioim_s *) qe;
+		bfa_ioim_tov(ioim);
+	}
+
+	list_for_each_safe(qe, qen, &itnim->io_cleanup_q) {
+		ioim = (struct bfa_ioim_s *) qe;
+		bfa_ioim_iocdisable(ioim);
+	}
+}
+
+/**
+ *	IO cleanup completion
+ */
+static void
+bfa_itnim_cleanp_comp(void *itnim_cbarg)
+{
+	struct bfa_itnim_s *itnim = itnim_cbarg;
+
+	bfa_stats(itnim, cleanup_comps);
+	bfa_sm_send_event(itnim, BFA_ITNIM_SM_CLEANUP);
+}
+
+/**
+ *	Initiate cleanup of all IOs.
+ */
+static void
+bfa_itnim_cleanup(struct bfa_itnim_s *itnim)
+{
+	struct bfa_ioim_s  *ioim;
+	struct bfa_tskim_s *tskim;
+	struct list_head	*qe, *qen;
+
+	bfa_wc_init(&itnim->wc, bfa_itnim_cleanp_comp, itnim);
+
+	list_for_each_safe(qe, qen, &itnim->io_q) {
+		ioim = (struct bfa_ioim_s *) qe;
+
+		/**
+		 * Move IO to a cleanup queue from active queue so that a later
+		 * TM will not pick up this IO.
+		 */
+		list_del(&ioim->qe);
+		list_add_tail(&ioim->qe, &itnim->io_cleanup_q);
+
+		bfa_wc_up(&itnim->wc);
+		bfa_ioim_cleanup(ioim);
+	}
+
+	list_for_each_safe(qe, qen, &itnim->tsk_q) {
+		tskim = (struct bfa_tskim_s *) qe;
+		bfa_wc_up(&itnim->wc);
+		bfa_tskim_cleanup(tskim);
+	}
+
+	bfa_wc_wait(&itnim->wc);
+}
+
+static void
+__bfa_cb_itnim_online(void *cbarg, bfa_boolean_t complete)
+{
+	struct bfa_itnim_s *itnim = cbarg;
+
+	if (complete)
+		bfa_cb_itnim_online(itnim->ditn);
+}
+
+static void
+__bfa_cb_itnim_offline(void *cbarg, bfa_boolean_t complete)
+{
+	struct bfa_itnim_s *itnim = cbarg;
+
+	if (complete)
+		bfa_cb_itnim_offline(itnim->ditn);
+}
+
+static void
+__bfa_cb_itnim_sler(void *cbarg, bfa_boolean_t complete)
+{
+	struct bfa_itnim_s *itnim = cbarg;
+
+	if (complete)
+		bfa_cb_itnim_sler(itnim->ditn);
+}
+
+/**
+ * Call to resume any I/O requests waiting for room in request queue.
+ */
+static void
+bfa_itnim_qresume(void *cbarg)
+{
+	struct bfa_itnim_s *itnim = cbarg;
+
+	bfa_sm_send_event(itnim, BFA_ITNIM_SM_QRESUME);
+}
+
+
+
+
+/**
+ *  bfa_itnim_public
+ */
+
+void
+bfa_itnim_iodone(struct bfa_itnim_s *itnim)
+{
+	bfa_wc_down(&itnim->wc);
+}
+
+void
+bfa_itnim_tskdone(struct bfa_itnim_s *itnim)
+{
+	bfa_wc_down(&itnim->wc);
+}
+
+void
+bfa_itnim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
+		u32 *dm_len)
+{
+	/**
+	 * ITN memory
+	 */
+	*km_len += cfg->fwcfg.num_rports * sizeof(struct bfa_itnim_s);
+}
+
+void
+bfa_itnim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo)
+{
+	struct bfa_s	*bfa = fcpim->bfa;
+	struct bfa_itnim_s *itnim;
+	int	i, j;
+
+	INIT_LIST_HEAD(&fcpim->itnim_q);
+
+	itnim = (struct bfa_itnim_s *) bfa_meminfo_kva(minfo);
+	fcpim->itnim_arr = itnim;
+
+	for (i = 0; i < fcpim->num_itnims; i++, itnim++) {
+		bfa_os_memset(itnim, 0, sizeof(struct bfa_itnim_s));
+		itnim->bfa = bfa;
+		itnim->fcpim = fcpim;
+		itnim->reqq = BFA_REQQ_QOS_LO;
+		itnim->rport = BFA_RPORT_FROM_TAG(bfa, i);
+		itnim->iotov_active = BFA_FALSE;
+		bfa_reqq_winit(&itnim->reqq_wait, bfa_itnim_qresume, itnim);
+
+		INIT_LIST_HEAD(&itnim->io_q);
+		INIT_LIST_HEAD(&itnim->io_cleanup_q);
+		INIT_LIST_HEAD(&itnim->pending_q);
+		INIT_LIST_HEAD(&itnim->tsk_q);
+		INIT_LIST_HEAD(&itnim->delay_comp_q);
+		for (j = 0; j < BFA_IOBUCKET_MAX; j++)
+			itnim->ioprofile.io_latency.min[j] = ~0;
+		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
+	}
+
+	bfa_meminfo_kva(minfo) = (u8 *) itnim;
+}
+
+void
+bfa_itnim_iocdisable(struct bfa_itnim_s *itnim)
+{
+	bfa_stats(itnim, ioc_disabled);
+	bfa_sm_send_event(itnim, BFA_ITNIM_SM_HWFAIL);
+}
+
+static bfa_boolean_t
+bfa_itnim_send_fwcreate(struct bfa_itnim_s *itnim)
+{
+	struct bfi_itnim_create_req_s *m;
+
+	itnim->msg_no++;
+
+	/**
+	 * check for room in queue to send request now
+	 */
+	m = bfa_reqq_next(itnim->bfa, itnim->reqq);
+	if (!m) {
+		bfa_reqq_wait(itnim->bfa, itnim->reqq, &itnim->reqq_wait);
+		return BFA_FALSE;
+	}
+
+	bfi_h2i_set(m->mh, BFI_MC_ITNIM, BFI_ITNIM_H2I_CREATE_REQ,
+			bfa_lpuid(itnim->bfa));
+	m->fw_handle = itnim->rport->fw_handle;
+	m->class = FC_CLASS_3;
+	m->seq_rec = itnim->seq_rec;
+	m->msg_no = itnim->msg_no;
+	bfa_stats(itnim, fw_create);
+
+	/**
+	 * queue I/O message to firmware
+	 */
+	bfa_reqq_produce(itnim->bfa, itnim->reqq);
+	return BFA_TRUE;
+}
+
+static bfa_boolean_t
+bfa_itnim_send_fwdelete(struct bfa_itnim_s *itnim)
+{
+	struct bfi_itnim_delete_req_s *m;
+
+	/**
+	 * check for room in queue to send request now
+	 */
+	m = bfa_reqq_next(itnim->bfa, itnim->reqq);
+	if (!m) {
+		bfa_reqq_wait(itnim->bfa, itnim->reqq, &itnim->reqq_wait);
+		return BFA_FALSE;
+	}
+
+	bfi_h2i_set(m->mh, BFI_MC_ITNIM, BFI_ITNIM_H2I_DELETE_REQ,
+			bfa_lpuid(itnim->bfa));
+	m->fw_handle = itnim->rport->fw_handle;
+	bfa_stats(itnim, fw_delete);
+
+	/**
+	 * queue I/O message to firmware
+	 */
+	bfa_reqq_produce(itnim->bfa, itnim->reqq);
+	return BFA_TRUE;
+}
+
+/**
+ * Cleanup all pending failed inflight requests.
+ */
+static void
+bfa_itnim_delayed_comp(struct bfa_itnim_s *itnim, bfa_boolean_t iotov)
+{
+	struct bfa_ioim_s *ioim;
+	struct list_head *qe, *qen;
+
+	list_for_each_safe(qe, qen, &itnim->delay_comp_q) {
+		ioim = (struct bfa_ioim_s *)qe;
+		bfa_ioim_delayed_comp(ioim, iotov);
+	}
+}
+
+/**
+ * Start all pending IO requests.
+ */
+static void
+bfa_itnim_iotov_online(struct bfa_itnim_s *itnim)
+{
+	struct bfa_ioim_s *ioim;
+
+	bfa_itnim_iotov_stop(itnim);
+
+	/**
+	 * Abort all inflight IO requests in the queue
+	 */
+	bfa_itnim_delayed_comp(itnim, BFA_FALSE);
+
+	/**
+	 * Start all pending IO requests.
+	 */
+	while (!list_empty(&itnim->pending_q)) {
+		bfa_q_deq(&itnim->pending_q, &ioim);
+		list_add_tail(&ioim->qe, &itnim->io_q);
+		bfa_ioim_start(ioim);
+	}
+}
+
+/**
+ * Fail all pending IO requests
+ */
+static void
+bfa_itnim_iotov_cleanup(struct bfa_itnim_s *itnim)
+{
+	struct bfa_ioim_s *ioim;
+
+	/**
+	 * Fail all inflight IO requests in the queue
+	 */
+	bfa_itnim_delayed_comp(itnim, BFA_TRUE);
+
+	/**
+	 * Fail any pending IO requests.
+	 */
+	while (!list_empty(&itnim->pending_q)) {
+		bfa_q_deq(&itnim->pending_q, &ioim);
+		list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
+		bfa_ioim_tov(ioim);
+	}
+}
+
+/**
+ * IO TOV timer callback. Fail any pending IO requests.
+ */
+static void
+bfa_itnim_iotov(void *itnim_arg)
+{
+	struct bfa_itnim_s *itnim = itnim_arg;
+
+	itnim->iotov_active = BFA_FALSE;
+
+	bfa_cb_itnim_tov_begin(itnim->ditn);
+	bfa_itnim_iotov_cleanup(itnim);
+	bfa_cb_itnim_tov(itnim->ditn);
+}
+
+/**
+ * Start IO TOV timer for failing back pending IO requests in offline state.
+ */
+static void
+bfa_itnim_iotov_start(struct bfa_itnim_s *itnim)
+{
+	if (itnim->fcpim->path_tov > 0) {
+
+		itnim->iotov_active = BFA_TRUE;
+		bfa_assert(bfa_itnim_hold_io(itnim));
+		bfa_timer_start(itnim->bfa, &itnim->timer,
+			bfa_itnim_iotov, itnim, itnim->fcpim->path_tov);
+	}
+}
+
+/**
+ * Stop IO TOV timer.
+ */
+static void
+bfa_itnim_iotov_stop(struct bfa_itnim_s *itnim)
+{
+	if (itnim->iotov_active) {
+		itnim->iotov_active = BFA_FALSE;
+		bfa_timer_stop(&itnim->timer);
+	}
+}
+
+/**
+ * Stop IO TOV timer.
+ */
+static void
+bfa_itnim_iotov_delete(struct bfa_itnim_s *itnim)
+{
+	bfa_boolean_t pathtov_active = BFA_FALSE;
+
+	if (itnim->iotov_active)
+		pathtov_active = BFA_TRUE;
+
+	bfa_itnim_iotov_stop(itnim);
+	if (pathtov_active)
+		bfa_cb_itnim_tov_begin(itnim->ditn);
+	bfa_itnim_iotov_cleanup(itnim);
+	if (pathtov_active)
+		bfa_cb_itnim_tov(itnim->ditn);
+}
+
+static void
+bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim)
+{
+	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(itnim->bfa);
+	fcpim->del_itn_stats.del_itn_iocomp_aborted +=
+		itnim->stats.iocomp_aborted;
+	fcpim->del_itn_stats.del_itn_iocomp_timedout +=
+		itnim->stats.iocomp_timedout;
+	fcpim->del_itn_stats.del_itn_iocom_sqer_needed +=
+		itnim->stats.iocom_sqer_needed;
+	fcpim->del_itn_stats.del_itn_iocom_res_free +=
+		itnim->stats.iocom_res_free;
+	fcpim->del_itn_stats.del_itn_iocom_hostabrts +=
+		itnim->stats.iocom_hostabrts;
+	fcpim->del_itn_stats.del_itn_total_ios += itnim->stats.total_ios;
+	fcpim->del_itn_stats.del_io_iocdowns += itnim->stats.io_iocdowns;
+	fcpim->del_itn_stats.del_tm_iocdowns += itnim->stats.tm_iocdowns;
+}
+
+
+
+/**
+ *  bfa_itnim_public
+ */
+
+/**
+ *	Itnim interrupt processing.
+ */
+void
+bfa_itnim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
+{
+	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
+	union bfi_itnim_i2h_msg_u msg;
+	struct bfa_itnim_s *itnim;
+
+	bfa_trc(bfa, m->mhdr.msg_id);
+
+	msg.msg = m;
+
+	switch (m->mhdr.msg_id) {
+	case BFI_ITNIM_I2H_CREATE_RSP:
+		itnim = BFA_ITNIM_FROM_TAG(fcpim,
+						msg.create_rsp->bfa_handle);
+		bfa_assert(msg.create_rsp->status == BFA_STATUS_OK);
+		bfa_stats(itnim, create_comps);
+		bfa_sm_send_event(itnim, BFA_ITNIM_SM_FWRSP);
+		break;
+
+	case BFI_ITNIM_I2H_DELETE_RSP:
+		itnim = BFA_ITNIM_FROM_TAG(fcpim,
+						msg.delete_rsp->bfa_handle);
+		bfa_assert(msg.delete_rsp->status == BFA_STATUS_OK);
+		bfa_stats(itnim, delete_comps);
+		bfa_sm_send_event(itnim, BFA_ITNIM_SM_FWRSP);
+		break;
+
+	case BFI_ITNIM_I2H_SLER_EVENT:
+		itnim = BFA_ITNIM_FROM_TAG(fcpim,
+						msg.sler_event->bfa_handle);
+		bfa_stats(itnim, sler_events);
+		bfa_sm_send_event(itnim, BFA_ITNIM_SM_SLER);
+		break;
+
+	default:
+		bfa_trc(bfa, m->mhdr.msg_id);
+		bfa_assert(0);
+	}
+}
+
+
+
+/**
+ *  bfa_itnim_api
+ */
+
+struct bfa_itnim_s *
+bfa_itnim_create(struct bfa_s *bfa, struct bfa_rport_s *rport, void *ditn)
+{
+	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
+	struct bfa_itnim_s *itnim;
+
+	itnim = BFA_ITNIM_FROM_TAG(fcpim, rport->rport_tag);
+	bfa_assert(itnim->rport == rport);
+
+	itnim->ditn = ditn;
+
+	bfa_stats(itnim, creates);
+	bfa_sm_send_event(itnim, BFA_ITNIM_SM_CREATE);
+
+	return itnim;
+}
+
+void
+bfa_itnim_delete(struct bfa_itnim_s *itnim)
+{
+	bfa_stats(itnim, deletes);
+	bfa_sm_send_event(itnim, BFA_ITNIM_SM_DELETE);
+}
+
+void
+bfa_itnim_online(struct bfa_itnim_s *itnim, bfa_boolean_t seq_rec)
+{
+	itnim->seq_rec = seq_rec;
+	bfa_stats(itnim, onlines);
+	bfa_sm_send_event(itnim, BFA_ITNIM_SM_ONLINE);
+}
+
+void
+bfa_itnim_offline(struct bfa_itnim_s *itnim)
+{
+	bfa_stats(itnim, offlines);
+	bfa_sm_send_event(itnim, BFA_ITNIM_SM_OFFLINE);
+}
+
+/**
+ * Return true if itnim is considered offline for holding off IO request.
+ * IO is not held if itnim is being deleted.
+ */
+bfa_boolean_t
+bfa_itnim_hold_io(struct bfa_itnim_s *itnim)
+{
+	return itnim->fcpim->path_tov && itnim->iotov_active &&
+		(bfa_sm_cmp_state(itnim, bfa_itnim_sm_fwcreate) ||
+		 bfa_sm_cmp_state(itnim, bfa_itnim_sm_sler) ||
+		 bfa_sm_cmp_state(itnim, bfa_itnim_sm_cleanup_offline) ||
+		 bfa_sm_cmp_state(itnim, bfa_itnim_sm_fwdelete) ||
+		 bfa_sm_cmp_state(itnim, bfa_itnim_sm_offline) ||
+		 bfa_sm_cmp_state(itnim, bfa_itnim_sm_iocdisable));
+}
+
+bfa_status_t
+bfa_itnim_get_ioprofile(struct bfa_itnim_s *itnim,
+		struct bfa_itnim_ioprofile_s *ioprofile)
+{
+	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(itnim->bfa);
+	if (!fcpim->io_profile)
+		return BFA_STATUS_IOPROFILE_OFF;
+
+	itnim->ioprofile.index = BFA_IOBUCKET_MAX;
+	itnim->ioprofile.io_profile_start_time =
+		bfa_io_profile_start_time(itnim->bfa);
+	itnim->ioprofile.clock_res_mul = bfa_io_lat_clock_res_mul;
+	itnim->ioprofile.clock_res_div = bfa_io_lat_clock_res_div;
+	*ioprofile = itnim->ioprofile;
+
+	return BFA_STATUS_OK;
+}
+
+void
+bfa_itnim_get_stats(struct bfa_itnim_s *itnim,
+	struct bfa_itnim_iostats_s *stats)
+{
+	*stats = itnim->stats;
+}
+
+void
+bfa_itnim_clear_stats(struct bfa_itnim_s *itnim)
+{
+	int j;
+	bfa_os_memset(&itnim->stats, 0, sizeof(itnim->stats));
+	bfa_os_memset(&itnim->ioprofile, 0, sizeof(itnim->ioprofile));
+	for (j = 0; j < BFA_IOBUCKET_MAX; j++)
+		itnim->ioprofile.io_latency.min[j] = ~0;
+}
+
+/**
+ *  BFA IO module state machine functions
+ */
+
+/**
+ *	IO is not started (unallocated).
+ */
+static void
+bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
+{
+	bfa_trc_fp(ioim->bfa, ioim->iotag);
+	bfa_trc_fp(ioim->bfa, event);
+
+	switch (event) {
+	case BFA_IOIM_SM_START:
+		if (!bfa_itnim_is_online(ioim->itnim)) {
+			if (!bfa_itnim_hold_io(ioim->itnim)) {
+				bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
+				list_del(&ioim->qe);
+				list_add_tail(&ioim->qe,
+					&ioim->fcpim->ioim_comp_q);
+				bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
+						__bfa_cb_ioim_pathtov, ioim);
+			} else {
+				list_del(&ioim->qe);
+				list_add_tail(&ioim->qe,
+					&ioim->itnim->pending_q);
+			}
+			break;
+		}
+
+		if (ioim->nsges > BFI_SGE_INLINE) {
+			if (!bfa_ioim_sge_setup(ioim)) {
+				bfa_sm_set_state(ioim, bfa_ioim_sm_sgalloc);
+				return;
+			}
+		}
+
+		if (!bfa_ioim_send_ioreq(ioim)) {
+			bfa_sm_set_state(ioim, bfa_ioim_sm_qfull);
+			break;
+		}
+
+		bfa_sm_set_state(ioim, bfa_ioim_sm_active);
+		break;
+
+	case BFA_IOIM_SM_IOTOV:
+		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
+		bfa_ioim_move_to_comp_q(ioim);
+		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
+				__bfa_cb_ioim_pathtov, ioim);
+		break;
+
+	case BFA_IOIM_SM_ABORT:
+		/**
+		 * IO in pending queue can get abort requests. Complete abort
+		 * requests immediately.
+		 */
+		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
+		bfa_assert(bfa_q_is_on_q(&ioim->itnim->pending_q, ioim));
+		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
+				__bfa_cb_ioim_abort, ioim);
+		break;
+
+	default:
+		bfa_sm_fault(ioim->bfa, event);
+	}
+}
+
+/**
+ *	IO is waiting for SG pages.
+ */
+static void
+bfa_ioim_sm_sgalloc(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
+{
+	bfa_trc(ioim->bfa, ioim->iotag);
+	bfa_trc(ioim->bfa, event);
+
+	switch (event) {
+	case BFA_IOIM_SM_SGALLOCED:
+		if (!bfa_ioim_send_ioreq(ioim)) {
+			bfa_sm_set_state(ioim, bfa_ioim_sm_qfull);
+			break;
+		}
+		bfa_sm_set_state(ioim, bfa_ioim_sm_active);
+		break;
+
+	case BFA_IOIM_SM_CLEANUP:
+		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
+		bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
+		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
+			      ioim);
+		bfa_ioim_notify_cleanup(ioim);
+		break;
+
+	case BFA_IOIM_SM_ABORT:
+		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
+		bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
+		bfa_ioim_move_to_comp_q(ioim);
+		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
+			      ioim);
+		break;
+
+	case BFA_IOIM_SM_HWFAIL:
+		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
+		bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
+		bfa_ioim_move_to_comp_q(ioim);
+		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
+			      ioim);
+		break;
+
+	default:
+		bfa_sm_fault(ioim->bfa, event);
+	}
+}
+
+/**
+ *	IO is active.
+ */
+static void
+bfa_ioim_sm_active(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
+{
+	bfa_trc_fp(ioim->bfa, ioim->iotag);
+	bfa_trc_fp(ioim->bfa, event);
+
+	switch (event) {
+	case BFA_IOIM_SM_COMP_GOOD:
+		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
+		bfa_ioim_move_to_comp_q(ioim);
+		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
+			      __bfa_cb_ioim_good_comp, ioim);
+		break;
+
+	case BFA_IOIM_SM_COMP:
+		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
+		bfa_ioim_move_to_comp_q(ioim);
+		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_comp,
+			      ioim);
+		break;
+
+	case BFA_IOIM_SM_DONE:
+		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
+		bfa_ioim_move_to_comp_q(ioim);
+		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_comp,
+			      ioim);
+		break;
+
+	case BFA_IOIM_SM_ABORT:
+		ioim->iosp->abort_explicit = BFA_TRUE;
+		ioim->io_cbfn = __bfa_cb_ioim_abort;
+
+		if (bfa_ioim_send_abort(ioim))
+			bfa_sm_set_state(ioim, bfa_ioim_sm_abort);
+		else {
+			bfa_sm_set_state(ioim, bfa_ioim_sm_abort_qfull);
+			bfa_stats(ioim->itnim, qwait);
+			bfa_reqq_wait(ioim->bfa, ioim->reqq,
+					  &ioim->iosp->reqq_wait);
+		}
+		break;
+
+	case BFA_IOIM_SM_CLEANUP:
+		ioim->iosp->abort_explicit = BFA_FALSE;
+		ioim->io_cbfn = __bfa_cb_ioim_failed;
+
+		if (bfa_ioim_send_abort(ioim))
+			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
+		else {
+			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
+			bfa_stats(ioim->itnim, qwait);
+			bfa_reqq_wait(ioim->bfa, ioim->reqq,
+					  &ioim->iosp->reqq_wait);
+		}
+		break;
+
+	case BFA_IOIM_SM_HWFAIL:
+		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
+		bfa_ioim_move_to_comp_q(ioim);
+		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
+			      ioim);
+		break;
+
+	case BFA_IOIM_SM_SQRETRY:
+		if (bfa_ioim_get_iotag(ioim) != BFA_TRUE) {
+			/* max retry completed free IO */
+			bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
+			bfa_ioim_move_to_comp_q(ioim);
+			bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
+					__bfa_cb_ioim_failed, ioim);
+			break;
+		}
+		/* waiting for IO tag resource free */
+		bfa_sm_set_state(ioim, bfa_ioim_sm_cmnd_retry);
+		break;
+
+	default:
+		bfa_sm_fault(ioim->bfa, event);
+	}
+}
+
+/**
+ *	IO is retried with new tag.
+ */
+static void
+bfa_ioim_sm_cmnd_retry(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
+{
+	bfa_trc_fp(ioim->bfa, ioim->iotag);
+	bfa_trc_fp(ioim->bfa, event);
+
+	switch (event) {
+	case BFA_IOIM_SM_FREE:
+		/* abts and rrq done. Now retry the IO with new tag */
+		if (!bfa_ioim_send_ioreq(ioim)) {
+			bfa_sm_set_state(ioim, bfa_ioim_sm_qfull);
+			break;
+		}
+		bfa_sm_set_state(ioim, bfa_ioim_sm_active);
+	break;
+
+	case BFA_IOIM_SM_CLEANUP:
+		ioim->iosp->abort_explicit = BFA_FALSE;
+		ioim->io_cbfn = __bfa_cb_ioim_failed;
+
+		if (bfa_ioim_send_abort(ioim))
+			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
+		else {
+			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
+			bfa_stats(ioim->itnim, qwait);
+			bfa_reqq_wait(ioim->bfa, ioim->reqq,
+					  &ioim->iosp->reqq_wait);
+		}
+	break;
+
+	case BFA_IOIM_SM_HWFAIL:
+		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
+		bfa_ioim_move_to_comp_q(ioim);
+		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
+			 __bfa_cb_ioim_failed, ioim);
+		break;
+
+	case BFA_IOIM_SM_ABORT:
+		/** in this state IO abort is done.
+		 * Waiting for IO tag resource free.
+		 */
+		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
+		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
+			      ioim);
+		break;
+
+	default:
+		bfa_sm_fault(ioim->bfa, event);
+	}
+}
+
+/**
+ *	IO is being aborted, waiting for completion from firmware.
+ */
+static void
+bfa_ioim_sm_abort(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
+{
+	bfa_trc(ioim->bfa, ioim->iotag);
+	bfa_trc(ioim->bfa, event);
+
+	switch (event) {
+	case BFA_IOIM_SM_COMP_GOOD:
+	case BFA_IOIM_SM_COMP:
+	case BFA_IOIM_SM_DONE:
+	case BFA_IOIM_SM_FREE:
+		break;
+
+	case BFA_IOIM_SM_ABORT_DONE:
+		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
+		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
+			      ioim);
+		break;
+
+	case BFA_IOIM_SM_ABORT_COMP:
+		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
+		bfa_ioim_move_to_comp_q(ioim);
+		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
+			      ioim);
+		break;
+
+	case BFA_IOIM_SM_COMP_UTAG:
+		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
+		bfa_ioim_move_to_comp_q(ioim);
+		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
+			      ioim);
+		break;
+
+	case BFA_IOIM_SM_CLEANUP:
+		bfa_assert(ioim->iosp->abort_explicit == BFA_TRUE);
+		ioim->iosp->abort_explicit = BFA_FALSE;
+
+		if (bfa_ioim_send_abort(ioim))
+			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
+		else {
+			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
+			bfa_stats(ioim->itnim, qwait);
+			bfa_reqq_wait(ioim->bfa, ioim->reqq,
+					  &ioim->iosp->reqq_wait);
+		}
+		break;
+
+	case BFA_IOIM_SM_HWFAIL:
+		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
+		bfa_ioim_move_to_comp_q(ioim);
+		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
+			      ioim);
+		break;
+
+	default:
+		bfa_sm_fault(ioim->bfa, event);
+	}
+}
+
+/**
+ * IO is being cleaned up (implicit abort), waiting for completion from
+ * firmware.
+ */
+static void
+bfa_ioim_sm_cleanup(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
+{
+	bfa_trc(ioim->bfa, ioim->iotag);
+	bfa_trc(ioim->bfa, event);
+
+	switch (event) {
+	case BFA_IOIM_SM_COMP_GOOD:
+	case BFA_IOIM_SM_COMP:
+	case BFA_IOIM_SM_DONE:
+	case BFA_IOIM_SM_FREE:
+		break;
+
+	case BFA_IOIM_SM_ABORT:
+		/**
+		 * IO is already being aborted implicitly
+		 */
+		ioim->io_cbfn = __bfa_cb_ioim_abort;
+		break;
+
+	case BFA_IOIM_SM_ABORT_DONE:
+		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
+		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
+		bfa_ioim_notify_cleanup(ioim);
+		break;
+
+	case BFA_IOIM_SM_ABORT_COMP:
+		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
+		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
+		bfa_ioim_notify_cleanup(ioim);
+		break;
+
+	case BFA_IOIM_SM_COMP_UTAG:
+		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
+		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
+		bfa_ioim_notify_cleanup(ioim);
+		break;
+
+	case BFA_IOIM_SM_HWFAIL:
+		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
+		bfa_ioim_move_to_comp_q(ioim);
+		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
+			      ioim);
+		break;
+
+	case BFA_IOIM_SM_CLEANUP:
+		/**
+		 * IO can be in cleanup state already due to TM command.
+		 * 2nd cleanup request comes from ITN offline event.
+		 */
+		break;
+
+	default:
+		bfa_sm_fault(ioim->bfa, event);
+	}
+}
+
+/**
+ *	IO is waiting for room in request CQ
+ */
+static void
+bfa_ioim_sm_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
+{
+	bfa_trc(ioim->bfa, ioim->iotag);
+	bfa_trc(ioim->bfa, event);
+
+	switch (event) {
+	case BFA_IOIM_SM_QRESUME:
+		bfa_sm_set_state(ioim, bfa_ioim_sm_active);
+		bfa_ioim_send_ioreq(ioim);
+		break;
+
+	case BFA_IOIM_SM_ABORT:
+		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
+		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
+		bfa_ioim_move_to_comp_q(ioim);
+		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
+			      ioim);
+		break;
+
+	case BFA_IOIM_SM_CLEANUP:
+		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
+		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
+		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
+			      ioim);
+		bfa_ioim_notify_cleanup(ioim);
+		break;
+
+	case BFA_IOIM_SM_HWFAIL:
+		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
+		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
+		bfa_ioim_move_to_comp_q(ioim);
+		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
+			      ioim);
+		break;
+
+	default:
+		bfa_sm_fault(ioim->bfa, event);
+	}
+}
+
+/**
+ *	Active IO is being aborted, waiting for room in request CQ.
+ */
+static void
+bfa_ioim_sm_abort_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
+{
+	bfa_trc(ioim->bfa, ioim->iotag);
+	bfa_trc(ioim->bfa, event);
+
+	switch (event) {
+	case BFA_IOIM_SM_QRESUME:
+		bfa_sm_set_state(ioim, bfa_ioim_sm_abort);
+		bfa_ioim_send_abort(ioim);
+		break;
+
+	case BFA_IOIM_SM_CLEANUP:
+		bfa_assert(ioim->iosp->abort_explicit == BFA_TRUE);
+		ioim->iosp->abort_explicit = BFA_FALSE;
+		bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
+		break;
+
+	case BFA_IOIM_SM_COMP_GOOD:
+	case BFA_IOIM_SM_COMP:
+		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
+		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
+		bfa_ioim_move_to_comp_q(ioim);
+		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
+			      ioim);
+		break;
+
+	case BFA_IOIM_SM_DONE:
+		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
+		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
+		bfa_ioim_move_to_comp_q(ioim);
+		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
+			      ioim);
+		break;
+
+	case BFA_IOIM_SM_HWFAIL:
+		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
+		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
+		bfa_ioim_move_to_comp_q(ioim);
+		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
+			      ioim);
+		break;
+
+	default:
+		bfa_sm_fault(ioim->bfa, event);
+	}
+}
+
+/**
+ *	Active IO is being cleaned up, waiting for room in request CQ.
+ */
+static void
+bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
+{
+	bfa_trc(ioim->bfa, ioim->iotag);
+	bfa_trc(ioim->bfa, event);
+
+	switch (event) {
+	case BFA_IOIM_SM_QRESUME:
+		bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
+		bfa_ioim_send_abort(ioim);
+		break;
+
+	case BFA_IOIM_SM_ABORT:
+		/**
+		 * IO is already being cleaned up implicitly
+		 */
+		ioim->io_cbfn = __bfa_cb_ioim_abort;
+		break;
+
+	case BFA_IOIM_SM_COMP_GOOD:
+	case BFA_IOIM_SM_COMP:
+		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
+		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
+		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
+		bfa_ioim_notify_cleanup(ioim);
+		break;
+
+	case BFA_IOIM_SM_DONE:
+		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
+		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
+		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
+		bfa_ioim_notify_cleanup(ioim);
+		break;
+
+	case BFA_IOIM_SM_HWFAIL:
+		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
+		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
+		bfa_ioim_move_to_comp_q(ioim);
+		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
+			      ioim);
+		break;
+
+	default:
+		bfa_sm_fault(ioim->bfa, event);
+	}
+}
+
+/**
+ * IO bfa callback is pending.
+ */
+static void
+bfa_ioim_sm_hcb(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
+{
+	bfa_trc_fp(ioim->bfa, ioim->iotag);
+	bfa_trc_fp(ioim->bfa, event);
+
+	switch (event) {
+	case BFA_IOIM_SM_HCB:
+		bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);
+		bfa_ioim_free(ioim);
+		break;
+
+	case BFA_IOIM_SM_CLEANUP:
+		bfa_ioim_notify_cleanup(ioim);
+		break;
+
+	case BFA_IOIM_SM_HWFAIL:
+		break;
+
+	default:
+		bfa_sm_fault(ioim->bfa, event);
+	}
+}
+
+/**
+ * IO bfa callback is pending. IO resource cannot be freed.
+ */
+static void
+bfa_ioim_sm_hcb_free(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
+{
+	bfa_trc(ioim->bfa, ioim->iotag);
+	bfa_trc(ioim->bfa, event);
+
+	switch (event) {
+	case BFA_IOIM_SM_HCB:
+		bfa_sm_set_state(ioim, bfa_ioim_sm_resfree);
+		list_del(&ioim->qe);
+		list_add_tail(&ioim->qe, &ioim->fcpim->ioim_resfree_q);
+		break;
+
+	case BFA_IOIM_SM_FREE:
+		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
+		break;
+
+	case BFA_IOIM_SM_CLEANUP:
+		bfa_ioim_notify_cleanup(ioim);
+		break;
+
+	case BFA_IOIM_SM_HWFAIL:
+		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
+		break;
+
+	default:
+		bfa_sm_fault(ioim->bfa, event);
+	}
+}
+
+/**
+ * IO is completed, waiting for resource free from firmware.
+ */
+static void
+bfa_ioim_sm_resfree(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
+{
+	bfa_trc(ioim->bfa, ioim->iotag);
+	bfa_trc(ioim->bfa, event);
+
+	switch (event) {
+	case BFA_IOIM_SM_FREE:
+		bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);
+		bfa_ioim_free(ioim);
+		break;
+
+	case BFA_IOIM_SM_CLEANUP:
+		bfa_ioim_notify_cleanup(ioim);
+		break;
+
+	case BFA_IOIM_SM_HWFAIL:
+		break;
+
+	default:
+		bfa_sm_fault(ioim->bfa, event);
+	}
+}
+
+
+
+/**
+ *  hal_ioim_private
+ */
+
+static void
+__bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete)
+{
+	struct bfa_ioim_s *ioim = cbarg;
+
+	if (!complete) {
+		bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
+		return;
+	}
+
+	bfa_cb_ioim_good_comp(ioim->bfa->bfad, ioim->dio);
+}
+
+static void
+__bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete)
+{
+	struct bfa_ioim_s	*ioim = cbarg;
+	struct bfi_ioim_rsp_s *m;
+	u8	*snsinfo = NULL;
+	u8	sns_len = 0;
+	s32	residue = 0;
+
+	if (!complete) {
+		bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
+		return;
+	}
+
+	m = (struct bfi_ioim_rsp_s *) &ioim->iosp->comp_rspmsg;
+	if (m->io_status == BFI_IOIM_STS_OK) {
+		/**
+		 * setup sense information, if present
+		 */
+		if ((m->scsi_status == SCSI_STATUS_CHECK_CONDITION) &&
+					m->sns_len) {
+			sns_len = m->sns_len;
+			snsinfo = ioim->iosp->snsinfo;
+		}
+
+		/**
+		 * setup residue value correctly for normal completions
+		 */
+		if (m->resid_flags == FCP_RESID_UNDER) {
+			residue = bfa_os_ntohl(m->residue);
+			bfa_stats(ioim->itnim, iocomp_underrun);
+		}
+		if (m->resid_flags == FCP_RESID_OVER) {
+			residue = bfa_os_ntohl(m->residue);
+			residue = -residue;
+			bfa_stats(ioim->itnim, iocomp_overrun);
+		}
+	}
+
+	bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, m->io_status,
+			  m->scsi_status, sns_len, snsinfo, residue);
+}
+
+static void
+__bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete)
+{
+	struct bfa_ioim_s *ioim = cbarg;
+
+	if (!complete) {
+		bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
+		return;
+	}
+
+	bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_ABORTED,
+			  0, 0, NULL, 0);
+}
+
+static void
+__bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete)
+{
+	struct bfa_ioim_s *ioim = cbarg;
+
+	bfa_stats(ioim->itnim, path_tov_expired);
+	if (!complete) {
+		bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
+		return;
+	}
+
+	bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_PATHTOV,
+			  0, 0, NULL, 0);
+}
+
+static void
+__bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete)
+{
+	struct bfa_ioim_s *ioim = cbarg;
+
+	if (!complete) {
+		bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
+		return;
+	}
+
+	bfa_cb_ioim_abort(ioim->bfa->bfad, ioim->dio);
+}
+
+static void
+bfa_ioim_sgpg_alloced(void *cbarg)
+{
+	struct bfa_ioim_s *ioim = cbarg;
+
+	ioim->nsgpgs = BFA_SGPG_NPAGE(ioim->nsges);
+	list_splice_tail_init(&ioim->iosp->sgpg_wqe.sgpg_q, &ioim->sgpg_q);
+	bfa_ioim_sgpg_setup(ioim);
+	bfa_sm_send_event(ioim, BFA_IOIM_SM_SGALLOCED);
+}
+
+/**
+ * Send I/O request to firmware.
+ */
+static	bfa_boolean_t
+bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim)
+{
+	struct bfa_itnim_s *itnim = ioim->itnim;
+	struct bfi_ioim_req_s *m;
+	static struct fcp_cmnd_s cmnd_z0 = { 0 };
+	struct bfi_sge_s      *sge;
+	u32	pgdlen = 0;
+	u32	fcp_dl;
+	u64 addr;
+	struct scatterlist *sg;
+	struct scsi_cmnd *cmnd = (struct scsi_cmnd *) ioim->dio;
+
+	/**
+	 * check for room in queue to send request now
+	 */
+	m = bfa_reqq_next(ioim->bfa, ioim->reqq);
+	if (!m) {
+		bfa_stats(ioim->itnim, qwait);
+		bfa_reqq_wait(ioim->bfa, ioim->reqq,
+				  &ioim->iosp->reqq_wait);
+		return BFA_FALSE;
+	}
+
+	/**
+	 * build i/o request message next
+	 */
+	m->io_tag = bfa_os_htons(ioim->iotag);
+	m->rport_hdl = ioim->itnim->rport->fw_handle;
+	m->io_timeout = bfa_cb_ioim_get_timeout(ioim->dio);
+
+	/**
+	 * build inline IO SG element here
+	 */
+	sge = &m->sges[0];
+	if (ioim->nsges) {
+		sg = (struct scatterlist *)scsi_sglist(cmnd);
+		addr = bfa_os_sgaddr(sg_dma_address(sg));
+		sge->sga = *(union bfi_addr_u *) &addr;
+		pgdlen = sg_dma_len(sg);
+		sge->sg_len = pgdlen;
+		sge->flags = (ioim->nsges > BFI_SGE_INLINE) ?
+					BFI_SGE_DATA_CPL : BFI_SGE_DATA_LAST;
+		bfa_sge_to_be(sge);
+		sge++;
+	}
+
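+	/*
+	 * The trailing PGDLEN element points at the allocated SG pages when
+	 * the I/O needs more than the single inline SGE.
+	 */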
+	if (ioim->nsges > BFI_SGE_INLINE) {
+		sge->sga = ioim->sgpg->sgpg_pa;
+	} else {
+		sge->sga.a32.addr_lo = 0;
+		sge->sga.a32.addr_hi = 0;
+	}
+	sge->sg_len = pgdlen;
+	sge->flags = BFI_SGE_PGDLEN;
+	bfa_sge_to_be(sge);
+
+	/**
+	 * set up I/O command parameters
+	 */
+	bfa_os_assign(m->cmnd, cmnd_z0);
+	m->cmnd.lun = bfa_cb_ioim_get_lun(ioim->dio);
+	m->cmnd.iodir = bfa_cb_ioim_get_iodir(ioim->dio);
+	bfa_os_assign(m->cmnd.cdb,
+			*(scsi_cdb_t *)bfa_cb_ioim_get_cdb(ioim->dio));
+	fcp_dl = bfa_cb_ioim_get_size(ioim->dio);
+	m->cmnd.fcp_dl = bfa_os_htonl(fcp_dl);
+
+	/**
+	 * set up I/O message header
+	 */
+	switch (m->cmnd.iodir) {
+	case FCP_IODIR_READ:
+		bfi_h2i_set(m->mh, BFI_MC_IOIM_READ, 0, bfa_lpuid(ioim->bfa));
+		bfa_stats(itnim, input_reqs);
+		ioim->itnim->stats.rd_throughput += fcp_dl;
+		break;
+	case FCP_IODIR_WRITE:
+		bfi_h2i_set(m->mh, BFI_MC_IOIM_WRITE, 0, bfa_lpuid(ioim->bfa));
+		bfa_stats(itnim, output_reqs);
+		ioim->itnim->stats.wr_throughput += fcp_dl;
+		break;
+	case FCP_IODIR_RW:
+		bfa_stats(itnim, input_reqs);
+		bfa_stats(itnim, output_reqs);
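+		/* fall through - bidirectional I/O uses the generic I/O opcode */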
+	default:
+		bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_lpuid(ioim->bfa));
+	}
+	if (itnim->seq_rec ||
+	    (bfa_cb_ioim_get_size(ioim->dio) & (sizeof(u32) - 1)))
+		bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_lpuid(ioim->bfa));
+
+#ifdef IOIM_ADVANCED
+	m->cmnd.crn = bfa_cb_ioim_get_crn(ioim->dio);
+	m->cmnd.priority = bfa_cb_ioim_get_priority(ioim->dio);
+	m->cmnd.taskattr = bfa_cb_ioim_get_taskattr(ioim->dio);
+
+	/**
+	 * Handle large CDB (>16 bytes).
+	 */
+	m->cmnd.addl_cdb_len = (bfa_cb_ioim_get_cdblen(ioim->dio) -
+					FCP_CMND_CDB_LEN) / sizeof(u32);
+	if (m->cmnd.addl_cdb_len) {
+		bfa_os_memcpy(&m->cmnd.cdb + 1, (scsi_cdb_t *)
+				bfa_cb_ioim_get_cdb(ioim->dio) + 1,
+				m->cmnd.addl_cdb_len * sizeof(u32));
+		fcp_cmnd_fcpdl(&m->cmnd) =
+				bfa_os_htonl(bfa_cb_ioim_get_size(ioim->dio));
+	}
+#endif
+
+	/**
+	 * queue I/O message to firmware
+	 */
+	bfa_reqq_produce(ioim->bfa, ioim->reqq);
+	return BFA_TRUE;
+}
+
+/**
+ * Set up any additional SG pages needed. The inline SG element is set up
+ * at queuing time.
+ */
+static bfa_boolean_t
+bfa_ioim_sge_setup(struct bfa_ioim_s *ioim)
+{
+	u16	nsgpgs;
+
+	bfa_assert(ioim->nsges > BFI_SGE_INLINE);
+
+	/**
+	 * allocate SG pages needed
+	 */
+	nsgpgs = BFA_SGPG_NPAGE(ioim->nsges);
+	if (!nsgpgs)
+		return BFA_TRUE;
+
+	if (bfa_sgpg_malloc(ioim->bfa, &ioim->sgpg_q, nsgpgs)
+	    != BFA_STATUS_OK) {
+		bfa_sgpg_wait(ioim->bfa, &ioim->iosp->sgpg_wqe, nsgpgs);
+		return BFA_FALSE;
+	}
+
+	ioim->nsgpgs = nsgpgs;
+	bfa_ioim_sgpg_setup(ioim);
+
+	return BFA_TRUE;
+}
+
+static void
+bfa_ioim_sgpg_setup(struct bfa_ioim_s *ioim)
+{
+	int		sgeid, nsges, i;
+	struct bfi_sge_s      *sge;
+	struct bfa_sgpg_s *sgpg;
+	u32	pgcumsz;
+	u64        addr;
+	struct scatterlist *sg;
+	struct scsi_cmnd *cmnd = (struct scsi_cmnd *) ioim->dio;
+
+	sgeid = BFI_SGE_INLINE;
+	ioim->sgpg = sgpg = bfa_q_first(&ioim->sgpg_q);
+
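+	/* the first SG entry was consumed by the inline SGE at queuing time */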
+	sg = scsi_sglist(cmnd);
+	sg = sg_next(sg);
+
+	do {
+		sge = sgpg->sgpg->sges;
+		nsges = ioim->nsges - sgeid;
+		if (nsges > BFI_SGPG_DATA_SGES)
+			nsges = BFI_SGPG_DATA_SGES;
+
+		pgcumsz = 0;
+		for (i = 0; i < nsges; i++, sge++, sgeid++, sg = sg_next(sg)) {
+			addr = bfa_os_sgaddr(sg_dma_address(sg));
+			sge->sga = *(union bfi_addr_u *) &addr;
+			sge->sg_len = sg_dma_len(sg);
+			pgcumsz += sge->sg_len;
+
+			/**
+			 * set flags
+			 */
+			if (i < (nsges - 1))
+				sge->flags = BFI_SGE_DATA;
+			else if (sgeid < (ioim->nsges - 1))
+				sge->flags = BFI_SGE_DATA_CPL;
+			else
+				sge->flags = BFI_SGE_DATA_LAST;
+
+			bfa_sge_to_le(sge);
+		}
+
+		sgpg = (struct bfa_sgpg_s *) bfa_q_next(sgpg);
+
+		/**
+		 * set the link element of each page
+		 */
+		if (sgeid == ioim->nsges) {
+			sge->flags = BFI_SGE_PGDLEN;
+			sge->sga.a32.addr_lo = 0;
+			sge->sga.a32.addr_hi = 0;
+		} else {
+			sge->flags = BFI_SGE_LINK;
+			sge->sga = sgpg->sgpg_pa;
+		}
+		sge->sg_len = pgcumsz;
+
+		bfa_sge_to_le(sge);
+	} while (sgeid < ioim->nsges);
+}
+
+/**
+ * Send I/O abort request to firmware.
+ */
+static	bfa_boolean_t
+bfa_ioim_send_abort(struct bfa_ioim_s *ioim)
+{
+	struct bfi_ioim_abort_req_s *m;
+	enum bfi_ioim_h2i	msgop;
+
+	/**
+	 * check for room in queue to send request now
+	 */
+	m = bfa_reqq_next(ioim->bfa, ioim->reqq);
+	if (!m)
+		return BFA_FALSE;
+
+	/**
+	 * build i/o request message next
+	 */
+	if (ioim->iosp->abort_explicit)
+		msgop = BFI_IOIM_H2I_IOABORT_REQ;
+	else
+		msgop = BFI_IOIM_H2I_IOCLEANUP_REQ;
+
+	bfi_h2i_set(m->mh, BFI_MC_IOIM, msgop, bfa_lpuid(ioim->bfa));
+	m->io_tag    = bfa_os_htons(ioim->iotag);
+	m->abort_tag = ++ioim->abort_tag;
+
+	/**
+	 * queue I/O message to firmware
+	 */
+	bfa_reqq_produce(ioim->bfa, ioim->reqq);
+	return BFA_TRUE;
+}
+
+/**
+ * Call to resume any I/O requests waiting for room in request queue.
+ */
+static void
+bfa_ioim_qresume(void *cbarg)
+{
+	struct bfa_ioim_s *ioim = cbarg;
+
+	bfa_stats(ioim->itnim, qresumes);
+	bfa_sm_send_event(ioim, BFA_IOIM_SM_QRESUME);
+}
+
+
+static void
+bfa_ioim_notify_cleanup(struct bfa_ioim_s *ioim)
+{
+	/**
+	 * Move IO from itnim queue to fcpim global queue since itnim will be
+	 * freed.
+	 */
+	list_del(&ioim->qe);
+	list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
+
+	if (!ioim->iosp->tskim) {
+		if (ioim->fcpim->delay_comp && ioim->itnim->iotov_active) {
+			bfa_cb_dequeue(&ioim->hcb_qe);
+			list_del(&ioim->qe);
+			list_add_tail(&ioim->qe, &ioim->itnim->delay_comp_q);
+		}
+		bfa_itnim_iodone(ioim->itnim);
+	} else
+		bfa_tskim_iodone(ioim->iosp->tskim);
+}
+
+static bfa_boolean_t
+bfa_ioim_is_abortable(struct bfa_ioim_s *ioim)
+{
+	if ((bfa_sm_cmp_state(ioim, bfa_ioim_sm_uninit) &&
+	    (!bfa_q_is_on_q(&ioim->itnim->pending_q, ioim)))	||
+	    (bfa_sm_cmp_state(ioim, bfa_ioim_sm_abort))		||
+	    (bfa_sm_cmp_state(ioim, bfa_ioim_sm_abort_qfull))	||
+	    (bfa_sm_cmp_state(ioim, bfa_ioim_sm_hcb))		||
+	    (bfa_sm_cmp_state(ioim, bfa_ioim_sm_hcb_free))	||
+	    (bfa_sm_cmp_state(ioim, bfa_ioim_sm_resfree)))
+		return BFA_FALSE;
+
+	return BFA_TRUE;
+}
+
+/**
+ *	Complete an IO that was held while the itnim was offline, either after
+ *	path TOV expires or after the link comes back.
+ */
+void
+bfa_ioim_delayed_comp(struct bfa_ioim_s *ioim, bfa_boolean_t iotov)
+{
+	/**
+	 * If the path tov timer expired, fail back with PATHTOV status - these
+	 * IO requests are not normally retried by the IO stack.
+	 *
+	 * Otherwise the device came back online; fail the IO with normal failed
+	 * status so that the IO stack retries it.
+	 */
+	if (iotov)
+		ioim->io_cbfn = __bfa_cb_ioim_pathtov;
+	else {
+		ioim->io_cbfn = __bfa_cb_ioim_failed;
+		bfa_stats(ioim->itnim, iocom_nexus_abort);
+	}
+	bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
+
+	/**
+	 * Move IO to fcpim global queue since itnim will be
+	 * freed.
+	 */
+	list_del(&ioim->qe);
+	list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
+}
+
+
+
+/**
+ *  hal_ioim_friend
+ */
+
+/**
+ * Memory allocation and initialization.
+ */
+void
+bfa_ioim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo)
+{
+	struct bfa_ioim_s		*ioim;
+	struct bfa_ioim_sp_s	*iosp;
+	u16		i;
+	u8			*snsinfo;
+	u32		snsbufsz;
+
+	/**
+	 * claim memory first
+	 */
+	ioim = (struct bfa_ioim_s *) bfa_meminfo_kva(minfo);
+	fcpim->ioim_arr = ioim;
+	bfa_meminfo_kva(minfo) = (u8 *) (ioim + fcpim->num_ioim_reqs);
+
+	iosp = (struct bfa_ioim_sp_s *) bfa_meminfo_kva(minfo);
+	fcpim->ioim_sp_arr = iosp;
+	bfa_meminfo_kva(minfo) = (u8 *) (iosp + fcpim->num_ioim_reqs);
+
+	/**
+	 * Claim DMA memory for per IO sense data.
+	 */
+	snsbufsz = fcpim->num_ioim_reqs * BFI_IOIM_SNSLEN;
+	fcpim->snsbase.pa  = bfa_meminfo_dma_phys(minfo);
+	bfa_meminfo_dma_phys(minfo) += snsbufsz;
+
+	fcpim->snsbase.kva = bfa_meminfo_dma_virt(minfo);
+	bfa_meminfo_dma_virt(minfo) += snsbufsz;
+	snsinfo = fcpim->snsbase.kva;
+	bfa_iocfc_set_snsbase(fcpim->bfa, fcpim->snsbase.pa);
+
+	/**
+	 * Initialize ioim free queues
+	 */
+	INIT_LIST_HEAD(&fcpim->ioim_free_q);
+	INIT_LIST_HEAD(&fcpim->ioim_resfree_q);
+	INIT_LIST_HEAD(&fcpim->ioim_comp_q);
+
+	for (i = 0; i < fcpim->num_ioim_reqs;
+	     i++, ioim++, iosp++, snsinfo += BFI_IOIM_SNSLEN) {
+		/*
+		 * initialize IOIM
+		 */
+		bfa_os_memset(ioim, 0, sizeof(struct bfa_ioim_s));
+		ioim->iotag   = i;
+		ioim->bfa     = fcpim->bfa;
+		ioim->fcpim   = fcpim;
+		ioim->iosp    = iosp;
+		iosp->snsinfo = snsinfo;
+		INIT_LIST_HEAD(&ioim->sgpg_q);
+		bfa_reqq_winit(&ioim->iosp->reqq_wait,
+				   bfa_ioim_qresume, ioim);
+		bfa_sgpg_winit(&ioim->iosp->sgpg_wqe,
+				   bfa_ioim_sgpg_alloced, ioim);
+		bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);
+
+		list_add_tail(&ioim->qe, &fcpim->ioim_free_q);
+	}
+}
+
+/**
+ * Driver detach time call.
+ */
+void
+bfa_ioim_detach(struct bfa_fcpim_mod_s *fcpim)
+{
+}
+
+void
+bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
+{
+	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
+	struct bfi_ioim_rsp_s *rsp = (struct bfi_ioim_rsp_s *) m;
+	struct bfa_ioim_s *ioim;
+	u16	iotag;
+	enum bfa_ioim_event evt = BFA_IOIM_SM_COMP;
+
+	iotag = bfa_os_ntohs(rsp->io_tag);
+
+	ioim = BFA_IOIM_FROM_TAG(fcpim, iotag);
+	bfa_assert(ioim->iotag == iotag);
+
+	bfa_trc(ioim->bfa, ioim->iotag);
+	bfa_trc(ioim->bfa, rsp->io_status);
+	bfa_trc(ioim->bfa, rsp->reuse_io_tag);
+
+	if (bfa_sm_cmp_state(ioim, bfa_ioim_sm_active))
+		bfa_os_assign(ioim->iosp->comp_rspmsg, *m);
+
+	switch (rsp->io_status) {
+	case BFI_IOIM_STS_OK:
+		bfa_stats(ioim->itnim, iocomp_ok);
+		if (rsp->reuse_io_tag == 0)
+			evt = BFA_IOIM_SM_DONE;
+		else
+			evt = BFA_IOIM_SM_COMP;
+		break;
+
+	case BFI_IOIM_STS_TIMEDOUT:
+		bfa_stats(ioim->itnim, iocomp_timedout);
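+		/* fall through - a timed-out I/O is completed as aborted */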
+	case BFI_IOIM_STS_ABORTED:
+		rsp->io_status = BFI_IOIM_STS_ABORTED;
+		bfa_stats(ioim->itnim, iocomp_aborted);
+		if (rsp->reuse_io_tag == 0)
+			evt = BFA_IOIM_SM_DONE;
+		else
+			evt = BFA_IOIM_SM_COMP;
+		break;
+
+	case BFI_IOIM_STS_PROTO_ERR:
+		bfa_stats(ioim->itnim, iocom_proto_err);
+		bfa_assert(rsp->reuse_io_tag);
+		evt = BFA_IOIM_SM_COMP;
+		break;
+
+	case BFI_IOIM_STS_SQER_NEEDED:
+		bfa_stats(ioim->itnim, iocom_sqer_needed);
+		bfa_assert(rsp->reuse_io_tag == 0);
+		evt = BFA_IOIM_SM_SQRETRY;
+		break;
+
+	case BFI_IOIM_STS_RES_FREE:
+		bfa_stats(ioim->itnim, iocom_res_free);
+		evt = BFA_IOIM_SM_FREE;
+		break;
+
+	case BFI_IOIM_STS_HOST_ABORTED:
+		bfa_stats(ioim->itnim, iocom_hostabrts);
+		if (rsp->abort_tag != ioim->abort_tag) {
+			bfa_trc(ioim->bfa, rsp->abort_tag);
+			bfa_trc(ioim->bfa, ioim->abort_tag);
+			return;
+		}
+
+		if (rsp->reuse_io_tag)
+			evt = BFA_IOIM_SM_ABORT_COMP;
+		else
+			evt = BFA_IOIM_SM_ABORT_DONE;
+		break;
+
+	case BFI_IOIM_STS_UTAG:
+		bfa_stats(ioim->itnim, iocom_utags);
+		evt = BFA_IOIM_SM_COMP_UTAG;
+		break;
+
+	default:
+		bfa_assert(0);
+	}
+
+	bfa_sm_send_event(ioim, evt);
+}
+
+void
+bfa_ioim_good_comp_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
+{
+	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
+	struct bfi_ioim_rsp_s *rsp = (struct bfi_ioim_rsp_s *) m;
+	struct bfa_ioim_s *ioim;
+	u16	iotag;
+
+	iotag = bfa_os_ntohs(rsp->io_tag);
+
+	ioim = BFA_IOIM_FROM_TAG(fcpim, iotag);
+	bfa_assert(ioim->iotag == iotag);
+
+	bfa_trc_fp(ioim->bfa, ioim->iotag);
+	bfa_ioim_cb_profile_comp(fcpim, ioim);
+
+	bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP_GOOD);
+}
+
+void
+bfa_ioim_profile_start(struct bfa_ioim_s *ioim)
+{
+	ioim->start_time = bfa_os_get_clock();
+}
+
+void
+bfa_ioim_profile_comp(struct bfa_ioim_s *ioim)
+{
+	u32 fcp_dl = bfa_cb_ioim_get_size(ioim->dio);
+	u32 index = bfa_ioim_get_index(fcp_dl);
+	u64 end_time = bfa_os_get_clock();
+	struct bfa_itnim_latency_s *io_lat =
+			&(ioim->itnim->ioprofile.io_latency);
+	u32 val = (u32)(end_time - ioim->start_time);
+
+	bfa_itnim_ioprofile_update(ioim->itnim, index);
+
+	io_lat->count[index]++;
+	io_lat->min[index] = (io_lat->min[index] < val) ?
+		io_lat->min[index] : val;
+	io_lat->max[index] = (io_lat->max[index] > val) ?
+		io_lat->max[index] : val;
+	io_lat->avg[index] += val;
+}
+/**
+ * Called by itnim to clean up IO while going offline.
+ */
+void
+bfa_ioim_cleanup(struct bfa_ioim_s *ioim)
+{
+	bfa_trc(ioim->bfa, ioim->iotag);
+	bfa_stats(ioim->itnim, io_cleanups);
+
+	ioim->iosp->tskim = NULL;
+	bfa_sm_send_event(ioim, BFA_IOIM_SM_CLEANUP);
+}
+
+void
+bfa_ioim_cleanup_tm(struct bfa_ioim_s *ioim, struct bfa_tskim_s *tskim)
+{
+	bfa_trc(ioim->bfa, ioim->iotag);
+	bfa_stats(ioim->itnim, io_tmaborts);
+
+	ioim->iosp->tskim = tskim;
+	bfa_sm_send_event(ioim, BFA_IOIM_SM_CLEANUP);
+}
+
+/**
+ * IOC failure handling.
+ */
+void
+bfa_ioim_iocdisable(struct bfa_ioim_s *ioim)
+{
+	bfa_trc(ioim->bfa, ioim->iotag);
+	bfa_stats(ioim->itnim, io_iocdowns);
+	bfa_sm_send_event(ioim, BFA_IOIM_SM_HWFAIL);
+}
+
+/**
+ * IO offline TOV popped. Fail the pending IO.
+ */
+void
+bfa_ioim_tov(struct bfa_ioim_s *ioim)
+{
+	bfa_trc(ioim->bfa, ioim->iotag);
+	bfa_sm_send_event(ioim, BFA_IOIM_SM_IOTOV);
+}
+
+
+
+/**
+ *  hal_ioim_api
+ */
+
+/**
+ * Allocate IOIM resource for initiator mode I/O request.
+ */
+struct bfa_ioim_s *
+bfa_ioim_alloc(struct bfa_s *bfa, struct bfad_ioim_s *dio,
+		struct bfa_itnim_s *itnim, u16 nsges)
+{
+	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
+	struct bfa_ioim_s *ioim;
+
+	/**
+	 * allocate IOIM resource
+	 */
+	bfa_q_deq(&fcpim->ioim_free_q, &ioim);
+	if (!ioim) {
+		bfa_stats(itnim, no_iotags);
+		return NULL;
+	}
+
+	ioim->dio = dio;
+	ioim->itnim = itnim;
+	ioim->nsges = nsges;
+	ioim->nsgpgs = 0;
+
+	bfa_stats(itnim, total_ios);
+	fcpim->ios_active++;
+
+	list_add_tail(&ioim->qe, &itnim->io_q);
+	bfa_trc_fp(ioim->bfa, ioim->iotag);
+
+	return ioim;
+}
+
+void
+bfa_ioim_free(struct bfa_ioim_s *ioim)
+{
+	struct bfa_fcpim_mod_s *fcpim = ioim->fcpim;
+
+	bfa_trc_fp(ioim->bfa, ioim->iotag);
+	bfa_assert_fp(bfa_sm_cmp_state(ioim, bfa_ioim_sm_uninit));
+
+	bfa_assert_fp(list_empty(&ioim->sgpg_q) ||
+			(ioim->nsges > BFI_SGE_INLINE));
+
+	if (ioim->nsgpgs > 0)
+		bfa_sgpg_mfree(ioim->bfa, &ioim->sgpg_q, ioim->nsgpgs);
+
+	bfa_stats(ioim->itnim, io_comps);
+	fcpim->ios_active--;
+
+	list_del(&ioim->qe);
+	list_add_tail(&ioim->qe, &fcpim->ioim_free_q);
+}
+
+void
+bfa_ioim_start(struct bfa_ioim_s *ioim)
+{
+	bfa_trc_fp(ioim->bfa, ioim->iotag);
+
+	bfa_ioim_cb_profile_start(ioim->fcpim, ioim);
+
+	/**
+	 * Obtain the queue over which this request has to be issued
+	 */
+	ioim->reqq = bfa_fcpim_ioredirect_enabled(ioim->bfa) ?
+			bfa_cb_ioim_get_reqq(ioim->dio) :
+			bfa_itnim_get_reqq(ioim);
+
+	bfa_sm_send_event(ioim, BFA_IOIM_SM_START);
+}
+
+/**
+ * Driver I/O abort request.
+ */
+bfa_status_t
+bfa_ioim_abort(struct bfa_ioim_s *ioim)
+{
+	bfa_trc(ioim->bfa, ioim->iotag);
+
+	if (!bfa_ioim_is_abortable(ioim))
+		return BFA_STATUS_FAILED;
+
+	bfa_stats(ioim->itnim, io_aborts);
+	bfa_sm_send_event(ioim, BFA_IOIM_SM_ABORT);
+
+	return BFA_STATUS_OK;
+}
+
+
+/**
+ *  BFA TSKIM state machine functions
+ */
+
+/**
+ *	Task management command beginning state.
+ */
+static void
+bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
+{
+	bfa_trc(tskim->bfa, event);
+
+	switch (event) {
+	case BFA_TSKIM_SM_START:
+		bfa_sm_set_state(tskim, bfa_tskim_sm_active);
+		bfa_tskim_gather_ios(tskim);
+
+		/**
+		 * If device is offline, do not send TM on wire. Just cleanup
+		 * any pending IO requests and complete TM request.
+		 */
+		if (!bfa_itnim_is_online(tskim->itnim)) {
+			bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
+			tskim->tsk_status = BFI_TSKIM_STS_OK;
+			bfa_tskim_cleanup_ios(tskim);
+			return;
+		}
+
+		if (!bfa_tskim_send(tskim)) {
+			bfa_sm_set_state(tskim, bfa_tskim_sm_qfull);
+			bfa_stats(tskim->itnim, tm_qwait);
+			bfa_reqq_wait(tskim->bfa, tskim->itnim->reqq,
+					  &tskim->reqq_wait);
+		}
+		break;
+
+	default:
+		bfa_sm_fault(tskim->bfa, event);
+	}
+}
+
+/**
+ *	TM command is active, awaiting completion from firmware to
+ *	clean up IO requests in TM scope.
+ */
+static void
+bfa_tskim_sm_active(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
+{
+	bfa_trc(tskim->bfa, event);
+
+	switch (event) {
+	case BFA_TSKIM_SM_DONE:
+		bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
+		bfa_tskim_cleanup_ios(tskim);
+		break;
+
+	case BFA_TSKIM_SM_CLEANUP:
+		bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup);
+		if (!bfa_tskim_send_abort(tskim)) {
+			bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup_qfull);
+			bfa_stats(tskim->itnim, tm_qwait);
+			bfa_reqq_wait(tskim->bfa, tskim->itnim->reqq,
+				&tskim->reqq_wait);
+		}
+		break;
+
+	case BFA_TSKIM_SM_HWFAIL:
+		bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
+		bfa_tskim_iocdisable_ios(tskim);
+		bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
+		break;
+
+	default:
+		bfa_sm_fault(tskim->bfa, event);
+	}
+}
+
+/**
+ *	An active TM is being cleaned up since ITN is offline. Awaiting cleanup
+ *	completion event from firmware.
+ */
+static void
+bfa_tskim_sm_cleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
+{
+	bfa_trc(tskim->bfa, event);
+
+	switch (event) {
+	case BFA_TSKIM_SM_DONE:
+		/**
+		 * Ignore and wait for ABORT completion from firmware.
+		 */
+		break;
+
+	case BFA_TSKIM_SM_CLEANUP_DONE:
+		bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
+		bfa_tskim_cleanup_ios(tskim);
+		break;
+
+	case BFA_TSKIM_SM_HWFAIL:
+		bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
+		bfa_tskim_iocdisable_ios(tskim);
+		bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
+		break;
+
+	default:
+		bfa_sm_fault(tskim->bfa, event);
+	}
+}
+
+static void
+bfa_tskim_sm_iocleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
+{
+	bfa_trc(tskim->bfa, event);
+
+	switch (event) {
+	case BFA_TSKIM_SM_IOS_DONE:
+		bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
+		bfa_tskim_qcomp(tskim, __bfa_cb_tskim_done);
+		break;
+
+	case BFA_TSKIM_SM_CLEANUP:
+		/**
+		 * Ignore, TM command completed on wire.
+		 * Notify TM completion on IO cleanup completion.
+		 */
+		break;
+
+	case BFA_TSKIM_SM_HWFAIL:
+		bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
+		bfa_tskim_iocdisable_ios(tskim);
+		bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
+		break;
+
+	default:
+		bfa_sm_fault(tskim->bfa, event);
+	}
+}
+
+/**
+ *	Task management command is waiting for room in request CQ
+ */
+static void
+bfa_tskim_sm_qfull(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
+{
+	bfa_trc(tskim->bfa, event);
+
+	switch (event) {
+	case BFA_TSKIM_SM_QRESUME:
+		bfa_sm_set_state(tskim, bfa_tskim_sm_active);
+		bfa_tskim_send(tskim);
+		break;
+
+	case BFA_TSKIM_SM_CLEANUP:
+		/**
+		 * No need to send TM on wire since ITN is offline.
+		 */
+		bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
+		bfa_reqq_wcancel(&tskim->reqq_wait);
+		bfa_tskim_cleanup_ios(tskim);
+		break;
+
+	case BFA_TSKIM_SM_HWFAIL:
+		bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
+		bfa_reqq_wcancel(&tskim->reqq_wait);
+		bfa_tskim_iocdisable_ios(tskim);
+		bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
+		break;
+
+	default:
+		bfa_sm_fault(tskim->bfa, event);
+	}
+}
+
+/**
+ *	Task management command is active, awaiting room in the request CQ
+ *	to send the cleanup request.
+ */
+static void
+bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim,
+		enum bfa_tskim_event event)
+{
+	bfa_trc(tskim->bfa, event);
+
+	switch (event) {
+	case BFA_TSKIM_SM_DONE:
+		bfa_reqq_wcancel(&tskim->reqq_wait);
+		/**
+		 * Fall through.
+		 */
+
+	case BFA_TSKIM_SM_QRESUME:
+		bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup);
+		bfa_tskim_send_abort(tskim);
+		break;
+
+	case BFA_TSKIM_SM_HWFAIL:
+		bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
+		bfa_reqq_wcancel(&tskim->reqq_wait);
+		bfa_tskim_iocdisable_ios(tskim);
+		bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
+		break;
+
+	default:
+		bfa_sm_fault(tskim->bfa, event);
+	}
+}
+
+/**
+ *	BFA callback is pending
+ */
+static void
+bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
+{
+	bfa_trc(tskim->bfa, event);
+
+	switch (event) {
+	case BFA_TSKIM_SM_HCB:
+		bfa_sm_set_state(tskim, bfa_tskim_sm_uninit);
+		bfa_tskim_free(tskim);
+		break;
+
+	case BFA_TSKIM_SM_CLEANUP:
+		bfa_tskim_notify_comp(tskim);
+		break;
+
+	case BFA_TSKIM_SM_HWFAIL:
+		break;
+
+	default:
+		bfa_sm_fault(tskim->bfa, event);
+	}
+}
+
+
+
+/**
+ *  hal_tskim_private
+ */
+
+static void
+__bfa_cb_tskim_done(void *cbarg, bfa_boolean_t complete)
+{
+	struct bfa_tskim_s *tskim = cbarg;
+
+	if (!complete) {
+		bfa_sm_send_event(tskim, BFA_TSKIM_SM_HCB);
+		return;
+	}
+
+	bfa_stats(tskim->itnim, tm_success);
+	bfa_cb_tskim_done(tskim->bfa->bfad, tskim->dtsk, tskim->tsk_status);
+}
+
+static void
+__bfa_cb_tskim_failed(void *cbarg, bfa_boolean_t complete)
+{
+	struct bfa_tskim_s *tskim = cbarg;
+
+	if (!complete) {
+		bfa_sm_send_event(tskim, BFA_TSKIM_SM_HCB);
+		return;
+	}
+
+	bfa_stats(tskim->itnim, tm_failures);
+	bfa_cb_tskim_done(tskim->bfa->bfad, tskim->dtsk,
+				BFI_TSKIM_STS_FAILED);
+}
+
+static	bfa_boolean_t
+bfa_tskim_match_scope(struct bfa_tskim_s *tskim, lun_t lun)
+{
+	switch (tskim->tm_cmnd) {
+	case FCP_TM_TARGET_RESET:
+		return BFA_TRUE;
+
+	case FCP_TM_ABORT_TASK_SET:
+	case FCP_TM_CLEAR_TASK_SET:
+	case FCP_TM_LUN_RESET:
+	case FCP_TM_CLEAR_ACA:
+		return (tskim->lun == lun);
+
+	default:
+		bfa_assert(0);
+	}
+
+	return BFA_FALSE;
+}
+
+/**
+ *	Gather affected IO requests and task management commands.
+ */
+static void
+bfa_tskim_gather_ios(struct bfa_tskim_s *tskim)
+{
+	struct bfa_itnim_s *itnim = tskim->itnim;
+	struct bfa_ioim_s *ioim;
+	struct list_head	*qe, *qen;
+
+	INIT_LIST_HEAD(&tskim->io_q);
+
+	/**
+	 * Gather any active IO requests first.
+	 */
+	list_for_each_safe(qe, qen, &itnim->io_q) {
+		ioim = (struct bfa_ioim_s *) qe;
+		if (bfa_tskim_match_scope
+			(tskim, bfa_cb_ioim_get_lun(ioim->dio))) {
+			list_del(&ioim->qe);
+			list_add_tail(&ioim->qe, &tskim->io_q);
+		}
+	}
+
+	/**
+	 * Failback any pending IO requests immediately.
+	 */
+	list_for_each_safe(qe, qen, &itnim->pending_q) {
+		ioim = (struct bfa_ioim_s *) qe;
+		if (bfa_tskim_match_scope
+			(tskim, bfa_cb_ioim_get_lun(ioim->dio))) {
+			list_del(&ioim->qe);
+			list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
+			bfa_ioim_tov(ioim);
+		}
+	}
+}
+
+/**
+ *	IO cleanup completion
+ */
+static void
+bfa_tskim_cleanp_comp(void *tskim_cbarg)
+{
+	struct bfa_tskim_s *tskim = tskim_cbarg;
+
+	bfa_stats(tskim->itnim, tm_io_comps);
+	bfa_sm_send_event(tskim, BFA_TSKIM_SM_IOS_DONE);
+}
+
+/**
+ *	Clean up all IO requests gathered under this task management command.
+ */
+static void
+bfa_tskim_cleanup_ios(struct bfa_tskim_s *tskim)
+{
+	struct bfa_ioim_s *ioim;
+	struct list_head	*qe, *qen;
+
+	bfa_wc_init(&tskim->wc, bfa_tskim_cleanp_comp, tskim);
+
+	list_for_each_safe(qe, qen, &tskim->io_q) {
+		ioim = (struct bfa_ioim_s *) qe;
+		bfa_wc_up(&tskim->wc);
+		bfa_ioim_cleanup_tm(ioim, tskim);
+	}
+
+	bfa_wc_wait(&tskim->wc);
+}
+
+/**
+ *	Send task management request to firmware.
+ */
+static bfa_boolean_t
+bfa_tskim_send(struct bfa_tskim_s *tskim)
+{
+	struct bfa_itnim_s *itnim = tskim->itnim;
+	struct bfi_tskim_req_s *m;
+
+	/**
+	 * check for room in queue to send request now
+	 */
+	m = bfa_reqq_next(tskim->bfa, itnim->reqq);
+	if (!m)
+		return BFA_FALSE;
+
+	/**
+	 * build i/o request message next
+	 */
+	bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_TM_REQ,
+			bfa_lpuid(tskim->bfa));
+
+	m->tsk_tag = bfa_os_htons(tskim->tsk_tag);
+	m->itn_fhdl = tskim->itnim->rport->fw_handle;
+	m->t_secs = tskim->tsecs;
+	m->lun = tskim->lun;
+	m->tm_flags = tskim->tm_cmnd;
+
+	/**
+	 * queue I/O message to firmware
+	 */
+	bfa_reqq_produce(tskim->bfa, itnim->reqq);
+	return BFA_TRUE;
+}
+
+/**
+ *	Send abort request to firmware to clean up an active TM command.
+ */
+static bfa_boolean_t
+bfa_tskim_send_abort(struct bfa_tskim_s *tskim)
+{
+	struct bfa_itnim_s	*itnim = tskim->itnim;
+	struct bfi_tskim_abortreq_s	*m;
+
+	/**
+	 * check for room in queue to send request now
+	 */
+	m = bfa_reqq_next(tskim->bfa, itnim->reqq);
+	if (!m)
+		return BFA_FALSE;
+
+	/**
+	 * build i/o request message next
+	 */
+	bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_ABORT_REQ,
+			bfa_lpuid(tskim->bfa));
+
+	m->tsk_tag  = bfa_os_htons(tskim->tsk_tag);
+
+	/**
+	 * queue I/O message to firmware
+	 */
+	bfa_reqq_produce(tskim->bfa, itnim->reqq);
+	return BFA_TRUE;
+}
+
+/**
+ *	Call to resume task management cmnd waiting for room in request queue.
+ */
+static void
+bfa_tskim_qresume(void *cbarg)
+{
+	struct bfa_tskim_s *tskim = cbarg;
+
+	bfa_stats(tskim->itnim, tm_qresumes);
+	bfa_sm_send_event(tskim, BFA_TSKIM_SM_QRESUME);
+}
+
+/**
+ * Clean up IOs associated with a task management command on IOC failures.
+ */
+static void
+bfa_tskim_iocdisable_ios(struct bfa_tskim_s *tskim)
+{
+	struct bfa_ioim_s *ioim;
+	struct list_head	*qe, *qen;
+
+	list_for_each_safe(qe, qen, &tskim->io_q) {
+		ioim = (struct bfa_ioim_s *) qe;
+		bfa_ioim_iocdisable(ioim);
+	}
+}
+
+
+
+/**
+ *  hal_tskim_friend
+ */
+
+/**
+ * Notification on completions from related ioim.
+ */
+void
+bfa_tskim_iodone(struct bfa_tskim_s *tskim)
+{
+	bfa_wc_down(&tskim->wc);
+}
+
+/**
+ * Handle IOC h/w failure notification from itnim.
+ */
+void
+bfa_tskim_iocdisable(struct bfa_tskim_s *tskim)
+{
+	tskim->notify = BFA_FALSE;
+	bfa_stats(tskim->itnim, tm_iocdowns);
+	bfa_sm_send_event(tskim, BFA_TSKIM_SM_HWFAIL);
+}
+
+/**
+ * Clean up TM command and associated IOs as part of ITNIM offline.
+ */
+void
+bfa_tskim_cleanup(struct bfa_tskim_s *tskim)
+{
+	tskim->notify = BFA_TRUE;
+	bfa_stats(tskim->itnim, tm_cleanups);
+	bfa_sm_send_event(tskim, BFA_TSKIM_SM_CLEANUP);
+}
+
+/**
+ *	Memory allocation and initialization.
+ */
+void
+bfa_tskim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo)
+{
+	struct bfa_tskim_s *tskim;
+	u16	i;
+
+	INIT_LIST_HEAD(&fcpim->tskim_free_q);
+
+	tskim = (struct bfa_tskim_s *) bfa_meminfo_kva(minfo);
+	fcpim->tskim_arr = tskim;
+
+	for (i = 0; i < fcpim->num_tskim_reqs; i++, tskim++) {
+		/*
+		 * initialize TSKIM
+		 */
+		bfa_os_memset(tskim, 0, sizeof(struct bfa_tskim_s));
+		tskim->tsk_tag = i;
+		tskim->bfa	= fcpim->bfa;
+		tskim->fcpim	= fcpim;
+		tskim->notify  = BFA_FALSE;
+		bfa_reqq_winit(&tskim->reqq_wait, bfa_tskim_qresume,
+					tskim);
+		bfa_sm_set_state(tskim, bfa_tskim_sm_uninit);
+
+		list_add_tail(&tskim->qe, &fcpim->tskim_free_q);
+	}
+
+	bfa_meminfo_kva(minfo) = (u8 *) tskim;
+}
+
+void
+bfa_tskim_detach(struct bfa_fcpim_mod_s *fcpim)
+{
+	/**
+	 * @todo
+	 */
+}
+
+void
+bfa_tskim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
+{
+	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
+	struct bfi_tskim_rsp_s *rsp = (struct bfi_tskim_rsp_s *) m;
+	struct bfa_tskim_s *tskim;
+	u16	tsk_tag = bfa_os_ntohs(rsp->tsk_tag);
+
+	tskim = BFA_TSKIM_FROM_TAG(fcpim, tsk_tag);
+	bfa_assert(tskim->tsk_tag == tsk_tag);
+
+	tskim->tsk_status = rsp->tsk_status;
+
+	/**
+	 * Firmware sends BFI_TSKIM_STS_ABORTED status for abort
+	 * requests. All other statuses are for normal completions.
+	 */
+	if (rsp->tsk_status == BFI_TSKIM_STS_ABORTED) {
+		bfa_stats(tskim->itnim, tm_cleanup_comps);
+		bfa_sm_send_event(tskim, BFA_TSKIM_SM_CLEANUP_DONE);
+	} else {
+		bfa_stats(tskim->itnim, tm_fw_rsps);
+		bfa_sm_send_event(tskim, BFA_TSKIM_SM_DONE);
+	}
+}
+
+
+
+/**
+ *  hal_tskim_api
+ */
+
+
+struct bfa_tskim_s *
+bfa_tskim_alloc(struct bfa_s *bfa, struct bfad_tskim_s *dtsk)
+{
+	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
+	struct bfa_tskim_s *tskim;
+
+	bfa_q_deq(&fcpim->tskim_free_q, &tskim);
+
+	if (tskim)
+		tskim->dtsk = dtsk;
+
+	return tskim;
+}
+
+void
+bfa_tskim_free(struct bfa_tskim_s *tskim)
+{
+	bfa_assert(bfa_q_is_on_q_func(&tskim->itnim->tsk_q, &tskim->qe));
+	list_del(&tskim->qe);
+	list_add_tail(&tskim->qe, &tskim->fcpim->tskim_free_q);
+}
+
+/**
+ *	Start a task management command.
+ *
+ * @param[in]	tskim	BFA task management command instance
+ * @param[in]	itnim	i-t nexus for the task management command
+ * @param[in]	lun	lun, if applicable
+ * @param[in]	tm_cmnd	Task management command code.
+ * @param[in]	t_secs	Timeout in seconds
+ *
+ * @return None.
+ */
+void
+bfa_tskim_start(struct bfa_tskim_s *tskim, struct bfa_itnim_s *itnim, lun_t lun,
+			enum fcp_tm_cmnd tm_cmnd, u8 tsecs)
+{
+	tskim->itnim	= itnim;
+	tskim->lun	= lun;
+	tskim->tm_cmnd = tm_cmnd;
+	tskim->tsecs	= tsecs;
+	tskim->notify  = BFA_FALSE;
+	bfa_stats(itnim, tm_cmnds);
+
+	list_add_tail(&tskim->qe, &itnim->tsk_q);
+	bfa_sm_send_event(tskim, BFA_TSKIM_SM_START);
+}

+ 401 - 0
drivers/scsi/bfa/bfa_fcpim.h

@@ -0,0 +1,401 @@
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ *
+ * Linux driver for Brocade Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef __BFA_FCPIM_H__
+#define __BFA_FCPIM_H__
+
+#include "bfa.h"
+#include "bfa_svc.h"
+#include "bfi_ms.h"
+#include "bfa_defs_svc.h"
+#include "bfa_cs.h"
+
+
+#define BFA_ITNIM_MIN   32
+#define BFA_ITNIM_MAX   1024
+
+#define BFA_IOIM_MIN	8
+#define BFA_IOIM_MAX	2000
+
+#define BFA_TSKIM_MIN   4
+#define BFA_TSKIM_MAX   512
+#define BFA_FCPIM_PATHTOV_DEF	(30 * 1000)	/* in millisecs */
+#define BFA_FCPIM_PATHTOV_MAX	(90 * 1000)	/* in millisecs */
+
+
+#define bfa_itnim_ioprofile_update(__itnim, __index)			\
+	(__itnim->ioprofile.iocomps[__index]++)
+
+#define BFA_IOIM_RETRY_TAG_OFFSET 11
+#define BFA_IOIM_RETRY_TAG_MASK 0x07ff /* 2K IOs */
+#define BFA_IOIM_RETRY_MAX 7
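+/*
+ * I/O tag layout: bits [10:0] index the ioim array (2K IOs); the bits above
+ * BFA_IOIM_RETRY_TAG_OFFSET carry the retry count used for SQER retries.
+ */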
+
+/* Buckets are 512 bytes to 2MB */
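+/* e.g. a 512-byte I/O maps to bucket 1, a 4KB I/O to bucket 4 */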
+static inline u32
+bfa_ioim_get_index(u32 n) {
+	int pos = 0;
+	if (n >= (1UL)<<22)
+		return BFA_IOBUCKET_MAX - 1;
+	n >>= 8;
+	if (n >= (1UL)<<16) {
+		n >>= 16;
+		pos += 16;
+	}
+	if (n >= 1 << 8) {
+		n >>= 8;
+		pos += 8;
+	}
+	if (n >= 1 << 4) {
+		n >>= 4;
+		pos += 4;
+	}
+	if (n >= 1 << 2) {
+		n >>= 2;
+		pos += 2;
+	}
+	if (n >= 1 << 1)
+		pos += 1;
+
+	return (n == 0) ? (0) : pos;
+}
+
+/*
+ * forward declarations
+ */
+struct bfa_ioim_s;
+struct bfa_tskim_s;
+struct bfad_ioim_s;
+struct bfad_tskim_s;
+
+typedef void    (*bfa_fcpim_profile_t) (struct bfa_ioim_s *ioim);
+
+struct bfa_fcpim_mod_s {
+	struct bfa_s		*bfa;
+	struct bfa_itnim_s	*itnim_arr;
+	struct bfa_ioim_s	*ioim_arr;
+	struct bfa_ioim_sp_s	*ioim_sp_arr;
+	struct bfa_tskim_s	*tskim_arr;
+	struct bfa_dma_s	snsbase;
+	int			num_itnims;
+	int			num_ioim_reqs;
+	int			num_tskim_reqs;
+	u32			path_tov;
+	u16			q_depth;
+	u8			reqq;		/*  Request queue to be used */
+	u8			rsvd;
+	struct list_head	itnim_q;	/*  queue of active itnim */
+	struct list_head	ioim_free_q;	/*  free IO resources	*/
+	struct list_head	ioim_resfree_q; /*  IOs waiting for f/w */
+	struct list_head	ioim_comp_q;	/*  IO global comp Q	*/
+	struct list_head	tskim_free_q;
+	u32		ios_active;	/*  current active IOs	*/
+	u32		delay_comp;
+	struct bfa_fcpim_del_itn_stats_s del_itn_stats;
+	bfa_boolean_t		ioredirect;
+	bfa_boolean_t		io_profile;
+	u32		io_profile_start_time;
+	bfa_fcpim_profile_t     profile_comp;
+	bfa_fcpim_profile_t     profile_start;
+};
+
+/**
+ * BFA IO (initiator mode)
+ */
+struct bfa_ioim_s {
+	struct list_head	qe;		/*  queue element	*/
+	bfa_sm_t		sm;		/*  BFA ioim state machine */
+	struct bfa_s		*bfa;		/*  BFA module	*/
+	struct bfa_fcpim_mod_s	*fcpim;		/*  parent fcpim module */
+	struct bfa_itnim_s	*itnim;		/*  i-t-n nexus for this IO  */
+	struct bfad_ioim_s	*dio;		/*  driver IO handle	*/
+	u16		iotag;		/*  FWI IO tag	*/
+	u16		abort_tag;	/*  unique abort request tag */
+	u16		nsges;		/*  number of SG elements */
+	u16		nsgpgs;		/*  number of SG pages	*/
+	struct bfa_sgpg_s	*sgpg;		/*  first SG page	*/
+	struct list_head	sgpg_q;		/*  allocated SG pages	*/
+	struct bfa_cb_qe_s	hcb_qe;		/*  bfa callback qelem	*/
+	bfa_cb_cbfn_t		io_cbfn;	/*  IO completion handler */
+	struct bfa_ioim_sp_s *iosp;		/*  slow-path IO handling */
+	u8		reqq;		/*  Request queue for I/O */
+	u64 start_time;			/*  IO's Profile start val */
+};
+
+
+struct bfa_ioim_sp_s {
+	struct bfi_msg_s	comp_rspmsg;	/*  IO comp f/w response */
+	u8			*snsinfo;	/*  sense info for this IO   */
+	struct bfa_sgpg_wqe_s sgpg_wqe;	/*  waitq elem for sgpg	*/
+	struct bfa_reqq_wait_s reqq_wait;	/*  to wait for room in reqq */
+	bfa_boolean_t		abort_explicit;	/*  aborted by OS	*/
+	struct bfa_tskim_s	*tskim;		/*  Relevant TM cmd	*/
+};
+
+/**
+ * BFA Task management command (initiator mode)
+ */
+struct bfa_tskim_s {
+	struct list_head	qe;
+	bfa_sm_t		sm;
+	struct bfa_s	*bfa;	/*  BFA module  */
+	struct bfa_fcpim_mod_s  *fcpim;	/*  parent fcpim module	*/
+	struct bfa_itnim_s	*itnim;	/*  i-t-n nexus for this IO  */
+	struct bfad_tskim_s	*dtsk;   /*  driver task mgmt cmnd	*/
+	bfa_boolean_t	notify;	/*  notify itnim on TM comp  */
+	lun_t	lun;	/*  lun if applicable	*/
+	enum fcp_tm_cmnd	tm_cmnd;	/*  task management command  */
+	u16	tsk_tag;	/*  FWI IO tag	*/
+	u8	tsecs;	/*  timeout in seconds	*/
+	struct bfa_reqq_wait_s  reqq_wait;   /*  to wait for room in reqq */
+	struct list_head	io_q;	/*  queue of affected IOs	*/
+	struct bfa_wc_s	wc;	/*  waiting counter	*/
+	struct bfa_cb_qe_s	hcb_qe;	/*  bfa callback qelem	*/
+	enum bfi_tskim_status   tsk_status;  /*  TM status	*/
+};
+
+
+/**
+ * BFA i-t-n (initiator mode)
+ */
+struct bfa_itnim_s {
+	struct list_head	qe;		/*  queue element	*/
+	bfa_sm_t	  sm;		/*  i-t-n im BFA state machine  */
+	struct bfa_s	*bfa;		/*  bfa instance	*/
+	struct bfa_rport_s *rport;	/*  bfa rport	*/
+	void	*ditn;		/*  driver i-t-n structure	*/
+	struct bfi_mhdr_s	mhdr;	/*  pre-built mhdr	*/
+	u8	msg_no;		/*  itnim/rport firmware handle */
+	u8	reqq;		/*  CQ for requests	*/
+	struct bfa_cb_qe_s	hcb_qe;	/*  bfa callback qelem	*/
+	struct list_head pending_q;	/*  queue of pending IO requests */
+	struct list_head io_q;		/*  queue of active IO requests */
+	struct list_head io_cleanup_q;	/*  IO being cleaned up	*/
+	struct list_head tsk_q;		/*  queue of active TM commands */
+	struct list_head  delay_comp_q; /*  queue of failed inflight cmds */
+	bfa_boolean_t   seq_rec;	/*  SQER supported	*/
+	bfa_boolean_t   is_online;	/*  itnim is ONLINE for IO	*/
+	bfa_boolean_t   iotov_active;	/*  IO TOV timer is active	 */
+	struct bfa_wc_s	wc;	/*  waiting counter	*/
+	struct bfa_timer_s timer;	/*  pending IO TOV		 */
+	struct bfa_reqq_wait_s reqq_wait; /*  to wait for room in reqq */
+	struct bfa_fcpim_mod_s *fcpim;	/*  fcpim module	*/
+	struct bfa_itnim_iostats_s	stats;
+	struct bfa_itnim_ioprofile_s  ioprofile;
+};
+
+
+#define bfa_itnim_is_online(_itnim) ((_itnim)->is_online)
+#define BFA_FCPIM_MOD(_hal) (&(_hal)->modules.fcpim_mod)
+#define BFA_IOIM_FROM_TAG(_fcpim, _iotag)	\
+	(&fcpim->ioim_arr[(_iotag & BFA_IOIM_RETRY_TAG_MASK)])
+#define BFA_TSKIM_FROM_TAG(_fcpim, _tmtag)	\
+	(&fcpim->tskim_arr[_tmtag & (fcpim->num_tskim_reqs - 1)])
+
+#define bfa_io_profile_start_time(_bfa)	\
+	(_bfa->modules.fcpim_mod.io_profile_start_time)
+#define bfa_fcpim_get_io_profile(_bfa)	\
+	(_bfa->modules.fcpim_mod.io_profile)
+
+static inline bfa_boolean_t
+bfa_ioim_get_iotag(struct bfa_ioim_s *ioim)
+{
+	u16 k = ioim->iotag;
+
+	k >>= BFA_IOIM_RETRY_TAG_OFFSET;
+	k++;
+
+	if (k > BFA_IOIM_RETRY_MAX)
+		return BFA_FALSE;
+	ioim->iotag &= BFA_IOIM_RETRY_TAG_MASK;
+	ioim->iotag |= k<<BFA_IOIM_RETRY_TAG_OFFSET;
+	return BFA_TRUE;
+}
+/*
+ * function prototypes
+ */
+void	bfa_ioim_attach(struct bfa_fcpim_mod_s *fcpim,
+					struct bfa_meminfo_s *minfo);
+void	bfa_ioim_detach(struct bfa_fcpim_mod_s *fcpim);
+void	bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
+void	bfa_ioim_good_comp_isr(struct bfa_s *bfa,
+					struct bfi_msg_s *msg);
+void	bfa_ioim_cleanup(struct bfa_ioim_s *ioim);
+void	bfa_ioim_cleanup_tm(struct bfa_ioim_s *ioim,
+					struct bfa_tskim_s *tskim);
+void	bfa_ioim_iocdisable(struct bfa_ioim_s *ioim);
+void	bfa_ioim_tov(struct bfa_ioim_s *ioim);
+
+void	bfa_tskim_attach(struct bfa_fcpim_mod_s *fcpim,
+					struct bfa_meminfo_s *minfo);
+void	bfa_tskim_detach(struct bfa_fcpim_mod_s *fcpim);
+void	bfa_tskim_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
+void	bfa_tskim_iodone(struct bfa_tskim_s *tskim);
+void	bfa_tskim_iocdisable(struct bfa_tskim_s *tskim);
+void	bfa_tskim_cleanup(struct bfa_tskim_s *tskim);
+
+void	bfa_itnim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
+					u32 *dm_len);
+void	bfa_itnim_attach(struct bfa_fcpim_mod_s *fcpim,
+					struct bfa_meminfo_s *minfo);
+void	bfa_itnim_detach(struct bfa_fcpim_mod_s *fcpim);
+void	bfa_itnim_iocdisable(struct bfa_itnim_s *itnim);
+void	bfa_itnim_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
+void	bfa_itnim_iodone(struct bfa_itnim_s *itnim);
+void	bfa_itnim_tskdone(struct bfa_itnim_s *itnim);
+bfa_boolean_t   bfa_itnim_hold_io(struct bfa_itnim_s *itnim);
+void bfa_ioim_profile_comp(struct bfa_ioim_s *ioim);
+void bfa_ioim_profile_start(struct bfa_ioim_s *ioim);
+
+
+/*
+ * bfa fcpim module API functions
+ */
+void		bfa_fcpim_path_tov_set(struct bfa_s *bfa, u16 path_tov);
+u16	bfa_fcpim_path_tov_get(struct bfa_s *bfa);
+void		bfa_fcpim_qdepth_set(struct bfa_s *bfa, u16 q_depth);
+u16	bfa_fcpim_qdepth_get(struct bfa_s *bfa);
+bfa_status_t bfa_fcpim_get_modstats(struct bfa_s *bfa,
+	 struct bfa_itnim_iostats_s *modstats);
+bfa_status_t bfa_fcpim_port_iostats(struct bfa_s *bfa,
+		struct bfa_itnim_iostats_s *stats, u8 lp_tag);
+bfa_status_t bfa_fcpim_get_del_itn_stats(struct bfa_s *bfa,
+	 struct bfa_fcpim_del_itn_stats_s *modstats);
+bfa_status_t bfa_fcpim_port_clear_iostats(struct bfa_s *bfa, u8 lp_tag);
+void bfa_fcpim_add_stats(struct bfa_itnim_iostats_s *fcpim_stats,
+		struct bfa_itnim_iostats_s *itnim_stats);
+bfa_status_t bfa_fcpim_clr_modstats(struct bfa_s *bfa);
+void		bfa_fcpim_set_ioredirect(struct bfa_s *bfa,
+				bfa_boolean_t state);
+void		bfa_fcpim_update_ioredirect(struct bfa_s *bfa);
+bfa_status_t bfa_fcpim_profile_on(struct bfa_s *bfa, u32 time);
+bfa_status_t bfa_fcpim_profile_off(struct bfa_s *bfa);
+#define bfa_fcpim_ioredirect_enabled(__bfa)				\
+	(((struct bfa_fcpim_mod_s *)(BFA_FCPIM_MOD(__bfa)))->ioredirect)
+
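+/* Round-robin the shared reqq index across the BFI_IOC_MAX_CQS request CQs */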
+#define bfa_fcpim_get_next_reqq(__bfa, __qid)				\
+{									\
+	struct bfa_fcpim_mod_s *__fcpim = BFA_FCPIM_MOD(__bfa);      \
+	__fcpim->reqq++;						\
+	__fcpim->reqq &= (BFI_IOC_MAX_CQS - 1);      \
+	*(__qid) = __fcpim->reqq;					\
+}
+
+#define bfa_iocfc_map_msg_to_qid(__msg, __qid)				\
+	*(__qid) = (u8)((__msg) & (BFI_IOC_MAX_CQS - 1));
+/*
+ * bfa itnim API functions
+ */
+struct bfa_itnim_s *bfa_itnim_create(struct bfa_s *bfa,
+					struct bfa_rport_s *rport, void *itnim);
+void		bfa_itnim_delete(struct bfa_itnim_s *itnim);
+void		bfa_itnim_online(struct bfa_itnim_s *itnim,
+				 bfa_boolean_t seq_rec);
+void		bfa_itnim_offline(struct bfa_itnim_s *itnim);
+void		bfa_itnim_get_stats(struct bfa_itnim_s *itnim,
+			struct bfa_itnim_iostats_s *stats);
+void		bfa_itnim_clear_stats(struct bfa_itnim_s *itnim);
+bfa_status_t	bfa_itnim_get_ioprofile(struct bfa_itnim_s *itnim,
+		struct bfa_itnim_ioprofile_s *ioprofile);
+#define bfa_itnim_get_reqq(__ioim) (((struct bfa_ioim_s *)__ioim)->itnim->reqq)
+
+/**
+ *	BFA completion callback for bfa_itnim_online().
+ *
+ * @param[in]		itnim		FCS or driver itnim instance
+ *
+ * return None
+ */
+void	bfa_cb_itnim_online(void *itnim);
+
+/**
+ *	BFA completion callback for bfa_itnim_offline().
+ *
+ * @param[in]		itnim		FCS or driver itnim instance
+ *
+ * return None
+ */
+void	bfa_cb_itnim_offline(void *itnim);
+void	bfa_cb_itnim_tov_begin(void *itnim);
+void	bfa_cb_itnim_tov(void *itnim);
+
+/**
+ *	BFA notification to FCS/driver for second level error recovery.
+ *
+ * At least one I/O request has timed out and the target is unresponsive to
+ * repeated abort requests. Second level error recovery should be initiated
+ * by starting implicit logout and recovery procedures.
+ *
+ * @param[in]		itnim		FCS or driver itnim instance
+ *
+ * return None
+ */
+void	bfa_cb_itnim_sler(void *itnim);
+
+/*
+ * bfa ioim API functions
+ */
+struct bfa_ioim_s	*bfa_ioim_alloc(struct bfa_s *bfa,
+					struct bfad_ioim_s *dio,
+					struct bfa_itnim_s *itnim,
+					u16 nsges);
+
+void		bfa_ioim_free(struct bfa_ioim_s *ioim);
+void		bfa_ioim_start(struct bfa_ioim_s *ioim);
+bfa_status_t	bfa_ioim_abort(struct bfa_ioim_s *ioim);
+void		bfa_ioim_delayed_comp(struct bfa_ioim_s *ioim,
+				      bfa_boolean_t iotov);
+
+
+/**
+ *	I/O completion notification.
+ *
+ * @param[in]		dio			driver IO structure
+ * @param[in]		io_status		IO completion status
+ * @param[in]		scsi_status		SCSI status returned by target
+ * @param[in]		sns_len			SCSI sense length, 0 if none
+ * @param[in]		sns_info		SCSI sense data, if any
+ * @param[in]		residue			Residual length
+ *
+ * @return None
+ */
+void	bfa_cb_ioim_done(void *bfad, struct bfad_ioim_s *dio,
+				  enum bfi_ioim_status io_status,
+				  u8 scsi_status, int sns_len,
+				  u8 *sns_info, s32 residue);
+
+/**
+ *	I/O good completion notification.
+ *
+ * @param[in]		dio			driver IO structure
+ *
+ * @return None
+ */
+void	bfa_cb_ioim_good_comp(void *bfad, struct bfad_ioim_s *dio);
+
+/**
+ *	I/O abort completion notification
+ *
+ * @param[in]		dio			driver IO that was aborted
+ *
+ * @return None
+ */
+void	bfa_cb_ioim_abort(void *bfad, struct bfad_ioim_s *dio);
+
+/*
+ * bfa tskim API functions
+ */
+struct bfa_tskim_s	*bfa_tskim_alloc(struct bfa_s *bfa,
+					struct bfad_tskim_s *dtsk);
+void		bfa_tskim_free(struct bfa_tskim_s *tskim);
+void		bfa_tskim_start(struct bfa_tskim_s *tskim,
+				struct bfa_itnim_s *itnim, lun_t lun,
+				enum fcp_tm_cmnd tm, u8 t_secs);
+void		bfa_cb_tskim_done(void *bfad, struct bfad_tskim_s *dtsk,
+				  enum bfi_tskim_status tsk_status);
+
+#endif /* __BFA_FCPIM_H__ */

+ 0 - 192
drivers/scsi/bfa/bfa_fcpim_priv.h

@@ -1,192 +0,0 @@
-/*
- * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
- * All rights reserved
- * www.brocade.com
- *
- * Linux driver for Brocade Fibre Channel Host Bus Adapter.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License (GPL) Version 2 as
- * published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- */
-
-#ifndef __BFA_FCPIM_PRIV_H__
-#define __BFA_FCPIM_PRIV_H__
-
-#include <bfa_fcpim.h>
-#include <defs/bfa_defs_fcpim.h>
-#include <cs/bfa_wc.h>
-#include "bfa_sgpg_priv.h"
-
-#define BFA_ITNIM_MIN   32
-#define BFA_ITNIM_MAX   1024
-
-#define BFA_IOIM_MIN    8
-#define BFA_IOIM_MAX    2000
-
-#define BFA_TSKIM_MIN   4
-#define BFA_TSKIM_MAX   512
-#define BFA_FCPIM_PATHTOV_DEF	(30 * 1000)	/* in millisecs */
-#define BFA_FCPIM_PATHTOV_MAX	(90 * 1000)	/* in millisecs */
-
-#define bfa_fcpim_stats(__fcpim, __stats)   \
-    ((__fcpim)->stats.__stats++)
-
-struct bfa_fcpim_mod_s {
-	struct bfa_s 	*bfa;
-	struct bfa_itnim_s 	*itnim_arr;
-	struct bfa_ioim_s 	*ioim_arr;
-	struct bfa_ioim_sp_s *ioim_sp_arr;
-	struct bfa_tskim_s 	*tskim_arr;
-	struct bfa_dma_s	snsbase;
-	int			num_itnims;
-	int			num_ioim_reqs;
-	int			num_tskim_reqs;
-	u32		path_tov;
-	u16		q_depth;
-	u8              reqq;           /* Request queue to be used */
-	u8		rsvd;
-	struct list_head 	itnim_q;        /*  queue of active itnim    */
-	struct list_head 	ioim_free_q;    /*  free IO resources        */
-	struct list_head 	ioim_resfree_q; /*  IOs waiting for f/w      */
-	struct list_head 	ioim_comp_q;    /*  IO global comp Q         */
-	struct list_head 	tskim_free_q;
-	u32	ios_active;	/*  current active IOs	      */
-	u32	delay_comp;
-	struct bfa_fcpim_stats_s stats;
-	bfa_boolean_t           ioredirect;
-};
-
-struct bfa_ioim_s;
-struct bfa_tskim_s;
-
-/**
- * BFA IO (initiator mode)
- */
-struct bfa_ioim_s {
-	struct list_head qe;		/*  queue elememt            */
-	bfa_sm_t		sm; 	/*  BFA ioim state machine   */
-	struct bfa_s 	        *bfa;	/*  BFA module               */
-	struct bfa_fcpim_mod_s	*fcpim;	/*  parent fcpim module      */
-	struct bfa_itnim_s 	*itnim;	/*  i-t-n nexus for this IO  */
-	struct bfad_ioim_s 	*dio;	/*  driver IO handle         */
-	u16	iotag;		/*  FWI IO tag               */
-	u16	abort_tag;	/*  unqiue abort request tag */
-	u16	nsges;		/*  number of SG elements    */
-	u16	nsgpgs;		/*  number of SG pages       */
-	struct bfa_sgpg_s *sgpg;	/*  first SG page            */
-	struct list_head sgpg_q;		/*  allocated SG pages       */
-	struct bfa_cb_qe_s hcb_qe;	/*  bfa callback qelem       */
-	bfa_cb_cbfn_t io_cbfn;		/*  IO completion handler    */
-	struct bfa_ioim_sp_s *iosp;	/*  slow-path IO handling    */
-	u8 reqq;           		/* Request queue for I/O    */
-};
-
-struct bfa_ioim_sp_s {
-	struct bfi_msg_s 	comp_rspmsg;	/*  IO comp f/w response     */
-	u8			*snsinfo;	/*  sense info for this IO   */
-	struct bfa_sgpg_wqe_s sgpg_wqe;	/*  waitq elem for sgpg      */
-	struct bfa_reqq_wait_s reqq_wait;	/*  to wait for room in reqq */
-	bfa_boolean_t		abort_explicit;	/*  aborted by OS            */
-	struct bfa_tskim_s	*tskim;		/*  Relevant TM cmd          */
-};
-
-/**
- * BFA Task management command (initiator mode)
- */
-struct bfa_tskim_s {
-	struct list_head          qe;
-	bfa_sm_t		sm;
-	struct bfa_s            *bfa;        /*  BFA module  */
-	struct bfa_fcpim_mod_s  *fcpim;      /*  parent fcpim module      */
-	struct bfa_itnim_s      *itnim;      /*  i-t-n nexus for this IO  */
-	struct bfad_tskim_s         *dtsk;   /*  driver task mgmt cmnd    */
-	bfa_boolean_t        notify;         /*  notify itnim on TM comp  */
-	lun_t                lun;            /*  lun if applicable        */
-	enum fcp_tm_cmnd        tm_cmnd;     /*  task management command  */
-	u16             tsk_tag;        /*  FWI IO tag               */
-	u8              tsecs;          /*  timeout in seconds       */
-	struct bfa_reqq_wait_s  reqq_wait;   /*  to wait for room in reqq */
-	struct list_head              io_q;    /*  queue of affected IOs    */
-	struct bfa_wc_s             wc;      /*  waiting counter          */
-	struct bfa_cb_qe_s	hcb_qe;      /*  bfa callback qelem       */
-	enum bfi_tskim_status   tsk_status;  /*  TM status                */
-};
-
-/**
- * BFA i-t-n (initiator mode)
- */
-struct bfa_itnim_s {
-	struct list_head    qe;		/*  queue element               */
-	bfa_sm_t	  sm;		/*  i-t-n im BFA state machine  */
-	struct bfa_s      *bfa;		/*  bfa instance                */
-	struct bfa_rport_s *rport;	/*  bfa rport                   */
-	void           *ditn;		/*  driver i-t-n structure      */
-	struct bfi_mhdr_s      mhdr;	/*  pre-built mhdr              */
-	u8         msg_no;		/*  itnim/rport firmware handle */
-	u8         reqq;		/*  CQ for requests             */
-	struct bfa_cb_qe_s    hcb_qe;	/*  bfa callback qelem          */
-	struct list_head pending_q;	/*  queue of pending IO requests*/
-	struct list_head io_q;		/*  queue of active IO requests */
-	struct list_head io_cleanup_q;	/*  IO being cleaned up         */
-	struct list_head tsk_q;		/*  queue of active TM commands */
-	struct list_head  delay_comp_q;/*  queue of failed inflight cmds */
-	bfa_boolean_t   seq_rec;	/*  SQER supported              */
-	bfa_boolean_t   is_online;	/*  itnim is ONLINE for IO      */
-	bfa_boolean_t   iotov_active;	/*  IO TOV timer is active	 */
-	struct bfa_wc_s        wc;	/*  waiting counter             */
-	struct bfa_timer_s timer;	/*  pending IO TOV		 */
-	struct bfa_reqq_wait_s reqq_wait; /*  to wait for room in reqq */
-	struct bfa_fcpim_mod_s *fcpim;	/*  fcpim module                */
-	struct bfa_itnim_hal_stats_s	stats;
-	struct bfa_itnim_latency_s  io_latency;
-};
-
-#define bfa_itnim_is_online(_itnim) ((_itnim)->is_online)
-#define BFA_FCPIM_MOD(_hal) (&(_hal)->modules.fcpim_mod)
-#define BFA_IOIM_FROM_TAG(_fcpim, _iotag)	\
-	(&fcpim->ioim_arr[_iotag])
-#define BFA_TSKIM_FROM_TAG(_fcpim, _tmtag)                  \
-    (&fcpim->tskim_arr[_tmtag & (fcpim->num_tskim_reqs - 1)])
-
-/*
- * function prototypes
- */
-void            bfa_ioim_attach(struct bfa_fcpim_mod_s *fcpim,
-				    struct bfa_meminfo_s *minfo);
-void            bfa_ioim_detach(struct bfa_fcpim_mod_s *fcpim);
-void            bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
-void            bfa_ioim_good_comp_isr(struct bfa_s *bfa,
-					struct bfi_msg_s *msg);
-void            bfa_ioim_cleanup(struct bfa_ioim_s *ioim);
-void            bfa_ioim_cleanup_tm(struct bfa_ioim_s *ioim,
-					struct bfa_tskim_s *tskim);
-void            bfa_ioim_iocdisable(struct bfa_ioim_s *ioim);
-void            bfa_ioim_tov(struct bfa_ioim_s *ioim);
-
-void            bfa_tskim_attach(struct bfa_fcpim_mod_s *fcpim,
-				     struct bfa_meminfo_s *minfo);
-void            bfa_tskim_detach(struct bfa_fcpim_mod_s *fcpim);
-void            bfa_tskim_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
-void            bfa_tskim_iodone(struct bfa_tskim_s *tskim);
-void            bfa_tskim_iocdisable(struct bfa_tskim_s *tskim);
-void            bfa_tskim_cleanup(struct bfa_tskim_s *tskim);
-
-void            bfa_itnim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
-				      u32 *dm_len);
-void            bfa_itnim_attach(struct bfa_fcpim_mod_s *fcpim,
-				     struct bfa_meminfo_s *minfo);
-void            bfa_itnim_detach(struct bfa_fcpim_mod_s *fcpim);
-void            bfa_itnim_iocdisable(struct bfa_itnim_s *itnim);
-void            bfa_itnim_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
-void            bfa_itnim_iodone(struct bfa_itnim_s *itnim);
-void            bfa_itnim_tskdone(struct bfa_itnim_s *itnim);
-bfa_boolean_t   bfa_itnim_hold_io(struct bfa_itnim_s *itnim);
-
-#endif /* __BFA_FCPIM_PRIV_H__ */
-
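
The BFA_TSKIM_FROM_TAG() macro above resolves a firmware task-management tag to a
preallocated slot by masking the tag with (num_tskim_reqs - 1), which only works
when the pool size is a power of two; BFA_IOIM_FROM_TAG() indexes the I/O array
directly. A minimal, self-contained C sketch of that tag-to-slot lookup follows;
the names (struct req, req_from_tag, NUM_REQS) are illustrative stand-ins, not
the driver's own types.

#include <assert.h>
#include <stdio.h>

/* Hypothetical request slot; the real bfa_tskim_s/bfa_ioim_s also carry
 * queue elements, callbacks and state. */
struct req {
	unsigned int	tag;		/* firmware tag, may wrap */
	int		in_use;
};

#define NUM_REQS	16		/* pool size, must be a power of two */

static struct req req_arr[NUM_REQS];	/* preallocated at attach time */

/* Map a (possibly wrapping) tag back to its slot, the same
 * "tag & (count - 1)" indexing used by BFA_TSKIM_FROM_TAG(). */
static struct req *req_from_tag(unsigned int tag)
{
	return &req_arr[tag & (NUM_REQS - 1)];
}

int main(void)
{
	struct req *r = req_from_tag(0x1005);	/* 0x1005 & 0xf == 5 */

	r->tag = 0x1005;
	r->in_use = 1;
	assert(r == &req_arr[5]);
	printf("tag 0x%x -> slot %ld\n", r->tag, (long)(r - req_arr));
	return 0;
}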

+ 0 - 1962
drivers/scsi/bfa/bfa_fcport.c

@@ -1,1962 +0,0 @@
-/*
- * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
- * All rights reserved
- * www.brocade.com
- *
- * Linux driver for Brocade Fibre Channel Host Bus Adapter.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License (GPL) Version 2 as
- * published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- */
-
-#include <bfa.h>
-#include <bfa_svc.h>
-#include <bfi/bfi_pport.h>
-#include <bfi/bfi_pbc.h>
-#include <cs/bfa_debug.h>
-#include <aen/bfa_aen.h>
-#include <cs/bfa_plog.h>
-#include <aen/bfa_aen_port.h>
-
-BFA_TRC_FILE(HAL, FCPORT);
-BFA_MODULE(fcport);
-
-/*
- * The port is considered disabled if the corresponding physical port or IOC
- * is disabled explicitly
- */
-#define BFA_PORT_IS_DISABLED(bfa) \
-	((bfa_fcport_is_disabled(bfa) == BFA_TRUE) || \
-	(bfa_ioc_is_disabled(&bfa->ioc) == BFA_TRUE))
-
-/*
- * forward declarations
- */
-static bfa_boolean_t bfa_fcport_send_enable(struct bfa_fcport_s *fcport);
-static bfa_boolean_t bfa_fcport_send_disable(struct bfa_fcport_s *fcport);
-static void     bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport);
-static void     bfa_fcport_reset_linkinfo(struct bfa_fcport_s *fcport);
-static void     bfa_fcport_set_wwns(struct bfa_fcport_s *fcport);
-static void     __bfa_cb_fcport_event(void *cbarg, bfa_boolean_t complete);
-static void     bfa_fcport_callback(struct bfa_fcport_s *fcport,
-				enum bfa_pport_linkstate event);
-static void     bfa_fcport_queue_cb(struct bfa_fcport_ln_s *ln,
-				enum bfa_pport_linkstate event);
-static void     __bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete);
-static void     bfa_fcport_stats_get_timeout(void *cbarg);
-static void     bfa_fcport_stats_clr_timeout(void *cbarg);
-
-/**
- *  bfa_pport_private
- */
-
-/**
- * BFA port state machine events
- */
-enum bfa_fcport_sm_event {
-	BFA_FCPORT_SM_START = 1,	/*  start port state machine */
-	BFA_FCPORT_SM_STOP = 2,	/*  stop port state machine */
-	BFA_FCPORT_SM_ENABLE = 3,	/*  enable port */
-	BFA_FCPORT_SM_DISABLE = 4,	/*  disable port state machine */
-	BFA_FCPORT_SM_FWRSP = 5,	/*  firmware enable/disable rsp */
-	BFA_FCPORT_SM_LINKUP = 6,	/*  firmware linkup event */
-	BFA_FCPORT_SM_LINKDOWN = 7,	/*  firmware linkdown event */
-	BFA_FCPORT_SM_QRESUME = 8,	/*  CQ space available */
-	BFA_FCPORT_SM_HWFAIL = 9,	/*  IOC h/w failure */
-};
-
-/**
- * BFA port link notification state machine events
- */
-
-enum bfa_fcport_ln_sm_event {
-	BFA_FCPORT_LN_SM_LINKUP         = 1,    /*  linkup event */
-	BFA_FCPORT_LN_SM_LINKDOWN       = 2,    /*  linkdown event */
-	BFA_FCPORT_LN_SM_NOTIFICATION   = 3     /*  done notification */
-};
-
-static void     bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport,
-					enum bfa_fcport_sm_event event);
-static void     bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport,
-						enum bfa_fcport_sm_event event);
-static void     bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport,
-						enum bfa_fcport_sm_event event);
-static void     bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport,
-						enum bfa_fcport_sm_event event);
-static void     bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport,
-						enum bfa_fcport_sm_event event);
-static void     bfa_fcport_sm_disabling(struct bfa_fcport_s *fcport,
-						enum bfa_fcport_sm_event event);
-static void     bfa_fcport_sm_disabling_qwait(struct bfa_fcport_s *fcport,
-						enum bfa_fcport_sm_event event);
-static void     bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport,
-						enum bfa_fcport_sm_event event);
-static void     bfa_fcport_sm_stopped(struct bfa_fcport_s *fcport,
-					 enum bfa_fcport_sm_event event);
-static void     bfa_fcport_sm_iocdown(struct bfa_fcport_s *fcport,
-					 enum bfa_fcport_sm_event event);
-static void     bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport,
-					 enum bfa_fcport_sm_event event);
-
-static void     bfa_fcport_ln_sm_dn(struct bfa_fcport_ln_s *ln,
-					 enum bfa_fcport_ln_sm_event event);
-static void     bfa_fcport_ln_sm_dn_nf(struct bfa_fcport_ln_s *ln,
-					 enum bfa_fcport_ln_sm_event event);
-static void     bfa_fcport_ln_sm_dn_up_nf(struct bfa_fcport_ln_s *ln,
-					 enum bfa_fcport_ln_sm_event event);
-static void     bfa_fcport_ln_sm_up(struct bfa_fcport_ln_s *ln,
-					 enum bfa_fcport_ln_sm_event event);
-static void     bfa_fcport_ln_sm_up_nf(struct bfa_fcport_ln_s *ln,
-					 enum bfa_fcport_ln_sm_event event);
-static void     bfa_fcport_ln_sm_up_dn_nf(struct bfa_fcport_ln_s *ln,
-					 enum bfa_fcport_ln_sm_event event);
-static void     bfa_fcport_ln_sm_up_dn_up_nf(struct bfa_fcport_ln_s *ln,
-					 enum bfa_fcport_ln_sm_event event);
-
-static struct bfa_sm_table_s hal_pport_sm_table[] = {
-	{BFA_SM(bfa_fcport_sm_uninit), BFA_PPORT_ST_UNINIT},
-	{BFA_SM(bfa_fcport_sm_enabling_qwait), BFA_PPORT_ST_ENABLING_QWAIT},
-	{BFA_SM(bfa_fcport_sm_enabling), BFA_PPORT_ST_ENABLING},
-	{BFA_SM(bfa_fcport_sm_linkdown), BFA_PPORT_ST_LINKDOWN},
-	{BFA_SM(bfa_fcport_sm_linkup), BFA_PPORT_ST_LINKUP},
-	{BFA_SM(bfa_fcport_sm_disabling_qwait), BFA_PPORT_ST_DISABLING_QWAIT},
-	{BFA_SM(bfa_fcport_sm_disabling), BFA_PPORT_ST_DISABLING},
-	{BFA_SM(bfa_fcport_sm_disabled), BFA_PPORT_ST_DISABLED},
-	{BFA_SM(bfa_fcport_sm_stopped), BFA_PPORT_ST_STOPPED},
-	{BFA_SM(bfa_fcport_sm_iocdown), BFA_PPORT_ST_IOCDOWN},
-	{BFA_SM(bfa_fcport_sm_iocfail), BFA_PPORT_ST_IOCDOWN},
-};
-
-static void
-bfa_fcport_aen_post(struct bfa_fcport_s *fcport, enum bfa_port_aen_event event)
-{
-	union bfa_aen_data_u aen_data;
-	struct bfa_log_mod_s *logmod = fcport->bfa->logm;
-	wwn_t           pwwn = fcport->pwwn;
-	char            pwwn_ptr[BFA_STRING_32];
-
-	memset(&aen_data, 0, sizeof(aen_data));
-	wwn2str(pwwn_ptr, pwwn);
-	bfa_log(logmod, BFA_LOG_CREATE_ID(BFA_AEN_CAT_PORT, event), pwwn_ptr);
-
-	aen_data.port.ioc_type = bfa_get_type(fcport->bfa);
-	aen_data.port.pwwn = pwwn;
-}
-
-static void
-bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport,
-			enum bfa_fcport_sm_event event)
-{
-	bfa_trc(fcport->bfa, event);
-
-	switch (event) {
-	case BFA_FCPORT_SM_START:
-		/**
-		 * Start event after IOC is configured and BFA is started.
-		 */
-		if (bfa_fcport_send_enable(fcport))
-			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
-		else
-			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling_qwait);
-		break;
-
-	case BFA_FCPORT_SM_ENABLE:
-		/**
-		 * Port is persistently configured to be in enabled state. Do
-		 * not change state. Port enabling is done when START event is
-		 * received.
-		 */
-		break;
-
-	case BFA_FCPORT_SM_DISABLE:
-		/**
-		 * If a port is persistently configured to be disabled, the
-		 * first event will be a port disable request.
-		 */
-		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
-		break;
-
-	case BFA_FCPORT_SM_HWFAIL:
-		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
-		break;
-
-	default:
-		bfa_sm_fault(fcport->bfa, event);
-	}
-}
-
-static void
-bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport,
-			    enum bfa_fcport_sm_event event)
-{
-	bfa_trc(fcport->bfa, event);
-
-	switch (event) {
-	case BFA_FCPORT_SM_QRESUME:
-		bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
-		bfa_fcport_send_enable(fcport);
-		break;
-
-	case BFA_FCPORT_SM_STOP:
-		bfa_reqq_wcancel(&fcport->reqq_wait);
-		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
-		break;
-
-	case BFA_FCPORT_SM_ENABLE:
-		/**
-		 * Already enable is in progress.
-		 */
-		break;
-
-	case BFA_FCPORT_SM_DISABLE:
-		/**
-		 * Just send disable request to firmware when room becomes
-		 * available in request queue.
-		 */
-		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
-		bfa_reqq_wcancel(&fcport->reqq_wait);
-		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
-			     BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
-		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
-		break;
-
-	case BFA_FCPORT_SM_LINKUP:
-	case BFA_FCPORT_SM_LINKDOWN:
-		/**
-		 * Possible to get link events when doing back-to-back
-		 * enable/disables.
-		 */
-		break;
-
-	case BFA_FCPORT_SM_HWFAIL:
-		bfa_reqq_wcancel(&fcport->reqq_wait);
-		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
-		break;
-
-	default:
-		bfa_sm_fault(fcport->bfa, event);
-	}
-}
-
-static void
-bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport,
-		enum bfa_fcport_sm_event event)
-{
-	bfa_trc(fcport->bfa, event);
-
-	switch (event) {
-	case BFA_FCPORT_SM_FWRSP:
-	case BFA_FCPORT_SM_LINKDOWN:
-		bfa_sm_set_state(fcport, bfa_fcport_sm_linkdown);
-		break;
-
-	case BFA_FCPORT_SM_LINKUP:
-		bfa_fcport_update_linkinfo(fcport);
-		bfa_sm_set_state(fcport, bfa_fcport_sm_linkup);
-
-		bfa_assert(fcport->event_cbfn);
-		bfa_fcport_callback(fcport, BFA_PPORT_LINKUP);
-		break;
-
-	case BFA_FCPORT_SM_ENABLE:
-		/**
-		 * Already being enabled.
-		 */
-		break;
-
-	case BFA_FCPORT_SM_DISABLE:
-		if (bfa_fcport_send_disable(fcport))
-			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
-		else
-			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling_qwait);
-
-		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
-			     BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
-		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
-		break;
-
-	case BFA_FCPORT_SM_STOP:
-		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
-		break;
-
-	case BFA_FCPORT_SM_HWFAIL:
-		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
-		break;
-
-	default:
-		bfa_sm_fault(fcport->bfa, event);
-	}
-}
-
-static void
-bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport,
-			enum bfa_fcport_sm_event event)
-{
-	struct bfi_fcport_event_s *pevent = fcport->event_arg.i2hmsg.event;
-	bfa_trc(fcport->bfa, event);
-
-	switch (event) {
-	case BFA_FCPORT_SM_LINKUP:
-		bfa_fcport_update_linkinfo(fcport);
-		bfa_sm_set_state(fcport, bfa_fcport_sm_linkup);
-		bfa_assert(fcport->event_cbfn);
-		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
-			     BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkup");
-
-		if (!bfa_ioc_get_fcmode(&fcport->bfa->ioc)) {
-
-			bfa_trc(fcport->bfa,
-				pevent->link_state.vc_fcf.fcf.fipenabled);
-			bfa_trc(fcport->bfa,
-				pevent->link_state.vc_fcf.fcf.fipfailed);
-
-			if (pevent->link_state.vc_fcf.fcf.fipfailed)
-				bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
-					BFA_PL_EID_FIP_FCF_DISC, 0,
-					"FIP FCF Discovery Failed");
-			else
-				bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
-					BFA_PL_EID_FIP_FCF_DISC, 0,
-					"FIP FCF Discovered");
-		}
-
-		bfa_fcport_callback(fcport, BFA_PPORT_LINKUP);
-		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ONLINE);
-		/**
-		 * If QoS is enabled and it is not online,
-		 * Send a separate event.
-		 */
-		if ((fcport->cfg.qos_enabled)
-		    && (bfa_os_ntohl(fcport->qos_attr.state) != BFA_QOS_ONLINE))
-			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_QOS_NEG);
-
-		break;
-
-	case BFA_FCPORT_SM_LINKDOWN:
-		/**
-		 * Possible to get link down event.
-		 */
-		break;
-
-	case BFA_FCPORT_SM_ENABLE:
-		/**
-		 * Already enabled.
-		 */
-		break;
-
-	case BFA_FCPORT_SM_DISABLE:
-		if (bfa_fcport_send_disable(fcport))
-			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
-		else
-			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling_qwait);
-
-		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
-			     BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
-		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
-		break;
-
-	case BFA_FCPORT_SM_STOP:
-		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
-		break;
-
-	case BFA_FCPORT_SM_HWFAIL:
-		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
-		break;
-
-	default:
-		bfa_sm_fault(fcport->bfa, event);
-	}
-}
-
-static void
-bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport,
-			enum bfa_fcport_sm_event event)
-{
-	bfa_trc(fcport->bfa, event);
-
-	switch (event) {
-	case BFA_FCPORT_SM_ENABLE:
-		/**
-		 * Already enabled.
-		 */
-		break;
-
-	case BFA_FCPORT_SM_DISABLE:
-		if (bfa_fcport_send_disable(fcport))
-			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
-		else
-			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling_qwait);
-
-		bfa_fcport_reset_linkinfo(fcport);
-		bfa_fcport_callback(fcport, BFA_PPORT_LINKDOWN);
-		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
-			     BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
-		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
-		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
-		break;
-
-	case BFA_FCPORT_SM_LINKDOWN:
-		bfa_sm_set_state(fcport, bfa_fcport_sm_linkdown);
-		bfa_fcport_reset_linkinfo(fcport);
-		bfa_fcport_callback(fcport, BFA_PPORT_LINKDOWN);
-		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
-			     BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkdown");
-		if (BFA_PORT_IS_DISABLED(fcport->bfa))
-			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
-		else
-			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
-		break;
-
-	case BFA_FCPORT_SM_STOP:
-		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
-		bfa_fcport_reset_linkinfo(fcport);
-		if (BFA_PORT_IS_DISABLED(fcport->bfa))
-			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
-		else
-			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
-		break;
-
-	case BFA_FCPORT_SM_HWFAIL:
-		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
-		bfa_fcport_reset_linkinfo(fcport);
-		bfa_fcport_callback(fcport, BFA_PPORT_LINKDOWN);
-		if (BFA_PORT_IS_DISABLED(fcport->bfa))
-			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
-		else
-			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
-		break;
-
-	default:
-		bfa_sm_fault(fcport->bfa, event);
-	}
-}
-
-static void
-bfa_fcport_sm_disabling_qwait(struct bfa_fcport_s *fcport,
-			     enum bfa_fcport_sm_event event)
-{
-	bfa_trc(fcport->bfa, event);
-
-	switch (event) {
-	case BFA_FCPORT_SM_QRESUME:
-		bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
-		bfa_fcport_send_disable(fcport);
-		break;
-
-	case BFA_FCPORT_SM_STOP:
-		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
-		bfa_reqq_wcancel(&fcport->reqq_wait);
-		break;
-
-	case BFA_FCPORT_SM_DISABLE:
-		/**
-		 * Already being disabled.
-		 */
-		break;
-
-	case BFA_FCPORT_SM_LINKUP:
-	case BFA_FCPORT_SM_LINKDOWN:
-		/**
-		 * Possible to get link events when doing back-to-back
-		 * enable/disables.
-		 */
-		break;
-
-	case BFA_FCPORT_SM_HWFAIL:
-		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
-		bfa_reqq_wcancel(&fcport->reqq_wait);
-		break;
-
-	default:
-		bfa_sm_fault(fcport->bfa, event);
-	}
-}
-
-static void
-bfa_fcport_sm_disabling(struct bfa_fcport_s *fcport,
-			enum bfa_fcport_sm_event event)
-{
-	bfa_trc(fcport->bfa, event);
-
-	switch (event) {
-	case BFA_FCPORT_SM_FWRSP:
-		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
-		break;
-
-	case BFA_FCPORT_SM_DISABLE:
-		/**
-		 * Already being disabled.
-		 */
-		break;
-
-	case BFA_FCPORT_SM_ENABLE:
-		if (bfa_fcport_send_enable(fcport))
-			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
-		else
-			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling_qwait);
-
-		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
-			     BFA_PL_EID_PORT_ENABLE, 0, "Port Enable");
-		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ENABLE);
-		break;
-
-	case BFA_FCPORT_SM_STOP:
-		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
-		break;
-
-	case BFA_FCPORT_SM_LINKUP:
-	case BFA_FCPORT_SM_LINKDOWN:
-		/**
-		 * Possible to get link events when doing back-to-back
-		 * enable/disables.
-		 */
-		break;
-
-	case BFA_FCPORT_SM_HWFAIL:
-		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
-		break;
-
-	default:
-		bfa_sm_fault(fcport->bfa, event);
-	}
-}
-
-static void
-bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport,
-			enum bfa_fcport_sm_event event)
-{
-	bfa_trc(fcport->bfa, event);
-
-	switch (event) {
-	case BFA_FCPORT_SM_START:
-		/**
-		 * Ignore start event for a port that is disabled.
-		 */
-		break;
-
-	case BFA_FCPORT_SM_STOP:
-		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
-		break;
-
-	case BFA_FCPORT_SM_ENABLE:
-		if (bfa_fcport_send_enable(fcport))
-			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
-		else
-			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling_qwait);
-
-		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
-			     BFA_PL_EID_PORT_ENABLE, 0, "Port Enable");
-		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ENABLE);
-		break;
-
-	case BFA_FCPORT_SM_DISABLE:
-		/**
-		 * Already disabled.
-		 */
-		break;
-
-	case BFA_FCPORT_SM_HWFAIL:
-		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
-		break;
-
-	default:
-		bfa_sm_fault(fcport->bfa, event);
-	}
-}
-
-static void
-bfa_fcport_sm_stopped(struct bfa_fcport_s *fcport,
-			enum bfa_fcport_sm_event event)
-{
-	bfa_trc(fcport->bfa, event);
-
-	switch (event) {
-	case BFA_FCPORT_SM_START:
-		if (bfa_fcport_send_enable(fcport))
-			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
-		else
-			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling_qwait);
-		break;
-
-	default:
-		/**
-		 * Ignore all other events.
-		 */
-		;
-	}
-}
-
-/**
- * Port is enabled. IOC is down/failed.
- */
-static void
-bfa_fcport_sm_iocdown(struct bfa_fcport_s *fcport,
-			enum bfa_fcport_sm_event event)
-{
-	bfa_trc(fcport->bfa, event);
-
-	switch (event) {
-	case BFA_FCPORT_SM_START:
-		if (bfa_fcport_send_enable(fcport))
-			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
-		else
-			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling_qwait);
-		break;
-
-	default:
-		/**
-		 * Ignore all events.
-		 */
-		;
-	}
-}
-
-/**
- * Port is disabled. IOC is down/failed.
- */
-static void
-bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport,
-			enum bfa_fcport_sm_event event)
-{
-	bfa_trc(fcport->bfa, event);
-
-	switch (event) {
-	case BFA_FCPORT_SM_START:
-		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
-		break;
-
-	case BFA_FCPORT_SM_ENABLE:
-		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
-		break;
-
-	default:
-		/**
-		 * Ignore all events.
-		 */
-		;
-	}
-}
-
-/**
- * Link state is down
- */
-static void
-bfa_fcport_ln_sm_dn(struct bfa_fcport_ln_s *ln,
-		enum bfa_fcport_ln_sm_event event)
-{
-	bfa_trc(ln->fcport->bfa, event);
-
-	switch (event) {
-	case BFA_FCPORT_LN_SM_LINKUP:
-		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_nf);
-		bfa_fcport_queue_cb(ln, BFA_PPORT_LINKUP);
-		break;
-
-	default:
-		bfa_sm_fault(ln->fcport->bfa, event);
-	}
-}
-
-/**
- * Link state is waiting for down notification
- */
-static void
-bfa_fcport_ln_sm_dn_nf(struct bfa_fcport_ln_s *ln,
-		enum bfa_fcport_ln_sm_event event)
-{
-	bfa_trc(ln->fcport->bfa, event);
-
-	switch (event) {
-	case BFA_FCPORT_LN_SM_LINKUP:
-		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_up_nf);
-		break;
-
-	case BFA_FCPORT_LN_SM_NOTIFICATION:
-		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn);
-		break;
-
-	default:
-		bfa_sm_fault(ln->fcport->bfa, event);
-	}
-}
-
-/**
- * Link state is waiting for down notification and there is a pending up
- */
-static void
-bfa_fcport_ln_sm_dn_up_nf(struct bfa_fcport_ln_s *ln,
-		enum bfa_fcport_ln_sm_event event)
-{
-	bfa_trc(ln->fcport->bfa, event);
-
-	switch (event) {
-	case BFA_FCPORT_LN_SM_LINKDOWN:
-		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
-		break;
-
-	case BFA_FCPORT_LN_SM_NOTIFICATION:
-		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_nf);
-		bfa_fcport_queue_cb(ln, BFA_PPORT_LINKUP);
-		break;
-
-	default:
-		bfa_sm_fault(ln->fcport->bfa, event);
-	}
-}
-
-/**
- * Link state is up
- */
-static void
-bfa_fcport_ln_sm_up(struct bfa_fcport_ln_s *ln,
-		enum bfa_fcport_ln_sm_event event)
-{
-	bfa_trc(ln->fcport->bfa, event);
-
-	switch (event) {
-	case BFA_FCPORT_LN_SM_LINKDOWN:
-		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
-		bfa_fcport_queue_cb(ln, BFA_PPORT_LINKDOWN);
-		break;
-
-	default:
-		bfa_sm_fault(ln->fcport->bfa, event);
-	}
-}
-
-/**
- * Link state is waiting for up notification
- */
-static void
-bfa_fcport_ln_sm_up_nf(struct bfa_fcport_ln_s *ln,
-		enum bfa_fcport_ln_sm_event event)
-{
-	bfa_trc(ln->fcport->bfa, event);
-
-	switch (event) {
-	case BFA_FCPORT_LN_SM_LINKDOWN:
-		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_nf);
-		break;
-
-	case BFA_FCPORT_LN_SM_NOTIFICATION:
-		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up);
-		break;
-
-	default:
-		bfa_sm_fault(ln->fcport->bfa, event);
-	}
-}
-
-/**
- * Link state is waiting for up notification and there is a pending down
- */
-static void
-bfa_fcport_ln_sm_up_dn_nf(struct bfa_fcport_ln_s *ln,
-		enum bfa_fcport_ln_sm_event event)
-{
-	bfa_trc(ln->fcport->bfa, event);
-
-	switch (event) {
-	case BFA_FCPORT_LN_SM_LINKUP:
-		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_up_nf);
-		break;
-
-	case BFA_FCPORT_LN_SM_NOTIFICATION:
-		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
-		bfa_fcport_queue_cb(ln, BFA_PPORT_LINKDOWN);
-		break;
-
-	default:
-		bfa_sm_fault(ln->fcport->bfa, event);
-	}
-}
-
-/**
- * Link state is waiting for up notification and there are pending down and up
- */
-static void
-bfa_fcport_ln_sm_up_dn_up_nf(struct bfa_fcport_ln_s *ln,
-			enum bfa_fcport_ln_sm_event event)
-{
-	bfa_trc(ln->fcport->bfa, event);
-
-	switch (event) {
-	case BFA_FCPORT_LN_SM_LINKDOWN:
-		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_nf);
-		break;
-
-	case BFA_FCPORT_LN_SM_NOTIFICATION:
-		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_up_nf);
-		bfa_fcport_queue_cb(ln, BFA_PPORT_LINKDOWN);
-		break;
-
-	default:
-		bfa_sm_fault(ln->fcport->bfa, event);
-	}
-}
-
-/**
- *  bfa_pport_private
- */
-
-static void
-__bfa_cb_fcport_event(void *cbarg, bfa_boolean_t complete)
-{
-	struct bfa_fcport_ln_s *ln = cbarg;
-
-	if (complete)
-		ln->fcport->event_cbfn(ln->fcport->event_cbarg, ln->ln_event);
-	else
-		bfa_sm_send_event(ln, BFA_FCPORT_LN_SM_NOTIFICATION);
-}
-
-static void
-bfa_fcport_callback(struct bfa_fcport_s *fcport, enum bfa_pport_linkstate event)
-{
-	if (fcport->bfa->fcs) {
-		fcport->event_cbfn(fcport->event_cbarg, event);
-		return;
-	}
-
-	switch (event) {
-	case BFA_PPORT_LINKUP:
-		bfa_sm_send_event(&fcport->ln, BFA_FCPORT_LN_SM_LINKUP);
-		break;
-	case BFA_PPORT_LINKDOWN:
-		bfa_sm_send_event(&fcport->ln, BFA_FCPORT_LN_SM_LINKDOWN);
-		break;
-	default:
-		bfa_assert(0);
-	}
-}
-
-static void
-bfa_fcport_queue_cb(struct bfa_fcport_ln_s *ln, enum bfa_pport_linkstate event)
-{
-	ln->ln_event = event;
-	bfa_cb_queue(ln->fcport->bfa, &ln->ln_qe, __bfa_cb_fcport_event, ln);
-}
-
-#define FCPORT_STATS_DMA_SZ (BFA_ROUNDUP(sizeof(union bfa_fcport_stats_u), \
-							BFA_CACHELINE_SZ))
-
-static void
-bfa_fcport_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len,
-		  u32 *dm_len)
-{
-	*dm_len += FCPORT_STATS_DMA_SZ;
-}
-
-static void
-bfa_fcport_qresume(void *cbarg)
-{
-	struct bfa_fcport_s *fcport = cbarg;
-
-	bfa_sm_send_event(fcport, BFA_FCPORT_SM_QRESUME);
-}
-
-static void
-bfa_fcport_mem_claim(struct bfa_fcport_s *fcport, struct bfa_meminfo_s *meminfo)
-{
-	u8        *dm_kva;
-	u64        dm_pa;
-
-	dm_kva = bfa_meminfo_dma_virt(meminfo);
-	dm_pa = bfa_meminfo_dma_phys(meminfo);
-
-	fcport->stats_kva = dm_kva;
-	fcport->stats_pa = dm_pa;
-	fcport->stats = (union bfa_fcport_stats_u *)dm_kva;
-
-	dm_kva += FCPORT_STATS_DMA_SZ;
-	dm_pa += FCPORT_STATS_DMA_SZ;
-
-	bfa_meminfo_dma_virt(meminfo) = dm_kva;
-	bfa_meminfo_dma_phys(meminfo) = dm_pa;
-}
-
-/**
- * Memory initialization.
- */
-static void
-bfa_fcport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
-		 struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
-{
-	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
-	struct bfa_pport_cfg_s *port_cfg = &fcport->cfg;
-	struct bfa_fcport_ln_s *ln = &fcport->ln;
-	struct bfa_timeval_s tv;
-
-	bfa_os_memset(fcport, 0, sizeof(struct bfa_fcport_s));
-	fcport->bfa = bfa;
-	ln->fcport = fcport;
-
-	bfa_fcport_mem_claim(fcport, meminfo);
-
-	bfa_sm_set_state(fcport, bfa_fcport_sm_uninit);
-	bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn);
-
-	/**
-	 * initialize time stamp for stats reset
-	 */
-	bfa_os_gettimeofday(&tv);
-	fcport->stats_reset_time = tv.tv_sec;
-
-	/**
-	 * initialize and set default configuration
-	 */
-	port_cfg->topology = BFA_PPORT_TOPOLOGY_P2P;
-	port_cfg->speed = BFA_PPORT_SPEED_AUTO;
-	port_cfg->trunked = BFA_FALSE;
-	port_cfg->maxfrsize = 0;
-
-	port_cfg->trl_def_speed = BFA_PPORT_SPEED_1GBPS;
-
-	bfa_reqq_winit(&fcport->reqq_wait, bfa_fcport_qresume, fcport);
-}
-
-static void
-bfa_fcport_detach(struct bfa_s *bfa)
-{
-}
-
-/**
- * Called when IOC is ready.
- */
-static void
-bfa_fcport_start(struct bfa_s *bfa)
-{
-	bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_START);
-}
-
-/**
- * Called before IOC is stopped.
- */
-static void
-bfa_fcport_stop(struct bfa_s *bfa)
-{
-	bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_STOP);
-}
-
-/**
- * Called when IOC failure is detected.
- */
-static void
-bfa_fcport_iocdisable(struct bfa_s *bfa)
-{
-	bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_HWFAIL);
-}
-
-static void
-bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport)
-{
-	struct bfi_fcport_event_s *pevent = fcport->event_arg.i2hmsg.event;
-
-	fcport->speed = pevent->link_state.speed;
-	fcport->topology = pevent->link_state.topology;
-
-	if (fcport->topology == BFA_PPORT_TOPOLOGY_LOOP)
-		fcport->myalpa = 0;
-
-	/*
-	 * QoS Details
-	 */
-	bfa_os_assign(fcport->qos_attr, pevent->link_state.qos_attr);
-	bfa_os_assign(fcport->qos_vc_attr,
-		pevent->link_state.vc_fcf.qos_vc_attr);
-
-
-	bfa_trc(fcport->bfa, fcport->speed);
-	bfa_trc(fcport->bfa, fcport->topology);
-}
-
-static void
-bfa_fcport_reset_linkinfo(struct bfa_fcport_s *fcport)
-{
-	fcport->speed = BFA_PPORT_SPEED_UNKNOWN;
-	fcport->topology = BFA_PPORT_TOPOLOGY_NONE;
-}
-
-/**
- * Send port enable message to firmware.
- */
-static          bfa_boolean_t
-bfa_fcport_send_enable(struct bfa_fcport_s *fcport)
-{
-	struct bfi_fcport_enable_req_s *m;
-
-	/**
-	 * Increment message tag before queue check, so that responses to old
-	 * requests are discarded.
-	 */
-	fcport->msgtag++;
-
-	/**
-	 * check for room in queue to send request now
-	 */
-	m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
-	if (!m) {
-		bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
-							&fcport->reqq_wait);
-		return BFA_FALSE;
-	}
-
-	bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_ENABLE_REQ,
-				bfa_lpuid(fcport->bfa));
-	m->nwwn = fcport->nwwn;
-	m->pwwn = fcport->pwwn;
-	m->port_cfg = fcport->cfg;
-	m->msgtag = fcport->msgtag;
-	m->port_cfg.maxfrsize = bfa_os_htons(fcport->cfg.maxfrsize);
-	bfa_dma_be_addr_set(m->stats_dma_addr, fcport->stats_pa);
-	bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_lo);
-	bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_hi);
-
-	/**
-	 * queue I/O message to firmware
-	 */
-	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
-	return BFA_TRUE;
-}
-
-/**
- * Send port disable message to firmware.
- */
-static          bfa_boolean_t
-bfa_fcport_send_disable(struct bfa_fcport_s *fcport)
-{
-	struct bfi_fcport_req_s *m;
-
-	/**
-	 * Increment message tag before queue check, so that responses to old
-	 * requests are discarded.
-	 */
-	fcport->msgtag++;
-
-	/**
-	 * check for room in queue to send request now
-	 */
-	m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
-	if (!m) {
-		bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
-							&fcport->reqq_wait);
-		return BFA_FALSE;
-	}
-
-	bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_DISABLE_REQ,
-			bfa_lpuid(fcport->bfa));
-	m->msgtag = fcport->msgtag;
-
-	/**
-	 * queue I/O message to firmware
-	 */
-	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
-
-	return BFA_TRUE;
-}
-
-static void
-bfa_fcport_set_wwns(struct bfa_fcport_s *fcport)
-{
-	fcport->pwwn = bfa_ioc_get_pwwn(&fcport->bfa->ioc);
-	fcport->nwwn = bfa_ioc_get_nwwn(&fcport->bfa->ioc);
-
-	bfa_trc(fcport->bfa, fcport->pwwn);
-	bfa_trc(fcport->bfa, fcport->nwwn);
-}
-
-static void
-bfa_fcport_send_txcredit(void *port_cbarg)
-{
-
-	struct bfa_fcport_s *fcport = port_cbarg;
-	struct bfi_fcport_set_svc_params_req_s *m;
-
-	/**
-	 * check for room in queue to send request now
-	 */
-	m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
-	if (!m) {
-		bfa_trc(fcport->bfa, fcport->cfg.tx_bbcredit);
-		return;
-	}
-
-	bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_SET_SVC_PARAMS_REQ,
-			bfa_lpuid(fcport->bfa));
-	m->tx_bbcredit = bfa_os_htons((u16) fcport->cfg.tx_bbcredit);
-
-	/**
-	 * queue I/O message to firmware
-	 */
-	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
-}
-
-static void
-bfa_fcport_qos_stats_swap(struct bfa_qos_stats_s *d,
-	struct bfa_qos_stats_s *s)
-{
-	u32     *dip = (u32 *) d;
-	u32     *sip = (u32 *) s;
-	int             i;
-
-	/* Now swap the 32 bit fields */
-	for (i = 0; i < (sizeof(struct bfa_qos_stats_s)/sizeof(u32)); ++i)
-		dip[i] = bfa_os_ntohl(sip[i]);
-}
-
-static void
-bfa_fcport_fcoe_stats_swap(struct bfa_fcoe_stats_s *d,
-	struct bfa_fcoe_stats_s *s)
-{
-	u32     *dip = (u32 *) d;
-	u32     *sip = (u32 *) s;
-	int             i;
-
-	for (i = 0; i < ((sizeof(struct bfa_fcoe_stats_s))/sizeof(u32));
-		i = i + 2) {
-#ifdef __BIGENDIAN
-		dip[i] = bfa_os_ntohl(sip[i]);
-		dip[i + 1] = bfa_os_ntohl(sip[i + 1]);
-#else
-		dip[i] = bfa_os_ntohl(sip[i + 1]);
-		dip[i + 1] = bfa_os_ntohl(sip[i]);
-#endif
-	}
-}
-
-static void
-__bfa_cb_fcport_stats_get(void *cbarg, bfa_boolean_t complete)
-{
-	struct bfa_fcport_s *fcport = cbarg;
-
-	if (complete) {
-		if (fcport->stats_status == BFA_STATUS_OK) {
-			struct bfa_timeval_s tv;
-
-			/* Swap FC QoS or FCoE stats */
-			if (bfa_ioc_get_fcmode(&fcport->bfa->ioc)) {
-				bfa_fcport_qos_stats_swap(
-					&fcport->stats_ret->fcqos,
-					&fcport->stats->fcqos);
-			} else {
-				bfa_fcport_fcoe_stats_swap(
-					&fcport->stats_ret->fcoe,
-					&fcport->stats->fcoe);
-
-				bfa_os_gettimeofday(&tv);
-				fcport->stats_ret->fcoe.secs_reset =
-					tv.tv_sec - fcport->stats_reset_time;
-			}
-		}
-		fcport->stats_cbfn(fcport->stats_cbarg, fcport->stats_status);
-	} else {
-		fcport->stats_busy = BFA_FALSE;
-		fcport->stats_status = BFA_STATUS_OK;
-	}
-}
-
-static void
-bfa_fcport_stats_get_timeout(void *cbarg)
-{
-	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
-
-	bfa_trc(fcport->bfa, fcport->stats_qfull);
-
-	if (fcport->stats_qfull) {
-		bfa_reqq_wcancel(&fcport->stats_reqq_wait);
-		fcport->stats_qfull = BFA_FALSE;
-	}
-
-	fcport->stats_status = BFA_STATUS_ETIMER;
-	bfa_cb_queue(fcport->bfa, &fcport->hcb_qe, __bfa_cb_fcport_stats_get,
-		fcport);
-}
-
-static void
-bfa_fcport_send_stats_get(void *cbarg)
-{
-	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
-	struct bfi_fcport_req_s *msg;
-
-	msg = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
-
-	if (!msg) {
-		fcport->stats_qfull = BFA_TRUE;
-		bfa_reqq_winit(&fcport->stats_reqq_wait,
-				bfa_fcport_send_stats_get, fcport);
-		bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
-				&fcport->stats_reqq_wait);
-		return;
-	}
-	fcport->stats_qfull = BFA_FALSE;
-
-	bfa_os_memset(msg, 0, sizeof(struct bfi_fcport_req_s));
-	bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_GET_REQ,
-		bfa_lpuid(fcport->bfa));
-	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
-}
-
-static void
-__bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete)
-{
-	struct bfa_fcport_s *fcport = cbarg;
-
-	if (complete) {
-		struct bfa_timeval_s tv;
-
-		/**
-		 * re-initialize time stamp for stats reset
-		 */
-		bfa_os_gettimeofday(&tv);
-		fcport->stats_reset_time = tv.tv_sec;
-
-		fcport->stats_cbfn(fcport->stats_cbarg, fcport->stats_status);
-	} else {
-		fcport->stats_busy = BFA_FALSE;
-		fcport->stats_status = BFA_STATUS_OK;
-	}
-}
-
-static void
-bfa_fcport_stats_clr_timeout(void *cbarg)
-{
-	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
-
-	bfa_trc(fcport->bfa, fcport->stats_qfull);
-
-	if (fcport->stats_qfull) {
-		bfa_reqq_wcancel(&fcport->stats_reqq_wait);
-		fcport->stats_qfull = BFA_FALSE;
-	}
-
-	fcport->stats_status = BFA_STATUS_ETIMER;
-	bfa_cb_queue(fcport->bfa, &fcport->hcb_qe,
-			__bfa_cb_fcport_stats_clr, fcport);
-}
-
-static void
-bfa_fcport_send_stats_clear(void *cbarg)
-{
-	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
-	struct bfi_fcport_req_s *msg;
-
-	msg = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
-
-	if (!msg) {
-		fcport->stats_qfull = BFA_TRUE;
-		bfa_reqq_winit(&fcport->stats_reqq_wait,
-				bfa_fcport_send_stats_clear, fcport);
-		bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
-				&fcport->stats_reqq_wait);
-		return;
-	}
-	fcport->stats_qfull = BFA_FALSE;
-
-	bfa_os_memset(msg, 0, sizeof(struct bfi_fcport_req_s));
-	bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_CLEAR_REQ,
-			bfa_lpuid(fcport->bfa));
-	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
-}
-
-/**
- *  bfa_pport_public
- */
-
-/**
- * Called to initialize port attributes
- */
-void
-bfa_fcport_init(struct bfa_s *bfa)
-{
-	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
-
-	/**
-	 * Initialize port attributes from IOC hardware data.
-	 */
-	bfa_fcport_set_wwns(fcport);
-	if (fcport->cfg.maxfrsize == 0)
-		fcport->cfg.maxfrsize = bfa_ioc_maxfrsize(&bfa->ioc);
-	fcport->cfg.rx_bbcredit = bfa_ioc_rx_bbcredit(&bfa->ioc);
-	fcport->speed_sup = bfa_ioc_speed_sup(&bfa->ioc);
-
-	bfa_assert(fcport->cfg.maxfrsize);
-	bfa_assert(fcport->cfg.rx_bbcredit);
-	bfa_assert(fcport->speed_sup);
-}
-
-
-/**
- * Firmware message handler.
- */
-void
-bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
-{
-	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
-	union bfi_fcport_i2h_msg_u i2hmsg;
-
-	i2hmsg.msg = msg;
-	fcport->event_arg.i2hmsg = i2hmsg;
-
-	switch (msg->mhdr.msg_id) {
-	case BFI_FCPORT_I2H_ENABLE_RSP:
-		if (fcport->msgtag == i2hmsg.penable_rsp->msgtag)
-			bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP);
-		break;
-
-	case BFI_FCPORT_I2H_DISABLE_RSP:
-		if (fcport->msgtag == i2hmsg.pdisable_rsp->msgtag)
-			bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP);
-		break;
-
-	case BFI_FCPORT_I2H_EVENT:
-		switch (i2hmsg.event->link_state.linkstate) {
-		case BFA_PPORT_LINKUP:
-			bfa_sm_send_event(fcport, BFA_FCPORT_SM_LINKUP);
-			break;
-		case BFA_PPORT_LINKDOWN:
-			bfa_sm_send_event(fcport, BFA_FCPORT_SM_LINKDOWN);
-			break;
-		case BFA_PPORT_TRUNK_LINKDOWN:
-			/** todo: event notification */
-			break;
-		}
-		break;
-
-	case BFI_FCPORT_I2H_STATS_GET_RSP:
-		/*
-		 * check for timer pop before processing the rsp
-		 */
-		if (fcport->stats_busy == BFA_FALSE ||
-			fcport->stats_status == BFA_STATUS_ETIMER)
-			break;
-
-		bfa_timer_stop(&fcport->timer);
-		fcport->stats_status = i2hmsg.pstatsget_rsp->status;
-		bfa_cb_queue(fcport->bfa, &fcport->hcb_qe,
-				__bfa_cb_fcport_stats_get, fcport);
-		break;
-
-	case BFI_FCPORT_I2H_STATS_CLEAR_RSP:
-		/*
-		 * check for timer pop before processing the rsp
-		 */
-		if (fcport->stats_busy == BFA_FALSE ||
-			fcport->stats_status == BFA_STATUS_ETIMER)
-			break;
-
-		bfa_timer_stop(&fcport->timer);
-		fcport->stats_status = BFA_STATUS_OK;
-		bfa_cb_queue(fcport->bfa, &fcport->hcb_qe,
-				__bfa_cb_fcport_stats_clr, fcport);
-		break;
-
-	default:
-		bfa_assert(0);
-	break;
-	}
-}
-
-/**
- *  bfa_pport_api
- */
-
-/**
- * Registered callback for port events.
- */
-void
-bfa_fcport_event_register(struct bfa_s *bfa,
-			 void (*cbfn) (void *cbarg, bfa_pport_event_t event),
-			 void *cbarg)
-{
-	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
-
-	fcport->event_cbfn = cbfn;
-	fcport->event_cbarg = cbarg;
-}
-
-bfa_status_t
-bfa_fcport_enable(struct bfa_s *bfa)
-{
-	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
-	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
-	struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
-
-	/* if port is PBC disabled, return error */
-	if (cfgrsp->pbc_cfg.port_enabled == BFI_PBC_PORT_DISABLED) {
-		bfa_trc(bfa, fcport->pwwn);
-		return BFA_STATUS_PBC;
-	}
-
-	if (bfa_ioc_is_disabled(&bfa->ioc))
-		return BFA_STATUS_IOC_DISABLED;
-
-	if (fcport->diag_busy)
-		return BFA_STATUS_DIAG_BUSY;
-	else if (bfa_sm_cmp_state
-		 (BFA_FCPORT_MOD(bfa), bfa_fcport_sm_disabling_qwait))
-		return BFA_STATUS_DEVBUSY;
-
-	bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_ENABLE);
-	return BFA_STATUS_OK;
-}
-
-bfa_status_t
-bfa_fcport_disable(struct bfa_s *bfa)
-{
-	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
-	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
-	struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
-
-	/* if port is PBC disabled, return error */
-	if (cfgrsp->pbc_cfg.port_enabled == BFI_PBC_PORT_DISABLED) {
-		bfa_trc(bfa, fcport->pwwn);
-		return BFA_STATUS_PBC;
-	}
-
-	bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DISABLE);
-	return BFA_STATUS_OK;
-}
-
-/**
- * Configure port speed.
- */
-bfa_status_t
-bfa_fcport_cfg_speed(struct bfa_s *bfa, enum bfa_pport_speed speed)
-{
-	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
-
-	bfa_trc(bfa, speed);
-
-	if ((speed != BFA_PPORT_SPEED_AUTO) && (speed > fcport->speed_sup)) {
-		bfa_trc(bfa, fcport->speed_sup);
-		return BFA_STATUS_UNSUPP_SPEED;
-	}
-
-	fcport->cfg.speed = speed;
-
-	return BFA_STATUS_OK;
-}
-
-/**
- * Get current speed.
- */
-enum bfa_pport_speed
-bfa_fcport_get_speed(struct bfa_s *bfa)
-{
-	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
-
-	return fcport->speed;
-}
-
-/**
- * Configure port topology.
- */
-bfa_status_t
-bfa_fcport_cfg_topology(struct bfa_s *bfa, enum bfa_pport_topology topology)
-{
-	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
-
-	bfa_trc(bfa, topology);
-	bfa_trc(bfa, fcport->cfg.topology);
-
-	switch (topology) {
-	case BFA_PPORT_TOPOLOGY_P2P:
-	case BFA_PPORT_TOPOLOGY_LOOP:
-	case BFA_PPORT_TOPOLOGY_AUTO:
-		break;
-
-	default:
-		return BFA_STATUS_EINVAL;
-	}
-
-	fcport->cfg.topology = topology;
-	return BFA_STATUS_OK;
-}
-
-/**
- * Get current topology.
- */
-enum bfa_pport_topology
-bfa_fcport_get_topology(struct bfa_s *bfa)
-{
-	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
-
-	return fcport->topology;
-}
-
-bfa_status_t
-bfa_fcport_cfg_hardalpa(struct bfa_s *bfa, u8 alpa)
-{
-	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
-
-	bfa_trc(bfa, alpa);
-	bfa_trc(bfa, fcport->cfg.cfg_hardalpa);
-	bfa_trc(bfa, fcport->cfg.hardalpa);
-
-	fcport->cfg.cfg_hardalpa = BFA_TRUE;
-	fcport->cfg.hardalpa = alpa;
-
-	return BFA_STATUS_OK;
-}
-
-bfa_status_t
-bfa_fcport_clr_hardalpa(struct bfa_s *bfa)
-{
-	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
-
-	bfa_trc(bfa, fcport->cfg.cfg_hardalpa);
-	bfa_trc(bfa, fcport->cfg.hardalpa);
-
-	fcport->cfg.cfg_hardalpa = BFA_FALSE;
-	return BFA_STATUS_OK;
-}
-
-bfa_boolean_t
-bfa_fcport_get_hardalpa(struct bfa_s *bfa, u8 *alpa)
-{
-	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
-
-	*alpa = fcport->cfg.hardalpa;
-	return fcport->cfg.cfg_hardalpa;
-}
-
-u8
-bfa_fcport_get_myalpa(struct bfa_s *bfa)
-{
-	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
-
-	return fcport->myalpa;
-}
-
-bfa_status_t
-bfa_fcport_cfg_maxfrsize(struct bfa_s *bfa, u16 maxfrsize)
-{
-	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
-
-	bfa_trc(bfa, maxfrsize);
-	bfa_trc(bfa, fcport->cfg.maxfrsize);
-
-	/*
-	 * within range
-	 */
-	if ((maxfrsize > FC_MAX_PDUSZ) || (maxfrsize < FC_MIN_PDUSZ))
-		return BFA_STATUS_INVLD_DFSZ;
-
-	/*
-	 * must be a power of 2, unless it is the max frame size of 2112
-	 */
-	if ((maxfrsize != FC_MAX_PDUSZ) && (maxfrsize & (maxfrsize - 1)))
-		return BFA_STATUS_INVLD_DFSZ;
-
-	fcport->cfg.maxfrsize = maxfrsize;
-	return BFA_STATUS_OK;
-}
-
-u16
-bfa_fcport_get_maxfrsize(struct bfa_s *bfa)
-{
-	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
-
-	return fcport->cfg.maxfrsize;
-}
-
-u32
-bfa_fcport_mypid(struct bfa_s *bfa)
-{
-	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
-
-	return fcport->mypid;
-}
-
-u8
-bfa_fcport_get_rx_bbcredit(struct bfa_s *bfa)
-{
-	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
-
-	return fcport->cfg.rx_bbcredit;
-}
-
-void
-bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit)
-{
-	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
-
-	fcport->cfg.tx_bbcredit = (u8) tx_bbcredit;
-	bfa_fcport_send_txcredit(fcport);
-}
-
-/**
- * Get port attributes.
- */
-
-wwn_t
-bfa_fcport_get_wwn(struct bfa_s *bfa, bfa_boolean_t node)
-{
-	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
-	if (node)
-		return fcport->nwwn;
-	else
-		return fcport->pwwn;
-}
-
-void
-bfa_fcport_get_attr(struct bfa_s *bfa, struct bfa_pport_attr_s *attr)
-{
-	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
-	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
-	struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
-
-	bfa_os_memset(attr, 0, sizeof(struct bfa_pport_attr_s));
-
-	attr->nwwn = fcport->nwwn;
-	attr->pwwn = fcport->pwwn;
-
-	attr->factorypwwn =  bfa_ioc_get_mfg_pwwn(&bfa->ioc);
-	attr->factorynwwn =  bfa_ioc_get_mfg_nwwn(&bfa->ioc);
-
-	bfa_os_memcpy(&attr->pport_cfg, &fcport->cfg,
-		      sizeof(struct bfa_pport_cfg_s));
-	/*
-	 * speed attributes
-	 */
-	attr->pport_cfg.speed = fcport->cfg.speed;
-	attr->speed_supported = fcport->speed_sup;
-	attr->speed = fcport->speed;
-	attr->cos_supported = FC_CLASS_3;
-
-	/*
-	 * topology attributes
-	 */
-	attr->pport_cfg.topology = fcport->cfg.topology;
-	attr->topology = fcport->topology;
-
-	/*
-	 * beacon attributes
-	 */
-	attr->beacon = fcport->beacon;
-	attr->link_e2e_beacon = fcport->link_e2e_beacon;
-	attr->plog_enabled = bfa_plog_get_setting(fcport->bfa->plog);
-
-	attr->pport_cfg.path_tov = bfa_fcpim_path_tov_get(bfa);
-	attr->pport_cfg.q_depth = bfa_fcpim_qdepth_get(bfa);
-
-	/* PBC Disabled State */
-	if (cfgrsp->pbc_cfg.port_enabled == BFI_PBC_PORT_DISABLED)
-		attr->port_state = BFA_PPORT_ST_PREBOOT_DISABLED;
-	else {
-		attr->port_state = bfa_sm_to_state(
-				hal_pport_sm_table, fcport->sm);
-		if (bfa_ioc_is_disabled(&fcport->bfa->ioc))
-			attr->port_state = BFA_PPORT_ST_IOCDIS;
-		else if (bfa_ioc_fw_mismatch(&fcport->bfa->ioc))
-			attr->port_state = BFA_PPORT_ST_FWMISMATCH;
-	}
-}
-
-#define BFA_FCPORT_STATS_TOV	1000
-
-/**
- * Fetch port statistics (FCQoS or FCoE).
- */
-bfa_status_t
-bfa_fcport_get_stats(struct bfa_s *bfa, union bfa_fcport_stats_u *stats,
-		    bfa_cb_pport_t cbfn, void *cbarg)
-{
-	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
-
-	if (fcport->stats_busy) {
-		bfa_trc(bfa, fcport->stats_busy);
-		return BFA_STATUS_DEVBUSY;
-	}
-
-	fcport->stats_busy  = BFA_TRUE;
-	fcport->stats_ret   = stats;
-	fcport->stats_cbfn  = cbfn;
-	fcport->stats_cbarg = cbarg;
-
-	bfa_fcport_send_stats_get(fcport);
-
-	bfa_timer_start(bfa, &fcport->timer, bfa_fcport_stats_get_timeout,
-		fcport, BFA_FCPORT_STATS_TOV);
-	return BFA_STATUS_OK;
-}
-
-/**
- * Reset port statistics (FCQoS or FCoE).
- */
-bfa_status_t
-bfa_fcport_clear_stats(struct bfa_s *bfa, bfa_cb_pport_t cbfn, void *cbarg)
-{
-	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
-
-	if (fcport->stats_busy) {
-		bfa_trc(bfa, fcport->stats_busy);
-		return BFA_STATUS_DEVBUSY;
-	}
-
-	fcport->stats_busy = BFA_TRUE;
-	fcport->stats_cbfn = cbfn;
-	fcport->stats_cbarg = cbarg;
-
-	bfa_fcport_send_stats_clear(fcport);
-
-	bfa_timer_start(bfa, &fcport->timer, bfa_fcport_stats_clr_timeout,
-			fcport, BFA_FCPORT_STATS_TOV);
-	return BFA_STATUS_OK;
-}
-
-/**
- * Fetch FCQoS port statistics
- */
-bfa_status_t
-bfa_fcport_get_qos_stats(struct bfa_s *bfa, union bfa_fcport_stats_u *stats,
-	bfa_cb_pport_t cbfn, void *cbarg)
-{
-	/* Meaningful only for FC mode */
-	bfa_assert(bfa_ioc_get_fcmode(&bfa->ioc));
-
-	return bfa_fcport_get_stats(bfa, stats, cbfn, cbarg);
-}
-
-/**
- * Reset FCQoS port statistics
- */
-bfa_status_t
-bfa_fcport_clear_qos_stats(struct bfa_s *bfa, bfa_cb_pport_t cbfn, void *cbarg)
-{
-	/* Meaningful only for FC mode */
-	bfa_assert(bfa_ioc_get_fcmode(&bfa->ioc));
-
-	return bfa_fcport_clear_stats(bfa, cbfn, cbarg);
-}
-
-/**
- * Fetch FCoE port statistics
- */
-bfa_status_t
-bfa_fcport_get_fcoe_stats(struct bfa_s *bfa, union bfa_fcport_stats_u *stats,
-	bfa_cb_pport_t cbfn, void *cbarg)
-{
-	/* Meaningful only for FCoE mode */
-	bfa_assert(!bfa_ioc_get_fcmode(&bfa->ioc));
-
-	return bfa_fcport_get_stats(bfa, stats, cbfn, cbarg);
-}
-
-/**
- * Reset FCoE port statistics
- */
-bfa_status_t
-bfa_fcport_clear_fcoe_stats(struct bfa_s *bfa, bfa_cb_pport_t cbfn, void *cbarg)
-{
-	/* Meaningful only for FCoE mode */
-	bfa_assert(!bfa_ioc_get_fcmode(&bfa->ioc));
-
-	return bfa_fcport_clear_stats(bfa, cbfn, cbarg);
-}
-
-bfa_status_t
-bfa_fcport_trunk_enable(struct bfa_s *bfa, u8 bitmap)
-{
-	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
-
-	bfa_trc(bfa, bitmap);
-	bfa_trc(bfa, fcport->cfg.trunked);
-	bfa_trc(bfa, fcport->cfg.trunk_ports);
-
-	if (!bitmap || (bitmap & (bitmap - 1)))
-		return BFA_STATUS_EINVAL;
-
-	fcport->cfg.trunked = BFA_TRUE;
-	fcport->cfg.trunk_ports = bitmap;
-
-	return BFA_STATUS_OK;
-}
-
-void
-bfa_fcport_qos_get_attr(struct bfa_s *bfa, struct bfa_qos_attr_s *qos_attr)
-{
-	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
-
-	qos_attr->state = bfa_os_ntohl(fcport->qos_attr.state);
-	qos_attr->total_bb_cr = bfa_os_ntohl(fcport->qos_attr.total_bb_cr);
-}
-
-void
-bfa_fcport_qos_get_vc_attr(struct bfa_s *bfa,
-			  struct bfa_qos_vc_attr_s *qos_vc_attr)
-{
-	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
-	struct bfa_qos_vc_attr_s *bfa_vc_attr = &fcport->qos_vc_attr;
-	u32        i = 0;
-
-	qos_vc_attr->total_vc_count = bfa_os_ntohs(bfa_vc_attr->total_vc_count);
-	qos_vc_attr->shared_credit = bfa_os_ntohs(bfa_vc_attr->shared_credit);
-	qos_vc_attr->elp_opmode_flags =
-		bfa_os_ntohl(bfa_vc_attr->elp_opmode_flags);
-
-	/*
-	 * Individual VC info
-	 */
-	while (i < qos_vc_attr->total_vc_count) {
-		qos_vc_attr->vc_info[i].vc_credit =
-			bfa_vc_attr->vc_info[i].vc_credit;
-		qos_vc_attr->vc_info[i].borrow_credit =
-			bfa_vc_attr->vc_info[i].borrow_credit;
-		qos_vc_attr->vc_info[i].priority =
-			bfa_vc_attr->vc_info[i].priority;
-		++i;
-	}
-}
-
-/**
- * Disable port trunking.
- */
-bfa_status_t
-bfa_fcport_trunk_disable(struct bfa_s *bfa)
-{
-	return BFA_STATUS_OK;
-}
-
-bfa_boolean_t
-bfa_fcport_trunk_query(struct bfa_s *bfa, u32 *bitmap)
-{
-	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
-
-	*bitmap = fcport->cfg.trunk_ports;
-	return fcport->cfg.trunked;
-}
-
-bfa_boolean_t
-bfa_fcport_is_disabled(struct bfa_s *bfa)
-{
-	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
-
-	return bfa_sm_to_state(hal_pport_sm_table, fcport->sm) ==
-		BFA_PPORT_ST_DISABLED;
-
-}
-
-bfa_boolean_t
-bfa_fcport_is_ratelim(struct bfa_s *bfa)
-{
-	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
-
-	return fcport->cfg.ratelimit ? BFA_TRUE : BFA_FALSE;
-
-}
-
-void
-bfa_fcport_cfg_qos(struct bfa_s *bfa, bfa_boolean_t on_off)
-{
-	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
-	enum bfa_ioc_type_e ioc_type = bfa_get_type(bfa);
-
-	bfa_trc(bfa, on_off);
-	bfa_trc(bfa, fcport->cfg.qos_enabled);
-
-	bfa_trc(bfa, ioc_type);
-
-	if (ioc_type == BFA_IOC_TYPE_FC) {
-		fcport->cfg.qos_enabled = on_off;
-		/**
-		 * Notify fcpim of the change in QoS state
-		 */
-		bfa_fcpim_update_ioredirect(bfa);
-	}
-}
-
-void
-bfa_fcport_cfg_ratelim(struct bfa_s *bfa, bfa_boolean_t on_off)
-{
-	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
-
-	bfa_trc(bfa, on_off);
-	bfa_trc(bfa, fcport->cfg.ratelimit);
-
-	fcport->cfg.ratelimit = on_off;
-	if (fcport->cfg.trl_def_speed == BFA_PPORT_SPEED_UNKNOWN)
-		fcport->cfg.trl_def_speed = BFA_PPORT_SPEED_1GBPS;
-}
-
-/**
- * Configure default minimum ratelim speed
- */
-bfa_status_t
-bfa_fcport_cfg_ratelim_speed(struct bfa_s *bfa, enum bfa_pport_speed speed)
-{
-	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
-
-	bfa_trc(bfa, speed);
-
-	/*
-	 * Auto and speeds greater than the supported speed are invalid
-	 */
-	if ((speed == BFA_PPORT_SPEED_AUTO) || (speed > fcport->speed_sup)) {
-		bfa_trc(bfa, fcport->speed_sup);
-		return BFA_STATUS_UNSUPP_SPEED;
-	}
-
-	fcport->cfg.trl_def_speed = speed;
-
-	return BFA_STATUS_OK;
-}
-
-/**
- * Get default minimum ratelim speed
- */
-enum bfa_pport_speed
-bfa_fcport_get_ratelim_speed(struct bfa_s *bfa)
-{
-	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
-
-	bfa_trc(bfa, fcport->cfg.trl_def_speed);
-	return fcport->cfg.trl_def_speed;
-
-}
-
-void
-bfa_fcport_busy(struct bfa_s *bfa, bfa_boolean_t status)
-{
-	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
-
-	bfa_trc(bfa, status);
-	bfa_trc(bfa, fcport->diag_busy);
-
-	fcport->diag_busy = status;
-}
-
-void
-bfa_fcport_beacon(struct bfa_s *bfa, bfa_boolean_t beacon,
-		 bfa_boolean_t link_e2e_beacon)
-{
-	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
-
-	bfa_trc(bfa, beacon);
-	bfa_trc(bfa, link_e2e_beacon);
-	bfa_trc(bfa, fcport->beacon);
-	bfa_trc(bfa, fcport->link_e2e_beacon);
-
-	fcport->beacon = beacon;
-	fcport->link_e2e_beacon = link_e2e_beacon;
-}
-
-bfa_boolean_t
-bfa_fcport_is_linkup(struct bfa_s *bfa)
-{
-	return bfa_sm_cmp_state(BFA_FCPORT_MOD(bfa), bfa_fcport_sm_linkup);
-}
-
-bfa_boolean_t
-bfa_fcport_is_qos_enabled(struct bfa_s *bfa)
-{
-	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
-
-	return fcport->cfg.qos_enabled;
-}
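
The removed bfa_fcport.c drives the port through hal_pport_sm_table[], a table
that pairs each bfa_fcport_sm_* handler with the externally visible port state;
bfa_fcport_get_attr() and bfa_fcport_is_disabled() recover the current state by
scanning that table with bfa_sm_to_state(). A minimal, self-contained C sketch
of the handler-to-state lookup follows; the names (sm_func_t, port_sm_table,
sm_to_state) are illustrative, not the BFA framework's own types.

#include <stddef.h>
#include <stdio.h>

/* A state is represented by the handler function currently installed;
 * the table maps each handler back to a reportable enum value. */
typedef void (*sm_func_t)(void *obj, int event);

struct sm_table {
	sm_func_t	sm;	/* state-machine handler */
	int		state;	/* corresponding state value */
};

enum { ST_UNINIT = 1, ST_ENABLING = 2, ST_LINKUP = 3 };

static void sm_uninit(void *obj, int event)   { (void)obj; (void)event; }
static void sm_enabling(void *obj, int event) { (void)obj; (void)event; }
static void sm_linkup(void *obj, int event)   { (void)obj; (void)event; }

static struct sm_table port_sm_table[] = {
	{ sm_uninit,   ST_UNINIT },
	{ sm_enabling, ST_ENABLING },
	{ sm_linkup,   ST_LINKUP },
};

/* Linear scan from the installed handler to its state, the same idea as
 * bfa_sm_to_state() in the code above. */
static int sm_to_state(const struct sm_table *tbl, size_t n, sm_func_t cur)
{
	size_t i;

	for (i = 0; i < n; i++)
		if (tbl[i].sm == cur)
			return tbl[i].state;
	return ST_UNINIT;	/* unknown handler: report the initial state */
}

int main(void)
{
	sm_func_t cur = sm_linkup;	/* pretend the port reached link-up */

	printf("state = %d\n",
	       sm_to_state(port_sm_table,
			   sizeof(port_sm_table) / sizeof(port_sm_table[0]), cur));
	return 0;
}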

+ 1565 - 46
drivers/scsi/bfa/bfa_fcs.c

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
  * All rights reserved
  * www.brocade.com
  *
@@ -19,35 +19,28 @@
  *  bfa_fcs.c BFA FCS main
  */

-#include <fcs/bfa_fcs.h>
-#include "fcs_port.h"
-#include "fcs_uf.h"
-#include "fcs_vport.h"
-#include "fcs_rport.h"
-#include "fcs_fabric.h"
-#include "fcs_fcpim.h"
-#include "fcs_fcptm.h"
-#include "fcbuild.h"
-#include "fcs.h"
+#include "bfa_fcs.h"
+#include "bfa_fcbuild.h"
 #include "bfad_drv.h"
-#include <fcb/bfa_fcb.h>
+
+BFA_TRC_FILE(FCS, FCS);

 /**
  * FCS sub-modules
  */
 struct bfa_fcs_mod_s {
 	void		(*attach) (struct bfa_fcs_s *fcs);
-	void            (*modinit) (struct bfa_fcs_s *fcs);
-	void            (*modexit) (struct bfa_fcs_s *fcs);
+	void		(*modinit) (struct bfa_fcs_s *fcs);
+	void		(*modexit) (struct bfa_fcs_s *fcs);
 };

 #define BFA_FCS_MODULE(_mod) { _mod ## _modinit, _mod ## _modexit }

 static struct bfa_fcs_mod_s fcs_modules[] = {
-	{ bfa_fcs_pport_attach, NULL, NULL },
+	{ bfa_fcs_port_attach, NULL, NULL },
 	{ bfa_fcs_uf_attach, NULL, NULL },
 	{ bfa_fcs_fabric_attach, bfa_fcs_fabric_modinit,
-	 bfa_fcs_fabric_modexit },
+	  bfa_fcs_fabric_modexit },
 };

 /**
@@ -57,8 +50,8 @@ static struct bfa_fcs_mod_s fcs_modules[] = {
 static void
 bfa_fcs_exit_comp(void *fcs_cbarg)
 {
-	struct bfa_fcs_s *fcs = fcs_cbarg;
-	struct bfad_s *bfad = fcs->bfad;
+	struct bfa_fcs_s      *fcs = fcs_cbarg;
+	struct bfad_s         *bfad = fcs->bfad;

 	complete(&bfad->comp);
 }
@@ -74,9 +67,9 @@ bfa_fcs_exit_comp(void *fcs_cbarg)
  */
 void
 bfa_fcs_attach(struct bfa_fcs_s *fcs, struct bfa_s *bfa, struct bfad_s *bfad,
-			bfa_boolean_t min_cfg)
+	       bfa_boolean_t min_cfg)
 {
-	int             i;
+	int		i;
 	struct bfa_fcs_mod_s  *mod;

 	fcs->bfa = bfa;
@@ -86,7 +79,7 @@ bfa_fcs_attach(struct bfa_fcs_s *fcs, struct bfa_s *bfa, struct bfad_s *bfad,
 	bfa_attach_fcs(bfa);
 	fcbuild_init();

-	for (i = 0; i < ARRAY_SIZE(fcs_modules); i++) {
+	for (i = 0; i < sizeof(fcs_modules) / sizeof(fcs_modules[0]); i++) {
 		mod = &fcs_modules[i];
 		if (mod->attach)
 			mod->attach(fcs);
@@ -99,11 +92,11 @@ bfa_fcs_attach(struct bfa_fcs_s *fcs, struct bfa_s *bfa, struct bfad_s *bfad,
 void
 bfa_fcs_init(struct bfa_fcs_s *fcs)
 {
-	int i, npbc_vports;
+	int		i, npbc_vports;
 	struct bfa_fcs_mod_s  *mod;
 	struct bfi_pbc_vport_s pbc_vports[BFI_PBC_MAX_VPORTS];

-	for (i = 0; i < ARRAY_SIZE(fcs_modules); i++) {
+	for (i = 0; i < sizeof(fcs_modules) / sizeof(fcs_modules[0]); i++) {
 		mod = &fcs_modules[i];
 		if (mod->modinit)
 			mod->modinit(fcs);
@@ -111,7 +104,7 @@ bfa_fcs_init(struct bfa_fcs_s *fcs)
 	/* Initialize pbc vports */
 	if (!fcs->min_cfg) {
 		npbc_vports =
-			bfa_iocfc_get_pbc_vports(fcs->bfa, pbc_vports);
+		    bfa_iocfc_get_pbc_vports(fcs->bfa, pbc_vports);
 		for (i = 0; i < npbc_vports; i++)
 			bfa_fcb_pbc_vport_create(fcs->bfa->bfad, pbc_vports[i]);
 	}
@@ -127,12 +120,13 @@ bfa_fcs_start(struct bfa_fcs_s *fcs)
 }

 /**
- * 		FCS driver details initialization.
+ *	brief
+ *		FCS driver details initialization.
  *
- * 	param[in]		fcs		FCS instance
- * 	param[in]		driver_info	Driver Details
+ *	param[in]		fcs		FCS instance
+ *	param[in]		driver_info	Driver Details
  *
- * 	return None
+ *	return None
  */
 void
 bfa_fcs_driver_info_init(struct bfa_fcs_s *fcs,
@@ -145,13 +139,13 @@ bfa_fcs_driver_info_init(struct bfa_fcs_s *fcs,
 }

 /**
- *      @brief
- *              FCS FDMI Driver Parameter Initialization
+ *	brief
+ *		FCS FDMI Driver Parameter Initialization
  *
- *      @param[in]              fcs             FCS instance
- *      @param[in]              fdmi_enable     TRUE/FALSE
+ *	param[in]		fcs		FCS instance
+ *	param[in]		fdmi_enable	TRUE/FALSE
  *
- *      @return None
+ *	return None
  */
 void
 bfa_fcs_set_fdmi_param(struct bfa_fcs_s *fcs, bfa_boolean_t fdmi_enable)
@@ -160,22 +154,24 @@ bfa_fcs_set_fdmi_param(struct bfa_fcs_s *fcs, bfa_boolean_t fdmi_enable)
 	fcs->fdmi_enabled = fdmi_enable;

 }
-
 /**
- * 		FCS instance cleanup and exit.
+ *	brief
+ *		FCS instance cleanup and exit.
  *
- * 	param[in]		fcs			FCS instance
- * 	return None
+ *	param[in]		fcs			FCS instance
+ *	return None
  */
 void
 bfa_fcs_exit(struct bfa_fcs_s *fcs)
 {
 	struct bfa_fcs_mod_s  *mod;
-	int i;
+	int		nmods, i;

 	bfa_wc_init(&fcs->wc, bfa_fcs_exit_comp, fcs);

-	for (i = 0; i < ARRAY_SIZE(fcs_modules); i++) {
+	nmods = sizeof(fcs_modules) / sizeof(fcs_modules[0]);
+
+	for (i = 0; i < nmods; i++) {

 		mod = &fcs_modules[i];
 		if (mod->modexit) {
@@ -194,24 +190,1547 @@ bfa_fcs_trc_init(struct bfa_fcs_s *fcs, struct bfa_trc_mod_s *trcmod)
 	fcs->trcmod = trcmod;
 }

-
 void
-bfa_fcs_log_init(struct bfa_fcs_s *fcs, struct bfa_log_mod_s *logmod)
+bfa_fcs_modexit_comp(struct bfa_fcs_s *fcs)
+{
+	bfa_wc_down(&fcs->wc);
+}
+
+/**
+ * Fabric module implementation.
+ */
+
+#define BFA_FCS_FABRIC_RETRY_DELAY	(2000)	/* Milliseconds */
+#define BFA_FCS_FABRIC_CLEANUP_DELAY	(10000)	/* Milliseconds */
+
+#define bfa_fcs_fabric_set_opertype(__fabric) do {			\
+		if (bfa_fcport_get_topology((__fabric)->fcs->bfa)	\
+		    == BFA_PORT_TOPOLOGY_P2P)				\
+			(__fabric)->oper_type = BFA_PORT_TYPE_NPORT;	\
+		else							\
+			(__fabric)->oper_type = BFA_PORT_TYPE_NLPORT;	\
+} while (0)
+
+/*
+ * forward declarations
+ */
+static void bfa_fcs_fabric_init(struct bfa_fcs_fabric_s *fabric);
+static void bfa_fcs_fabric_login(struct bfa_fcs_fabric_s *fabric);
+static void bfa_fcs_fabric_notify_online(struct bfa_fcs_fabric_s *fabric);
+static void bfa_fcs_fabric_notify_offline(struct bfa_fcs_fabric_s *fabric);
+static void bfa_fcs_fabric_delay(void *cbarg);
+static void bfa_fcs_fabric_delete(struct bfa_fcs_fabric_s *fabric);
+static void bfa_fcs_fabric_delete_comp(void *cbarg);
+static void bfa_fcs_fabric_process_uf(struct bfa_fcs_fabric_s *fabric,
+				      struct fchs_s *fchs, u16 len);
+static void bfa_fcs_fabric_process_flogi(struct bfa_fcs_fabric_s *fabric,
+					 struct fchs_s *fchs, u16 len);
+static void bfa_fcs_fabric_send_flogi_acc(struct bfa_fcs_fabric_s *fabric);
+static void bfa_fcs_fabric_flogiacc_comp(void *fcsarg,
+					 struct bfa_fcxp_s *fcxp, void *cbarg,
+					 bfa_status_t status,
+					 u32 rsp_len,
+					 u32 resid_len,
+					 struct fchs_s *rspfchs);
+/**
+ *  fcs_fabric_sm fabric state machine functions
+ */
+
+/**
+ * Fabric state machine events
+ */
+enum bfa_fcs_fabric_event {
+	BFA_FCS_FABRIC_SM_CREATE	= 1,	/*  create from driver	      */
+	BFA_FCS_FABRIC_SM_DELETE	= 2,	/*  delete from driver	      */
+	BFA_FCS_FABRIC_SM_LINK_DOWN	= 3,	/*  link down from port      */
+	BFA_FCS_FABRIC_SM_LINK_UP	= 4,	/*  link up from port	      */
+	BFA_FCS_FABRIC_SM_CONT_OP	= 5,	/*  flogi/auth continue op   */
+	BFA_FCS_FABRIC_SM_RETRY_OP	= 6,	/*  flogi/auth retry op      */
+	BFA_FCS_FABRIC_SM_NO_FABRIC	= 7,	/*  from flogi/auth	      */
+	BFA_FCS_FABRIC_SM_PERF_EVFP	= 8,	/*  from flogi/auth	      */
+	BFA_FCS_FABRIC_SM_ISOLATE	= 9,	/*  from EVFP processing     */
+	BFA_FCS_FABRIC_SM_NO_TAGGING	= 10,	/*  no VFT tagging from EVFP */
+	BFA_FCS_FABRIC_SM_DELAYED	= 11,	/*  timeout delay event      */
+	BFA_FCS_FABRIC_SM_AUTH_FAILED	= 12,	/*  auth failed	      */
+	BFA_FCS_FABRIC_SM_AUTH_SUCCESS	= 13,	/*  auth successful	      */
+	BFA_FCS_FABRIC_SM_DELCOMP	= 14,	/*  all vports deleted event */
+	BFA_FCS_FABRIC_SM_LOOPBACK	= 15,	/*  Received our own FLOGI   */
+	BFA_FCS_FABRIC_SM_START		= 16,	/*  from driver	      */
+};
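Each of the events above is delivered with bfa_sm_send_event() to whatever handler bfa_sm_set_state() last installed; every bfa_fcs_fabric_sm_* function below is one state of that machine. The following is a minimal stand-alone sketch of that function-pointer pattern only, with hypothetical sm_demo_* names (the real bfa_sm_* macros presumably come from bfa_cs.h), not the driver's actual implementation:

#include <stdio.h>

struct sm_demo_ctx;
typedef void (*sm_demo_state_t)(struct sm_demo_ctx *ctx, int event);

struct sm_demo_ctx {
	sm_demo_state_t sm;		/* current state == current handler */
};

static void sm_demo_linkdown(struct sm_demo_ctx *ctx, int event);
static void sm_demo_online(struct sm_demo_ctx *ctx, int event);

static void
sm_demo_linkdown(struct sm_demo_ctx *ctx, int event)
{
	if (event == 4)			/* LINK_UP, as in the enum above */
		ctx->sm = sm_demo_online;	/* bfa_sm_set_state() analogue */
}

static void
sm_demo_online(struct sm_demo_ctx *ctx, int event)
{
	if (event == 3)			/* LINK_DOWN */
		ctx->sm = sm_demo_linkdown;
}

int main(void)
{
	struct sm_demo_ctx ctx = { .sm = sm_demo_linkdown };

	ctx.sm(&ctx, 4);		/* bfa_sm_send_event() analogue */
	printf("online: %d\n", ctx.sm == sm_demo_online);
	return 0;
}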
+
+static void	bfa_fcs_fabric_sm_uninit(struct bfa_fcs_fabric_s *fabric,
+					 enum bfa_fcs_fabric_event event);
+static void	bfa_fcs_fabric_sm_created(struct bfa_fcs_fabric_s *fabric,
+					  enum bfa_fcs_fabric_event event);
+static void	bfa_fcs_fabric_sm_linkdown(struct bfa_fcs_fabric_s *fabric,
+					   enum bfa_fcs_fabric_event event);
+static void	bfa_fcs_fabric_sm_flogi(struct bfa_fcs_fabric_s *fabric,
+					enum bfa_fcs_fabric_event event);
+static void	bfa_fcs_fabric_sm_flogi_retry(struct bfa_fcs_fabric_s *fabric,
+					      enum bfa_fcs_fabric_event event);
+static void	bfa_fcs_fabric_sm_auth(struct bfa_fcs_fabric_s *fabric,
+				       enum bfa_fcs_fabric_event event);
+static void	bfa_fcs_fabric_sm_auth_failed(struct bfa_fcs_fabric_s *fabric,
+					      enum bfa_fcs_fabric_event event);
+static void	bfa_fcs_fabric_sm_loopback(struct bfa_fcs_fabric_s *fabric,
+					   enum bfa_fcs_fabric_event event);
+static void	bfa_fcs_fabric_sm_nofabric(struct bfa_fcs_fabric_s *fabric,
+					   enum bfa_fcs_fabric_event event);
+static void	bfa_fcs_fabric_sm_online(struct bfa_fcs_fabric_s *fabric,
+					 enum bfa_fcs_fabric_event event);
+static void	bfa_fcs_fabric_sm_evfp(struct bfa_fcs_fabric_s *fabric,
+				       enum bfa_fcs_fabric_event event);
+static void	bfa_fcs_fabric_sm_evfp_done(struct bfa_fcs_fabric_s *fabric,
+					    enum bfa_fcs_fabric_event event);
+static void	bfa_fcs_fabric_sm_isolated(struct bfa_fcs_fabric_s *fabric,
+					   enum bfa_fcs_fabric_event event);
+static void	bfa_fcs_fabric_sm_deleting(struct bfa_fcs_fabric_s *fabric,
+					   enum bfa_fcs_fabric_event event);
+/**
+ *   Beginning state before fabric creation.
+ */
+static void
+bfa_fcs_fabric_sm_uninit(struct bfa_fcs_fabric_s *fabric,
+			 enum bfa_fcs_fabric_event event)
+{
+	bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
+	bfa_trc(fabric->fcs, event);
+
+	switch (event) {
+	case BFA_FCS_FABRIC_SM_CREATE:
+		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_created);
+		bfa_fcs_fabric_init(fabric);
+		bfa_fcs_lport_init(&fabric->bport, &fabric->bport.port_cfg);
+		break;
+
+	case BFA_FCS_FABRIC_SM_LINK_UP:
+	case BFA_FCS_FABRIC_SM_LINK_DOWN:
+		break;
+
+	default:
+		bfa_sm_fault(fabric->fcs, event);
+	}
+}
+
+/**
+ *   Beginning state before fabric creation.
+ */
+static void
+bfa_fcs_fabric_sm_created(struct bfa_fcs_fabric_s *fabric,
+			  enum bfa_fcs_fabric_event event)
+{
+	bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
+	bfa_trc(fabric->fcs, event);
+
+	switch (event) {
+	case BFA_FCS_FABRIC_SM_START:
+		if (bfa_fcport_is_linkup(fabric->fcs->bfa)) {
+			bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_flogi);
+			bfa_fcs_fabric_login(fabric);
+		} else
+			bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown);
+		break;
+
+	case BFA_FCS_FABRIC_SM_LINK_UP:
+	case BFA_FCS_FABRIC_SM_LINK_DOWN:
+		break;
+
+	case BFA_FCS_FABRIC_SM_DELETE:
+		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_uninit);
+		bfa_fcs_modexit_comp(fabric->fcs);
+		break;
+
+	default:
+		bfa_sm_fault(fabric->fcs, event);
+	}
+}
+
+/**
+ *   Link is down, awaiting LINK UP event from port. This is also the
+ *   first state at fabric creation.
+ */
+static void
+bfa_fcs_fabric_sm_linkdown(struct bfa_fcs_fabric_s *fabric,
+			   enum bfa_fcs_fabric_event event)
+{
+	bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
+	bfa_trc(fabric->fcs, event);
+
+	switch (event) {
+	case BFA_FCS_FABRIC_SM_LINK_UP:
+		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_flogi);
+		bfa_fcs_fabric_login(fabric);
+		break;
+
+	case BFA_FCS_FABRIC_SM_RETRY_OP:
+		break;
+
+	case BFA_FCS_FABRIC_SM_DELETE:
+		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting);
+		bfa_fcs_fabric_delete(fabric);
+		break;
+
+	default:
+		bfa_sm_fault(fabric->fcs, event);
+	}
+}
+
+/**
+ *   FLOGI is in progress, awaiting FLOGI reply.
+ */
+static void
+bfa_fcs_fabric_sm_flogi(struct bfa_fcs_fabric_s *fabric,
+			enum bfa_fcs_fabric_event event)
+{
+	bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
+	bfa_trc(fabric->fcs, event);
+
+	switch (event) {
+	case BFA_FCS_FABRIC_SM_CONT_OP:
+
+		bfa_fcport_set_tx_bbcredit(fabric->fcs->bfa,
+					   fabric->bb_credit);
+		fabric->fab_type = BFA_FCS_FABRIC_SWITCHED;
+
+		if (fabric->auth_reqd && fabric->is_auth) {
+			bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_auth);
+			bfa_trc(fabric->fcs, event);
+		} else {
+			bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_online);
+			bfa_fcs_fabric_notify_online(fabric);
+		}
+		break;
+
+	case BFA_FCS_FABRIC_SM_RETRY_OP:
+		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_flogi_retry);
+		bfa_timer_start(fabric->fcs->bfa, &fabric->delay_timer,
+				bfa_fcs_fabric_delay, fabric,
+				BFA_FCS_FABRIC_RETRY_DELAY);
+		break;
+
+	case BFA_FCS_FABRIC_SM_LOOPBACK:
+		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_loopback);
+		bfa_lps_discard(fabric->lps);
+		bfa_fcs_fabric_set_opertype(fabric);
+		break;
+
+	case BFA_FCS_FABRIC_SM_NO_FABRIC:
+		fabric->fab_type = BFA_FCS_FABRIC_N2N;
+		bfa_fcport_set_tx_bbcredit(fabric->fcs->bfa,
+					   fabric->bb_credit);
+		bfa_fcs_fabric_notify_online(fabric);
+		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_nofabric);
+		break;
+
+	case BFA_FCS_FABRIC_SM_LINK_DOWN:
+		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown);
+		bfa_lps_discard(fabric->lps);
+		break;
+
+	case BFA_FCS_FABRIC_SM_DELETE:
+		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting);
+		bfa_lps_discard(fabric->lps);
+		bfa_fcs_fabric_delete(fabric);
+		break;
+
+	default:
+		bfa_sm_fault(fabric->fcs, event);
+	}
+}
+
+
+static void
+bfa_fcs_fabric_sm_flogi_retry(struct bfa_fcs_fabric_s *fabric,
+			      enum bfa_fcs_fabric_event event)
+{
+	bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
+	bfa_trc(fabric->fcs, event);
+
+	switch (event) {
+	case BFA_FCS_FABRIC_SM_DELAYED:
+		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_flogi);
+		bfa_fcs_fabric_login(fabric);
+		break;
+
+	case BFA_FCS_FABRIC_SM_LINK_DOWN:
+		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown);
+		bfa_timer_stop(&fabric->delay_timer);
+		break;
+
+	case BFA_FCS_FABRIC_SM_DELETE:
+		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting);
+		bfa_timer_stop(&fabric->delay_timer);
+		bfa_fcs_fabric_delete(fabric);
+		break;
+
+	default:
+		bfa_sm_fault(fabric->fcs, event);
+	}
+}
+
+/**
+ *   Authentication is in progress, awaiting authentication results.
+ */
+static void
+bfa_fcs_fabric_sm_auth(struct bfa_fcs_fabric_s *fabric,
+		       enum bfa_fcs_fabric_event event)
+{
+	bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
+	bfa_trc(fabric->fcs, event);
+
+	switch (event) {
+	case BFA_FCS_FABRIC_SM_AUTH_FAILED:
+		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_auth_failed);
+		bfa_lps_discard(fabric->lps);
+		break;
+
+	case BFA_FCS_FABRIC_SM_AUTH_SUCCESS:
+		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_online);
+		bfa_fcs_fabric_notify_online(fabric);
+		break;
+
+	case BFA_FCS_FABRIC_SM_PERF_EVFP:
+		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_evfp);
+		break;
+
+	case BFA_FCS_FABRIC_SM_LINK_DOWN:
+		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown);
+		bfa_lps_discard(fabric->lps);
+		break;
+
+	case BFA_FCS_FABRIC_SM_DELETE:
+		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting);
+		bfa_fcs_fabric_delete(fabric);
+		break;
+
+	default:
+		bfa_sm_fault(fabric->fcs, event);
+	}
+}
+
+/**
+ *   Authentication failed
+ */
+static void
+bfa_fcs_fabric_sm_auth_failed(struct bfa_fcs_fabric_s *fabric,
+			      enum bfa_fcs_fabric_event event)
+{
+	bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
+	bfa_trc(fabric->fcs, event);
+
+	switch (event) {
+	case BFA_FCS_FABRIC_SM_LINK_DOWN:
+		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown);
+		bfa_fcs_fabric_notify_offline(fabric);
+		break;
+
+	case BFA_FCS_FABRIC_SM_DELETE:
+		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting);
+		bfa_fcs_fabric_delete(fabric);
+		break;
+
+	default:
+		bfa_sm_fault(fabric->fcs, event);
+	}
+}
+
+/**
+ *   Port is in loopback mode.
+ */
+static void
+bfa_fcs_fabric_sm_loopback(struct bfa_fcs_fabric_s *fabric,
+			   enum bfa_fcs_fabric_event event)
+{
+	bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
+	bfa_trc(fabric->fcs, event);
+
+	switch (event) {
+	case BFA_FCS_FABRIC_SM_LINK_DOWN:
+		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown);
+		bfa_fcs_fabric_notify_offline(fabric);
+		break;
+
+	case BFA_FCS_FABRIC_SM_DELETE:
+		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting);
+		bfa_fcs_fabric_delete(fabric);
+		break;
+
+	default:
+		bfa_sm_fault(fabric->fcs, event);
+	}
+}
+
+/**
+ *   There is no attached fabric - private loop or NPort-to-NPort topology.
+ */
+static void
+bfa_fcs_fabric_sm_nofabric(struct bfa_fcs_fabric_s *fabric,
+			   enum bfa_fcs_fabric_event event)
+{
+	bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
+	bfa_trc(fabric->fcs, event);
+
+	switch (event) {
+	case BFA_FCS_FABRIC_SM_LINK_DOWN:
+		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown);
+		bfa_lps_discard(fabric->lps);
+		bfa_fcs_fabric_notify_offline(fabric);
+		break;
+
+	case BFA_FCS_FABRIC_SM_DELETE:
+		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting);
+		bfa_fcs_fabric_delete(fabric);
+		break;
+
+	case BFA_FCS_FABRIC_SM_NO_FABRIC:
+		bfa_trc(fabric->fcs, fabric->bb_credit);
+		bfa_fcport_set_tx_bbcredit(fabric->fcs->bfa,
+					   fabric->bb_credit);
+		break;
+
+	default:
+		bfa_sm_fault(fabric->fcs, event);
+	}
+}
+
+/**
+ *   Fabric is online - normal operating state.
+ */
+static void
+bfa_fcs_fabric_sm_online(struct bfa_fcs_fabric_s *fabric,
+			 enum bfa_fcs_fabric_event event)
+{
+	bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
+	bfa_trc(fabric->fcs, event);
+
+	switch (event) {
+	case BFA_FCS_FABRIC_SM_LINK_DOWN:
+		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_linkdown);
+		bfa_lps_discard(fabric->lps);
+		bfa_fcs_fabric_notify_offline(fabric);
+		break;
+
+	case BFA_FCS_FABRIC_SM_DELETE:
+		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_deleting);
+		bfa_fcs_fabric_delete(fabric);
+		break;
+
+	case BFA_FCS_FABRIC_SM_AUTH_FAILED:
+		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_auth_failed);
+		bfa_lps_discard(fabric->lps);
+		break;
+
+	case BFA_FCS_FABRIC_SM_AUTH_SUCCESS:
+		break;
+
+	default:
+		bfa_sm_fault(fabric->fcs, event);
+	}
+}
+
+/**
+ *   Exchanging virtual fabric parameters.
+ */
+static void
+bfa_fcs_fabric_sm_evfp(struct bfa_fcs_fabric_s *fabric,
+		       enum bfa_fcs_fabric_event event)
+{
+	bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
+	bfa_trc(fabric->fcs, event);
+
+	switch (event) {
+	case BFA_FCS_FABRIC_SM_CONT_OP:
+		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_evfp_done);
+		break;
+
+	case BFA_FCS_FABRIC_SM_ISOLATE:
+		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_isolated);
+		break;
+
+	default:
+		bfa_sm_fault(fabric->fcs, event);
+	}
+}
+
+/**
+ *   EVFP exchange complete and VFT tagging is enabled.
+ */
+static void
+bfa_fcs_fabric_sm_evfp_done(struct bfa_fcs_fabric_s *fabric,
+			    enum bfa_fcs_fabric_event event)
+{
+	bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
+	bfa_trc(fabric->fcs, event);
+}
+
+/**
+ *   Port is isolated after EVFP exchange due to VF_ID mismatch (N and F).
+ */
+static void
+bfa_fcs_fabric_sm_isolated(struct bfa_fcs_fabric_s *fabric,
+			   enum bfa_fcs_fabric_event event)
+{
+	struct bfad_s *bfad = (struct bfad_s *)fabric->fcs->bfad;
+	char	pwwn_ptr[BFA_STRING_32];
+
+	bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
+	bfa_trc(fabric->fcs, event);
+	wwn2str(pwwn_ptr, fabric->bport.port_cfg.pwwn);
+
+	BFA_LOG(KERN_INFO, bfad, log_level,
+		"Port is isolated due to VF_ID mismatch. "
+		"PWWN: %s Port VF_ID: %04x switch port VF_ID: %04x.",
+		pwwn_ptr, fabric->fcs->port_vfid,
+		fabric->event_arg.swp_vfid);
+}
+
+/**
+ *   Fabric is being deleted, awaiting vport delete completions.
+ */
+static void
+bfa_fcs_fabric_sm_deleting(struct bfa_fcs_fabric_s *fabric,
+			   enum bfa_fcs_fabric_event event)
 {
-	fcs->logm = logmod;
+	bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
+	bfa_trc(fabric->fcs, event);
+
+	switch (event) {
+	case BFA_FCS_FABRIC_SM_DELCOMP:
+		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_uninit);
+		bfa_fcs_modexit_comp(fabric->fcs);
+		break;
+
+	case BFA_FCS_FABRIC_SM_LINK_UP:
+		break;
+
+	case BFA_FCS_FABRIC_SM_LINK_DOWN:
+		bfa_fcs_fabric_notify_offline(fabric);
+		break;
+
+	default:
+		bfa_sm_fault(fabric->fcs, event);
+	}
 }
 
 
+
+/**
+ *  fcs_fabric_private fabric private functions
+ */
+
+static void
+bfa_fcs_fabric_init(struct bfa_fcs_fabric_s *fabric)
+{
+	struct bfa_lport_cfg_s *port_cfg = &fabric->bport.port_cfg;
+
+	port_cfg->roles = BFA_LPORT_ROLE_FCP_IM;
+	port_cfg->nwwn = bfa_ioc_get_nwwn(&fabric->fcs->bfa->ioc);
+	port_cfg->pwwn = bfa_ioc_get_pwwn(&fabric->fcs->bfa->ioc);
+}
+
+/**
+ * Port Symbolic Name Creation for base port.
+ */
 void
-bfa_fcs_aen_init(struct bfa_fcs_s *fcs, struct bfa_aen_s *aen)
+bfa_fcs_fabric_psymb_init(struct bfa_fcs_fabric_s *fabric)
 {
-	fcs->aen = aen;
+	struct bfa_lport_cfg_s *port_cfg = &fabric->bport.port_cfg;
+	char model[BFA_ADAPTER_MODEL_NAME_LEN] = {0};
+	struct bfa_fcs_driver_info_s *driver_info = &fabric->fcs->driver_info;
+
+	bfa_ioc_get_adapter_model(&fabric->fcs->bfa->ioc, model);
+
+	/* Model name/number */
+	strncpy((char *)&port_cfg->sym_name, model,
+		BFA_FCS_PORT_SYMBNAME_MODEL_SZ);
+	strncat((char *)&port_cfg->sym_name, BFA_FCS_PORT_SYMBNAME_SEPARATOR,
+		sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
+
+	/* Driver Version */
+	strncat((char *)&port_cfg->sym_name, (char *)driver_info->version,
+		BFA_FCS_PORT_SYMBNAME_VERSION_SZ);
+	strncat((char *)&port_cfg->sym_name, BFA_FCS_PORT_SYMBNAME_SEPARATOR,
+		sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
+
+	/* Host machine name */
+	strncat((char *)&port_cfg->sym_name,
+		(char *)driver_info->host_machine_name,
+		BFA_FCS_PORT_SYMBNAME_MACHINENAME_SZ);
+	strncat((char *)&port_cfg->sym_name, BFA_FCS_PORT_SYMBNAME_SEPARATOR,
+		sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
+
+	/*
+	 * Host OS Info :
+	 * If OS Patch Info is not there, do not truncate any bytes from the
+	 * OS name string and instead copy the entire OS info string (64 bytes).
+	 */
+	if (driver_info->host_os_patch[0] == '\0') {
+		strncat((char *)&port_cfg->sym_name,
+			(char *)driver_info->host_os_name,
+			BFA_FCS_OS_STR_LEN);
+		strncat((char *)&port_cfg->sym_name,
+			BFA_FCS_PORT_SYMBNAME_SEPARATOR,
+			sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
+	} else {
+		strncat((char *)&port_cfg->sym_name,
+			(char *)driver_info->host_os_name,
+			BFA_FCS_PORT_SYMBNAME_OSINFO_SZ);
+		strncat((char *)&port_cfg->sym_name,
+			BFA_FCS_PORT_SYMBNAME_SEPARATOR,
+			sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
+
+		/* Append host OS Patch Info */
+		strncat((char *)&port_cfg->sym_name,
+			(char *)driver_info->host_os_patch,
+			BFA_FCS_PORT_SYMBNAME_OSPATCH_SZ);
+	}
+
+	/* null terminate */
+	port_cfg->sym_name.symname[BFA_SYMNAME_MAXLEN - 1] = 0;
 }
 
+/**
+ * bfa lps login completion callback
+ */
 void
-bfa_fcs_modexit_comp(struct bfa_fcs_s *fcs)
+bfa_cb_lps_flogi_comp(void *bfad, void *uarg, bfa_status_t status)
 {
-	bfa_wc_down(&fcs->wc);
+	struct bfa_fcs_fabric_s *fabric = uarg;
+
+	bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
+	bfa_trc(fabric->fcs, status);
+
+	switch (status) {
+	case BFA_STATUS_OK:
+		fabric->stats.flogi_accepts++;
+		break;
+
+	case BFA_STATUS_INVALID_MAC:
+		/* Only for CNA */
+		fabric->stats.flogi_acc_err++;
+		bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_RETRY_OP);
+
+		return;
+
+	case BFA_STATUS_EPROTOCOL:
+		switch (bfa_lps_get_extstatus(fabric->lps)) {
+		case BFA_EPROTO_BAD_ACCEPT:
+			fabric->stats.flogi_acc_err++;
+			break;
+
+		case BFA_EPROTO_UNKNOWN_RSP:
+			fabric->stats.flogi_unknown_rsp++;
+			break;
+
+		default:
+			break;
+		}
+		bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_RETRY_OP);
+
+		return;
+
+	case BFA_STATUS_FABRIC_RJT:
+		fabric->stats.flogi_rejects++;
+		bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_RETRY_OP);
+		return;
+
+	default:
+		fabric->stats.flogi_rsp_err++;
+		bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_RETRY_OP);
+		return;
+	}
+
+	fabric->bb_credit = bfa_lps_get_peer_bbcredit(fabric->lps);
+	bfa_trc(fabric->fcs, fabric->bb_credit);
+
+	if (!bfa_lps_is_brcd_fabric(fabric->lps))
+		fabric->fabric_name =  bfa_lps_get_peer_nwwn(fabric->lps);
+
+	/*
+	 * Check port type. It should be 1 = F-port.
+	 */
+	if (bfa_lps_is_fport(fabric->lps)) {
+		fabric->bport.pid = bfa_lps_get_pid(fabric->lps);
+		fabric->is_npiv = bfa_lps_is_npiv_en(fabric->lps);
+		fabric->is_auth = bfa_lps_is_authreq(fabric->lps);
+		bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_CONT_OP);
+	} else {
+		/*
+		 * Nport-2-Nport direct attached
+		 */
+		fabric->bport.port_topo.pn2n.rem_port_wwn =
+			bfa_lps_get_peer_pwwn(fabric->lps);
+		bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_NO_FABRIC);
+	}
+
+	bfa_trc(fabric->fcs, fabric->bport.pid);
+	bfa_trc(fabric->fcs, fabric->is_npiv);
+	bfa_trc(fabric->fcs, fabric->is_auth);
+}
+/**
+ *		Allocate and send FLOGI.
+ */
+static void
+bfa_fcs_fabric_login(struct bfa_fcs_fabric_s *fabric)
+{
+	struct bfa_s		*bfa = fabric->fcs->bfa;
+	struct bfa_lport_cfg_s	*pcfg = &fabric->bport.port_cfg;
+	u8			alpa = 0;
+
+	if (bfa_fcport_get_topology(bfa) == BFA_PORT_TOPOLOGY_LOOP)
+		alpa = bfa_fcport_get_myalpa(bfa);
+
+	bfa_lps_flogi(fabric->lps, fabric, alpa, bfa_fcport_get_maxfrsize(bfa),
+		      pcfg->pwwn, pcfg->nwwn, fabric->auth_reqd);
+
+	fabric->stats.flogi_sent++;
+}
+
+static void
+bfa_fcs_fabric_notify_online(struct bfa_fcs_fabric_s *fabric)
+{
+	struct bfa_fcs_vport_s *vport;
+	struct list_head	      *qe, *qen;
+
+	bfa_trc(fabric->fcs, fabric->fabric_name);
+
+	bfa_fcs_fabric_set_opertype(fabric);
+	fabric->stats.fabric_onlines++;
+
+	/**
+	 * notify online event to base and then virtual ports
+	 */
+	bfa_fcs_lport_online(&fabric->bport);
+
+	list_for_each_safe(qe, qen, &fabric->vport_q) {
+		vport = (struct bfa_fcs_vport_s *) qe;
+		bfa_fcs_vport_online(vport);
+	}
+}
+
+static void
+bfa_fcs_fabric_notify_offline(struct bfa_fcs_fabric_s *fabric)
+{
+	struct bfa_fcs_vport_s *vport;
+	struct list_head	      *qe, *qen;
+
+	bfa_trc(fabric->fcs, fabric->fabric_name);
+	fabric->stats.fabric_offlines++;
+
+	/**
+	 * notify offline event first to vports and then base port.
+	 */
+	list_for_each_safe(qe, qen, &fabric->vport_q) {
+		vport = (struct bfa_fcs_vport_s *) qe;
+		bfa_fcs_vport_offline(vport);
+	}
+
+	bfa_fcs_lport_offline(&fabric->bport);
+
+	fabric->fabric_name = 0;
+	fabric->fabric_ip_addr[0] = 0;
+}
+
+static void
+bfa_fcs_fabric_delay(void *cbarg)
+{
+	struct bfa_fcs_fabric_s *fabric = cbarg;
+
+	bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_DELAYED);
+}
+
+/**
+ * Delete all vports and wait for vport delete completions.
+ */
+static void
+bfa_fcs_fabric_delete(struct bfa_fcs_fabric_s *fabric)
+{
+	struct bfa_fcs_vport_s *vport;
+	struct list_head	      *qe, *qen;
+
+	list_for_each_safe(qe, qen, &fabric->vport_q) {
+		vport = (struct bfa_fcs_vport_s *) qe;
+		bfa_fcs_vport_fcs_delete(vport);
+	}
+
+	bfa_fcs_lport_delete(&fabric->bport);
+	bfa_wc_wait(&fabric->wc);
 }
 
+static void
+bfa_fcs_fabric_delete_comp(void *cbarg)
+{
+	struct bfa_fcs_fabric_s *fabric = cbarg;
+
+	bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_DELCOMP);
+}
 
+/**
+ *  fcs_fabric_public fabric public functions
+ */
+
+/**
+ * Attach time initialization.
+ */
+void
+bfa_fcs_fabric_attach(struct bfa_fcs_s *fcs)
+{
+	struct bfa_fcs_fabric_s *fabric;
+
+	fabric = &fcs->fabric;
+	bfa_os_memset(fabric, 0, sizeof(struct bfa_fcs_fabric_s));
+
+	/**
+	 * Initialize base fabric.
+	 */
+	fabric->fcs = fcs;
+	INIT_LIST_HEAD(&fabric->vport_q);
+	INIT_LIST_HEAD(&fabric->vf_q);
+	fabric->lps = bfa_lps_alloc(fcs->bfa);
+	bfa_assert(fabric->lps);
+
+	/**
+	 * Initialize fabric delete completion handler. Fabric deletion is
+	 * complete when the last vport delete is complete.
+	 */
+	bfa_wc_init(&fabric->wc, bfa_fcs_fabric_delete_comp, fabric);
+	bfa_wc_up(&fabric->wc); /* For the base port */
+
+	bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_uninit);
+	bfa_fcs_lport_attach(&fabric->bport, fabric->fcs, FC_VF_ID_NULL, NULL);
+}
+
+void
+bfa_fcs_fabric_modinit(struct bfa_fcs_s *fcs)
+{
+	bfa_sm_send_event(&fcs->fabric, BFA_FCS_FABRIC_SM_CREATE);
+	bfa_trc(fcs, 0);
+}
+
+/**
+ *   Module cleanup
+ */
+void
+bfa_fcs_fabric_modexit(struct bfa_fcs_s *fcs)
+{
+	struct bfa_fcs_fabric_s *fabric;
+
+	bfa_trc(fcs, 0);
+
+	/**
+	 * Cleanup base fabric.
+	 */
+	fabric = &fcs->fabric;
+	bfa_lps_delete(fabric->lps);
+	bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_DELETE);
+}
+
+/**
+ * Fabric module start -- kick starts FCS actions
+ */
+void
+bfa_fcs_fabric_modstart(struct bfa_fcs_s *fcs)
+{
+	struct bfa_fcs_fabric_s *fabric;
+
+	bfa_trc(fcs, 0);
+	fabric = &fcs->fabric;
+	bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_START);
+}
+
+/**
+ *   Suspend fabric activity as part of driver suspend.
+ */
+void
+bfa_fcs_fabric_modsusp(struct bfa_fcs_s *fcs)
+{
+}
+
+bfa_boolean_t
+bfa_fcs_fabric_is_loopback(struct bfa_fcs_fabric_s *fabric)
+{
+	return bfa_sm_cmp_state(fabric, bfa_fcs_fabric_sm_loopback);
+}
+
+bfa_boolean_t
+bfa_fcs_fabric_is_auth_failed(struct bfa_fcs_fabric_s *fabric)
+{
+	return bfa_sm_cmp_state(fabric, bfa_fcs_fabric_sm_auth_failed);
+}
+
+enum bfa_port_type
+bfa_fcs_fabric_port_type(struct bfa_fcs_fabric_s *fabric)
+{
+	return fabric->oper_type;
+}
+
+/**
+ *   Link up notification from BFA physical port module.
+ */
+void
+bfa_fcs_fabric_link_up(struct bfa_fcs_fabric_s *fabric)
+{
+	bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
+	bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_LINK_UP);
+}
+
+/**
+ *   Link down notification from BFA physical port module.
+ */
+void
+bfa_fcs_fabric_link_down(struct bfa_fcs_fabric_s *fabric)
+{
+	bfa_trc(fabric->fcs, fabric->bport.port_cfg.pwwn);
+	bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_LINK_DOWN);
+}
+
+/**
+ *   A child vport is being created in the fabric.
+ *
+ *   Call from vport module at vport creation. A list of base port and vports
+ *   belonging to a fabric is maintained to propagate link events.
+ *
+ *   param[in] fabric - Fabric instance. This can be a base fabric or vf.
+ *   param[in] vport  - Vport being created.
+ *
+ *   @return None (always succeeds)
+ */
+void
+bfa_fcs_fabric_addvport(struct bfa_fcs_fabric_s *fabric,
+			struct bfa_fcs_vport_s *vport)
+{
+	/**
+	 * - add vport to fabric's vport_q
+	 */
+	bfa_trc(fabric->fcs, fabric->vf_id);
+
+	list_add_tail(&vport->qe, &fabric->vport_q);
+	fabric->num_vports++;
+	bfa_wc_up(&fabric->wc);
+}
+
+/**
+ *   A child vport is being deleted from fabric.
+ *
+ *   Vport is being deleted.
+ */
+void
+bfa_fcs_fabric_delvport(struct bfa_fcs_fabric_s *fabric,
+			struct bfa_fcs_vport_s *vport)
+{
+	list_del(&vport->qe);
+	fabric->num_vports--;
+	bfa_wc_down(&fabric->wc);
+}
+
+/**
+ *   Base port is deleted.
+ */
+void
+bfa_fcs_fabric_port_delete_comp(struct bfa_fcs_fabric_s *fabric)
+{
+	bfa_wc_down(&fabric->wc);
+}
+
+
+/**
+ *    Check if fabric is online.
+ *
+ *   param[in] fabric - Fabric instance. This can be a base fabric or vf.
+ *
+ *   @return  TRUE/FALSE
+ */
+int
+bfa_fcs_fabric_is_online(struct bfa_fcs_fabric_s *fabric)
+{
+	return bfa_sm_cmp_state(fabric, bfa_fcs_fabric_sm_online);
+}
+
+/**
+ *	brief
+ *
+ */
+bfa_status_t
+bfa_fcs_fabric_addvf(struct bfa_fcs_fabric_s *vf, struct bfa_fcs_s *fcs,
+		     struct bfa_lport_cfg_s *port_cfg, struct bfad_vf_s *vf_drv)
+{
+	bfa_sm_set_state(vf, bfa_fcs_fabric_sm_uninit);
+	return BFA_STATUS_OK;
+}
+
+/**
+ * Lookup for a vport within a fabric, given its pwwn
+ */
+struct bfa_fcs_vport_s *
+bfa_fcs_fabric_vport_lookup(struct bfa_fcs_fabric_s *fabric, wwn_t pwwn)
+{
+	struct bfa_fcs_vport_s *vport;
+	struct list_head	      *qe;
+
+	list_for_each(qe, &fabric->vport_q) {
+		vport = (struct bfa_fcs_vport_s *) qe;
+		if (bfa_fcs_lport_get_pwwn(&vport->lport) == pwwn)
+			return vport;
+	}
+
+	return NULL;
+}
+
+/**
+ *    In a given fabric, return the number of lports.
+ *
+ *   param[in] fabric - Fabric instance. This can be a base fabric or vf.
+ *
+ *   @return : 1 or more.
+ */
+u16
+bfa_fcs_fabric_vport_count(struct bfa_fcs_fabric_s *fabric)
+{
+	return fabric->num_vports;
+}
+
+/*
+ *  Get OUI of the attached switch.
+ *
+ *  Note : Use of this function should be avoided as much as possible.
+ *         This function should be used only if there is any requirement
+ *         to check for FOS version below 6.3.
+ *         To check if the attached fabric is a brocade fabric, use
+ *         bfa_lps_is_brcd_fabric() which works for FOS versions 6.3
+ *         or above only.
+ */
+
+u16
+bfa_fcs_fabric_get_switch_oui(struct bfa_fcs_fabric_s *fabric)
+{
+	wwn_t fab_nwwn;
+	u8 *tmp;
+	u16 oui;
+
+	fab_nwwn = bfa_lps_get_peer_nwwn(fabric->lps);
+
+	tmp = (u8 *)&fab_nwwn;
+	oui = (tmp[3] << 8) | tmp[4];
+
+	return oui;
+}
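The arithmetic above picks bytes 3 and 4 (0-based) of the 8-byte node WWN returned by bfa_lps_get_peer_nwwn(). Below is a stand-alone sketch of the same extraction; the sample WWN is an assumption chosen purely for illustration so that the result equals the BFA_FCS_BRCD_SWITCH_OUI value (0x051e) defined in bfa_fcs.h:

#include <stdint.h>
#include <stdio.h>

/* Same index arithmetic as bfa_fcs_fabric_get_switch_oui() above,
 * expressed on a plain byte array instead of wwn_t. */
static uint16_t
demo_oui_from_nwwn(const uint8_t nwwn[8])
{
	return (uint16_t)((nwwn[3] << 8) | nwwn[4]);
}

int main(void)
{
	/* hypothetical switch node WWN 10:00:00:05:1e:01:02:03 */
	const uint8_t nwwn[8] = { 0x10, 0x00, 0x00, 0x05, 0x1e, 0x01, 0x02, 0x03 };

	printf("switch OUI = 0x%04x\n", demo_oui_from_nwwn(nwwn));	/* 0x051e */
	return 0;
}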
+/**
+ *		Unsolicited frame receive handling.
+ */
+void
+bfa_fcs_fabric_uf_recv(struct bfa_fcs_fabric_s *fabric, struct fchs_s *fchs,
+		       u16 len)
+{
+	u32	pid = fchs->d_id;
+	struct bfa_fcs_vport_s *vport;
+	struct list_head	      *qe;
+	struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1);
+	struct fc_logi_s *flogi = (struct fc_logi_s *) els_cmd;
+
+	bfa_trc(fabric->fcs, len);
+	bfa_trc(fabric->fcs, pid);
+
+	/**
+	 * Look for our own FLOGI frames being looped back. This means an
+	 * external loopback cable is in place. Our own FLOGI frames are
+	 * sometimes looped back when switch port gets temporarily bypassed.
+	 */
+	if ((pid == bfa_os_ntoh3b(FC_FABRIC_PORT)) &&
+	    (els_cmd->els_code == FC_ELS_FLOGI) &&
+	    (flogi->port_name == bfa_fcs_lport_get_pwwn(&fabric->bport))) {
+		bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_LOOPBACK);
+		return;
+	}
+
+	/**
+	 * FLOGI/EVFP exchanges should be consumed by base fabric.
+	 */
+	if (fchs->d_id == bfa_os_hton3b(FC_FABRIC_PORT)) {
+		bfa_trc(fabric->fcs, pid);
+		bfa_fcs_fabric_process_uf(fabric, fchs, len);
+		return;
+	}
+
+	if (fabric->bport.pid == pid) {
+		/**
+		 * All authentication frames should be routed to auth
+		 */
+		bfa_trc(fabric->fcs, els_cmd->els_code);
+		if (els_cmd->els_code == FC_ELS_AUTH) {
+			bfa_trc(fabric->fcs, els_cmd->els_code);
+			return;
+		}
+
+		bfa_trc(fabric->fcs, *(u8 *) ((u8 *) fchs));
+		bfa_fcs_lport_uf_recv(&fabric->bport, fchs, len);
+		return;
+	}
+
+	/**
+	 * look for a matching local port ID
+	 */
+	list_for_each(qe, &fabric->vport_q) {
+		vport = (struct bfa_fcs_vport_s *) qe;
+		if (vport->lport.pid == pid) {
+			bfa_fcs_lport_uf_recv(&vport->lport, fchs, len);
+			return;
+		}
+	}
+	bfa_trc(fabric->fcs, els_cmd->els_code);
+	bfa_fcs_lport_uf_recv(&fabric->bport, fchs, len);
+}
+
+/**
+ *		Unsolicited frames to be processed by fabric.
+ */
+static void
+bfa_fcs_fabric_process_uf(struct bfa_fcs_fabric_s *fabric, struct fchs_s *fchs,
+			  u16 len)
+{
+	struct fc_els_cmd_s *els_cmd = (struct fc_els_cmd_s *) (fchs + 1);
+
+	bfa_trc(fabric->fcs, els_cmd->els_code);
+
+	switch (els_cmd->els_code) {
+	case FC_ELS_FLOGI:
+		bfa_fcs_fabric_process_flogi(fabric, fchs, len);
+		break;
+
+	default:
+		/*
+		 * need to generate a LS_RJT
+		 */
+		break;
+	}
+}
+
+/**
+ *	Process	incoming FLOGI
+ */
+static void
+bfa_fcs_fabric_process_flogi(struct bfa_fcs_fabric_s *fabric,
+			struct fchs_s *fchs, u16 len)
+{
+	struct fc_logi_s *flogi = (struct fc_logi_s *) (fchs + 1);
+	struct bfa_fcs_lport_s *bport = &fabric->bport;
+
+	bfa_trc(fabric->fcs, fchs->s_id);
+
+	fabric->stats.flogi_rcvd++;
+	/*
+	 * Check port type. It should be 0 = n-port.
+	 */
+	if (flogi->csp.port_type) {
+		/*
+		 * @todo: may need to send a LS_RJT
+		 */
+		bfa_trc(fabric->fcs, flogi->port_name);
+		fabric->stats.flogi_rejected++;
+		return;
+	}
+
+	fabric->bb_credit = bfa_os_ntohs(flogi->csp.bbcred);
+	bport->port_topo.pn2n.rem_port_wwn = flogi->port_name;
+	bport->port_topo.pn2n.reply_oxid = fchs->ox_id;
+
+	/*
+	 * Send a Flogi Acc
+	 */
+	bfa_fcs_fabric_send_flogi_acc(fabric);
+	bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_NO_FABRIC);
+}
+
+static void
+bfa_fcs_fabric_send_flogi_acc(struct bfa_fcs_fabric_s *fabric)
+{
+	struct bfa_lport_cfg_s *pcfg = &fabric->bport.port_cfg;
+	struct bfa_fcs_lport_n2n_s *n2n_port = &fabric->bport.port_topo.pn2n;
+	struct bfa_s	  *bfa = fabric->fcs->bfa;
+	struct bfa_fcxp_s *fcxp;
+	u16	reqlen;
+	struct fchs_s	fchs;
+
+	fcxp = bfa_fcs_fcxp_alloc(fabric->fcs);
+	/**
+	 * Do not expect this failure -- expect remote node to retry
+	 */
+	if (!fcxp)
+		return;
+
+	reqlen = fc_flogi_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
+				    bfa_os_hton3b(FC_FABRIC_PORT),
+				    n2n_port->reply_oxid, pcfg->pwwn,
+				    pcfg->nwwn,
+				    bfa_fcport_get_maxfrsize(bfa),
+				    bfa_fcport_get_rx_bbcredit(bfa));
+
+	bfa_fcxp_send(fcxp, NULL, fabric->vf_id, bfa_lps_get_tag(fabric->lps),
+		      BFA_FALSE, FC_CLASS_3,
+		      reqlen, &fchs, bfa_fcs_fabric_flogiacc_comp, fabric,
+		      FC_MAX_PDUSZ, 0);
+}
+
+/**
+ *   Flogi Acc completion callback.
+ */
+static void
+bfa_fcs_fabric_flogiacc_comp(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
+			     bfa_status_t status, u32 rsp_len,
+			     u32 resid_len, struct fchs_s *rspfchs)
+{
+	struct bfa_fcs_fabric_s *fabric = cbarg;
+
+	bfa_trc(fabric->fcs, status);
+}
+
+/*
+ *
+ * @param[in] fabric - fabric
+ * @param[in] wwn_t - new fabric name
+ *
+ * @return - none
+ */
+void
+bfa_fcs_fabric_set_fabric_name(struct bfa_fcs_fabric_s *fabric,
+			       wwn_t fabric_name)
+{
+	struct bfad_s *bfad = (struct bfad_s *)fabric->fcs->bfad;
+	char	pwwn_ptr[BFA_STRING_32];
+	char	fwwn_ptr[BFA_STRING_32];
+
+	bfa_trc(fabric->fcs, fabric_name);
+
+	if (fabric->fabric_name == 0) {
+		/*
+		 * With BRCD switches, we don't get Fabric Name in FLOGI.
+		 * Don't generate a fabric name change event in this case.
+		 */
+		fabric->fabric_name = fabric_name;
+	} else {
+		fabric->fabric_name = fabric_name;
+		wwn2str(pwwn_ptr, bfa_fcs_lport_get_pwwn(&fabric->bport));
+		wwn2str(fwwn_ptr,
+			bfa_fcs_lport_get_fabric_name(&fabric->bport));
+		BFA_LOG(KERN_WARNING, bfad, log_level,
+			"Base port WWN = %s Fabric WWN = %s\n",
+			pwwn_ptr, fwwn_ptr);
+	}
+}
+
+/**
+ *  fcs_vf_api virtual fabrics API
+ */
+
+/**
+ * Enable VF mode.
+ *
+ * @param[in]		fcs		fcs module instance
+ * @param[in]		vf_id		default vf_id of port, FC_VF_ID_NULL
+ *					to use standard default vf_id of 1.
+ *
+ * @retval	BFA_STATUS_OK		vf mode is enabled
+ * @retval	BFA_STATUS_BUSY		Port is active. Port must be disabled
+ *					before VF mode can be enabled.
+ */
+bfa_status_t
+bfa_fcs_vf_mode_enable(struct bfa_fcs_s *fcs, u16 vf_id)
+{
+	return BFA_STATUS_OK;
+}
+
+/**
+ * Disable VF mode.
+ *
+ * @param[in]		fcs		fcs module instance
+ *
+ * @retval	BFA_STATUS_OK		vf mode is disabled
+ * @retval	BFA_STATUS_BUSY		VFs are present and being used. All
+ *					VFs must be deleted before disabling
+ *					VF mode.
+ */
+bfa_status_t
+bfa_fcs_vf_mode_disable(struct bfa_fcs_s *fcs)
+{
+	return BFA_STATUS_OK;
+}
+
+/**
+ *  Create a new VF instance.
+ *
+ *  A new VF is created using the given VF configuration. A VF is identified
+ *  by VF id. No duplicate VF creation is allowed with the same VF id. Once
+ *  a VF is created, VF is automatically started after link initialization
+ *  and EVFP exchange is completed.
+ *
+ *	param[in] vf	 -	FCS vf data structure. Memory is
+ *				allocated by caller (driver)
+ *	param[in] fcs	 -	FCS module
+ *	param[in] vf_cfg -	VF configuration
+ *	param[in] vf_drv -	Opaque handle back to the driver's
+ *				virtual vf structure
+ *
+ *	retval BFA_STATUS_OK VF creation is successful
+ *	retval BFA_STATUS_FAILED VF creation failed
+ *	retval BFA_STATUS_EEXIST A VF exists with the given vf_id
+ */
+bfa_status_t
+bfa_fcs_vf_create(bfa_fcs_vf_t *vf, struct bfa_fcs_s *fcs, u16 vf_id,
+		  struct bfa_lport_cfg_s *port_cfg, struct bfad_vf_s *vf_drv)
+{
+	bfa_trc(fcs, vf_id);
+	return BFA_STATUS_OK;
+}
+
+/**
+ *	Use this function to delete a BFA VF object. VF object should
+ *	be stopped before this function call.
+ *
+ *	param[in] vf - pointer to bfa_vf_t.
+ *
+ *	retval BFA_STATUS_OK	On vf deletion success
+ *	retval BFA_STATUS_BUSY VF is not in a stopped state
+ *	retval BFA_STATUS_INPROGRESS VF deletion is in progress
+ */
+bfa_status_t
+bfa_fcs_vf_delete(bfa_fcs_vf_t *vf)
+{
+	bfa_trc(vf->fcs, vf->vf_id);
+	return BFA_STATUS_OK;
+}
+
+
+/**
+ *	Returns attributes of the given VF.
+ *
+ *	param[in]	vf	pointer to bfa_vf_t.
+ *	param[out] vf_attr	vf attributes returned
+ *
+ *	return None
+ */
+void
+bfa_fcs_vf_get_attr(bfa_fcs_vf_t *vf, struct bfa_vf_attr_s *vf_attr)
+{
+	bfa_trc(vf->fcs, vf->vf_id);
+}
+
+/**
+ *	Return statistics associated with the given vf.
+ *
+ *	param[in] vf		pointer to bfa_vf_t.
+ *	param[out] vf_stats	vf statistics returned
+ *
+ *	@return None
+ */
+void
+bfa_fcs_vf_get_stats(bfa_fcs_vf_t *vf, struct bfa_vf_stats_s *vf_stats)
+{
+	bfa_os_memcpy(vf_stats, &vf->stats, sizeof(struct bfa_vf_stats_s));
+}
+
+/**
+ *	clear statistics associated with the given vf.
+ *
+ *	param[in]	vf	pointer to bfa_vf_t.
+ *
+ *	@return None
+ */
+void
+bfa_fcs_vf_clear_stats(bfa_fcs_vf_t *vf)
+{
+	bfa_os_memset(&vf->stats, 0, sizeof(struct bfa_vf_stats_s));
+}
+
+/**
+ *	Returns FCS vf structure for a given vf_id.
+ *
+ *	param[in]	vf_id - VF_ID
+ *
+ *	return
+ *	If lookup succeeds, returns fcs vf object, otherwise returns NULL
+ */
+bfa_fcs_vf_t   *
+bfa_fcs_vf_lookup(struct bfa_fcs_s *fcs, u16 vf_id)
+{
+	bfa_trc(fcs, vf_id);
+	if (vf_id == FC_VF_ID_NULL)
+		return &fcs->fabric;
+
+	return NULL;
+}
+
+/**
+ *	Return the list of VFs configured.
+ *
+ *	param[in]	fcs	fcs module instance
+ *	param[out]	vf_ids	returned list of vf_ids
+ *	param[in,out]	nvfs	in:size of vf_ids array,
+ *				out:total elements present,
+ *				actual elements returned is limited by the size
+ *
+ *	return Driver VF structure
+ */
+void
+bfa_fcs_vf_list(struct bfa_fcs_s *fcs, u16 *vf_ids, int *nvfs)
+{
+	bfa_trc(fcs, *nvfs);
+}
+
+/**
+ *	Return the list of all VFs visible from fabric.
+ *
+ *	param[in]	fcs	fcs module instance
+ *	param[out]	vf_ids	returned list of vf_ids
+ *	param[in,out]	nvfs	in:size of vf_ids array,
+ *				out:total elements present,
+ *				actual elements returned is limited by the size
+ *
+ *	return Driver VF structure
+ */
+void
+bfa_fcs_vf_list_all(struct bfa_fcs_s *fcs, u16 *vf_ids, int *nvfs)
+{
+	bfa_trc(fcs, *nvfs);
+}
+
+/**
+ *	Return the list of local logical ports present in the given VF.
+ *
+ *	param[in]	vf	vf for which logical ports are returned
+ *	param[out]	lpwwn	returned logical port wwn list
+ *	param[in,out]	nlports	in:size of lpwwn list;
+ *				out:total elements present,
+ *				actual elements returned is limited by the size
+ */
+void
+bfa_fcs_vf_get_ports(bfa_fcs_vf_t *vf, wwn_t lpwwn[], int *nlports)
+{
+	struct list_head	*qe;
+	struct bfa_fcs_vport_s *vport;
+	int	i;
+	struct bfa_fcs_s      *fcs;
+
+	if (vf == NULL || lpwwn == NULL || *nlports == 0)
+		return;
+
+	fcs = vf->fcs;
+
+	bfa_trc(fcs, vf->vf_id);
+	bfa_trc(fcs, (u32) *nlports);
+
+	i = 0;
+	lpwwn[i++] = vf->bport.port_cfg.pwwn;
+
+	list_for_each(qe, &vf->vport_q) {
+		if (i >= *nlports)
+			break;
+
+		vport = (struct bfa_fcs_vport_s *) qe;
+		lpwwn[i++] = vport->lport.port_cfg.pwwn;
+	}
+
+	bfa_trc(fcs, i);
+	*nlports = i;
+}
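bfa_fcs_vf_get_ports() above follows a size-in/count-out convention: *nlports carries the capacity of lpwwn[] on entry and the number of entries actually written on return, with the base port pwwn stored first. A hypothetical caller-side sketch (not part of this patch) under those assumptions:

/* Hypothetical caller; assumes only the declarations bfa_fcs.h provides
 * (bfa_fcs_vf_t, wwn_t, bfa_fcs_vf_get_ports()). */
static void
demo_count_vf_lports(bfa_fcs_vf_t *vf)
{
	wwn_t lpwwn[16];	/* capacity chosen by the caller */
	int nlports = 16;	/* in: size of lpwwn[] */

	bfa_fcs_vf_get_ports(vf, lpwwn, &nlports);

	/* out: nlports now holds the number of valid lpwwn[] entries;
	 * lpwwn[0] is the base port pwwn, the rest are vport pwwns. */
}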
+
+/**
+ * BFA FCS PPORT ( physical port)
+ */
+static void
+bfa_fcs_port_event_handler(void *cbarg, enum bfa_port_linkstate event)
+{
+	struct bfa_fcs_s      *fcs = cbarg;
+
+	bfa_trc(fcs, event);
+
+	switch (event) {
+	case BFA_PORT_LINKUP:
+		bfa_fcs_fabric_link_up(&fcs->fabric);
+		break;
+
+	case BFA_PORT_LINKDOWN:
+		bfa_fcs_fabric_link_down(&fcs->fabric);
+		break;
+
+	default:
+		bfa_assert(0);
+	}
+}
+
+void
+bfa_fcs_port_attach(struct bfa_fcs_s *fcs)
+{
+	bfa_fcport_event_register(fcs->bfa, bfa_fcs_port_event_handler, fcs);
+}
+
+/**
+ * BFA FCS UF ( Unsolicited Frames)
+ */
+
+/**
+ *		BFA callback for unsolicited frame receive handler.
+ *
+ * @param[in]		cbarg		callback arg for receive handler
+ * @param[in]		uf		unsolicited frame descriptor
+ *
+ * @return None
+ */
+static void
+bfa_fcs_uf_recv(void *cbarg, struct bfa_uf_s *uf)
+{
+	struct bfa_fcs_s	*fcs = (struct bfa_fcs_s *) cbarg;
+	struct fchs_s	*fchs = bfa_uf_get_frmbuf(uf);
+	u16	len = bfa_uf_get_frmlen(uf);
+	struct fc_vft_s *vft;
+	struct bfa_fcs_fabric_s *fabric;
+
+	/**
+	 * check for VFT header
+	 */
+	if (fchs->routing == FC_RTG_EXT_HDR &&
+	    fchs->cat_info == FC_CAT_VFT_HDR) {
+		bfa_stats(fcs, uf.tagged);
+		vft = bfa_uf_get_frmbuf(uf);
+		if (fcs->port_vfid == vft->vf_id)
+			fabric = &fcs->fabric;
+		else
+			fabric = bfa_fcs_vf_lookup(fcs, (u16) vft->vf_id);
+
+		/**
+		 * drop frame if vfid is unknown
+		 */
+		if (!fabric) {
+			bfa_assert(0);
+			bfa_stats(fcs, uf.vfid_unknown);
+			bfa_uf_free(uf);
+			return;
+		}
+
+		/**
+		 * skip vft header
+		 */
+		fchs = (struct fchs_s *) (vft + 1);
+		len -= sizeof(struct fc_vft_s);
+
+		bfa_trc(fcs, vft->vf_id);
+	} else {
+		bfa_stats(fcs, uf.untagged);
+		fabric = &fcs->fabric;
+	}
+
+	bfa_trc(fcs, ((u32 *) fchs)[0]);
+	bfa_trc(fcs, ((u32 *) fchs)[1]);
+	bfa_trc(fcs, ((u32 *) fchs)[2]);
+	bfa_trc(fcs, ((u32 *) fchs)[3]);
+	bfa_trc(fcs, ((u32 *) fchs)[4]);
+	bfa_trc(fcs, ((u32 *) fchs)[5]);
+	bfa_trc(fcs, len);
+
+	bfa_fcs_fabric_uf_recv(fabric, fchs, len);
+	bfa_uf_free(uf);
+}
+
+void
+bfa_fcs_uf_attach(struct bfa_fcs_s *fcs)
+{
+	bfa_uf_recv_register(fcs->bfa, bfa_fcs_uf_recv, fcs);
+}

+ 779 - 0
drivers/scsi/bfa/bfa_fcs.h

@@ -0,0 +1,779 @@
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ *
+ * Linux driver for Brocade Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef __BFA_FCS_H__
+#define __BFA_FCS_H__
+
+#include "bfa_cs.h"
+#include "bfa_defs.h"
+#include "bfa_defs_fcs.h"
+#include "bfa_modules.h"
+#include "bfa_fc.h"
+
+#define BFA_FCS_OS_STR_LEN		64
+
+/*
+ * !!! Only append to the enums defined here to avoid any versioning
+ * !!! needed between trace utility and driver version
+ */
+enum {
+	BFA_TRC_FCS_FCS		= 1,
+	BFA_TRC_FCS_PORT	= 2,
+	BFA_TRC_FCS_RPORT	= 3,
+	BFA_TRC_FCS_FCPIM	= 4,
+};
+
+
+struct bfa_fcs_s;
+
+#define __fcs_min_cfg(__fcs)       ((__fcs)->min_cfg)
+void bfa_fcs_modexit_comp(struct bfa_fcs_s *fcs);
+
+#define BFA_FCS_BRCD_SWITCH_OUI  0x051e
+#define N2N_LOCAL_PID	    0x010000
+#define N2N_REMOTE_PID		0x020000
+#define	BFA_FCS_RETRY_TIMEOUT 2000
+#define BFA_FCS_PID_IS_WKA(pid)  ((bfa_os_ntoh3b(pid) > 0xFFF000) ?  1 : 0)
+
+
+
+struct bfa_fcs_lport_ns_s {
+	bfa_sm_t        sm;		/*  state machine */
+	struct bfa_timer_s timer;
+	struct bfa_fcs_lport_s *port;	/*  parent port */
+	struct bfa_fcxp_s *fcxp;
+	struct bfa_fcxp_wqe_s fcxp_wqe;
+};
+
+
+struct bfa_fcs_lport_scn_s {
+	bfa_sm_t        sm;		/*  state machine */
+	struct bfa_timer_s timer;
+	struct bfa_fcs_lport_s *port;	/*  parent port */
+	struct bfa_fcxp_s *fcxp;
+	struct bfa_fcxp_wqe_s fcxp_wqe;
+};
+
+
+struct bfa_fcs_lport_fdmi_s {
+	bfa_sm_t        sm;		/*  state machine */
+	struct bfa_timer_s timer;
+	struct bfa_fcs_lport_ms_s *ms;	/*  parent ms */
+	struct bfa_fcxp_s *fcxp;
+	struct bfa_fcxp_wqe_s fcxp_wqe;
+	u8	retry_cnt;	/*  retry count */
+	u8	rsvd[3];
+};
+
+
+struct bfa_fcs_lport_ms_s {
+	bfa_sm_t        sm;		/*  state machine */
+	struct bfa_timer_s timer;
+	struct bfa_fcs_lport_s *port;	/*  parent port */
+	struct bfa_fcxp_s *fcxp;
+	struct bfa_fcxp_wqe_s fcxp_wqe;
+	struct bfa_fcs_lport_fdmi_s fdmi;	/*  FDMI component of MS */
+	u8         retry_cnt;	/*  retry count */
+	u8	rsvd[3];
+};
+
+
+struct bfa_fcs_lport_fab_s {
+	struct bfa_fcs_lport_ns_s ns;	/*  NS component of port */
+	struct bfa_fcs_lport_scn_s scn;	/*  scn component of port */
+	struct bfa_fcs_lport_ms_s ms;	/*  MS component of port */
+};
+
+#define	MAX_ALPA_COUNT	127
+
+struct bfa_fcs_lport_loop_s {
+	u8         num_alpa;	/*  Num of ALPA entries in the map */
+	u8         alpa_pos_map[MAX_ALPA_COUNT];	/*  ALPA Positional
+							 *Map */
+	struct bfa_fcs_lport_s *port;	/*  parent port */
+};
+
+struct bfa_fcs_lport_n2n_s {
+	u32        rsvd;
+	u16        reply_oxid;	/*  ox_id from the req flogi to be
+					 *used in flogi acc */
+	wwn_t           rem_port_wwn;	/*  Attached port's wwn */
+};
+
+
+union bfa_fcs_lport_topo_u {
+	struct bfa_fcs_lport_fab_s pfab;
+	struct bfa_fcs_lport_loop_s ploop;
+	struct bfa_fcs_lport_n2n_s pn2n;
+};
+
+
+struct bfa_fcs_lport_s {
+	struct list_head         qe;	/*  used by port/vport */
+	bfa_sm_t               sm;	/*  state machine */
+	struct bfa_fcs_fabric_s *fabric;	/*  parent fabric */
+	struct bfa_lport_cfg_s  port_cfg;	/*  port configuration */
+	struct bfa_timer_s link_timer;	/*  timer for link offline */
+	u32        pid:24;	/*  FC address */
+	u8         lp_tag;		/*  lport tag */
+	u16        num_rports;	/*  Num of r-ports */
+	struct list_head         rport_q; /*  queue of discovered r-ports */
+	struct bfa_fcs_s *fcs;	/*  FCS instance */
+	union bfa_fcs_lport_topo_u port_topo;	/*  fabric/loop/n2n details */
+	struct bfad_port_s *bfad_port;	/*  driver peer instance */
+	struct bfa_fcs_vport_s *vport;	/*  NULL for base ports */
+	struct bfa_fcxp_s *fcxp;
+	struct bfa_fcxp_wqe_s fcxp_wqe;
+	struct bfa_lport_stats_s stats;
+	struct bfa_wc_s        wc;	/*  waiting counter for events */
+};
+#define BFA_FCS_GET_HAL_FROM_PORT(port)  (port->fcs->bfa)
+#define BFA_FCS_GET_NS_FROM_PORT(port)  (&port->port_topo.pfab.ns)
+#define BFA_FCS_GET_SCN_FROM_PORT(port)  (&port->port_topo.pfab.scn)
+#define BFA_FCS_GET_MS_FROM_PORT(port)  (&port->port_topo.pfab.ms)
+#define BFA_FCS_GET_FDMI_FROM_PORT(port)  (&port->port_topo.pfab.ms.fdmi)
+#define	BFA_FCS_VPORT_IS_INITIATOR_MODE(port) \
+		(port->port_cfg.roles & BFA_LPORT_ROLE_FCP_IM)
+
+/*
+ * forward declaration
+ */
+struct bfad_vf_s;
+
+enum bfa_fcs_fabric_type {
+	BFA_FCS_FABRIC_UNKNOWN = 0,
+	BFA_FCS_FABRIC_SWITCHED = 1,
+	BFA_FCS_FABRIC_N2N = 2,
+};
+
+
+struct bfa_fcs_fabric_s {
+	struct list_head   qe;		/*  queue element */
+	bfa_sm_t	 sm;		/*  state machine */
+	struct bfa_fcs_s *fcs;		/*  FCS instance */
+	struct bfa_fcs_lport_s  bport;	/*  base logical port */
+	enum bfa_fcs_fabric_type fab_type; /*  fabric type */
+	enum bfa_port_type oper_type;	/*  current link topology */
+	u8         is_vf;		/*  is virtual fabric? */
+	u8         is_npiv;	/*  is NPIV supported ? */
+	u8         is_auth;	/*  is Security/Auth supported ? */
+	u16        bb_credit;	/*  BB credit from fabric */
+	u16        vf_id;		/*  virtual fabric ID */
+	u16        num_vports;	/*  num vports */
+	u16        rsvd;
+	struct list_head         vport_q;	/*  queue of virtual ports */
+	struct list_head         vf_q;	/*  queue of virtual fabrics */
+	struct bfad_vf_s      *vf_drv;	/*  driver vf structure */
+	struct bfa_timer_s link_timer;	/*  Link Failure timer. Vport */
+	wwn_t           fabric_name;	/*  attached fabric name */
+	bfa_boolean_t   auth_reqd;	/*  authentication required	*/
+	struct bfa_timer_s delay_timer;	/*  delay timer		*/
+	union {
+		u16        swp_vfid;/*  switch port VF id		*/
+	} event_arg;
+	struct bfa_wc_s        wc;	/*  wait counter for delete	*/
+	struct bfa_vf_stats_s	stats;	/*  fabric/vf stats		*/
+	struct bfa_lps_s	*lps;	/*  lport login services	*/
+	u8	fabric_ip_addr[BFA_FCS_FABRIC_IPADDR_SZ];
+					/*  attached fabric's ip addr  */
+};
+
+#define bfa_fcs_fabric_npiv_capable(__f)    ((__f)->is_npiv)
+#define bfa_fcs_fabric_is_switched(__f)			\
+	((__f)->fab_type == BFA_FCS_FABRIC_SWITCHED)
+
+/**
+ *   The design calls for a single implementation of base fabric and vf.
+ */
+#define bfa_fcs_vf_t struct bfa_fcs_fabric_s
+
+struct bfa_vf_event_s {
+	u32        undefined;
+};
+
+struct bfa_fcs_s;
+struct bfa_fcs_fabric_s;
+
+/*
+ * @todo : need to move to a global config file.
+ * Maximum Rports supported per port (physical/logical).
+ */
+#define BFA_FCS_MAX_RPORTS_SUPP  256	/* @todo : tentative value */
+
+#define bfa_fcs_lport_t struct bfa_fcs_lport_s
+
+/**
+ * Symbolic Name related defines
+ *  Total bytes 255.
+ *  Physical Port's symbolic name 128 bytes.
+ *  For Vports, Vport's symbolic name is appended to the Physical port's
+ *  Symbolic Name.
+ *
+ *  Physical Port's symbolic name Format : (Total 128 bytes)
+ *  Adapter Model number/name : 12 bytes
+ *  Driver Version     : 10 bytes
+ *  Host Machine Name  : 30 bytes
+ *  Host OS Info	   : 48 bytes
+ *  Host OS PATCH Info : 16 bytes
+ *  ( remaining 12 bytes reserved to be used for separator)
+ */
+#define BFA_FCS_PORT_SYMBNAME_SEPARATOR			" | "
+
+#define BFA_FCS_PORT_SYMBNAME_MODEL_SZ			12
+#define BFA_FCS_PORT_SYMBNAME_VERSION_SZ		10
+#define BFA_FCS_PORT_SYMBNAME_MACHINENAME_SZ		30
+#define BFA_FCS_PORT_SYMBNAME_OSINFO_SZ			48
+#define BFA_FCS_PORT_SYMBNAME_OSPATCH_SZ		16
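Taken together, these defines say the base port symbolic name is the adapter model, driver version, host machine name, host OS info and (when present) OS patch level, joined by " | " and truncated field-by-field to the *_SZ limits; bfa_fcs_fabric_psymb_init() in bfa_fcs.c assembles it in exactly that order. A purely hypothetical result (every field invented for illustration) would look like:

Brocade-825 | 2.3.2.0 | scsi-host01 | Linux 2.6.36 | 2.6.36-rc5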
+
+/**
+ * Get FC port ID for a logical port.
+ */
+#define bfa_fcs_lport_get_fcid(_lport)	((_lport)->pid)
+#define bfa_fcs_lport_get_pwwn(_lport)	((_lport)->port_cfg.pwwn)
+#define bfa_fcs_lport_get_nwwn(_lport)	((_lport)->port_cfg.nwwn)
+#define bfa_fcs_lport_get_psym_name(_lport)	((_lport)->port_cfg.sym_name)
+#define bfa_fcs_lport_is_initiator(_lport)			\
+	((_lport)->port_cfg.roles & BFA_LPORT_ROLE_FCP_IM)
+#define bfa_fcs_lport_get_nrports(_lport)	\
+	((_lport) ? (_lport)->num_rports : 0)
+
+static inline struct bfad_port_s *
+bfa_fcs_lport_get_drvport(struct bfa_fcs_lport_s *port)
+{
+	return port->bfad_port;
+}
+
+#define bfa_fcs_lport_get_opertype(_lport)	((_lport)->fabric->oper_type)
+#define bfa_fcs_lport_get_fabric_name(_lport)	((_lport)->fabric->fabric_name)
+#define bfa_fcs_lport_get_fabric_ipaddr(_lport)		\
+		((_lport)->fabric->fabric_ip_addr)
+
+/**
+ * bfa fcs port public functions
+ */
+
+bfa_boolean_t   bfa_fcs_lport_is_online(struct bfa_fcs_lport_s *port);
+struct bfa_fcs_lport_s *bfa_fcs_get_base_port(struct bfa_fcs_s *fcs);
+void bfa_fcs_lport_get_rports(struct bfa_fcs_lport_s *port,
+			      wwn_t rport_wwns[], int *nrports);
+
+wwn_t bfa_fcs_lport_get_rport(struct bfa_fcs_lport_s *port, wwn_t wwn,
+			      int index, int nrports, bfa_boolean_t bwwn);
+
+struct bfa_fcs_lport_s *bfa_fcs_lookup_port(struct bfa_fcs_s *fcs,
+					    u16 vf_id, wwn_t lpwwn);
+
+void bfa_fcs_lport_get_info(struct bfa_fcs_lport_s *port,
+			    struct bfa_lport_info_s *port_info);
+void bfa_fcs_lport_get_attr(struct bfa_fcs_lport_s *port,
+			    struct bfa_lport_attr_s *port_attr);
+void bfa_fcs_lport_get_stats(struct bfa_fcs_lport_s *fcs_port,
+			     struct bfa_lport_stats_s *port_stats);
+void bfa_fcs_lport_clear_stats(struct bfa_fcs_lport_s *fcs_port);
+enum bfa_port_speed bfa_fcs_lport_get_rport_max_speed(
+			struct bfa_fcs_lport_s *port);
+
+/* MS FCS routines */
+void bfa_fcs_lport_ms_init(struct bfa_fcs_lport_s *port);
+void bfa_fcs_lport_ms_offline(struct bfa_fcs_lport_s *port);
+void bfa_fcs_lport_ms_online(struct bfa_fcs_lport_s *port);
+void bfa_fcs_lport_ms_fabric_rscn(struct bfa_fcs_lport_s *port);
+
+/* FDMI FCS routines */
+void bfa_fcs_lport_fdmi_init(struct bfa_fcs_lport_ms_s *ms);
+void bfa_fcs_lport_fdmi_offline(struct bfa_fcs_lport_ms_s *ms);
+void bfa_fcs_lport_fdmi_online(struct bfa_fcs_lport_ms_s *ms);
+void bfa_fcs_lport_uf_recv(struct bfa_fcs_lport_s *lport, struct fchs_s *fchs,
+				     u16 len);
+void bfa_fcs_lport_attach(struct bfa_fcs_lport_s *lport, struct bfa_fcs_s *fcs,
+			u16 vf_id, struct bfa_fcs_vport_s *vport);
+void bfa_fcs_lport_init(struct bfa_fcs_lport_s *lport,
+				struct bfa_lport_cfg_s *port_cfg);
+void            bfa_fcs_lport_online(struct bfa_fcs_lport_s *port);
+void            bfa_fcs_lport_offline(struct bfa_fcs_lport_s *port);
+void            bfa_fcs_lport_delete(struct bfa_fcs_lport_s *port);
+struct bfa_fcs_rport_s *bfa_fcs_lport_get_rport_by_pid(
+		struct bfa_fcs_lport_s *port, u32 pid);
+struct bfa_fcs_rport_s *bfa_fcs_lport_get_rport_by_pwwn(
+		struct bfa_fcs_lport_s *port, wwn_t pwwn);
+struct bfa_fcs_rport_s *bfa_fcs_lport_get_rport_by_nwwn(
+		struct bfa_fcs_lport_s *port, wwn_t nwwn);
+void            bfa_fcs_lport_add_rport(struct bfa_fcs_lport_s *port,
+				       struct bfa_fcs_rport_s *rport);
+void            bfa_fcs_lport_del_rport(struct bfa_fcs_lport_s *port,
+				       struct bfa_fcs_rport_s *rport);
+void bfa_fcs_lport_modinit(struct bfa_fcs_s *fcs);
+void bfa_fcs_lport_modexit(struct bfa_fcs_s *fcs);
+void            bfa_fcs_lport_ns_init(struct bfa_fcs_lport_s *vport);
+void            bfa_fcs_lport_ns_offline(struct bfa_fcs_lport_s *vport);
+void            bfa_fcs_lport_ns_online(struct bfa_fcs_lport_s *vport);
+void            bfa_fcs_lport_ns_query(struct bfa_fcs_lport_s *port);
+void            bfa_fcs_lport_scn_init(struct bfa_fcs_lport_s *vport);
+void            bfa_fcs_lport_scn_offline(struct bfa_fcs_lport_s *vport);
+void            bfa_fcs_lport_scn_online(struct bfa_fcs_lport_s *vport);
+void            bfa_fcs_lport_scn_process_rscn(struct bfa_fcs_lport_s *port,
+					      struct fchs_s *rx_frame, u32 len);
+
+struct bfa_fcs_vport_s {
+	struct list_head		qe;		/*  queue elem	*/
+	bfa_sm_t		sm;		/*  state machine	*/
+	bfa_fcs_lport_t		lport;		/*  logical port	*/
+	struct bfa_timer_s	timer;
+	struct bfad_vport_s	*vport_drv;	/*  Driver private	*/
+	struct bfa_vport_stats_s vport_stats;	/*  vport statistics	*/
+	struct bfa_lps_s	*lps;		/*  Lport login service*/
+	int			fdisc_retries;
+};
+
+#define bfa_fcs_vport_get_port(vport)			\
+	((struct bfa_fcs_lport_s  *)(&vport->port))
+
+/**
+ * bfa fcs vport public functions
+ */
+bfa_status_t bfa_fcs_vport_create(struct bfa_fcs_vport_s *vport,
+				  struct bfa_fcs_s *fcs, u16 vf_id,
+				  struct bfa_lport_cfg_s *port_cfg,
+				  struct bfad_vport_s *vport_drv);
+bfa_status_t bfa_fcs_pbc_vport_create(struct bfa_fcs_vport_s *vport,
+				      struct bfa_fcs_s *fcs, u16 vf_id,
+				      struct bfa_lport_cfg_s *port_cfg,
+				      struct bfad_vport_s *vport_drv);
+bfa_boolean_t bfa_fcs_is_pbc_vport(struct bfa_fcs_vport_s *vport);
+bfa_status_t bfa_fcs_vport_delete(struct bfa_fcs_vport_s *vport);
+bfa_status_t bfa_fcs_vport_start(struct bfa_fcs_vport_s *vport);
+bfa_status_t bfa_fcs_vport_stop(struct bfa_fcs_vport_s *vport);
+void bfa_fcs_vport_get_attr(struct bfa_fcs_vport_s *vport,
+			    struct bfa_vport_attr_s *vport_attr);
+void bfa_fcs_vport_get_stats(struct bfa_fcs_vport_s *vport,
+			     struct bfa_vport_stats_s *vport_stats);
+void bfa_fcs_vport_clr_stats(struct bfa_fcs_vport_s *vport);
+struct bfa_fcs_vport_s *bfa_fcs_vport_lookup(struct bfa_fcs_s *fcs,
+					     u16 vf_id, wwn_t vpwwn);
+void bfa_fcs_vport_cleanup(struct bfa_fcs_vport_s *vport);
+void bfa_fcs_vport_online(struct bfa_fcs_vport_s *vport);
+void bfa_fcs_vport_offline(struct bfa_fcs_vport_s *vport);
+void bfa_fcs_vport_delete_comp(struct bfa_fcs_vport_s *vport);
+void bfa_fcs_vport_fcs_delete(struct bfa_fcs_vport_s *vport);
+
+#define BFA_FCS_RPORT_DEF_DEL_TIMEOUT	90	/* in secs */
+#define BFA_FCS_RPORT_MAX_RETRIES	(5)
+
+/*
+ * forward declarations
+ */
+struct bfad_rport_s;
+
+struct bfa_fcs_itnim_s;
+struct bfa_fcs_tin_s;
+struct bfa_fcs_iprp_s;
+
+/* Rport Features (RPF) */
+struct bfa_fcs_rpf_s {
+	bfa_sm_t	sm;	/*  state machine */
+	struct bfa_fcs_rport_s *rport;	/*  parent rport */
+	struct bfa_timer_s	timer;	/*  general purpose timer */
+	struct bfa_fcxp_s	*fcxp;	/*  FCXP needed for discarding */
+	struct bfa_fcxp_wqe_s	fcxp_wqe; /*  fcxp wait queue element */
+	int	rpsc_retries;	/*  max RPSC retry attempts */
+	enum bfa_port_speed	rpsc_speed;
+	/*  Current Speed from RPSC. 0 if RPSC fails */
+	enum bfa_port_speed	assigned_speed;
+	/**
+	 * Speed assigned by the user.  will be used if RPSC is
+	 * not supported by the rport.
+	 */
+};
+
+struct bfa_fcs_rport_s {
+	struct list_head	qe;	/*  used by port/vport */
+	struct bfa_fcs_lport_s *port;	/*  parent FCS port */
+	struct bfa_fcs_s	*fcs;	/*  fcs instance */
+	struct bfad_rport_s	*rp_drv;	/*  driver peer instance */
+	u32	pid;	/*  port ID of rport */
+	u16	maxfrsize;	/*  maximum frame size */
+	u16	reply_oxid;	/*  OX_ID of inbound requests */
+	enum fc_cos	fc_cos;	/*  FC classes of service supp */
+	bfa_boolean_t	cisc;	/*  CISC capable device */
+	bfa_boolean_t	prlo;	/*  processing prlo or LOGO */
+	wwn_t	pwwn;	/*  port wwn of rport */
+	wwn_t	nwwn;	/*  node wwn of rport */
+	struct bfa_rport_symname_s psym_name; /*  port symbolic name  */
+	bfa_sm_t	sm;		/*  state machine */
+	struct bfa_timer_s timer;	/*  general purpose timer */
+	struct bfa_fcs_itnim_s *itnim;	/*  ITN initiator mode role */
+	struct bfa_fcs_tin_s *tin;	/*  ITN initiator mode role */
+	struct bfa_fcs_iprp_s *iprp;	/*  IP/FC role */
+	struct bfa_rport_s *bfa_rport;	/*  BFA Rport */
+	struct bfa_fcxp_s *fcxp;	/*  FCXP needed for discarding */
+	int	plogi_retries;	/*  max plogi retry attempts */
+	int	ns_retries;	/*  max NS query retry attempts */
+	struct bfa_fcxp_wqe_s	fcxp_wqe; /*  fcxp wait queue element */
+	struct bfa_rport_stats_s stats;	/*  rport stats */
+	enum bfa_rport_function	scsi_function;  /*  Initiator/Target */
+	struct bfa_fcs_rpf_s rpf;	/* Rport features module */
+};
+
+static inline struct bfa_rport_s *
+bfa_fcs_rport_get_halrport(struct bfa_fcs_rport_s *rport)
+{
+	return rport->bfa_rport;
+}
+
+/**
+ * bfa fcs rport API functions
+ */
+bfa_status_t bfa_fcs_rport_add(struct bfa_fcs_lport_s *port, wwn_t *pwwn,
+			       struct bfa_fcs_rport_s *rport,
+			       struct bfad_rport_s *rport_drv);
+bfa_status_t bfa_fcs_rport_remove(struct bfa_fcs_rport_s *rport);
+void bfa_fcs_rport_get_attr(struct bfa_fcs_rport_s *rport,
+			    struct bfa_rport_attr_s *attr);
+void bfa_fcs_rport_get_stats(struct bfa_fcs_rport_s *rport,
+			     struct bfa_rport_stats_s *stats);
+void bfa_fcs_rport_clear_stats(struct bfa_fcs_rport_s *rport);
+struct bfa_fcs_rport_s *bfa_fcs_rport_lookup(struct bfa_fcs_lport_s *port,
+					     wwn_t rpwwn);
+struct bfa_fcs_rport_s *bfa_fcs_rport_lookup_by_nwwn(
+	struct bfa_fcs_lport_s *port, wwn_t rnwwn);
+void bfa_fcs_rport_set_del_timeout(u8 rport_tmo);
+
+void bfa_fcs_rport_set_speed(struct bfa_fcs_rport_s *rport,
+			     enum bfa_port_speed speed);
+void bfa_fcs_rport_uf_recv(struct bfa_fcs_rport_s *rport,
+	 struct fchs_s *fchs, u16 len);
+void bfa_fcs_rport_scn(struct bfa_fcs_rport_s *rport);
+
+struct bfa_fcs_rport_s *bfa_fcs_rport_create(struct bfa_fcs_lport_s *port,
+	 u32 pid);
+void bfa_fcs_rport_delete(struct bfa_fcs_rport_s *rport);
+void bfa_fcs_rport_online(struct bfa_fcs_rport_s *rport);
+void bfa_fcs_rport_offline(struct bfa_fcs_rport_s *rport);
+void bfa_fcs_rport_start(struct bfa_fcs_lport_s *port, struct fchs_s *rx_fchs,
+			 struct fc_logi_s *plogi_rsp);
+void bfa_fcs_rport_plogi_create(struct bfa_fcs_lport_s *port,
+				struct fchs_s *rx_fchs,
+				struct fc_logi_s *plogi);
+void bfa_fcs_rport_plogi(struct bfa_fcs_rport_s *rport, struct fchs_s *fchs,
+			 struct fc_logi_s *plogi);
+void bfa_fcs_rport_logo_imp(struct bfa_fcs_rport_s *rport);
+void bfa_fcs_rport_prlo(struct bfa_fcs_rport_s *rport, u16 ox_id);
+
+void bfa_fcs_rport_itnim_ack(struct bfa_fcs_rport_s *rport);
+void bfa_fcs_rport_itntm_ack(struct bfa_fcs_rport_s *rport);
+void bfa_fcs_rport_fcptm_offline_done(struct bfa_fcs_rport_s *rport);
+int  bfa_fcs_rport_get_state(struct bfa_fcs_rport_s *rport);
+struct bfa_fcs_rport_s *bfa_fcs_rport_create_by_wwn(
+			struct bfa_fcs_lport_s *port, wwn_t wwn);
+void  bfa_fcs_rpf_init(struct bfa_fcs_rport_s *rport);
+void  bfa_fcs_rpf_rport_online(struct bfa_fcs_rport_s *rport);
+void  bfa_fcs_rpf_rport_offline(struct bfa_fcs_rport_s *rport);
+
+/*
+ * forward declarations
+ */
+struct bfad_itnim_s;
+
+struct bfa_fcs_itnim_s {
+	bfa_sm_t		sm;		/*  state machine */
+	struct bfa_fcs_rport_s	*rport;		/*  parent remote rport  */
+	struct bfad_itnim_s	*itnim_drv;	/*  driver peer instance */
+	struct bfa_fcs_s	*fcs;		/*  fcs instance	*/
+	struct bfa_timer_s	timer;		/*  timer functions	*/
+	struct bfa_itnim_s	*bfa_itnim;	/*  BFA itnim struct	*/
+	u32		prli_retries;	/*  max prli retry attempts */
+	bfa_boolean_t		seq_rec;	/*  seq recovery support */
+	bfa_boolean_t		rec_support;	/*  REC supported	*/
+	bfa_boolean_t		conf_comp;	/*  FCP_CONF	support */
+	bfa_boolean_t		task_retry_id;	/*  task retry id supp	*/
+	struct bfa_fcxp_wqe_s	fcxp_wqe;	/*  wait qelem for fcxp  */
+	struct bfa_fcxp_s	*fcxp;		/*  FCXP in use	*/
+	struct bfa_itnim_stats_s	stats;	/*  itn statistics	*/
+};
+#define bfa_fcs_fcxp_alloc(__fcs)	\
+	bfa_fcxp_alloc(NULL, (__fcs)->bfa, 0, 0, NULL, NULL, NULL, NULL)
+
+#define bfa_fcs_fcxp_alloc_wait(__bfa, __wqe, __alloc_cbfn, __alloc_cbarg) \
+	bfa_fcxp_alloc_wait(__bfa, __wqe, __alloc_cbfn, __alloc_cbarg, \
+					NULL, 0, 0, NULL, NULL, NULL, NULL)
+
+static inline struct bfad_port_s *
+bfa_fcs_itnim_get_drvport(struct bfa_fcs_itnim_s *itnim)
+{
+	return itnim->rport->port->bfad_port;
+}
+
+
+static inline struct bfa_fcs_lport_s *
+bfa_fcs_itnim_get_port(struct bfa_fcs_itnim_s *itnim)
+{
+	return itnim->rport->port;
+}
+
+
+static inline wwn_t
+bfa_fcs_itnim_get_nwwn(struct bfa_fcs_itnim_s *itnim)
+{
+	return itnim->rport->nwwn;
+}
+
+
+static inline wwn_t
+bfa_fcs_itnim_get_pwwn(struct bfa_fcs_itnim_s *itnim)
+{
+	return itnim->rport->pwwn;
+}
+
+
+static inline u32
+bfa_fcs_itnim_get_fcid(struct bfa_fcs_itnim_s *itnim)
+{
+	return itnim->rport->pid;
+}
+
+
+static inline	u32
+bfa_fcs_itnim_get_maxfrsize(struct bfa_fcs_itnim_s *itnim)
+{
+	return itnim->rport->maxfrsize;
+}
+
+
+static inline	enum fc_cos
+bfa_fcs_itnim_get_cos(struct bfa_fcs_itnim_s *itnim)
+{
+	return itnim->rport->fc_cos;
+}
+
+
+static inline struct bfad_itnim_s *
+bfa_fcs_itnim_get_drvitn(struct bfa_fcs_itnim_s *itnim)
+{
+	return itnim->itnim_drv;
+}
+
+
+static inline struct bfa_itnim_s *
+bfa_fcs_itnim_get_halitn(struct bfa_fcs_itnim_s *itnim)
+{
+	return itnim->bfa_itnim;
+}
+
+/**
+ * bfa fcs FCP Initiator mode API functions
+ */
+void bfa_fcs_itnim_get_attr(struct bfa_fcs_itnim_s *itnim,
+			    struct bfa_itnim_attr_s *attr);
+void bfa_fcs_itnim_get_stats(struct bfa_fcs_itnim_s *itnim,
+			     struct bfa_itnim_stats_s *stats);
+struct bfa_fcs_itnim_s *bfa_fcs_itnim_lookup(struct bfa_fcs_lport_s *port,
+					     wwn_t rpwwn);
+bfa_status_t bfa_fcs_itnim_attr_get(struct bfa_fcs_lport_s *port, wwn_t rpwwn,
+				    struct bfa_itnim_attr_s *attr);
+bfa_status_t bfa_fcs_itnim_stats_get(struct bfa_fcs_lport_s *port, wwn_t rpwwn,
+				     struct bfa_itnim_stats_s *stats);
+bfa_status_t bfa_fcs_itnim_stats_clear(struct bfa_fcs_lport_s *port,
+				       wwn_t rpwwn);
+struct bfa_fcs_itnim_s *bfa_fcs_itnim_create(struct bfa_fcs_rport_s *rport);
+void bfa_fcs_itnim_delete(struct bfa_fcs_itnim_s *itnim);
+void bfa_fcs_itnim_rport_offline(struct bfa_fcs_itnim_s *itnim);
+void bfa_fcs_itnim_rport_online(struct bfa_fcs_itnim_s *itnim);
+bfa_status_t bfa_fcs_itnim_get_online_state(struct bfa_fcs_itnim_s *itnim);
+void bfa_fcs_itnim_is_initiator(struct bfa_fcs_itnim_s *itnim);
+void bfa_fcs_fcpim_uf_recv(struct bfa_fcs_itnim_s *itnim,
+			struct fchs_s *fchs, u16 len);
+
+#define	BFA_FCS_FDMI_SUPORTED_SPEEDS  (FDMI_TRANS_SPEED_1G  |	\
+				       FDMI_TRANS_SPEED_2G |	\
+				       FDMI_TRANS_SPEED_4G |	\
+				       FDMI_TRANS_SPEED_8G)
+
+/*
+ * HBA Attribute Block : BFA internal representation. Note: some field
+ * sizes have been trimmed to suit BFA. For example, the model will be
+ * "Brocade", so its size has been reduced to 16 bytes from the standard's
+ * 64 bytes.
+ */
+struct bfa_fcs_fdmi_hba_attr_s {
+	wwn_t           node_name;
+	u8         manufacturer[64];
+	u8         serial_num[64];
+	u8         model[16];
+	u8         model_desc[256];
+	u8         hw_version[8];
+	u8         driver_version[8];
+	u8         option_rom_ver[BFA_VERSION_LEN];
+	u8         fw_version[8];
+	u8         os_name[256];
+	u32        max_ct_pyld;
+};
+
+/*
+ * Port Attribute Block
+ */
+struct bfa_fcs_fdmi_port_attr_s {
+	u8         supp_fc4_types[32];	/* supported FC4 types */
+	u32        supp_speed;	/* supported speed */
+	u32        curr_speed;	/* current Speed */
+	u32        max_frm_size;	/* max frame size */
+	u8         os_device_name[256];	/* OS device Name */
+	u8         host_name[256];	/* host name */
+};
+
+struct bfa_fcs_stats_s {
+	struct {
+		u32	untagged; /*  untagged receive frames */
+		u32	tagged;	/*  tagged receive frames */
+		u32	vfid_unknown;	/*  VF id is unknown */
+	} uf;
+};
+
+struct bfa_fcs_driver_info_s {
+	u8	 version[BFA_VERSION_LEN];		/* Driver Version */
+	u8	 host_machine_name[BFA_FCS_OS_STR_LEN];
+	u8	 host_os_name[BFA_FCS_OS_STR_LEN]; /* OS name and version */
+	u8	 host_os_patch[BFA_FCS_OS_STR_LEN]; /* patch or service pack */
+	u8	 os_device_name[BFA_FCS_OS_STR_LEN]; /* Driver Device Name */
+};
+
+struct bfa_fcs_s {
+	struct bfa_s	  *bfa;	/*  corresponding BFA bfa instance */
+	struct bfad_s	      *bfad; /*  corresponding BFAD driver instance */
+	struct bfa_trc_mod_s  *trcmod;	/*  tracing module */
+	bfa_boolean_t	vf_enabled;	/*  VF mode is enabled */
+	bfa_boolean_t	fdmi_enabled;	/*  FDMI is enabled */
+	bfa_boolean_t min_cfg;		/* min cfg enabled/disabled */
+	u16	port_vfid;	/*  port default VF ID */
+	struct bfa_fcs_driver_info_s driver_info;
+	struct bfa_fcs_fabric_s fabric; /*  base fabric state machine */
+	struct bfa_fcs_stats_s	stats;	/*  FCS statistics */
+	struct bfa_wc_s		wc;	/*  waiting counter */
+};
+
+/*
+ * bfa fcs API functions
+ */
+void bfa_fcs_attach(struct bfa_fcs_s *fcs, struct bfa_s *bfa,
+		    struct bfad_s *bfad,
+		    bfa_boolean_t min_cfg);
+void bfa_fcs_init(struct bfa_fcs_s *fcs);
+void bfa_fcs_driver_info_init(struct bfa_fcs_s *fcs,
+			      struct bfa_fcs_driver_info_s *driver_info);
+void bfa_fcs_set_fdmi_param(struct bfa_fcs_s *fcs, bfa_boolean_t fdmi_enable);
+void bfa_fcs_exit(struct bfa_fcs_s *fcs);
+void bfa_fcs_trc_init(struct bfa_fcs_s *fcs, struct bfa_trc_mod_s *trcmod);
+void		bfa_fcs_start(struct bfa_fcs_s *fcs);
+
+/**
+ * bfa fcs vf public functions
+ */
+bfa_status_t bfa_fcs_vf_mode_enable(struct bfa_fcs_s *fcs, u16 vf_id);
+bfa_status_t bfa_fcs_vf_mode_disable(struct bfa_fcs_s *fcs);
+bfa_status_t bfa_fcs_vf_create(bfa_fcs_vf_t *vf, struct bfa_fcs_s *fcs,
+			       u16 vf_id, struct bfa_lport_cfg_s *port_cfg,
+			       struct bfad_vf_s *vf_drv);
+bfa_status_t bfa_fcs_vf_delete(bfa_fcs_vf_t *vf);
+void bfa_fcs_vf_list(struct bfa_fcs_s *fcs, u16 *vf_ids, int *nvfs);
+void bfa_fcs_vf_list_all(struct bfa_fcs_s *fcs, u16 *vf_ids, int *nvfs);
+void bfa_fcs_vf_get_attr(bfa_fcs_vf_t *vf, struct bfa_vf_attr_s *vf_attr);
+void bfa_fcs_vf_get_stats(bfa_fcs_vf_t *vf,
+			  struct bfa_vf_stats_s *vf_stats);
+void bfa_fcs_vf_clear_stats(bfa_fcs_vf_t *vf);
+void bfa_fcs_vf_get_ports(bfa_fcs_vf_t *vf, wwn_t vpwwn[], int *nports);
+bfa_fcs_vf_t *bfa_fcs_vf_lookup(struct bfa_fcs_s *fcs, u16 vf_id);
+u16 bfa_fcs_fabric_vport_count(struct bfa_fcs_fabric_s *fabric);
+
+/*
+ * fabric protected interface functions
+ */
+void bfa_fcs_fabric_attach(struct bfa_fcs_s *fcs);
+void bfa_fcs_fabric_modinit(struct bfa_fcs_s *fcs);
+void bfa_fcs_fabric_modexit(struct bfa_fcs_s *fcs);
+void bfa_fcs_fabric_modsusp(struct bfa_fcs_s *fcs);
+void bfa_fcs_fabric_link_up(struct bfa_fcs_fabric_s *fabric);
+void bfa_fcs_fabric_link_down(struct bfa_fcs_fabric_s *fabric);
+void bfa_fcs_fabric_addvport(struct bfa_fcs_fabric_s *fabric,
+	struct bfa_fcs_vport_s *vport);
+void bfa_fcs_fabric_delvport(struct bfa_fcs_fabric_s *fabric,
+	struct bfa_fcs_vport_s *vport);
+int bfa_fcs_fabric_is_online(struct bfa_fcs_fabric_s *fabric);
+struct bfa_fcs_vport_s *bfa_fcs_fabric_vport_lookup(
+		struct bfa_fcs_fabric_s *fabric, wwn_t pwwn);
+void bfa_fcs_fabric_modstart(struct bfa_fcs_s *fcs);
+void bfa_fcs_fabric_uf_recv(struct bfa_fcs_fabric_s *fabric,
+		struct fchs_s *fchs, u16 len);
+bfa_boolean_t	bfa_fcs_fabric_is_loopback(struct bfa_fcs_fabric_s *fabric);
+bfa_boolean_t	bfa_fcs_fabric_is_auth_failed(struct bfa_fcs_fabric_s *fabric);
+enum bfa_port_type bfa_fcs_fabric_port_type(struct bfa_fcs_fabric_s *fabric);
+void	bfa_fcs_fabric_psymb_init(struct bfa_fcs_fabric_s *fabric);
+void	bfa_fcs_fabric_port_delete_comp(struct bfa_fcs_fabric_s *fabric);
+bfa_status_t	bfa_fcs_fabric_addvf(struct bfa_fcs_fabric_s *vf,
+			struct bfa_fcs_s *fcs, struct bfa_lport_cfg_s *port_cfg,
+			struct bfad_vf_s *vf_drv);
+void bfa_fcs_fabric_set_fabric_name(struct bfa_fcs_fabric_s *fabric,
+	       wwn_t fabric_name);
+u16 bfa_fcs_fabric_get_switch_oui(struct bfa_fcs_fabric_s *fabric);
+void bfa_fcs_uf_attach(struct bfa_fcs_s *fcs);
+void bfa_fcs_port_attach(struct bfa_fcs_s *fcs);
+
+/**
+ * BFA FCS callback interfaces
+ */
+
+/**
+ * fcb Main fcs callbacks
+ */
+
+struct bfad_port_s;
+struct bfad_vf_s;
+struct bfad_vport_s;
+struct bfad_rport_s;
+
+/**
+ * lport callbacks
+ */
+struct bfad_port_s *bfa_fcb_lport_new(struct bfad_s *bfad,
+				      struct bfa_fcs_lport_s *port,
+				      enum bfa_lport_role roles,
+				      struct bfad_vf_s *vf_drv,
+				      struct bfad_vport_s *vp_drv);
+void bfa_fcb_lport_delete(struct bfad_s *bfad, enum bfa_lport_role roles,
+			  struct bfad_vf_s *vf_drv,
+			  struct bfad_vport_s *vp_drv);
+
+/**
+ * vport callbacks
+ */
+void bfa_fcb_pbc_vport_create(struct bfad_s *bfad, struct bfi_pbc_vport_s);
+
+/**
+ * rport callbacks
+ */
+bfa_status_t bfa_fcb_rport_alloc(struct bfad_s *bfad,
+				 struct bfa_fcs_rport_s **rport,
+				 struct bfad_rport_s **rport_drv);
+
+/**
+ * itnim callbacks
+ */
+void bfa_fcb_itnim_alloc(struct bfad_s *bfad, struct bfa_fcs_itnim_s **itnim,
+			 struct bfad_itnim_s **itnim_drv);
+void bfa_fcb_itnim_free(struct bfad_s *bfad,
+			struct bfad_itnim_s *itnim_drv);
+void bfa_fcb_itnim_online(struct bfad_itnim_s *itnim_drv);
+void bfa_fcb_itnim_offline(struct bfad_itnim_s *itnim_drv);
+
+#endif /* __BFA_FCS_H__ */

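The header above gathers the FCS attach/start entry points (bfa_fcs_attach, bfa_fcs_init, bfa_fcs_driver_info_init, bfa_fcs_start) into the consolidated bfa_fcs.h. As orientation only — this sketch is not part of the patch — a driver-side bring-up using these declarations would look roughly as follows; the bfad_s instance and driver-info values are assumed placeholders.

/*
 * Hypothetical bring-up sketch against the bfa_fcs.h API declared above.
 * The bfad_s instance, locking and error handling are assumed, not taken
 * from this patch.
 */
static void example_fcs_bringup(struct bfad_s *bfad, struct bfa_s *bfa,
				struct bfa_fcs_s *fcs,
				struct bfa_fcs_driver_info_s *drv_info)
{
	/* Bind the FCS instance to the BFA instance and the driver. */
	bfa_fcs_attach(fcs, bfa, bfad, BFA_FALSE /* min_cfg */);

	/* Report driver/OS identification (used e.g. for FDMI). */
	bfa_fcs_driver_info_init(fcs, drv_info);

	/* Optionally enable or disable FDMI registration. */
	bfa_fcs_set_fdmi_param(fcs, BFA_TRUE);

	/* Initialize the fabric state machine and start discovery. */
	bfa_fcs_init(fcs);
	bfa_fcs_start(fcs);
}
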
+ 98 - 139
drivers/scsi/bfa/fcpim.c → drivers/scsi/bfa/bfa_fcs_fcpim.c

@@ -1,5 +1,5 @@
 /*
 /*
- * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
  * All rights reserved
  * All rights reserved
  * www.brocade.com
  * www.brocade.com
  *
  *
@@ -19,36 +19,24 @@
  *  fcpim.c - FCP initiator mode i-t nexus state machine
  *  fcpim.c - FCP initiator mode i-t nexus state machine
  */
  */
 
 
-#include <bfa.h>
-#include <bfa_svc.h>
-#include "fcs_fcpim.h"
-#include "fcs_rport.h"
-#include "fcs_lport.h"
-#include "fcs_trcmod.h"
-#include "fcs_fcxp.h"
-#include "fcs.h"
-#include <fcs/bfa_fcs_fcpim.h>
-#include <fcb/bfa_fcb_fcpim.h>
-#include <aen/bfa_aen_itnim.h>
+#include "bfa_fcs.h"
+#include "bfa_fcbuild.h"
+#include "bfad_drv.h"
+#include "bfad_im.h"
 
 
 BFA_TRC_FILE(FCS, FCPIM);
 BFA_TRC_FILE(FCS, FCPIM);
 
 
 /*
 /*
  * forward declarations
  * forward declarations
  */
  */
-static void     bfa_fcs_itnim_timeout(void *arg);
-static void     bfa_fcs_itnim_free(struct bfa_fcs_itnim_s *itnim);
-static void     bfa_fcs_itnim_send_prli(void *itnim_cbarg,
+static void	bfa_fcs_itnim_timeout(void *arg);
+static void	bfa_fcs_itnim_free(struct bfa_fcs_itnim_s *itnim);
+static void	bfa_fcs_itnim_send_prli(void *itnim_cbarg,
 					struct bfa_fcxp_s *fcxp_alloced);
 					struct bfa_fcxp_s *fcxp_alloced);
-static void     bfa_fcs_itnim_prli_response(void *fcsarg,
-					    struct bfa_fcxp_s *fcxp,
-					    void *cbarg,
-					    bfa_status_t req_status,
-					    u32 rsp_len,
-					    u32 resid_len,
-					    struct fchs_s *rsp_fchs);
-static void     bfa_fcs_itnim_aen_post(struct bfa_fcs_itnim_s *itnim,
-				       enum bfa_itnim_aen_event event);
+static void	bfa_fcs_itnim_prli_response(void *fcsarg,
+			 struct bfa_fcxp_s *fcxp, void *cbarg,
+			    bfa_status_t req_status, u32 rsp_len,
+			    u32 resid_len, struct fchs_s *rsp_fchs);
 
 
 /**
 /**
  *  fcs_itnim_sm FCS itnim state machine events
  *  fcs_itnim_sm FCS itnim state machine events
@@ -61,28 +49,28 @@ enum bfa_fcs_itnim_event {
 	BFA_FCS_ITNIM_SM_RSP_OK = 4,	/*  good response */
 	BFA_FCS_ITNIM_SM_RSP_OK = 4,	/*  good response */
 	BFA_FCS_ITNIM_SM_RSP_ERROR = 5,	/*  error response */
 	BFA_FCS_ITNIM_SM_RSP_ERROR = 5,	/*  error response */
 	BFA_FCS_ITNIM_SM_TIMEOUT = 6,	/*  delay timeout */
 	BFA_FCS_ITNIM_SM_TIMEOUT = 6,	/*  delay timeout */
-	BFA_FCS_ITNIM_SM_HCB_OFFLINE = 7,	/*  BFA online callback */
-	BFA_FCS_ITNIM_SM_HCB_ONLINE = 8,	/*  BFA offline callback */
+	BFA_FCS_ITNIM_SM_HCB_OFFLINE = 7, /*  BFA online callback */
+	BFA_FCS_ITNIM_SM_HCB_ONLINE = 8, /*  BFA offline callback */
 	BFA_FCS_ITNIM_SM_INITIATOR = 9,	/*  rport is initiator */
 	BFA_FCS_ITNIM_SM_INITIATOR = 9,	/*  rport is initiator */
 	BFA_FCS_ITNIM_SM_DELETE = 10,	/*  delete event from rport */
 	BFA_FCS_ITNIM_SM_DELETE = 10,	/*  delete event from rport */
 	BFA_FCS_ITNIM_SM_PRLO = 11,	/*  delete event from rport */
 	BFA_FCS_ITNIM_SM_PRLO = 11,	/*  delete event from rport */
 };
 };
 
 
-static void     bfa_fcs_itnim_sm_offline(struct bfa_fcs_itnim_s *itnim,
+static void	bfa_fcs_itnim_sm_offline(struct bfa_fcs_itnim_s *itnim,
 					 enum bfa_fcs_itnim_event event);
 					 enum bfa_fcs_itnim_event event);
-static void     bfa_fcs_itnim_sm_prli_send(struct bfa_fcs_itnim_s *itnim,
+static void	bfa_fcs_itnim_sm_prli_send(struct bfa_fcs_itnim_s *itnim,
 					   enum bfa_fcs_itnim_event event);
 					   enum bfa_fcs_itnim_event event);
-static void     bfa_fcs_itnim_sm_prli(struct bfa_fcs_itnim_s *itnim,
+static void	bfa_fcs_itnim_sm_prli(struct bfa_fcs_itnim_s *itnim,
 				      enum bfa_fcs_itnim_event event);
 				      enum bfa_fcs_itnim_event event);
-static void     bfa_fcs_itnim_sm_prli_retry(struct bfa_fcs_itnim_s *itnim,
+static void	bfa_fcs_itnim_sm_prli_retry(struct bfa_fcs_itnim_s *itnim,
 					    enum bfa_fcs_itnim_event event);
 					    enum bfa_fcs_itnim_event event);
-static void     bfa_fcs_itnim_sm_hcb_online(struct bfa_fcs_itnim_s *itnim,
+static void	bfa_fcs_itnim_sm_hcb_online(struct bfa_fcs_itnim_s *itnim,
 					    enum bfa_fcs_itnim_event event);
 					    enum bfa_fcs_itnim_event event);
-static void     bfa_fcs_itnim_sm_online(struct bfa_fcs_itnim_s *itnim,
+static void	bfa_fcs_itnim_sm_online(struct bfa_fcs_itnim_s *itnim,
 					enum bfa_fcs_itnim_event event);
 					enum bfa_fcs_itnim_event event);
-static void     bfa_fcs_itnim_sm_hcb_offline(struct bfa_fcs_itnim_s *itnim,
+static void	bfa_fcs_itnim_sm_hcb_offline(struct bfa_fcs_itnim_s *itnim,
 					     enum bfa_fcs_itnim_event event);
 					     enum bfa_fcs_itnim_event event);
-static void     bfa_fcs_itnim_sm_initiator(struct bfa_fcs_itnim_s *itnim,
+static void	bfa_fcs_itnim_sm_initiator(struct bfa_fcs_itnim_s *itnim,
 					   enum bfa_fcs_itnim_event event);
 					   enum bfa_fcs_itnim_event event);
 
 
 static struct bfa_sm_table_s itnim_sm_table[] = {
 static struct bfa_sm_table_s itnim_sm_table[] = {
@@ -102,7 +90,7 @@ static struct bfa_sm_table_s itnim_sm_table[] = {
 
 
 static void
 static void
 bfa_fcs_itnim_sm_offline(struct bfa_fcs_itnim_s *itnim,
 bfa_fcs_itnim_sm_offline(struct bfa_fcs_itnim_s *itnim,
-			 enum bfa_fcs_itnim_event event)
+		 enum bfa_fcs_itnim_event event)
 {
 {
 	bfa_trc(itnim->fcs, itnim->rport->pwwn);
 	bfa_trc(itnim->fcs, itnim->rport->pwwn);
 	bfa_trc(itnim->fcs, event);
 	bfa_trc(itnim->fcs, event);
@@ -134,7 +122,7 @@ bfa_fcs_itnim_sm_offline(struct bfa_fcs_itnim_s *itnim,
 
 
 static void
 static void
 bfa_fcs_itnim_sm_prli_send(struct bfa_fcs_itnim_s *itnim,
 bfa_fcs_itnim_sm_prli_send(struct bfa_fcs_itnim_s *itnim,
-			   enum bfa_fcs_itnim_event event)
+		 enum bfa_fcs_itnim_event event)
 {
 {
 	bfa_trc(itnim->fcs, itnim->rport->pwwn);
 	bfa_trc(itnim->fcs, itnim->rport->pwwn);
 	bfa_trc(itnim->fcs, event);
 	bfa_trc(itnim->fcs, event);
@@ -168,7 +156,7 @@ bfa_fcs_itnim_sm_prli_send(struct bfa_fcs_itnim_s *itnim,
 
 
 static void
 static void
 bfa_fcs_itnim_sm_prli(struct bfa_fcs_itnim_s *itnim,
 bfa_fcs_itnim_sm_prli(struct bfa_fcs_itnim_s *itnim,
-		      enum bfa_fcs_itnim_event event)
+		 enum bfa_fcs_itnim_event event)
 {
 {
 	bfa_trc(itnim->fcs, itnim->rport->pwwn);
 	bfa_trc(itnim->fcs, itnim->rport->pwwn);
 	bfa_trc(itnim->fcs, event);
 	bfa_trc(itnim->fcs, event);
@@ -233,6 +221,7 @@ bfa_fcs_itnim_sm_prli_retry(struct bfa_fcs_itnim_s *itnim,
 		}
 		}
 		break;
 		break;
 
 
+
 	case BFA_FCS_ITNIM_SM_OFFLINE:
 	case BFA_FCS_ITNIM_SM_OFFLINE:
 		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
 		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline);
 		bfa_timer_stop(&itnim->timer);
 		bfa_timer_stop(&itnim->timer);
@@ -259,6 +248,10 @@ static void
 bfa_fcs_itnim_sm_hcb_online(struct bfa_fcs_itnim_s *itnim,
 bfa_fcs_itnim_sm_hcb_online(struct bfa_fcs_itnim_s *itnim,
 			    enum bfa_fcs_itnim_event event)
 			    enum bfa_fcs_itnim_event event)
 {
 {
+	struct bfad_s *bfad = (struct bfad_s *)itnim->fcs->bfad;
+	char	lpwwn_buf[BFA_STRING_32];
+	char	rpwwn_buf[BFA_STRING_32];
+
 	bfa_trc(itnim->fcs, itnim->rport->pwwn);
 	bfa_trc(itnim->fcs, itnim->rport->pwwn);
 	bfa_trc(itnim->fcs, event);
 	bfa_trc(itnim->fcs, event);
 
 
@@ -266,7 +259,11 @@ bfa_fcs_itnim_sm_hcb_online(struct bfa_fcs_itnim_s *itnim,
 	case BFA_FCS_ITNIM_SM_HCB_ONLINE:
 	case BFA_FCS_ITNIM_SM_HCB_ONLINE:
 		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_online);
 		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_online);
 		bfa_fcb_itnim_online(itnim->itnim_drv);
 		bfa_fcb_itnim_online(itnim->itnim_drv);
-		bfa_fcs_itnim_aen_post(itnim, BFA_ITNIM_AEN_ONLINE);
+		wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(itnim->rport->port));
+		wwn2str(rpwwn_buf, itnim->rport->pwwn);
+		BFA_LOG(KERN_INFO, bfad, log_level,
+		"Target (WWN = %s) is online for initiator (WWN = %s)\n",
+		rpwwn_buf, lpwwn_buf);
 		break;
 		break;
 
 
 	case BFA_FCS_ITNIM_SM_OFFLINE:
 	case BFA_FCS_ITNIM_SM_OFFLINE:
@@ -287,8 +284,12 @@ bfa_fcs_itnim_sm_hcb_online(struct bfa_fcs_itnim_s *itnim,
 
 
 static void
 static void
 bfa_fcs_itnim_sm_online(struct bfa_fcs_itnim_s *itnim,
 bfa_fcs_itnim_sm_online(struct bfa_fcs_itnim_s *itnim,
-			enum bfa_fcs_itnim_event event)
+		 enum bfa_fcs_itnim_event event)
 {
 {
+	struct bfad_s *bfad = (struct bfad_s *)itnim->fcs->bfad;
+	char	lpwwn_buf[BFA_STRING_32];
+	char	rpwwn_buf[BFA_STRING_32];
+
 	bfa_trc(itnim->fcs, itnim->rport->pwwn);
 	bfa_trc(itnim->fcs, itnim->rport->pwwn);
 	bfa_trc(itnim->fcs, event);
 	bfa_trc(itnim->fcs, event);
 
 
@@ -297,10 +298,16 @@ bfa_fcs_itnim_sm_online(struct bfa_fcs_itnim_s *itnim,
 		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_hcb_offline);
 		bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_hcb_offline);
 		bfa_fcb_itnim_offline(itnim->itnim_drv);
 		bfa_fcb_itnim_offline(itnim->itnim_drv);
 		bfa_itnim_offline(itnim->bfa_itnim);
 		bfa_itnim_offline(itnim->bfa_itnim);
-		if (bfa_fcs_port_is_online(itnim->rport->port) == BFA_TRUE)
-			bfa_fcs_itnim_aen_post(itnim, BFA_ITNIM_AEN_DISCONNECT);
+		wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(itnim->rport->port));
+		wwn2str(rpwwn_buf, itnim->rport->pwwn);
+		if (bfa_fcs_lport_is_online(itnim->rport->port) == BFA_TRUE)
+			BFA_LOG(KERN_ERR, bfad, log_level,
+			"Target (WWN = %s) connectivity lost for "
+			"initiator (WWN = %s)\n", rpwwn_buf, lpwwn_buf);
 		else
 		else
-			bfa_fcs_itnim_aen_post(itnim, BFA_ITNIM_AEN_OFFLINE);
+			BFA_LOG(KERN_INFO, bfad, log_level,
+			"Target (WWN = %s) offlined by initiator (WWN = %s)\n",
+			rpwwn_buf, lpwwn_buf);
 		break;
 		break;
 
 
 	case BFA_FCS_ITNIM_SM_DELETE:
 	case BFA_FCS_ITNIM_SM_DELETE:
@@ -343,7 +350,7 @@ bfa_fcs_itnim_sm_hcb_offline(struct bfa_fcs_itnim_s *itnim,
  */
  */
 static void
 static void
 bfa_fcs_itnim_sm_initiator(struct bfa_fcs_itnim_s *itnim,
 bfa_fcs_itnim_sm_initiator(struct bfa_fcs_itnim_s *itnim,
-			   enum bfa_fcs_itnim_event event)
+		 enum bfa_fcs_itnim_event event)
 {
 {
 	bfa_trc(itnim->fcs, itnim->rport->pwwn);
 	bfa_trc(itnim->fcs, itnim->rport->pwwn);
 	bfa_trc(itnim->fcs, event);
 	bfa_trc(itnim->fcs, event);
@@ -369,71 +376,34 @@ bfa_fcs_itnim_sm_initiator(struct bfa_fcs_itnim_s *itnim,
 	}
 	}
 }
 }
 
 
-
-
-/**
- *  itnim_private FCS ITNIM private interfaces
- */
-
-static void
-bfa_fcs_itnim_aen_post(struct bfa_fcs_itnim_s *itnim,
-		       enum bfa_itnim_aen_event event)
-{
-	struct bfa_fcs_rport_s *rport = itnim->rport;
-	union bfa_aen_data_u aen_data;
-	struct bfa_log_mod_s *logmod = rport->fcs->logm;
-	wwn_t           lpwwn = bfa_fcs_port_get_pwwn(rport->port);
-	wwn_t           rpwwn = rport->pwwn;
-	char            lpwwn_ptr[BFA_STRING_32];
-	char            rpwwn_ptr[BFA_STRING_32];
-
-	/*
-	 * Don't post events for well known addresses
-	 */
-	if (BFA_FCS_PID_IS_WKA(rport->pid))
-		return;
-
-	wwn2str(lpwwn_ptr, lpwwn);
-	wwn2str(rpwwn_ptr, rpwwn);
-
-	bfa_log(logmod, BFA_LOG_CREATE_ID(BFA_AEN_CAT_ITNIM, event),
-		rpwwn_ptr, lpwwn_ptr);
-
-	aen_data.itnim.vf_id = rport->port->fabric->vf_id;
-	aen_data.itnim.ppwwn =
-		bfa_fcs_port_get_pwwn(bfa_fcs_get_base_port(itnim->fcs));
-	aen_data.itnim.lpwwn = lpwwn;
-	aen_data.itnim.rpwwn = rpwwn;
-}
-
 static void
 static void
 bfa_fcs_itnim_send_prli(void *itnim_cbarg, struct bfa_fcxp_s *fcxp_alloced)
 bfa_fcs_itnim_send_prli(void *itnim_cbarg, struct bfa_fcxp_s *fcxp_alloced)
 {
 {
 	struct bfa_fcs_itnim_s *itnim = itnim_cbarg;
 	struct bfa_fcs_itnim_s *itnim = itnim_cbarg;
 	struct bfa_fcs_rport_s *rport = itnim->rport;
 	struct bfa_fcs_rport_s *rport = itnim->rport;
-	struct bfa_fcs_port_s *port = rport->port;
-	struct fchs_s          fchs;
+	struct bfa_fcs_lport_s *port = rport->port;
+	struct fchs_s	fchs;
 	struct bfa_fcxp_s *fcxp;
 	struct bfa_fcxp_s *fcxp;
-	int             len;
+	int		len;
 
 
 	bfa_trc(itnim->fcs, itnim->rport->pwwn);
 	bfa_trc(itnim->fcs, itnim->rport->pwwn);
 
 
 	fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
 	fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
 	if (!fcxp) {
 	if (!fcxp) {
 		itnim->stats.fcxp_alloc_wait++;
 		itnim->stats.fcxp_alloc_wait++;
-		bfa_fcxp_alloc_wait(port->fcs->bfa, &itnim->fcxp_wqe,
+		bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &itnim->fcxp_wqe,
 				    bfa_fcs_itnim_send_prli, itnim);
 				    bfa_fcs_itnim_send_prli, itnim);
 		return;
 		return;
 	}
 	}
 	itnim->fcxp = fcxp;
 	itnim->fcxp = fcxp;
 
 
-	len = fc_prli_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), itnim->rport->pid,
-			    bfa_fcs_port_get_fcid(port), 0);
+	len = fc_prli_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
+			    itnim->rport->pid, bfa_fcs_lport_get_fcid(port), 0);
 
 
 	bfa_fcxp_send(fcxp, rport->bfa_rport, port->fabric->vf_id, port->lp_tag,
 	bfa_fcxp_send(fcxp, rport->bfa_rport, port->fabric->vf_id, port->lp_tag,
 		      BFA_FALSE, FC_CLASS_3, len, &fchs,
 		      BFA_FALSE, FC_CLASS_3, len, &fchs,
-		      bfa_fcs_itnim_prli_response, (void *)itnim, FC_MAX_PDUSZ,
-		      FC_ELS_TOV);
+		      bfa_fcs_itnim_prli_response, (void *)itnim,
+		      FC_MAX_PDUSZ, FC_ELS_TOV);
 
 
 	itnim->stats.prli_sent++;
 	itnim->stats.prli_sent++;
 	bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_FRMSENT);
 	bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_FRMSENT);
@@ -444,10 +414,10 @@ bfa_fcs_itnim_prli_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
 			    bfa_status_t req_status, u32 rsp_len,
 			    bfa_status_t req_status, u32 rsp_len,
 			    u32 resid_len, struct fchs_s *rsp_fchs)
 			    u32 resid_len, struct fchs_s *rsp_fchs)
 {
 {
-	struct bfa_fcs_itnim_s *itnim = (struct bfa_fcs_itnim_s *)cbarg;
-	struct fc_els_cmd_s   *els_cmd;
-	struct fc_prli_s      *prli_resp;
-	struct fc_ls_rjt_s    *ls_rjt;
+	struct bfa_fcs_itnim_s *itnim = (struct bfa_fcs_itnim_s *) cbarg;
+	struct fc_els_cmd_s *els_cmd;
+	struct fc_prli_s *prli_resp;
+	struct fc_ls_rjt_s *ls_rjt;
 	struct fc_prli_params_s *sparams;
 	struct fc_prli_params_s *sparams;
 
 
 	bfa_trc(itnim->fcs, req_status);
 	bfa_trc(itnim->fcs, req_status);
@@ -475,7 +445,7 @@ bfa_fcs_itnim_prli_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
 			if (prli_resp->parampage.servparams.initiator) {
 			if (prli_resp->parampage.servparams.initiator) {
 				bfa_trc(itnim->fcs, prli_resp->parampage.type);
 				bfa_trc(itnim->fcs, prli_resp->parampage.type);
 				itnim->rport->scsi_function =
 				itnim->rport->scsi_function =
-					BFA_RPORT_INITIATOR;
+					 BFA_RPORT_INITIATOR;
 				itnim->stats.prli_rsp_acc++;
 				itnim->stats.prli_rsp_acc++;
 				bfa_sm_send_event(itnim,
 				bfa_sm_send_event(itnim,
 						  BFA_FCS_ITNIM_SM_RSP_OK);
 						  BFA_FCS_ITNIM_SM_RSP_OK);
@@ -488,10 +458,10 @@ bfa_fcs_itnim_prli_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
 		itnim->rport->scsi_function = BFA_RPORT_TARGET;
 		itnim->rport->scsi_function = BFA_RPORT_TARGET;
 
 
 		sparams = &prli_resp->parampage.servparams;
 		sparams = &prli_resp->parampage.servparams;
-		itnim->seq_rec = sparams->retry;
-		itnim->rec_support = sparams->rec_support;
+		itnim->seq_rec	     = sparams->retry;
+		itnim->rec_support   = sparams->rec_support;
 		itnim->task_retry_id = sparams->task_retry_id;
 		itnim->task_retry_id = sparams->task_retry_id;
-		itnim->conf_comp = sparams->confirm;
+		itnim->conf_comp     = sparams->confirm;
 
 
 		itnim->stats.prli_rsp_acc++;
 		itnim->stats.prli_rsp_acc++;
 		bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_RSP_OK);
 		bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_RSP_OK);
@@ -509,7 +479,7 @@ bfa_fcs_itnim_prli_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
 static void
 static void
 bfa_fcs_itnim_timeout(void *arg)
 bfa_fcs_itnim_timeout(void *arg)
 {
 {
-	struct bfa_fcs_itnim_s *itnim = (struct bfa_fcs_itnim_s *)arg;
+	struct bfa_fcs_itnim_s *itnim = (struct bfa_fcs_itnim_s *) arg;
 
 
 	itnim->stats.timeout++;
 	itnim->stats.timeout++;
 	bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_TIMEOUT);
 	bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_TIMEOUT);
@@ -529,16 +499,16 @@ bfa_fcs_itnim_free(struct bfa_fcs_itnim_s *itnim)
  */
  */
 
 
 /**
 /**
- * 	Called by rport when a new rport is created.
+ *	Called by rport when a new rport is created.
  *
  *
  * @param[in] rport	-  remote port.
  * @param[in] rport	-  remote port.
  */
  */
 struct bfa_fcs_itnim_s *
 struct bfa_fcs_itnim_s *
 bfa_fcs_itnim_create(struct bfa_fcs_rport_s *rport)
 bfa_fcs_itnim_create(struct bfa_fcs_rport_s *rport)
 {
 {
-	struct bfa_fcs_port_s *port = rport->port;
+	struct bfa_fcs_lport_s *port = rport->port;
 	struct bfa_fcs_itnim_s *itnim;
 	struct bfa_fcs_itnim_s *itnim;
-	struct bfad_itnim_s *itnim_drv;
+	struct bfad_itnim_s   *itnim_drv;
 	struct bfa_itnim_s *bfa_itnim;
 	struct bfa_itnim_s *bfa_itnim;
 
 
 	/*
 	/*
@@ -560,7 +530,8 @@ bfa_fcs_itnim_create(struct bfa_fcs_rport_s *rport)
 	/*
 	/*
 	 * call BFA to create the itnim
 	 * call BFA to create the itnim
 	 */
 	 */
-	bfa_itnim = bfa_itnim_create(port->fcs->bfa, rport->bfa_rport, itnim);
+	bfa_itnim =
+		bfa_itnim_create(port->fcs->bfa, rport->bfa_rport, itnim);
 
 
 	if (bfa_itnim == NULL) {
 	if (bfa_itnim == NULL) {
 		bfa_trc(port->fcs, rport->pwwn);
 		bfa_trc(port->fcs, rport->pwwn);
@@ -569,10 +540,10 @@ bfa_fcs_itnim_create(struct bfa_fcs_rport_s *rport)
 		return NULL;
 		return NULL;
 	}
 	}
 
 
-	itnim->bfa_itnim = bfa_itnim;
-	itnim->seq_rec = BFA_FALSE;
-	itnim->rec_support = BFA_FALSE;
-	itnim->conf_comp = BFA_FALSE;
+	itnim->bfa_itnim     = bfa_itnim;
+	itnim->seq_rec	     = BFA_FALSE;
+	itnim->rec_support   = BFA_FALSE;
+	itnim->conf_comp     = BFA_FALSE;
 	itnim->task_retry_id = BFA_FALSE;
 	itnim->task_retry_id = BFA_FALSE;
 
 
 	/*
 	/*
@@ -584,7 +555,7 @@ bfa_fcs_itnim_create(struct bfa_fcs_rport_s *rport)
 }
 }
 
 
 /**
 /**
- * 	Called by rport to delete  the instance of FCPIM.
+ *	Called by rport to delete  the instance of FCPIM.
  *
  *
  * @param[in] rport	-  remote port.
  * @param[in] rport	-  remote port.
  */
  */
@@ -607,8 +578,8 @@ bfa_fcs_itnim_rport_online(struct bfa_fcs_itnim_s *itnim)
 		bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_ONLINE);
 		bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_ONLINE);
 	} else {
 	} else {
 		/*
 		/*
-		 * For well known addresses, we set the itnim to initiator
-		 * state
+		 *  For well known addresses, we set the itnim to initiator
+		 *  state
 		 */
 		 */
 		itnim->stats.initiator++;
 		itnim->stats.initiator++;
 		bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_INITIATOR);
 		bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_INITIATOR);
@@ -651,7 +622,6 @@ bfa_fcs_itnim_get_online_state(struct bfa_fcs_itnim_s *itnim)
 
 
 	default:
 	default:
 		return BFA_STATUS_NO_FCPIM_NEXUS;
 		return BFA_STATUS_NO_FCPIM_NEXUS;
-
 	}
 	}
 }
 }
 
 
@@ -661,7 +631,7 @@ bfa_fcs_itnim_get_online_state(struct bfa_fcs_itnim_s *itnim)
 void
 void
 bfa_cb_itnim_online(void *cbarg)
 bfa_cb_itnim_online(void *cbarg)
 {
 {
-	struct bfa_fcs_itnim_s *itnim = (struct bfa_fcs_itnim_s *)cbarg;
+	struct bfa_fcs_itnim_s *itnim = (struct bfa_fcs_itnim_s *) cbarg;
 
 
 	bfa_trc(itnim->fcs, itnim->rport->pwwn);
 	bfa_trc(itnim->fcs, itnim->rport->pwwn);
 	bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_HCB_ONLINE);
 	bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_HCB_ONLINE);
@@ -673,7 +643,7 @@ bfa_cb_itnim_online(void *cbarg)
 void
 void
 bfa_cb_itnim_offline(void *cb_arg)
 bfa_cb_itnim_offline(void *cb_arg)
 {
 {
-	struct bfa_fcs_itnim_s *itnim = (struct bfa_fcs_itnim_s *)cb_arg;
+	struct bfa_fcs_itnim_s *itnim = (struct bfa_fcs_itnim_s *) cb_arg;
 
 
 	bfa_trc(itnim->fcs, itnim->rport->pwwn);
 	bfa_trc(itnim->fcs, itnim->rport->pwwn);
 	bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_HCB_OFFLINE);
 	bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_HCB_OFFLINE);
@@ -686,7 +656,7 @@ bfa_cb_itnim_offline(void *cb_arg)
 void
 void
 bfa_cb_itnim_tov_begin(void *cb_arg)
 bfa_cb_itnim_tov_begin(void *cb_arg)
 {
 {
-	struct bfa_fcs_itnim_s *itnim = (struct bfa_fcs_itnim_s *)cb_arg;
+	struct bfa_fcs_itnim_s *itnim = (struct bfa_fcs_itnim_s *) cb_arg;
 
 
 	bfa_trc(itnim->fcs, itnim->rport->pwwn);
 	bfa_trc(itnim->fcs, itnim->rport->pwwn);
 }
 }
@@ -697,14 +667,15 @@ bfa_cb_itnim_tov_begin(void *cb_arg)
 void
 void
 bfa_cb_itnim_tov(void *cb_arg)
 bfa_cb_itnim_tov(void *cb_arg)
 {
 {
-	struct bfa_fcs_itnim_s *itnim = (struct bfa_fcs_itnim_s *)cb_arg;
+	struct bfa_fcs_itnim_s *itnim = (struct bfa_fcs_itnim_s *) cb_arg;
+	struct bfad_itnim_s *itnim_drv = itnim->itnim_drv;
 
 
 	bfa_trc(itnim->fcs, itnim->rport->pwwn);
 	bfa_trc(itnim->fcs, itnim->rport->pwwn);
-	bfa_fcb_itnim_tov(itnim->itnim_drv);
+	itnim_drv->state = ITNIM_STATE_TIMEOUT;
 }
 }
 
 
 /**
 /**
- * 		BFA notification to FCS/driver for second level error recovery.
+ *		BFA notification to FCS/driver for second level error recovery.
  *
  *
 * At least one I/O request has timed out and target is unresponsive to
 * At least one I/O request has timed out and target is unresponsive to
  * repeated abort requests. Second level error recovery should be initiated
  * repeated abort requests. Second level error recovery should be initiated
@@ -713,7 +684,7 @@ bfa_cb_itnim_tov(void *cb_arg)
 void
 void
 bfa_cb_itnim_sler(void *cb_arg)
 bfa_cb_itnim_sler(void *cb_arg)
 {
 {
-	struct bfa_fcs_itnim_s *itnim = (struct bfa_fcs_itnim_s *)cb_arg;
+	struct bfa_fcs_itnim_s *itnim = (struct bfa_fcs_itnim_s *) cb_arg;
 
 
 	itnim->stats.sler++;
 	itnim->stats.sler++;
 	bfa_trc(itnim->fcs, itnim->rport->pwwn);
 	bfa_trc(itnim->fcs, itnim->rport->pwwn);
@@ -721,7 +692,7 @@ bfa_cb_itnim_sler(void *cb_arg)
 }
 }
 
 
 struct bfa_fcs_itnim_s *
 struct bfa_fcs_itnim_s *
-bfa_fcs_itnim_lookup(struct bfa_fcs_port_s *port, wwn_t rpwwn)
+bfa_fcs_itnim_lookup(struct bfa_fcs_lport_s *port, wwn_t rpwwn)
 {
 {
 	struct bfa_fcs_rport_s *rport;
 	struct bfa_fcs_rport_s *rport;
 	rport = bfa_fcs_rport_lookup(port, rpwwn);
 	rport = bfa_fcs_rport_lookup(port, rpwwn);
@@ -734,7 +705,7 @@ bfa_fcs_itnim_lookup(struct bfa_fcs_port_s *port, wwn_t rpwwn)
 }
 }
 
 
 bfa_status_t
 bfa_status_t
-bfa_fcs_itnim_attr_get(struct bfa_fcs_port_s *port, wwn_t rpwwn,
+bfa_fcs_itnim_attr_get(struct bfa_fcs_lport_s *port, wwn_t rpwwn,
 		       struct bfa_itnim_attr_s *attr)
 		       struct bfa_itnim_attr_s *attr)
 {
 {
 	struct bfa_fcs_itnim_s *itnim = NULL;
 	struct bfa_fcs_itnim_s *itnim = NULL;
@@ -744,18 +715,16 @@ bfa_fcs_itnim_attr_get(struct bfa_fcs_port_s *port, wwn_t rpwwn,
 	if (itnim == NULL)
 	if (itnim == NULL)
 		return BFA_STATUS_NO_FCPIM_NEXUS;
 		return BFA_STATUS_NO_FCPIM_NEXUS;
 
 
-	attr->state = bfa_sm_to_state(itnim_sm_table, itnim->sm);
-	attr->retry = itnim->seq_rec;
-	attr->rec_support = itnim->rec_support;
-	attr->conf_comp = itnim->conf_comp;
+	attr->state	    = bfa_sm_to_state(itnim_sm_table, itnim->sm);
+	attr->retry	    = itnim->seq_rec;
+	attr->rec_support   = itnim->rec_support;
+	attr->conf_comp	    = itnim->conf_comp;
 	attr->task_retry_id = itnim->task_retry_id;
 	attr->task_retry_id = itnim->task_retry_id;
-	bfa_os_memset(&attr->io_latency, 0, sizeof(struct bfa_itnim_latency_s));
-
 	return BFA_STATUS_OK;
 	return BFA_STATUS_OK;
 }
 }
 
 
 bfa_status_t
 bfa_status_t
-bfa_fcs_itnim_stats_get(struct bfa_fcs_port_s *port, wwn_t rpwwn,
+bfa_fcs_itnim_stats_get(struct bfa_fcs_lport_s *port, wwn_t rpwwn,
 			struct bfa_itnim_stats_s *stats)
 			struct bfa_itnim_stats_s *stats)
 {
 {
 	struct bfa_fcs_itnim_s *itnim = NULL;
 	struct bfa_fcs_itnim_s *itnim = NULL;
@@ -773,7 +742,7 @@ bfa_fcs_itnim_stats_get(struct bfa_fcs_port_s *port, wwn_t rpwwn,
 }
 }
 
 
 bfa_status_t
 bfa_status_t
-bfa_fcs_itnim_stats_clear(struct bfa_fcs_port_s *port, wwn_t rpwwn)
+bfa_fcs_itnim_stats_clear(struct bfa_fcs_lport_s *port, wwn_t rpwwn)
 {
 {
 	struct bfa_fcs_itnim_s *itnim = NULL;
 	struct bfa_fcs_itnim_s *itnim = NULL;
 
 
@@ -789,10 +758,10 @@ bfa_fcs_itnim_stats_clear(struct bfa_fcs_port_s *port, wwn_t rpwwn)
 }
 }
 
 
 void
 void
-bfa_fcs_fcpim_uf_recv(struct bfa_fcs_itnim_s *itnim, struct fchs_s *fchs,
-		      u16 len)
+bfa_fcs_fcpim_uf_recv(struct bfa_fcs_itnim_s *itnim,
+			struct fchs_s *fchs, u16 len)
 {
 {
-	struct fc_els_cmd_s   *els_cmd;
+	struct fc_els_cmd_s *els_cmd;
 
 
 	bfa_trc(itnim->fcs, fchs->type);
 	bfa_trc(itnim->fcs, fchs->type);
 
 
@@ -812,13 +781,3 @@ bfa_fcs_fcpim_uf_recv(struct bfa_fcs_itnim_s *itnim, struct fchs_s *fchs,
 		bfa_assert(0);
 		bfa_assert(0);
 	}
 	}
 }
 }
-
-void
-bfa_fcs_itnim_pause(struct bfa_fcs_itnim_s *itnim)
-{
-}
-
-void
-bfa_fcs_itnim_resume(struct bfa_fcs_itnim_s *itnim)
-{
-}

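The hunks above switch the itnim query helpers from the old bfa_fcs_port_s to the consolidated struct bfa_fcs_lport_s. A minimal caller-side sketch of the updated lookup/attribute path follows; the port handle and remote WWN are assumed to come from the caller, not from this patch.

/*
 * Hypothetical query sketch for the reworked (lport-based) itnim API.
 */
static bfa_status_t example_itnim_query(struct bfa_fcs_lport_s *port,
					wwn_t rpwwn)
{
	struct bfa_itnim_attr_s attr;
	struct bfa_itnim_stats_s stats;
	bfa_status_t status;

	/* Returns BFA_STATUS_NO_FCPIM_NEXUS if no i-t nexus exists. */
	status = bfa_fcs_itnim_attr_get(port, rpwwn, &attr);
	if (status != BFA_STATUS_OK)
		return status;

	/* Statistics are fetched and cleared through the same lport handle. */
	status = bfa_fcs_itnim_stats_get(port, rpwwn, &stats);
	if (status == BFA_STATUS_OK)
		status = bfa_fcs_itnim_stats_clear(port, rpwwn);

	return status;
}
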
The file diff is too large, so it has been hidden
+ 263 - 262
drivers/scsi/bfa/bfa_fcs_lport.c


+ 0 - 61
drivers/scsi/bfa/bfa_fcs_port.c

@@ -1,61 +0,0 @@
-/*
- * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
- * All rights reserved
- * www.brocade.com
- *
- * Linux driver for Brocade Fibre Channel Host Bus Adapter.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License (GPL) Version 2 as
- * published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- */
-
-/**
- *  bfa_fcs_pport.c BFA FCS PPORT ( physical port)
- */
-
-#include <fcs/bfa_fcs.h>
-#include <bfa_svc.h>
-#include <fcs/bfa_fcs_fabric.h>
-#include "fcs_trcmod.h"
-#include "fcs.h"
-#include "fcs_fabric.h"
-#include "fcs_port.h"
-
-BFA_TRC_FILE(FCS, PPORT);
-
-static void
-bfa_fcs_pport_event_handler(void *cbarg, bfa_pport_event_t event)
-{
-	struct bfa_fcs_s      *fcs = cbarg;
-
-	bfa_trc(fcs, event);
-
-	switch (event) {
-	case BFA_PPORT_LINKUP:
-		bfa_fcs_fabric_link_up(&fcs->fabric);
-		break;
-
-	case BFA_PPORT_LINKDOWN:
-		bfa_fcs_fabric_link_down(&fcs->fabric);
-		break;
-
-	case BFA_PPORT_TRUNK_LINKDOWN:
-		bfa_assert(0);
-		break;
-
-	default:
-		bfa_assert(0);
-	}
-}
-
-void
-bfa_fcs_pport_attach(struct bfa_fcs_s *fcs)
-{
-	bfa_fcport_event_register(fcs->bfa, bfa_fcs_pport_event_handler, fcs);
-}

The file diff is too large, so it has been hidden
+ 250 - 271
drivers/scsi/bfa/bfa_fcs_rport.c


+ 0 - 99
drivers/scsi/bfa/bfa_fcs_uf.c

@@ -1,99 +0,0 @@
-/*
- * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
- * All rights reserved
- * www.brocade.com
- *
- * Linux driver for Brocade Fibre Channel Host Bus Adapter.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License (GPL) Version 2 as
- * published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- */
-
-/**
- *  bfa_fcs_uf.c BFA FCS UF ( Unsolicited Frames)
- */
-
-#include <fcs/bfa_fcs.h>
-#include <bfa_svc.h>
-#include <fcs/bfa_fcs_fabric.h>
-#include "fcs.h"
-#include "fcs_trcmod.h"
-#include "fcs_fabric.h"
-#include "fcs_uf.h"
-
-BFA_TRC_FILE(FCS, UF);
-
-/**
- * 		BFA callback for unsolicited frame receive handler.
- *
- * @param[in]		cbarg		callback arg for receive handler
- * @param[in]		uf		unsolicited frame descriptor
- *
- * @return None
- */
-static void
-bfa_fcs_uf_recv(void *cbarg, struct bfa_uf_s *uf)
-{
-	struct bfa_fcs_s      *fcs = (struct bfa_fcs_s *) cbarg;
-	struct fchs_s         *fchs = bfa_uf_get_frmbuf(uf);
-	u16        len = bfa_uf_get_frmlen(uf);
-	struct fc_vft_s       *vft;
-	struct bfa_fcs_fabric_s *fabric;
-
-	/**
-	 * check for VFT header
-	 */
-	if (fchs->routing == FC_RTG_EXT_HDR &&
-		fchs->cat_info == FC_CAT_VFT_HDR) {
-		bfa_stats(fcs, uf.tagged);
-		vft = bfa_uf_get_frmbuf(uf);
-		if (fcs->port_vfid == vft->vf_id)
-			fabric = &fcs->fabric;
-		else
-			fabric = bfa_fcs_vf_lookup(fcs, (u16) vft->vf_id);
-
-		/**
-		 * drop frame if vfid is unknown
-		 */
-		if (!fabric) {
-			bfa_assert(0);
-			bfa_stats(fcs, uf.vfid_unknown);
-			bfa_uf_free(uf);
-			return;
-		}
-
-		/**
-		 * skip vft header
-		 */
-		fchs = (struct fchs_s *) (vft + 1);
-		len -= sizeof(struct fc_vft_s);
-
-		bfa_trc(fcs, vft->vf_id);
-	} else {
-		bfa_stats(fcs, uf.untagged);
-		fabric = &fcs->fabric;
-	}
-
-	bfa_trc(fcs, ((u32 *) fchs)[0]);
-	bfa_trc(fcs, ((u32 *) fchs)[1]);
-	bfa_trc(fcs, ((u32 *) fchs)[2]);
-	bfa_trc(fcs, ((u32 *) fchs)[3]);
-	bfa_trc(fcs, ((u32 *) fchs)[4]);
-	bfa_trc(fcs, ((u32 *) fchs)[5]);
-	bfa_trc(fcs, len);
-
-	bfa_fcs_fabric_uf_recv(fabric, fchs, len);
-	bfa_uf_free(uf);
-}
-
-void
-bfa_fcs_uf_attach(struct bfa_fcs_s *fcs)
-{
-	bfa_uf_recv_register(fcs->bfa, bfa_fcs_uf_recv, fcs);
-}

+ 0 - 774
drivers/scsi/bfa/bfa_fcxp.c

@@ -1,774 +0,0 @@
-/*
- * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
- * All rights reserved
- * www.brocade.com
- *
- * Linux driver for Brocade Fibre Channel Host Bus Adapter.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License (GPL) Version 2 as
- * published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- */
-
-#include <bfa.h>
-#include <bfi/bfi_uf.h>
-#include <cs/bfa_debug.h>
-
-BFA_TRC_FILE(HAL, FCXP);
-BFA_MODULE(fcxp);
-
-/**
- * forward declarations
- */
-static void     __bfa_fcxp_send_cbfn(void *cbarg, bfa_boolean_t complete);
-static void     hal_fcxp_rx_plog(struct bfa_s *bfa, struct bfa_fcxp_s *fcxp,
-				 struct bfi_fcxp_send_rsp_s *fcxp_rsp);
-static void     hal_fcxp_tx_plog(struct bfa_s *bfa, u32 reqlen,
-				 struct bfa_fcxp_s *fcxp, struct fchs_s *fchs);
-static void	bfa_fcxp_qresume(void *cbarg);
-static void	bfa_fcxp_queue(struct bfa_fcxp_s *fcxp,
-			       struct bfi_fcxp_send_req_s *send_req);
-
-/**
- *  fcxp_pvt BFA FCXP private functions
- */
-
-static void
-claim_fcxp_req_rsp_mem(struct bfa_fcxp_mod_s *mod, struct bfa_meminfo_s *mi)
-{
-	u8        *dm_kva = NULL;
-	u64        dm_pa;
-	u32        buf_pool_sz;
-
-	dm_kva = bfa_meminfo_dma_virt(mi);
-	dm_pa = bfa_meminfo_dma_phys(mi);
-
-	buf_pool_sz = mod->req_pld_sz * mod->num_fcxps;
-
-	/*
-	 * Initialize the fcxp req payload list
-	 */
-	mod->req_pld_list_kva = dm_kva;
-	mod->req_pld_list_pa = dm_pa;
-	dm_kva += buf_pool_sz;
-	dm_pa += buf_pool_sz;
-	bfa_os_memset(mod->req_pld_list_kva, 0, buf_pool_sz);
-
-	/*
-	 * Initialize the fcxp rsp payload list
-	 */
-	buf_pool_sz = mod->rsp_pld_sz * mod->num_fcxps;
-	mod->rsp_pld_list_kva = dm_kva;
-	mod->rsp_pld_list_pa = dm_pa;
-	dm_kva += buf_pool_sz;
-	dm_pa += buf_pool_sz;
-	bfa_os_memset(mod->rsp_pld_list_kva, 0, buf_pool_sz);
-
-	bfa_meminfo_dma_virt(mi) = dm_kva;
-	bfa_meminfo_dma_phys(mi) = dm_pa;
-}
-
-static void
-claim_fcxps_mem(struct bfa_fcxp_mod_s *mod, struct bfa_meminfo_s *mi)
-{
-	u16        i;
-	struct bfa_fcxp_s *fcxp;
-
-	fcxp = (struct bfa_fcxp_s *) bfa_meminfo_kva(mi);
-	bfa_os_memset(fcxp, 0, sizeof(struct bfa_fcxp_s) * mod->num_fcxps);
-
-	INIT_LIST_HEAD(&mod->fcxp_free_q);
-	INIT_LIST_HEAD(&mod->fcxp_active_q);
-
-	mod->fcxp_list = fcxp;
-
-	for (i = 0; i < mod->num_fcxps; i++) {
-		fcxp->fcxp_mod = mod;
-		fcxp->fcxp_tag = i;
-
-		list_add_tail(&fcxp->qe, &mod->fcxp_free_q);
-		bfa_reqq_winit(&fcxp->reqq_wqe, bfa_fcxp_qresume, fcxp);
-		fcxp->reqq_waiting = BFA_FALSE;
-
-		fcxp = fcxp + 1;
-	}
-
-	bfa_meminfo_kva(mi) = (void *)fcxp;
-}
-
-static void
-bfa_fcxp_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len,
-		u32 *dm_len)
-{
-	u16        num_fcxp_reqs = cfg->fwcfg.num_fcxp_reqs;
-
-	if (num_fcxp_reqs == 0)
-		return;
-
-	/*
-	 * Account for req/rsp payload
-	 */
-	*dm_len += BFA_FCXP_MAX_IBUF_SZ * num_fcxp_reqs;
-	if (cfg->drvcfg.min_cfg)
-		*dm_len += BFA_FCXP_MAX_IBUF_SZ * num_fcxp_reqs;
-	else
-		*dm_len += BFA_FCXP_MAX_LBUF_SZ * num_fcxp_reqs;
-
-	/*
-	 * Account for fcxp structs
-	 */
-	*ndm_len += sizeof(struct bfa_fcxp_s) * num_fcxp_reqs;
-}
-
-static void
-bfa_fcxp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
-		    struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
-{
-	struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
-
-	bfa_os_memset(mod, 0, sizeof(struct bfa_fcxp_mod_s));
-	mod->bfa = bfa;
-	mod->num_fcxps = cfg->fwcfg.num_fcxp_reqs;
-
-	/**
-	 * Initialize FCXP request and response payload sizes.
-	 */
-	mod->req_pld_sz = mod->rsp_pld_sz = BFA_FCXP_MAX_IBUF_SZ;
-	if (!cfg->drvcfg.min_cfg)
-		mod->rsp_pld_sz = BFA_FCXP_MAX_LBUF_SZ;
-
-	INIT_LIST_HEAD(&mod->wait_q);
-
-	claim_fcxp_req_rsp_mem(mod, meminfo);
-	claim_fcxps_mem(mod, meminfo);
-}
-
-static void
-bfa_fcxp_detach(struct bfa_s *bfa)
-{
-}
-
-static void
-bfa_fcxp_start(struct bfa_s *bfa)
-{
-}
-
-static void
-bfa_fcxp_stop(struct bfa_s *bfa)
-{
-}
-
-static void
-bfa_fcxp_iocdisable(struct bfa_s *bfa)
-{
-	struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
-	struct bfa_fcxp_s *fcxp;
-	struct list_head        *qe, *qen;
-
-	list_for_each_safe(qe, qen, &mod->fcxp_active_q) {
-		fcxp = (struct bfa_fcxp_s *) qe;
-		if (fcxp->caller == NULL) {
-			fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
-					BFA_STATUS_IOC_FAILURE, 0, 0, NULL);
-			bfa_fcxp_free(fcxp);
-		} else {
-			fcxp->rsp_status = BFA_STATUS_IOC_FAILURE;
-			bfa_cb_queue(bfa, &fcxp->hcb_qe,
-				      __bfa_fcxp_send_cbfn, fcxp);
-		}
-	}
-}
-
-static struct bfa_fcxp_s *
-bfa_fcxp_get(struct bfa_fcxp_mod_s *fm)
-{
-	struct bfa_fcxp_s *fcxp;
-
-	bfa_q_deq(&fm->fcxp_free_q, &fcxp);
-
-	if (fcxp)
-		list_add_tail(&fcxp->qe, &fm->fcxp_active_q);
-
-	return fcxp;
-}
-
-static void
-bfa_fcxp_put(struct bfa_fcxp_s *fcxp)
-{
-	struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
-	struct bfa_fcxp_wqe_s *wqe;
-
-	bfa_q_deq(&mod->wait_q, &wqe);
-	if (wqe) {
-		bfa_trc(mod->bfa, fcxp->fcxp_tag);
-		wqe->alloc_cbfn(wqe->alloc_cbarg, fcxp);
-		return;
-	}
-
-	bfa_assert(bfa_q_is_on_q(&mod->fcxp_active_q, fcxp));
-	list_del(&fcxp->qe);
-	list_add_tail(&fcxp->qe, &mod->fcxp_free_q);
-}
-
-static void
-bfa_fcxp_null_comp(void *bfad_fcxp, struct bfa_fcxp_s *fcxp, void *cbarg,
-		       bfa_status_t req_status, u32 rsp_len,
-		       u32 resid_len, struct fchs_s *rsp_fchs)
-{
-	/* discarded fcxp completion */
-}
-
-static void
-__bfa_fcxp_send_cbfn(void *cbarg, bfa_boolean_t complete)
-{
-	struct bfa_fcxp_s *fcxp = cbarg;
-
-	if (complete) {
-		fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
-				fcxp->rsp_status, fcxp->rsp_len,
-				fcxp->residue_len, &fcxp->rsp_fchs);
-	} else {
-		bfa_fcxp_free(fcxp);
-	}
-}
-
-static void
-hal_fcxp_send_comp(struct bfa_s *bfa, struct bfi_fcxp_send_rsp_s *fcxp_rsp)
-{
-	struct bfa_fcxp_mod_s	*mod = BFA_FCXP_MOD(bfa);
-	struct bfa_fcxp_s 	*fcxp;
-	u16		fcxp_tag = bfa_os_ntohs(fcxp_rsp->fcxp_tag);
-
-	bfa_trc(bfa, fcxp_tag);
-
-	fcxp_rsp->rsp_len = bfa_os_ntohl(fcxp_rsp->rsp_len);
-
-	/**
-	 * @todo f/w should not set residue to non-0 when everything
-	 *       is received.
-	 */
-	if (fcxp_rsp->req_status == BFA_STATUS_OK)
-		fcxp_rsp->residue_len = 0;
-	else
-		fcxp_rsp->residue_len = bfa_os_ntohl(fcxp_rsp->residue_len);
-
-	fcxp = BFA_FCXP_FROM_TAG(mod, fcxp_tag);
-
-	bfa_assert(fcxp->send_cbfn != NULL);
-
-	hal_fcxp_rx_plog(mod->bfa, fcxp, fcxp_rsp);
-
-	if (fcxp->send_cbfn != NULL) {
-		if (fcxp->caller == NULL) {
-			bfa_trc(mod->bfa, fcxp->fcxp_tag);
-
-			fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
-					fcxp_rsp->req_status, fcxp_rsp->rsp_len,
-					fcxp_rsp->residue_len, &fcxp_rsp->fchs);
-			/*
-			 * fcxp automatically freed on return from the callback
-			 */
-			bfa_fcxp_free(fcxp);
-		} else {
-			bfa_trc(mod->bfa, fcxp->fcxp_tag);
-			fcxp->rsp_status = fcxp_rsp->req_status;
-			fcxp->rsp_len = fcxp_rsp->rsp_len;
-			fcxp->residue_len = fcxp_rsp->residue_len;
-			fcxp->rsp_fchs = fcxp_rsp->fchs;
-
-			bfa_cb_queue(bfa, &fcxp->hcb_qe,
-				      __bfa_fcxp_send_cbfn, fcxp);
-		}
-	} else {
-		bfa_trc(bfa, fcxp_tag);
-	}
-}
-
-static void
-hal_fcxp_set_local_sges(struct bfi_sge_s *sge, u32 reqlen, u64 req_pa)
-{
-	union bfi_addr_u      sga_zero = { {0} };
-
-	sge->sg_len = reqlen;
-	sge->flags = BFI_SGE_DATA_LAST;
-	bfa_dma_addr_set(sge[0].sga, req_pa);
-	bfa_sge_to_be(sge);
-	sge++;
-
-	sge->sga = sga_zero;
-	sge->sg_len = reqlen;
-	sge->flags = BFI_SGE_PGDLEN;
-	bfa_sge_to_be(sge);
-}
-
-static void
-hal_fcxp_tx_plog(struct bfa_s *bfa, u32 reqlen, struct bfa_fcxp_s *fcxp,
-		 struct fchs_s *fchs)
-{
-	/*
-	 * TODO: TX ox_id
-	 */
-	if (reqlen > 0) {
-		if (fcxp->use_ireqbuf) {
-			u32        pld_w0 =
-				*((u32 *) BFA_FCXP_REQ_PLD(fcxp));
-
-			bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_FCXP,
-				BFA_PL_EID_TX,
-				reqlen + sizeof(struct fchs_s), fchs, pld_w0);
-		} else {
-			bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP,
-				BFA_PL_EID_TX, reqlen + sizeof(struct fchs_s),
-				fchs);
-		}
-	} else {
-		bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP, BFA_PL_EID_TX,
-			       reqlen + sizeof(struct fchs_s), fchs);
-	}
-}
-
-static void
-hal_fcxp_rx_plog(struct bfa_s *bfa, struct bfa_fcxp_s *fcxp,
-		 struct bfi_fcxp_send_rsp_s *fcxp_rsp)
-{
-	if (fcxp_rsp->rsp_len > 0) {
-		if (fcxp->use_irspbuf) {
-			u32        pld_w0 =
-				*((u32 *) BFA_FCXP_RSP_PLD(fcxp));
-
-			bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_FCXP,
-					      BFA_PL_EID_RX,
-					      (u16) fcxp_rsp->rsp_len,
-					      &fcxp_rsp->fchs, pld_w0);
-		} else {
-			bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP,
-				       BFA_PL_EID_RX,
-				       (u16) fcxp_rsp->rsp_len,
-				       &fcxp_rsp->fchs);
-		}
-	} else {
-		bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP, BFA_PL_EID_RX,
-			       (u16) fcxp_rsp->rsp_len, &fcxp_rsp->fchs);
-	}
-}
-
-/**
- * Handler to resume sending fcxp when space is available in cpe queue.
- */
-static void
-bfa_fcxp_qresume(void *cbarg)
-{
-	struct bfa_fcxp_s		*fcxp = cbarg;
-	struct bfa_s			*bfa = fcxp->fcxp_mod->bfa;
-	struct bfi_fcxp_send_req_s	*send_req;
-
-	fcxp->reqq_waiting = BFA_FALSE;
-	send_req = bfa_reqq_next(bfa, BFA_REQQ_FCXP);
-	bfa_fcxp_queue(fcxp, send_req);
-}
-
-/**
- * Queue fcxp send request to firmware.
- */
-static void
-bfa_fcxp_queue(struct bfa_fcxp_s *fcxp, struct bfi_fcxp_send_req_s *send_req)
-{
-	struct bfa_s      		*bfa = fcxp->fcxp_mod->bfa;
-	struct bfa_fcxp_req_info_s	*reqi = &fcxp->req_info;
-	struct bfa_fcxp_rsp_info_s	*rspi = &fcxp->rsp_info;
-	struct bfa_rport_s		*rport = reqi->bfa_rport;
-
-	bfi_h2i_set(send_req->mh, BFI_MC_FCXP, BFI_FCXP_H2I_SEND_REQ,
-			bfa_lpuid(bfa));
-
-	send_req->fcxp_tag = bfa_os_htons(fcxp->fcxp_tag);
-	if (rport) {
-		send_req->rport_fw_hndl = rport->fw_handle;
-		send_req->max_frmsz = bfa_os_htons(rport->rport_info.max_frmsz);
-		if (send_req->max_frmsz == 0)
-			send_req->max_frmsz = bfa_os_htons(FC_MAX_PDUSZ);
-	} else {
-		send_req->rport_fw_hndl = 0;
-		send_req->max_frmsz = bfa_os_htons(FC_MAX_PDUSZ);
-	}
-
-	send_req->vf_id = bfa_os_htons(reqi->vf_id);
-	send_req->lp_tag = reqi->lp_tag;
-	send_req->class = reqi->class;
-	send_req->rsp_timeout = rspi->rsp_timeout;
-	send_req->cts = reqi->cts;
-	send_req->fchs = reqi->fchs;
-
-	send_req->req_len = bfa_os_htonl(reqi->req_tot_len);
-	send_req->rsp_maxlen = bfa_os_htonl(rspi->rsp_maxlen);
-
-	/*
-	 * setup req sgles
-	 */
-	if (fcxp->use_ireqbuf == 1) {
-		hal_fcxp_set_local_sges(send_req->req_sge, reqi->req_tot_len,
-					BFA_FCXP_REQ_PLD_PA(fcxp));
-	} else {
-		if (fcxp->nreq_sgles > 0) {
-			bfa_assert(fcxp->nreq_sgles == 1);
-			hal_fcxp_set_local_sges(send_req->req_sge,
-						reqi->req_tot_len,
-						fcxp->req_sga_cbfn(fcxp->caller,
-								   0));
-		} else {
-			bfa_assert(reqi->req_tot_len == 0);
-			hal_fcxp_set_local_sges(send_req->rsp_sge, 0, 0);
-		}
-	}
-
-	/*
-	 * setup rsp sgles
-	 */
-	if (fcxp->use_irspbuf == 1) {
-		bfa_assert(rspi->rsp_maxlen <= BFA_FCXP_MAX_LBUF_SZ);
-
-		hal_fcxp_set_local_sges(send_req->rsp_sge, rspi->rsp_maxlen,
-					BFA_FCXP_RSP_PLD_PA(fcxp));
-
-	} else {
-		if (fcxp->nrsp_sgles > 0) {
-			bfa_assert(fcxp->nrsp_sgles == 1);
-			hal_fcxp_set_local_sges(send_req->rsp_sge,
-						rspi->rsp_maxlen,
-						fcxp->rsp_sga_cbfn(fcxp->caller,
-								   0));
-		} else {
-			bfa_assert(rspi->rsp_maxlen == 0);
-			hal_fcxp_set_local_sges(send_req->rsp_sge, 0, 0);
-		}
-	}
-
-	hal_fcxp_tx_plog(bfa, reqi->req_tot_len, fcxp, &reqi->fchs);
-
-	bfa_reqq_produce(bfa, BFA_REQQ_FCXP);
-
-	bfa_trc(bfa, bfa_reqq_pi(bfa, BFA_REQQ_FCXP));
-	bfa_trc(bfa, bfa_reqq_ci(bfa, BFA_REQQ_FCXP));
-}
-
-
-/**
- *  hal_fcxp_api BFA FCXP API
- */
-
-/**
- * Allocate an FCXP instance to send a response or to send a request
- * that has a response. Request/response buffers are allocated by caller.
- *
- * @param[in]	bfa		BFA bfa instance
- * @param[in]	nreq_sgles	Number of SG elements required for request
- * 				buffer. 0, if fcxp internal buffers are	used.
- * 				Use bfa_fcxp_get_reqbuf() to get the
- * 				internal req buffer.
- * @param[in]	req_sgles	SG elements describing request buffer. Will be
- * 				copied in by BFA and hence can be freed on
- * 				return from this function.
- * @param[in]	get_req_sga	function ptr to be called to get a request SG
- * 				Address (given the sge index).
- * @param[in]	get_req_sglen	function ptr to be called to get a request SG
- * 				len (given the sge index).
- * @param[in]	get_rsp_sga	function ptr to be called to get a response SG
- * 				Address (given the sge index).
- * @param[in]	get_rsp_sglen	function ptr to be called to get a response SG
- * 				len (given the sge index).
- *
- * @return FCXP instance. NULL on failure.
- */
-struct bfa_fcxp_s *
-bfa_fcxp_alloc(void *caller, struct bfa_s *bfa, int nreq_sgles,
-			int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
-			bfa_fcxp_get_sglen_t req_sglen_cbfn,
-			bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
-			bfa_fcxp_get_sglen_t rsp_sglen_cbfn)
-{
-	struct bfa_fcxp_s *fcxp = NULL;
-	u32        nreq_sgpg, nrsp_sgpg;
-
-	bfa_assert(bfa != NULL);
-
-	fcxp = bfa_fcxp_get(BFA_FCXP_MOD(bfa));
-	if (fcxp == NULL)
-		return NULL;
-
-	bfa_trc(bfa, fcxp->fcxp_tag);
-
-	fcxp->caller = caller;
-
-	if (nreq_sgles == 0) {
-		fcxp->use_ireqbuf = 1;
-	} else {
-		bfa_assert(req_sga_cbfn != NULL);
-		bfa_assert(req_sglen_cbfn != NULL);
-
-		fcxp->use_ireqbuf = 0;
-		fcxp->req_sga_cbfn = req_sga_cbfn;
-		fcxp->req_sglen_cbfn = req_sglen_cbfn;
-
-		fcxp->nreq_sgles = nreq_sgles;
-
-		/*
-		 * alloc required sgpgs
-		 */
-		if (nreq_sgles > BFI_SGE_INLINE) {
-			nreq_sgpg = BFA_SGPG_NPAGE(nreq_sgles);
-
-			if (bfa_sgpg_malloc(bfa, &fcxp->req_sgpg_q, nreq_sgpg)
-			    != BFA_STATUS_OK) {
-				/*
-				 * TODO
-				 */
-			}
-		}
-	}
-
-	if (nrsp_sgles == 0) {
-		fcxp->use_irspbuf = 1;
-	} else {
-		bfa_assert(rsp_sga_cbfn != NULL);
-		bfa_assert(rsp_sglen_cbfn != NULL);
-
-		fcxp->use_irspbuf = 0;
-		fcxp->rsp_sga_cbfn = rsp_sga_cbfn;
-		fcxp->rsp_sglen_cbfn = rsp_sglen_cbfn;
-
-		fcxp->nrsp_sgles = nrsp_sgles;
-		/*
-		 * alloc required sgpgs
-		 */
-		if (nrsp_sgles > BFI_SGE_INLINE) {
-			nrsp_sgpg = BFA_SGPG_NPAGE(nreq_sgles);
-
-			if (bfa_sgpg_malloc
-			    (bfa, &fcxp->rsp_sgpg_q, nrsp_sgpg)
-			    != BFA_STATUS_OK) {
-				/* bfa_sgpg_wait(bfa, &fcxp->rsp_sgpg_wqe,
-				nrsp_sgpg); */
-				/*
-				 * TODO
-				 */
-			}
-		}
-	}
-
-	return fcxp;
-}
-
-/**
- * Get the internal request buffer pointer
- *
- * @param[in]	fcxp	BFA fcxp pointer
- *
- * @return 		pointer to the internal request buffer
- */
-void *
-bfa_fcxp_get_reqbuf(struct bfa_fcxp_s *fcxp)
-{
-	struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
-	void	*reqbuf;
-
-	bfa_assert(fcxp->use_ireqbuf == 1);
-	reqbuf = ((u8 *)mod->req_pld_list_kva) +
-			fcxp->fcxp_tag * mod->req_pld_sz;
-	return reqbuf;
-}
-
-u32
-bfa_fcxp_get_reqbufsz(struct bfa_fcxp_s *fcxp)
-{
-	struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
-
-	return mod->req_pld_sz;
-}
-
-/**
- * Get the internal response buffer pointer
- *
- * @param[in]	fcxp	BFA fcxp pointer
- *
- * @return		pointer to the internal request buffer
- */
-void *
-bfa_fcxp_get_rspbuf(struct bfa_fcxp_s *fcxp)
-{
-	struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
-	void	*rspbuf;
-
-	bfa_assert(fcxp->use_irspbuf == 1);
-
-	rspbuf = ((u8 *)mod->rsp_pld_list_kva) +
-			fcxp->fcxp_tag * mod->rsp_pld_sz;
-	return rspbuf;
-}
-
-/**
- * 		Free the BFA FCXP
- *
- * @param[in]	fcxp			BFA fcxp pointer
- *
- * @return 		void
- */
-void
-bfa_fcxp_free(struct bfa_fcxp_s *fcxp)
-{
-	struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
-
-	bfa_assert(fcxp != NULL);
-	bfa_trc(mod->bfa, fcxp->fcxp_tag);
-	bfa_fcxp_put(fcxp);
-}
-
-/**
- * Send a FCXP request
- *
- * @param[in]	fcxp	BFA fcxp pointer
- * @param[in]	rport	BFA rport pointer. Could be left NULL for WKA rports
- * @param[in]	vf_id	virtual Fabric ID
- * @param[in]	lp_tag  lport tag
- * @param[in]	cts	use Continuous sequence
- * @param[in]	cos	fc Class of Service
- * @param[in]	reqlen	request length, does not include FCHS length
- * @param[in]	fchs	fc Header Pointer. The header content will be copied
- *			in by BFA.
- *
- * @param[in]	cbfn	call back function to be called on receiving
- * 								the response
- * @param[in]	cbarg	arg for cbfn
- * @param[in]	rsp_timeout
- *			response timeout
- *
- * @return		bfa_status_t
- */
-void
-bfa_fcxp_send(struct bfa_fcxp_s *fcxp, struct bfa_rport_s *rport,
-		u16 vf_id, u8 lp_tag, bfa_boolean_t cts, enum fc_cos cos,
-		u32 reqlen, struct fchs_s *fchs, bfa_cb_fcxp_send_t cbfn,
-		void *cbarg, u32 rsp_maxlen, u8 rsp_timeout)
-{
-	struct bfa_s			*bfa  = fcxp->fcxp_mod->bfa;
-	struct bfa_fcxp_req_info_s	*reqi = &fcxp->req_info;
-	struct bfa_fcxp_rsp_info_s	*rspi = &fcxp->rsp_info;
-	struct bfi_fcxp_send_req_s	*send_req;
-
-	bfa_trc(bfa, fcxp->fcxp_tag);
-
-	/**
-	 * setup request/response info
-	 */
-	reqi->bfa_rport = rport;
-	reqi->vf_id = vf_id;
-	reqi->lp_tag = lp_tag;
-	reqi->class = cos;
-	rspi->rsp_timeout = rsp_timeout;
-	reqi->cts = cts;
-	reqi->fchs = *fchs;
-	reqi->req_tot_len = reqlen;
-	rspi->rsp_maxlen = rsp_maxlen;
-	fcxp->send_cbfn = cbfn ? cbfn : bfa_fcxp_null_comp;
-	fcxp->send_cbarg = cbarg;
-
-	/**
-	 * If no room in CPE queue, wait for space in request queue
-	 */
-	send_req = bfa_reqq_next(bfa, BFA_REQQ_FCXP);
-	if (!send_req) {
-		bfa_trc(bfa, fcxp->fcxp_tag);
-		fcxp->reqq_waiting = BFA_TRUE;
-		bfa_reqq_wait(bfa, BFA_REQQ_FCXP, &fcxp->reqq_wqe);
-		return;
-	}
-
-	bfa_fcxp_queue(fcxp, send_req);
-}
-
-/**
- * Abort a BFA FCXP
- *
- * @param[in]	fcxp	BFA fcxp pointer
- *
- * @return 		void
- */
-bfa_status_t
-bfa_fcxp_abort(struct bfa_fcxp_s *fcxp)
-{
-	bfa_assert(0);
-	return BFA_STATUS_OK;
-}
-
-void
-bfa_fcxp_alloc_wait(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe,
-			bfa_fcxp_alloc_cbfn_t alloc_cbfn, void *alloc_cbarg)
-{
-	struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
-
-	bfa_assert(list_empty(&mod->fcxp_free_q));
-
-	wqe->alloc_cbfn = alloc_cbfn;
-	wqe->alloc_cbarg = alloc_cbarg;
-	list_add_tail(&wqe->qe, &mod->wait_q);
-}
-
-void
-bfa_fcxp_walloc_cancel(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe)
-{
-	struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
-
-	bfa_assert(bfa_q_is_on_q(&mod->wait_q, wqe));
-	list_del(&wqe->qe);
-}
-
-void
-bfa_fcxp_discard(struct bfa_fcxp_s *fcxp)
-{
-	/**
-	 * If waiting for room in request queue, cancel reqq wait
-	 * and free fcxp.
-	 */
-	if (fcxp->reqq_waiting) {
-		fcxp->reqq_waiting = BFA_FALSE;
-		bfa_reqq_wcancel(&fcxp->reqq_wqe);
-		bfa_fcxp_free(fcxp);
-		return;
-	}
-
-	fcxp->send_cbfn = bfa_fcxp_null_comp;
-}
-
-
-
-/**
- *  hal_fcxp_public BFA FCXP public functions
- */
-
-void
-bfa_fcxp_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
-{
-	switch (msg->mhdr.msg_id) {
-	case BFI_FCXP_I2H_SEND_RSP:
-		hal_fcxp_send_comp(bfa, (struct bfi_fcxp_send_rsp_s *) msg);
-		break;
-
-	default:
-		bfa_trc(bfa, msg->mhdr.msg_id);
-		bfa_assert(0);
-	}
-}
-
-u32
-bfa_fcxp_get_maxrsp(struct bfa_s *bfa)
-{
-	struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
-
-	return mod->rsp_pld_sz;
-}
-
-

+ 0 - 138
drivers/scsi/bfa/bfa_fcxp_priv.h

@@ -1,138 +0,0 @@
-/*
- * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
- * All rights reserved
- * www.brocade.com
- *
- * Linux driver for Brocade Fibre Channel Host Bus Adapter.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License (GPL) Version 2 as
- * published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- */
-
-#ifndef __BFA_FCXP_PRIV_H__
-#define __BFA_FCXP_PRIV_H__
-
-#include <cs/bfa_sm.h>
-#include <protocol/fc.h>
-#include <bfa_svc.h>
-#include <bfi/bfi_fcxp.h>
-
-#define BFA_FCXP_MIN     	(1)
-#define BFA_FCXP_MAX_IBUF_SZ	(2 * 1024 + 256)
-#define BFA_FCXP_MAX_LBUF_SZ	(4 * 1024 + 256)
-
-struct bfa_fcxp_mod_s {
-	struct bfa_s      *bfa;		/*  backpointer to BFA */
-	struct bfa_fcxp_s *fcxp_list;	/*  array of FCXPs */
-	u16        num_fcxps;	/*  max num FCXP requests */
-	struct list_head fcxp_free_q;	/*  free FCXPs */
-	struct list_head fcxp_active_q;	/*  active FCXPs */
-	void	*req_pld_list_kva;	/*  list of FCXP req pld */
-	u64 req_pld_list_pa;	/*  list of FCXP req pld */
-	void *rsp_pld_list_kva;		/*  list of FCXP resp pld */
-	u64 rsp_pld_list_pa;	/*  list of FCXP resp pld */
-	struct list_head  wait_q;		/*  wait queue for free fcxp */
-	u32	req_pld_sz;
-	u32	rsp_pld_sz;
-};
-
-#define BFA_FCXP_MOD(__bfa)		(&(__bfa)->modules.fcxp_mod)
-#define BFA_FCXP_FROM_TAG(__mod, __tag)	(&(__mod)->fcxp_list[__tag])
-
-typedef void    (*fcxp_send_cb_t) (struct bfa_s *ioc, struct bfa_fcxp_s *fcxp,
-				   void *cb_arg, bfa_status_t req_status,
-				   u32 rsp_len, u32 resid_len,
-				   struct fchs_s *rsp_fchs);
-
-/**
- * Information needed for a FCXP request
- */
-struct bfa_fcxp_req_info_s {
-	struct bfa_rport_s *bfa_rport;	/*  Pointer to the bfa rport that was
-					 *returned from bfa_rport_create().
-					 *This could be left NULL for WKA or for
-					 *FCXP interactions before the rport
-					 *nexus is established
-					 */
-	struct fchs_s   fchs;	/*  request FC header structure */
-	u8 cts;		/*  continous sequence */
-	u8 class;		/*  FC class for the request/response */
-	u16 max_frmsz;	/*  max send frame size */
-	u16 vf_id;		/*  vsan tag if applicable */
-	u8	lp_tag;		/*  lport tag */
-	u32 req_tot_len;	/*  request payload total length */
-};
-
-struct bfa_fcxp_rsp_info_s {
-	struct fchs_s rsp_fchs;		/*  Response frame's FC header will
-					 * be *sent back in this field */
-	u8         rsp_timeout;	/*  timeout in seconds, 0-no response
-					 */
-	u8         rsvd2[3];
-	u32        rsp_maxlen;	/*  max response length expected */
-};
-
-struct bfa_fcxp_s {
-	struct list_head 	qe;		/*  fcxp queue element */
-	bfa_sm_t        sm;             /*  state machine */
-	void           	*caller;	/*  driver or fcs */
-	struct bfa_fcxp_mod_s *fcxp_mod;
-					/*  back pointer to fcxp mod */
-	u16        fcxp_tag;	/*  internal tag */
-	struct bfa_fcxp_req_info_s req_info;
-					/*  request info */
-	struct bfa_fcxp_rsp_info_s rsp_info;
-					/*  response info */
-	u8 	use_ireqbuf;	/*  use internal req buf */
-	u8         use_irspbuf;	/*  use internal rsp buf */
-	u32        nreq_sgles;	/*  num request SGLEs */
-	u32        nrsp_sgles;	/*  num response SGLEs */
-	struct list_head req_sgpg_q;	/*  SG pages for request buf */
-	struct list_head req_sgpg_wqe;	/*  wait queue for req SG page */
-	struct list_head rsp_sgpg_q;	/*  SG pages for response buf */
-	struct list_head rsp_sgpg_wqe;	/*  wait queue for rsp SG page */
-
-	bfa_fcxp_get_sgaddr_t req_sga_cbfn;
-					/*  SG elem addr user function */
-	bfa_fcxp_get_sglen_t req_sglen_cbfn;
-					/*  SG elem len user function */
-	bfa_fcxp_get_sgaddr_t rsp_sga_cbfn;
-					/*  SG elem addr user function */
-	bfa_fcxp_get_sglen_t rsp_sglen_cbfn;
-					/*  SG elem len user function */
-	bfa_cb_fcxp_send_t send_cbfn;   /*  send completion callback */
-	void		*send_cbarg;	/*  callback arg */
-	struct bfa_sge_s   req_sge[BFA_FCXP_MAX_SGES];
-					/*  req SG elems */
-	struct bfa_sge_s   rsp_sge[BFA_FCXP_MAX_SGES];
-					/*  rsp SG elems */
-	u8         rsp_status;	/*  comp: rsp status */
-	u32        rsp_len;	/*  comp: actual response len */
-	u32        residue_len;	/*  comp: residual rsp length */
-	struct fchs_s          rsp_fchs;	/*  comp: response fchs */
-	struct bfa_cb_qe_s    hcb_qe;	/*  comp: callback qelem */
-	struct bfa_reqq_wait_s	reqq_wqe;
-	bfa_boolean_t	reqq_waiting;
-};
-
-#define BFA_FCXP_REQ_PLD(_fcxp) 	(bfa_fcxp_get_reqbuf(_fcxp))
-
-#define BFA_FCXP_RSP_FCHS(_fcxp) 	(&((_fcxp)->rsp_info.fchs))
-#define BFA_FCXP_RSP_PLD(_fcxp) 	(bfa_fcxp_get_rspbuf(_fcxp))
-
-#define BFA_FCXP_REQ_PLD_PA(_fcxp)					\
-	((_fcxp)->fcxp_mod->req_pld_list_pa +				\
-		((_fcxp)->fcxp_mod->req_pld_sz  * (_fcxp)->fcxp_tag))
-
-#define BFA_FCXP_RSP_PLD_PA(_fcxp) 					\
-	((_fcxp)->fcxp_mod->rsp_pld_list_pa +				\
-		((_fcxp)->fcxp_mod->rsp_pld_sz * (_fcxp)->fcxp_tag))
-
-void	bfa_fcxp_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
-#endif /* __BFA_FCXP_PRIV_H__ */

+ 0 - 44
drivers/scsi/bfa/bfa_fwimg_priv.h

@@ -1,44 +0,0 @@
-/*
- * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
- * All rights reserved
- * www.brocade.com
- *
- * Linux driver for Brocade Fibre Channel Host Bus Adapter.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License (GPL) Version 2 as
- * published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- */
-
-#ifndef __BFA_FWIMG_PRIV_H__
-#define __BFA_FWIMG_PRIV_H__
-
-#define	BFI_FLASH_CHUNK_SZ		256	/*  Flash chunk size */
-#define	BFI_FLASH_CHUNK_SZ_WORDS	(BFI_FLASH_CHUNK_SZ/sizeof(u32))
-
-/**
- * BFI FW image type
- */
-enum {
-	BFI_IMAGE_CB_FC,
-	BFI_IMAGE_CT_FC,
-	BFI_IMAGE_CT_CNA,
-	BFI_IMAGE_MAX,
-};
-
-extern u32 *bfi_image_get_chunk(int type, uint32_t off);
-extern u32 bfi_image_get_size(int type);
-extern u32 bfi_image_ct_fc_size;
-extern u32 bfi_image_ct_cna_size;
-extern u32 bfi_image_cb_fc_size;
-extern u32 *bfi_image_ct_fc;
-extern u32 *bfi_image_ct_cna;
-extern u32 *bfi_image_cb_fc;
-
-
-#endif /* __BFA_FWIMG_PRIV_H__ */

+ 4 - 4
drivers/scsi/bfa/bfa_hw_cb.c

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
  * All rights reserved
  * www.brocade.com
  *
@@ -15,15 +15,15 @@
  * General Public License for more details.
  */
 
-#include <bfa_priv.h>
-#include <bfi/bfi_cbreg.h>
+#include "bfa_modules.h"
+#include "bfi_cbreg.h"
 
 void
 bfa_hwcb_reginit(struct bfa_s *bfa)
 {
 	struct bfa_iocfc_regs_s	*bfa_regs = &bfa->iocfc.bfa_regs;
 	bfa_os_addr_t		kva = bfa_ioc_bar0(&bfa->ioc);
-	int             	i, q, fn = bfa_ioc_pcifn(&bfa->ioc);
+	int			i, q, fn = bfa_ioc_pcifn(&bfa->ioc);
 
 	if (fn == 0) {
 		bfa_regs->intr_status = (kva + HOSTFN0_INT_STATUS);

+ 5 - 6
drivers/scsi/bfa/bfa_hw_ct.c

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
  * All rights reserved
  * www.brocade.com
  *
@@ -15,9 +15,8 @@
  * General Public License for more details.
  */
 
-#include <bfa_priv.h>
-#include <bfi/bfi_ctreg.h>
-#include <bfa_ioc.h>
+#include "bfa_modules.h"
+#include "bfi_ctreg.h"
 
 BFA_TRC_FILE(HAL, IOCFC_CT);
 
@@ -53,7 +52,7 @@ bfa_hwct_reginit(struct bfa_s *bfa)
 {
 	struct bfa_iocfc_regs_s	*bfa_regs = &bfa->iocfc.bfa_regs;
 	bfa_os_addr_t		kva = bfa_ioc_bar0(&bfa->ioc);
-	int             	i, q, fn = bfa_ioc_pcifn(&bfa->ioc);
+	int			i, q, fn = bfa_ioc_pcifn(&bfa->ioc);
 
 	if (fn == 0) {
 		bfa_regs->intr_status = (kva + HOSTFN0_INT_STATUS);
@@ -87,7 +86,7 @@ bfa_hwct_reginit(struct bfa_s *bfa)
 void
 bfa_hwct_reqq_ack(struct bfa_s *bfa, int reqq)
 {
-	u32 r32;
+	u32	r32;
 
 	r32 = bfa_reg_read(bfa->iocfc.bfa_regs.cpe_q_ctrl[reqq]);
 	bfa_reg_write(bfa->iocfc.bfa_regs.cpe_q_ctrl[reqq], r32);

+ 0 - 270
drivers/scsi/bfa/bfa_intr.c

@@ -1,270 +0,0 @@
-/*
- * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
- * All rights reserved
- * www.brocade.com
- *
- * Linux driver for Brocade Fibre Channel Host Bus Adapter.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License (GPL) Version 2 as
- * published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- */
-#include <bfa.h>
-#include <bfi/bfi_ctreg.h>
-#include <bfa_port_priv.h>
-#include <bfa_intr_priv.h>
-#include <cs/bfa_debug.h>
-
-BFA_TRC_FILE(HAL, INTR);
-
-static void
-bfa_msix_errint(struct bfa_s *bfa, u32 intr)
-{
-	bfa_ioc_error_isr(&bfa->ioc);
-}
-
-static void
-bfa_msix_lpu(struct bfa_s *bfa)
-{
-	bfa_ioc_mbox_isr(&bfa->ioc);
-}
-
-static void
-bfa_reqq_resume(struct bfa_s *bfa, int qid)
-{
-	struct list_head *waitq, *qe, *qen;
-	struct bfa_reqq_wait_s *wqe;
-
-	waitq = bfa_reqq(bfa, qid);
-	list_for_each_safe(qe, qen, waitq) {
-		/**
-		 * Callback only as long as there is room in request queue
-		 */
-		if (bfa_reqq_full(bfa, qid))
-			break;
-
-		list_del(qe);
-		wqe = (struct bfa_reqq_wait_s *) qe;
-		wqe->qresume(wqe->cbarg);
-	}
-}
-
-void
-bfa_msix_all(struct bfa_s *bfa, int vec)
-{
-	bfa_intx(bfa);
-}
-
-/**
- *  hal_intr_api
- */
-bfa_boolean_t
-bfa_intx(struct bfa_s *bfa)
-{
-	u32        intr, qintr;
-	int             queue;
-
-	intr = bfa_reg_read(bfa->iocfc.bfa_regs.intr_status);
-	if (!intr)
-		return BFA_FALSE;
-
-	/**
-	 * RME completion queue interrupt
-	 */
-	qintr = intr & __HFN_INT_RME_MASK;
-	bfa_reg_write(bfa->iocfc.bfa_regs.intr_status, qintr);
-
-	for (queue = 0; queue < BFI_IOC_MAX_CQS_ASIC; queue++) {
-		if (intr & (__HFN_INT_RME_Q0 << queue))
-			bfa_msix_rspq(bfa, queue & (BFI_IOC_MAX_CQS - 1));
-	}
-	intr &= ~qintr;
-	if (!intr)
-		return BFA_TRUE;
-
-	/**
-	 * CPE completion queue interrupt
-	 */
-	qintr = intr & __HFN_INT_CPE_MASK;
-	bfa_reg_write(bfa->iocfc.bfa_regs.intr_status, qintr);
-
-	for (queue = 0; queue < BFI_IOC_MAX_CQS_ASIC; queue++) {
-		if (intr & (__HFN_INT_CPE_Q0 << queue))
-			bfa_msix_reqq(bfa, queue & (BFI_IOC_MAX_CQS - 1));
-	}
-	intr &= ~qintr;
-	if (!intr)
-		return BFA_TRUE;
-
-	bfa_msix_lpu_err(bfa, intr);
-
-	return BFA_TRUE;
-}
-
-void
-bfa_isr_enable(struct bfa_s *bfa)
-{
-	u32        intr_unmask;
-	int             pci_func = bfa_ioc_pcifn(&bfa->ioc);
-
-	bfa_trc(bfa, pci_func);
-
-	bfa_msix_install(bfa);
-	intr_unmask = (__HFN_INT_ERR_EMC | __HFN_INT_ERR_LPU0 |
-		       __HFN_INT_ERR_LPU1 | __HFN_INT_ERR_PSS |
-		       __HFN_INT_LL_HALT);
-
-	if (pci_func == 0)
-		intr_unmask |= (__HFN_INT_CPE_Q0 | __HFN_INT_CPE_Q1 |
-				__HFN_INT_CPE_Q2 | __HFN_INT_CPE_Q3 |
-				__HFN_INT_RME_Q0 | __HFN_INT_RME_Q1 |
-				__HFN_INT_RME_Q2 | __HFN_INT_RME_Q3 |
-				__HFN_INT_MBOX_LPU0);
-	else
-		intr_unmask |= (__HFN_INT_CPE_Q4 | __HFN_INT_CPE_Q5 |
-				__HFN_INT_CPE_Q6 | __HFN_INT_CPE_Q7 |
-				__HFN_INT_RME_Q4 | __HFN_INT_RME_Q5 |
-				__HFN_INT_RME_Q6 | __HFN_INT_RME_Q7 |
-				__HFN_INT_MBOX_LPU1);
-
-	bfa_reg_write(bfa->iocfc.bfa_regs.intr_status, intr_unmask);
-	bfa_reg_write(bfa->iocfc.bfa_regs.intr_mask, ~intr_unmask);
-	bfa->iocfc.intr_mask = ~intr_unmask;
-	bfa_isr_mode_set(bfa, bfa->msix.nvecs != 0);
-}
-
-void
-bfa_isr_disable(struct bfa_s *bfa)
-{
-	bfa_isr_mode_set(bfa, BFA_FALSE);
-	bfa_reg_write(bfa->iocfc.bfa_regs.intr_mask, -1L);
-	bfa_msix_uninstall(bfa);
-}
-
-void
-bfa_msix_reqq(struct bfa_s *bfa, int qid)
-{
-	struct list_head *waitq;
-
-	qid &= (BFI_IOC_MAX_CQS - 1);
-
-	bfa->iocfc.hwif.hw_reqq_ack(bfa, qid);
-
-	/**
-	 * Resume any pending requests in the corresponding reqq.
-	 */
-	waitq = bfa_reqq(bfa, qid);
-	if (!list_empty(waitq))
-		bfa_reqq_resume(bfa, qid);
-}
-
-void
-bfa_isr_unhandled(struct bfa_s *bfa, struct bfi_msg_s *m)
-{
-	bfa_trc(bfa, m->mhdr.msg_class);
-	bfa_trc(bfa, m->mhdr.msg_id);
-	bfa_trc(bfa, m->mhdr.mtag.i2htok);
-	bfa_assert(0);
-	bfa_trc_stop(bfa->trcmod);
-}
-
-void
-bfa_msix_rspq(struct bfa_s *bfa, int qid)
-{
-	struct bfi_msg_s *m;
-	u32 pi, ci;
-	struct list_head *waitq;
-
-	bfa_trc_fp(bfa, qid);
-
-	qid &= (BFI_IOC_MAX_CQS - 1);
-
-	bfa->iocfc.hwif.hw_rspq_ack(bfa, qid);
-
-	ci = bfa_rspq_ci(bfa, qid);
-	pi = bfa_rspq_pi(bfa, qid);
-
-	bfa_trc_fp(bfa, ci);
-	bfa_trc_fp(bfa, pi);
-
-	if (bfa->rme_process) {
-		while (ci != pi) {
-			m = bfa_rspq_elem(bfa, qid, ci);
-			bfa_assert_fp(m->mhdr.msg_class < BFI_MC_MAX);
-
-			bfa_isrs[m->mhdr.msg_class] (bfa, m);
-
-			CQ_INCR(ci, bfa->iocfc.cfg.drvcfg.num_rspq_elems);
-		}
-	}
-
-	/**
-	 * update CI
-	 */
-	bfa_rspq_ci(bfa, qid) = pi;
-	bfa_reg_write(bfa->iocfc.bfa_regs.rme_q_ci[qid], pi);
-	bfa_os_mmiowb();
-
-	/**
-	 * Resume any pending requests in the corresponding reqq.
-	 */
-	waitq = bfa_reqq(bfa, qid);
-	if (!list_empty(waitq))
-		bfa_reqq_resume(bfa, qid);
-}
-
-void
-bfa_msix_lpu_err(struct bfa_s *bfa, int vec)
-{
-	u32 intr, curr_value;
-
-	intr = bfa_reg_read(bfa->iocfc.bfa_regs.intr_status);
-
-	if (intr & (__HFN_INT_MBOX_LPU0 | __HFN_INT_MBOX_LPU1))
-		bfa_msix_lpu(bfa);
-
-	intr &= (__HFN_INT_ERR_EMC | __HFN_INT_ERR_LPU0 |
-		__HFN_INT_ERR_LPU1 | __HFN_INT_ERR_PSS | __HFN_INT_LL_HALT);
-
-	if (intr) {
-		if (intr & __HFN_INT_LL_HALT) {
-			/**
-			 * If LL_HALT bit is set then FW Init Halt LL Port
-			 * Register needs to be cleared as well so Interrupt
-			 * Status Register will be cleared.
-			 */
-			curr_value = bfa_reg_read(bfa->ioc.ioc_regs.ll_halt);
-			curr_value &= ~__FW_INIT_HALT_P;
-			bfa_reg_write(bfa->ioc.ioc_regs.ll_halt, curr_value);
-		}
-
-		if (intr & __HFN_INT_ERR_PSS) {
-			/**
-			 * ERR_PSS bit needs to be cleared as well in case
-			 * interrups are shared so driver's interrupt handler is
-			 * still called eventhough it is already masked out.
-			 */
-			curr_value = bfa_reg_read(
-				bfa->ioc.ioc_regs.pss_err_status_reg);
-			curr_value &= __PSS_ERR_STATUS_SET;
-			bfa_reg_write(bfa->ioc.ioc_regs.pss_err_status_reg,
-				curr_value);
-		}
-
-		bfa_reg_write(bfa->iocfc.bfa_regs.intr_status, intr);
-		bfa_msix_errint(bfa, intr);
-	}
-}
-
-void
-bfa_isr_bind(enum bfi_mclass mc, bfa_isr_func_t isr_func)
-{
-	bfa_isrs[mc] = isr_func;
-}
-
-

+ 0 - 117
drivers/scsi/bfa/bfa_intr_priv.h

@@ -1,117 +0,0 @@
-/*
- * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
- * All rights reserved
- * www.brocade.com
- *
- * Linux driver for Brocade Fibre Channel Host Bus Adapter.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License (GPL) Version 2 as
- * published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- */
-
-#ifndef __BFA_INTR_PRIV_H__
-#define __BFA_INTR_PRIV_H__
-
-/**
- * Message handler
- */
-typedef void (*bfa_isr_func_t) (struct bfa_s *bfa, struct bfi_msg_s *m);
-void bfa_isr_unhandled(struct bfa_s *bfa, struct bfi_msg_s *m);
-void bfa_isr_bind(enum bfi_mclass mc, bfa_isr_func_t isr_func);
-
-
-#define bfa_reqq_pi(__bfa, __reqq)	((__bfa)->iocfc.req_cq_pi[__reqq])
-#define bfa_reqq_ci(__bfa, __reqq)					\
-	(*(u32 *)((__bfa)->iocfc.req_cq_shadow_ci[__reqq].kva))
-
-#define bfa_reqq_full(__bfa, __reqq)				\
-	(((bfa_reqq_pi(__bfa, __reqq) + 1) &			\
-	  ((__bfa)->iocfc.cfg.drvcfg.num_reqq_elems - 1)) ==	\
-	 bfa_reqq_ci(__bfa, __reqq))
-
-#define bfa_reqq_next(__bfa, __reqq)				\
-	(bfa_reqq_full(__bfa, __reqq) ? NULL :			\
-	 ((void *)((struct bfi_msg_s *)((__bfa)->iocfc.req_cq_ba[__reqq].kva) \
-			  + bfa_reqq_pi((__bfa), (__reqq)))))
-
-#define bfa_reqq_produce(__bfa, __reqq)	do {				\
-	(__bfa)->iocfc.req_cq_pi[__reqq]++;				\
-	(__bfa)->iocfc.req_cq_pi[__reqq] &=				\
-		((__bfa)->iocfc.cfg.drvcfg.num_reqq_elems - 1);      \
-	bfa_reg_write((__bfa)->iocfc.bfa_regs.cpe_q_pi[__reqq],		\
-				(__bfa)->iocfc.req_cq_pi[__reqq]);      \
-	bfa_os_mmiowb();      \
-} while (0)
-
-#define bfa_rspq_pi(__bfa, __rspq)					\
-	(*(u32 *)((__bfa)->iocfc.rsp_cq_shadow_pi[__rspq].kva))
-
-#define bfa_rspq_ci(__bfa, __rspq)	((__bfa)->iocfc.rsp_cq_ci[__rspq])
-#define bfa_rspq_elem(__bfa, __rspq, __ci)				\
-	(&((struct bfi_msg_s *)((__bfa)->iocfc.rsp_cq_ba[__rspq].kva))[__ci])
-
-#define CQ_INCR(__index, __size) do {					\
-			(__index)++;					\
-			(__index) &= ((__size) - 1);      \
-} while (0)
-
-/**
- * Queue element to wait for room in request queue. FIFO order is
- * maintained when fullfilling requests.
- */
-struct bfa_reqq_wait_s {
-	struct list_head 	qe;
-	void		(*qresume) (void *cbarg);
-	void		*cbarg;
-};
-
-/**
- * Circular queue usage assignments
- */
-enum {
-	BFA_REQQ_IOC	= 0,	/*  all low-priority IOC msgs	*/
-	BFA_REQQ_FCXP	= 0,	/*  all FCXP messages		*/
-	BFA_REQQ_LPS	= 0,	/*  all lport service msgs	*/
-	BFA_REQQ_PORT	= 0,	/*  all port messages		*/
-	BFA_REQQ_FLASH	= 0,	/*  for flash module		*/
-	BFA_REQQ_DIAG	= 0,	/*  for diag module		*/
-	BFA_REQQ_RPORT	= 0,	/*  all port messages		*/
-	BFA_REQQ_SBOOT	= 0,	/*  all san boot messages	*/
-	BFA_REQQ_QOS_LO	= 1,	/*  all low priority IO	*/
-	BFA_REQQ_QOS_MD	= 2,	/*  all medium priority IO	*/
-	BFA_REQQ_QOS_HI	= 3,	/*  all high priority IO	*/
-};
-
-static inline void
-bfa_reqq_winit(struct bfa_reqq_wait_s *wqe, void (*qresume) (void *cbarg),
-			void *cbarg)
-{
-	wqe->qresume = qresume;
-	wqe->cbarg = cbarg;
-}
-
-#define bfa_reqq(__bfa, __reqq)	(&(__bfa)->reqq_waitq[__reqq])
-
-/**
- * static inline void
- * bfa_reqq_wait(struct bfa_s *bfa, int reqq, struct bfa_reqq_wait_s *wqe)
- */
-#define bfa_reqq_wait(__bfa, __reqq, __wqe) do {			\
-									\
-		struct list_head *waitq = bfa_reqq(__bfa, __reqq);      \
-									\
-		bfa_assert(((__reqq) < BFI_IOC_MAX_CQS));      \
-		bfa_assert((__wqe)->qresume && (__wqe)->cbarg);      \
-									\
-		list_add_tail(&(__wqe)->qe, waitq);      \
-} while (0)
-
-#define bfa_reqq_wcancel(__wqe)	list_del(&(__wqe)->qe)
-
-#endif /* __BFA_INTR_PRIV_H__ */

File diff suppressed because it is too large
+ 627 - 187
drivers/scsi/bfa/bfa_ioc.c


+ 196 - 52
drivers/scsi/bfa/bfa_ioc.h

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
  * All rights reserved
  * www.brocade.com
  *
@@ -18,18 +18,74 @@
 #ifndef __BFA_IOC_H__
 #define __BFA_IOC_H__
 
-#include <cs/bfa_sm.h>
-#include <bfi/bfi.h>
-#include <bfi/bfi_ioc.h>
-#include <bfi/bfi_boot.h>
-#include <bfa_timer.h>
+#include "bfa_os_inc.h"
+#include "bfa_cs.h"
+#include "bfi.h"
+
+/**
+ * BFA timer declarations
+ */
+typedef void (*bfa_timer_cbfn_t)(void *);
+
+/**
+ * BFA timer data structure
+ */
+struct bfa_timer_s {
+	struct list_head	qe;
+	bfa_timer_cbfn_t timercb;
+	void		*arg;
+	int		timeout;	/**< in millisecs. */
+};
+
+/**
+ * Timer module structure
+ */
+struct bfa_timer_mod_s {
+	struct list_head timer_q;
+};
+
+#define BFA_TIMER_FREQ 200 /**< specified in millisecs */
+
+void bfa_timer_beat(struct bfa_timer_mod_s *mod);
+void bfa_timer_init(struct bfa_timer_mod_s *mod);
+void bfa_timer_begin(struct bfa_timer_mod_s *mod, struct bfa_timer_s *timer,
+			bfa_timer_cbfn_t timercb, void *arg,
+			unsigned int timeout);
+void bfa_timer_stop(struct bfa_timer_s *timer);
+
+/**
+ * Generic Scatter Gather Element used by driver
+ */
+struct bfa_sge_s {
+	u32	sg_len;
+	void		*sg_addr;
+};
+
+#define bfa_sge_word_swap(__sge) do {					     \
+	((u32 *)(__sge))[0] = bfa_os_swap32(((u32 *)(__sge))[0]);      \
+	((u32 *)(__sge))[1] = bfa_os_swap32(((u32 *)(__sge))[1]);      \
+	((u32 *)(__sge))[2] = bfa_os_swap32(((u32 *)(__sge))[2]);      \
+} while (0)
+
+#define bfa_swap_words(_x)  (	\
+	((_x) << 32) | ((_x) >> 32))
+
+#ifdef __BIGENDIAN
+#define bfa_sge_to_be(_x)
+#define bfa_sge_to_le(_x)	bfa_sge_word_swap(_x)
+#define bfa_sgaddr_le(_x)	bfa_swap_words(_x)
+#else
+#define	bfa_sge_to_be(_x)	bfa_sge_word_swap(_x)
+#define bfa_sge_to_le(_x)
+#define bfa_sgaddr_le(_x)	(_x)
+#endif
 
 /**
  * PCI device information required by IOC
  */
 struct bfa_pcidev_s {
-	int             pci_slot;
-	u8         pci_func;
+	int		pci_slot;
+	u8		pci_func;
 	u16	device_id;
 	bfa_os_addr_t   pci_bar_kva;
 };
@@ -39,13 +95,18 @@ struct bfa_pcidev_s {
  * Address
  */
 struct bfa_dma_s {
-	void		*kva;	/*! Kernel virtual address	*/
-	u64	pa;	/*! Physical address		*/
+	void		*kva;	/* ! Kernel virtual address	*/
+	u64	pa;	/* ! Physical address		*/
 };
 
 #define BFA_DMA_ALIGN_SZ	256
 #define BFA_ROUNDUP(_l, _s)	(((_l) + ((_s) - 1)) & ~((_s) - 1))
 
+/**
+ * smem size for Crossbow and Catapult
+ */
+#define BFI_SMEM_CB_SIZE	0x200000U	/* ! 2MB for crossbow	*/
+#define BFI_SMEM_CT_SIZE	0x280000U	/* ! 2.5MB for catapult	*/
 
 
 #define bfa_dma_addr_set(dma_addr, pa)	\
@@ -101,7 +162,7 @@ struct bfa_ioc_regs_s {
  * IOC Mailbox structures
  */
 struct bfa_mbox_cmd_s {
-	struct list_head		qe;
+	struct list_head	qe;
 	u32	msg[BFI_IOC_MSGSZ];
 };
 
@@ -110,8 +171,8 @@ struct bfa_mbox_cmd_s {
  */
 typedef void (*bfa_ioc_mbox_mcfunc_t)(void *cbarg, struct bfi_mbmsg_s *m);
 struct bfa_ioc_mbox_mod_s {
-	struct list_head	cmd_q;		/*  pending mbox queue	*/
-	int		nmclass;	/*  number of handlers */
+	struct list_head		cmd_q;	/*  pending mbox queue	*/
+	int			nmclass;	/*  number of handlers */
 	struct {
 		bfa_ioc_mbox_mcfunc_t	cbfn;	/*  message handlers	*/
 		void			*cbarg;
@@ -149,49 +210,54 @@ struct bfa_ioc_hbfail_notify_s {
 	(__notify)->cbarg = (__cbarg);      \
 } while (0)
 
+struct bfa_iocpf_s {
+	bfa_fsm_t		fsm;
+	struct bfa_ioc_s	*ioc;
+	u32		retry_count;
+	bfa_boolean_t		auto_recover;
+};
+
 struct bfa_ioc_s {
 	bfa_fsm_t		fsm;
 	struct bfa_s		*bfa;
 	struct bfa_pcidev_s	pcidev;
-	struct bfa_timer_mod_s 	*timer_mod;
-	struct bfa_timer_s 	ioc_timer;
-	struct bfa_timer_s 	sem_timer;
+	struct bfa_timer_mod_s	*timer_mod;
+	struct bfa_timer_s	ioc_timer;
+	struct bfa_timer_s	sem_timer;
+	struct bfa_timer_s	hb_timer;
 	u32		hb_count;
-	u32		retry_count;
 	struct list_head		hb_notify_q;
 	void			*dbg_fwsave;
 	int			dbg_fwsave_len;
 	bfa_boolean_t		dbg_fwsave_once;
 	enum bfi_mclass		ioc_mc;
-	struct bfa_ioc_regs_s 	ioc_regs;
+	struct bfa_ioc_regs_s	ioc_regs;
 	struct bfa_trc_mod_s	*trcmod;
-	struct bfa_aen_s	*aen;
-	struct bfa_log_mod_s	*logm;
 	struct bfa_ioc_drv_stats_s	stats;
-	bfa_boolean_t		auto_recover;
 	bfa_boolean_t		fcmode;
 	bfa_boolean_t		ctdev;
 	bfa_boolean_t		cna;
 	bfa_boolean_t		pllinit;
+	bfa_boolean_t		stats_busy;	/*  outstanding stats */
 	u8			port_id;
-
 	struct bfa_dma_s	attr_dma;
 	struct bfi_ioc_attr_s	*attr;
 	struct bfa_ioc_cbfn_s	*cbfn;
 	struct bfa_ioc_mbox_mod_s mbox_mod;
-	struct bfa_ioc_hwif_s   *ioc_hwif;
+	struct bfa_ioc_hwif_s	*ioc_hwif;
+	struct bfa_iocpf_s	iocpf;
 };
 
 struct bfa_ioc_hwif_s {
-	bfa_status_t    (*ioc_pll_init) (struct bfa_ioc_s *ioc);
-	bfa_boolean_t   (*ioc_firmware_lock)    (struct bfa_ioc_s *ioc);
-	void            (*ioc_firmware_unlock)  (struct bfa_ioc_s *ioc);
-	void		(*ioc_reg_init) (struct bfa_ioc_s *ioc);
-	void		(*ioc_map_port) (struct bfa_ioc_s *ioc);
-	void		(*ioc_isr_mode_set)     (struct bfa_ioc_s *ioc,
-						bfa_boolean_t msix);
-	void            (*ioc_notify_hbfail)    (struct bfa_ioc_s *ioc);
-	void            (*ioc_ownership_reset)  (struct bfa_ioc_s *ioc);
+	bfa_status_t (*ioc_pll_init) (bfa_os_addr_t rb, bfa_boolean_t fcmode);
+	bfa_boolean_t	(*ioc_firmware_lock)	(struct bfa_ioc_s *ioc);
+	void		(*ioc_firmware_unlock)	(struct bfa_ioc_s *ioc);
+	void		(*ioc_reg_init)	(struct bfa_ioc_s *ioc);
+	void		(*ioc_map_port)	(struct bfa_ioc_s *ioc);
+	void		(*ioc_isr_mode_set)	(struct bfa_ioc_s *ioc,
+					bfa_boolean_t msix);
+	void		(*ioc_notify_hbfail)	(struct bfa_ioc_s *ioc);
+	void		(*ioc_ownership_reset)	(struct bfa_ioc_s *ioc);
 };
 
 #define bfa_ioc_pcifn(__ioc)		((__ioc)->pcidev.pci_func)
@@ -206,18 +272,19 @@ struct bfa_ioc_hwif_s {
 #define bfa_ioc_rx_bbcredit(__ioc)	((__ioc)->attr->rx_bbcredit)
 #define bfa_ioc_speed_sup(__ioc)	\
 	BFI_ADAPTER_GETP(SPEED, (__ioc)->attr->adapter_prop)
-#define bfa_ioc_get_nports(__ioc)       \
+#define bfa_ioc_get_nports(__ioc)	\
 	BFI_ADAPTER_GETP(NPORTS, (__ioc)->attr->adapter_prop)
 
-#define bfa_ioc_stats(_ioc, _stats)     ((_ioc)->stats._stats++)
-#define BFA_IOC_FWIMG_MINSZ     (16 * 1024)
-#define BFA_IOC_FWIMG_TYPE(__ioc)                                       \
-	(((__ioc)->ctdev) ?                                             \
-	 (((__ioc)->fcmode) ? BFI_IMAGE_CT_FC : BFI_IMAGE_CT_CNA) :     \
+#define bfa_ioc_stats(_ioc, _stats)	((_ioc)->stats._stats++)
+#define BFA_IOC_FWIMG_MINSZ	(16 * 1024)
+#define BFA_IOC_FWIMG_TYPE(__ioc)					\
+	(((__ioc)->ctdev) ?						\
+	 (((__ioc)->fcmode) ? BFI_IMAGE_CT_FC : BFI_IMAGE_CT_CNA) :	\
 	 BFI_IMAGE_CB_FC)
-
-#define BFA_IOC_FLASH_CHUNK_NO(off)             (off / BFI_FLASH_CHUNK_SZ_WORDS)
-#define BFA_IOC_FLASH_OFFSET_IN_CHUNK(off)      (off % BFI_FLASH_CHUNK_SZ_WORDS)
+#define BFA_IOC_FW_SMEM_SIZE(__ioc)					\
+	(((__ioc)->ctdev) ? BFI_SMEM_CT_SIZE : BFI_SMEM_CB_SIZE)
+#define BFA_IOC_FLASH_CHUNK_NO(off)		(off / BFI_FLASH_CHUNK_SZ_WORDS)
+#define BFA_IOC_FLASH_OFFSET_IN_CHUNK(off)	(off % BFI_FLASH_CHUNK_SZ_WORDS)
 #define BFA_IOC_FLASH_CHUNK_ADDR(chunkno)  (chunkno * BFI_FLASH_CHUNK_SZ_WORDS)
 
 /**
@@ -235,18 +302,28 @@ void bfa_ioc_mbox_regisr(struct bfa_ioc_s *ioc, enum bfi_mclass mc,
 /**
  * IOC interfaces
  */
-#define bfa_ioc_pll_init(__ioc) ((__ioc)->ioc_hwif->ioc_pll_init(__ioc))
-#define bfa_ioc_isr_mode_set(__ioc, __msix)                     \
+
+#define bfa_ioc_pll_init_asic(__ioc) \
+	((__ioc)->ioc_hwif->ioc_pll_init((__ioc)->pcidev.pci_bar_kva, \
+			   (__ioc)->fcmode))
+
+bfa_status_t bfa_ioc_pll_init(struct bfa_ioc_s *ioc);
+bfa_status_t bfa_ioc_cb_pll_init(bfa_os_addr_t rb, bfa_boolean_t fcmode);
+bfa_boolean_t bfa_ioc_ct_pll_init_complete(bfa_os_addr_t rb);
+bfa_status_t bfa_ioc_ct_pll_init(bfa_os_addr_t rb, bfa_boolean_t fcmode);
+
+#define	bfa_ioc_isr_mode_set(__ioc, __msix)			\
 			((__ioc)->ioc_hwif->ioc_isr_mode_set(__ioc, __msix))
-#define bfa_ioc_ownership_reset(__ioc)                          \
+#define	bfa_ioc_ownership_reset(__ioc)				\
 			((__ioc)->ioc_hwif->ioc_ownership_reset(__ioc))
 
+
 void bfa_ioc_set_ct_hwif(struct bfa_ioc_s *ioc);
 void bfa_ioc_set_cb_hwif(struct bfa_ioc_s *ioc);
+
 void bfa_ioc_attach(struct bfa_ioc_s *ioc, void *bfa,
-		struct bfa_ioc_cbfn_s *cbfn, struct bfa_timer_mod_s *timer_mod,
-		struct bfa_trc_mod_s *trcmod,
-		struct bfa_aen_s *aen, struct bfa_log_mod_s *logm);
+		struct bfa_ioc_cbfn_s *cbfn, struct bfa_timer_mod_s *timer_mod);
+void bfa_ioc_auto_recover(bfa_boolean_t auto_recover);
 void bfa_ioc_detach(struct bfa_ioc_s *ioc);
 void bfa_ioc_pci_init(struct bfa_ioc_s *ioc, struct bfa_pcidev_s *pcidev,
 		enum bfi_mclass mc);
@@ -256,21 +333,22 @@ void bfa_ioc_enable(struct bfa_ioc_s *ioc);
 void bfa_ioc_disable(struct bfa_ioc_s *ioc);
 bfa_boolean_t bfa_ioc_intx_claim(struct bfa_ioc_s *ioc);
 
-void bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_param);
+void bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type,
+		u32 boot_param);
 void bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *msg);
 void bfa_ioc_error_isr(struct bfa_ioc_s *ioc);
 bfa_boolean_t bfa_ioc_is_operational(struct bfa_ioc_s *ioc);
+bfa_boolean_t bfa_ioc_is_initialized(struct bfa_ioc_s *ioc);
 bfa_boolean_t bfa_ioc_is_disabled(struct bfa_ioc_s *ioc);
 bfa_boolean_t bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc);
 bfa_boolean_t bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc);
-void bfa_ioc_cfg_complete(struct bfa_ioc_s *ioc);
 enum bfa_ioc_type_e bfa_ioc_get_type(struct bfa_ioc_s *ioc);
 void bfa_ioc_get_adapter_serial_num(struct bfa_ioc_s *ioc, char *serial_num);
 void bfa_ioc_get_adapter_fw_ver(struct bfa_ioc_s *ioc, char *fw_ver);
 void bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc_s *ioc, char *optrom_ver);
 void bfa_ioc_get_adapter_model(struct bfa_ioc_s *ioc, char *model);
 void bfa_ioc_get_adapter_manufacturer(struct bfa_ioc_s *ioc,
-	char *manufacturer);
+		char *manufacturer);
 void bfa_ioc_get_pci_chip_rev(struct bfa_ioc_s *ioc, char *chip_rev);
 enum bfa_ioc_state bfa_ioc_get_state(struct bfa_ioc_s *ioc);
 
@@ -284,6 +362,8 @@ bfa_status_t bfa_ioc_debug_fwsave(struct bfa_ioc_s *ioc, void *trcdata,
 void bfa_ioc_debug_fwsave_clear(struct bfa_ioc_s *ioc);
 bfa_status_t bfa_ioc_debug_fwtrc(struct bfa_ioc_s *ioc, void *trcdata,
 				 int *trclen);
+bfa_status_t bfa_ioc_debug_fwcore(struct bfa_ioc_s *ioc, void *buf,
+	u32 *offset, int *buflen);
 u32 bfa_ioc_smem_pgnum(struct bfa_ioc_s *ioc, u32 fmaddr);
 u32 bfa_ioc_smem_pgoff(struct bfa_ioc_s *ioc, u32 fmaddr);
 void bfa_ioc_set_fcmode(struct bfa_ioc_s *ioc);
@@ -297,7 +377,8 @@ void bfa_ioc_fwver_get(struct bfa_ioc_s *ioc,
 			struct bfi_ioc_image_hdr_s *fwhdr);
 bfa_boolean_t bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc,
 			struct bfi_ioc_image_hdr_s *fwhdr);
-void bfa_ioc_aen_post(struct bfa_ioc_s *ioc, enum bfa_ioc_aen_event event);
+bfa_status_t bfa_ioc_fw_stats_get(struct bfa_ioc_s *ioc, void *stats);
+bfa_status_t bfa_ioc_fw_stats_clear(struct bfa_ioc_s *ioc);
 
 /*
  * bfa mfg wwn API functions
@@ -310,5 +391,68 @@ wwn_t bfa_ioc_get_mfg_nwwn(struct bfa_ioc_s *ioc);
 mac_t bfa_ioc_get_mfg_mac(struct bfa_ioc_s *ioc);
 u64 bfa_ioc_get_adid(struct bfa_ioc_s *ioc);
 
-#endif /* __BFA_IOC_H__ */
+/*
+ * F/W Image Size & Chunk
+ */
+extern u32 bfi_image_ct_fc_size;
+extern u32 bfi_image_ct_cna_size;
+extern u32 bfi_image_cb_fc_size;
+extern u32 *bfi_image_ct_fc;
+extern u32 *bfi_image_ct_cna;
+extern u32 *bfi_image_cb_fc;
+
+static inline u32 *
+bfi_image_ct_fc_get_chunk(u32 off)
+{	return (u32 *)(bfi_image_ct_fc + off); }
+
+static inline u32 *
+bfi_image_ct_cna_get_chunk(u32 off)
+{	return (u32 *)(bfi_image_ct_cna + off); }
 
+static inline u32 *
+bfi_image_cb_fc_get_chunk(u32 off)
+{	return (u32 *)(bfi_image_cb_fc + off); }
+
+static inline u32*
+bfa_cb_image_get_chunk(int type, u32 off)
+{
+	switch (type) {
+	case BFI_IMAGE_CT_FC:
+		return bfi_image_ct_fc_get_chunk(off);	break;
+	case BFI_IMAGE_CT_CNA:
+		return bfi_image_ct_cna_get_chunk(off);	break;
+	case BFI_IMAGE_CB_FC:
+		return bfi_image_cb_fc_get_chunk(off);	break;
+	default: return 0;
+	}
+}
+
+static inline u32
+bfa_cb_image_get_size(int type)
+{
+	switch (type) {
+	case BFI_IMAGE_CT_FC:
+		return bfi_image_ct_fc_size;	break;
+	case BFI_IMAGE_CT_CNA:
+		return bfi_image_ct_cna_size;	break;
+	case BFI_IMAGE_CB_FC:
+		return bfi_image_cb_fc_size;	break;
+	default: return 0;
+	}
+}
+
+/**
+ * CNA TRCMOD declaration
+ */
+/*
+ * !!! Only append to the enums defined here to avoid any versioning
+ * !!! needed between trace utility and driver version
+ */
+enum {
+	BFA_TRC_CNA_PORT	= 1,
+	BFA_TRC_CNA_IOC		= 2,
+	BFA_TRC_CNA_IOC_CB	= 3,
+	BFA_TRC_CNA_IOC_CT	= 4,
+};
+
+#endif /* __BFA_IOC_H__ */

+ 51 - 73
drivers/scsi/bfa/bfa_ioc_cb.c

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
  * All rights reserved
  * www.brocade.com
  *
@@ -15,22 +15,15 @@
  * General Public License for more details.
  */
 
-#include <bfa.h>
-#include <bfa_ioc.h>
-#include <bfa_fwimg_priv.h>
-#include <cna/bfa_cna_trcmod.h>
-#include <cs/bfa_debug.h>
-#include <bfi/bfi_ioc.h>
-#include <bfi/bfi_cbreg.h>
-#include <log/bfa_log_hal.h>
-#include <defs/bfa_defs_pci.h>
+#include "bfa_ioc.h"
+#include "bfi_cbreg.h"
+#include "bfa_defs.h"
 
 BFA_TRC_FILE(CNA, IOC_CB);
 
 /*
  * forward declarations
  */
-static bfa_status_t bfa_ioc_cb_pll_init(struct bfa_ioc_s *ioc);
 static bfa_boolean_t bfa_ioc_cb_firmware_lock(struct bfa_ioc_s *ioc);
 static void bfa_ioc_cb_firmware_unlock(struct bfa_ioc_s *ioc);
 static void bfa_ioc_cb_reg_init(struct bfa_ioc_s *ioc);
@@ -95,6 +88,7 @@ static struct { u32 hfn_mbox, lpu_mbox, hfn_pgn; } iocreg_fnreg[] = {
  * Host <-> LPU mailbox command/status registers
  */
 static struct { u32 hfn, lpu; } iocreg_mbcmd[] = {
+
 	{ HOSTFN0_LPU0_CMD_STAT, LPU0_HOSTFN0_CMD_STAT },
 	{ HOSTFN1_LPU1_CMD_STAT, LPU1_HOSTFN1_CMD_STAT }
 };
@@ -154,6 +148,7 @@ bfa_ioc_cb_reg_init(struct bfa_ioc_s *ioc)
 /**
  * Initialize IOC to port mapping.
  */
+
 static void
 bfa_ioc_cb_map_port(struct bfa_ioc_s *ioc)
 {
@@ -161,6 +156,7 @@ bfa_ioc_cb_map_port(struct bfa_ioc_s *ioc)
 	 * For crossbow, port id is same as pci function.
 	 */
 	ioc->port_id = bfa_ioc_pcifn(ioc);
+
 	bfa_trc(ioc, ioc->port_id);
 }
 
@@ -172,87 +168,69 @@ bfa_ioc_cb_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix)
 {
 }
 
-static bfa_status_t
-bfa_ioc_cb_pll_init(struct bfa_ioc_s *ioc)
+/**
+ * Cleanup hw semaphore and usecnt registers
+ */
+static void
+bfa_ioc_cb_ownership_reset(struct bfa_ioc_s *ioc)
 {
-	bfa_os_addr_t	rb = ioc->pcidev.pci_bar_kva;
-	u32	pll_sclk, pll_fclk;
 
 	/*
-	 *  Hold semaphore so that nobody can access the chip during init.
+	 * Read the hw sem reg to make sure that it is locked
+	 * before we clear it. If it is not locked, writing 1
+	 * will lock it instead of clearing it.
 	 */
-	bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg);
+	bfa_reg_read(ioc->ioc_regs.ioc_sem_reg);
+	bfa_ioc_hw_sem_release(ioc);
+}
+
+
+
+bfa_status_t
+bfa_ioc_cb_pll_init(bfa_os_addr_t rb, bfa_boolean_t fcmode)
+{
+	u32	pll_sclk, pll_fclk;
 
 	pll_sclk = __APP_PLL_212_ENABLE | __APP_PLL_212_LRESETN |
-			__APP_PLL_212_P0_1(3U) |
-			__APP_PLL_212_JITLMT0_1(3U) |
-			__APP_PLL_212_CNTLMT0_1(3U);
+		__APP_PLL_212_P0_1(3U) |
+		__APP_PLL_212_JITLMT0_1(3U) |
+		__APP_PLL_212_CNTLMT0_1(3U);
 	pll_fclk = __APP_PLL_400_ENABLE | __APP_PLL_400_LRESETN |
-			__APP_PLL_400_RSEL200500 | __APP_PLL_400_P0_1(3U) |
-			__APP_PLL_400_JITLMT0_1(3U) |
-			__APP_PLL_400_CNTLMT0_1(3U);
-
+		__APP_PLL_400_RSEL200500 | __APP_PLL_400_P0_1(3U) |
+		__APP_PLL_400_JITLMT0_1(3U) |
+		__APP_PLL_400_CNTLMT0_1(3U);
 	bfa_reg_write((rb + BFA_IOC0_STATE_REG), BFI_IOC_UNINIT);
 	bfa_reg_write((rb + BFA_IOC1_STATE_REG), BFI_IOC_UNINIT);
-
 	bfa_reg_write((rb + HOSTFN0_INT_MSK), 0xffffffffU);
 	bfa_reg_write((rb + HOSTFN1_INT_MSK), 0xffffffffU);
 	bfa_reg_write((rb + HOSTFN0_INT_STATUS), 0xffffffffU);
 	bfa_reg_write((rb + HOSTFN1_INT_STATUS), 0xffffffffU);
 	bfa_reg_write((rb + HOSTFN0_INT_MSK), 0xffffffffU);
 	bfa_reg_write((rb + HOSTFN1_INT_MSK), 0xffffffffU);
-
-	bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg,
-			__APP_PLL_212_LOGIC_SOFT_RESET);
-	bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg,
-			__APP_PLL_212_BYPASS |
-			__APP_PLL_212_LOGIC_SOFT_RESET);
-	bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg,
-			__APP_PLL_400_LOGIC_SOFT_RESET);
-	bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg,
-			__APP_PLL_400_BYPASS |
-			__APP_PLL_400_LOGIC_SOFT_RESET);
+	bfa_reg_write(rb + APP_PLL_212_CTL_REG,
+			  __APP_PLL_212_LOGIC_SOFT_RESET);
+	bfa_reg_write(rb + APP_PLL_212_CTL_REG,
+			  __APP_PLL_212_BYPASS |
+			  __APP_PLL_212_LOGIC_SOFT_RESET);
+	bfa_reg_write(rb + APP_PLL_400_CTL_REG,
+			  __APP_PLL_400_LOGIC_SOFT_RESET);
+	bfa_reg_write(rb + APP_PLL_400_CTL_REG,
+			  __APP_PLL_400_BYPASS |
+			  __APP_PLL_400_LOGIC_SOFT_RESET);
 	bfa_os_udelay(2);
-	bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg,
-			__APP_PLL_212_LOGIC_SOFT_RESET);
-	bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg,
-			__APP_PLL_400_LOGIC_SOFT_RESET);
-
-	bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg,
-			pll_sclk | __APP_PLL_212_LOGIC_SOFT_RESET);
-	bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg,
-			pll_fclk | __APP_PLL_400_LOGIC_SOFT_RESET);
-
-	/**
-	 * Wait for PLLs to lock.
-	 */
+	bfa_reg_write(rb + APP_PLL_212_CTL_REG,
+			  __APP_PLL_212_LOGIC_SOFT_RESET);
+	bfa_reg_write(rb + APP_PLL_400_CTL_REG,
+			  __APP_PLL_400_LOGIC_SOFT_RESET);
+	bfa_reg_write(rb + APP_PLL_212_CTL_REG,
+			  pll_sclk | __APP_PLL_212_LOGIC_SOFT_RESET);
+	bfa_reg_write(rb + APP_PLL_400_CTL_REG,
+			  pll_fclk | __APP_PLL_400_LOGIC_SOFT_RESET);
 	bfa_os_udelay(2000);
 	bfa_reg_write((rb + HOSTFN0_INT_STATUS), 0xffffffffU);
 	bfa_reg_write((rb + HOSTFN1_INT_STATUS), 0xffffffffU);
-
-	bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg, pll_sclk);
-	bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg, pll_fclk);
-
-	/*
-	 *  release semaphore.
-	 */
-	bfa_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg);
+	bfa_reg_write((rb + APP_PLL_212_CTL_REG), pll_sclk);
+	bfa_reg_write((rb + APP_PLL_400_CTL_REG), pll_fclk);
 
 	return BFA_STATUS_OK;
 }
-
-/**
- * Cleanup hw semaphore and usecnt registers
- */
-static void
-bfa_ioc_cb_ownership_reset(struct bfa_ioc_s *ioc)
-{
-
-	/*
-	 * Read the hw sem reg to make sure that it is locked
-	 * before we clear it. If it is not locked, writing 1
-	 * will lock it instead of clearing it.
-	 */
-	bfa_reg_read(ioc->ioc_regs.ioc_sem_reg);
-	bfa_ioc_hw_sem_release(ioc);
-}

+ 55 - 82
drivers/scsi/bfa/bfa_ioc_ct.c

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
  * All rights reserved
  * www.brocade.com
  *
@@ -15,22 +15,15 @@
  * General Public License for more details.
  */
 
-#include <bfa.h>
-#include <bfa_ioc.h>
-#include <bfa_fwimg_priv.h>
-#include <cna/bfa_cna_trcmod.h>
-#include <cs/bfa_debug.h>
-#include <bfi/bfi_ioc.h>
-#include <bfi/bfi_ctreg.h>
-#include <log/bfa_log_hal.h>
-#include <defs/bfa_defs_pci.h>
+#include "bfa_ioc.h"
+#include "bfi_ctreg.h"
+#include "bfa_defs.h"
 
 BFA_TRC_FILE(CNA, IOC_CT);
 
 /*
  * forward declarations
  */
-static bfa_status_t bfa_ioc_ct_pll_init(struct bfa_ioc_s *ioc);
 static bfa_boolean_t bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc);
 static void bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc);
 static void bfa_ioc_ct_reg_init(struct bfa_ioc_s *ioc);
@@ -78,7 +71,8 @@ bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc)
 	/**
 	 * If bios boot (flash based) -- do not increment usage count
 	 */
-	if (bfi_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)) < BFA_IOC_FWIMG_MINSZ)
+	if (bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)) <
+						BFA_IOC_FWIMG_MINSZ)
 		return BFA_TRUE;
 
 	bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
@@ -136,7 +130,8 @@ bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc)
 	/**
 	 * If bios boot (flash based) -- do not decrement usage count
 	 */
-	if (bfi_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)) < BFA_IOC_FWIMG_MINSZ)
+	if (bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)) <
+						BFA_IOC_FWIMG_MINSZ)
 		return;
 
 	/**
@@ -308,16 +303,47 @@ bfa_ioc_ct_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix)
 	bfa_reg_write(rb + FNC_PERS_REG, r32);
 }
 
-static bfa_status_t
-bfa_ioc_ct_pll_init(struct bfa_ioc_s *ioc)
+/**
+ * Cleanup hw semaphore and usecnt registers
+ */
+static void
+bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc)
 {
-	bfa_os_addr_t	rb = ioc->pcidev.pci_bar_kva;
-	u32	pll_sclk, pll_fclk, r32;
+
+	if (ioc->cna) {
+		bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
+		bfa_reg_write(ioc->ioc_regs.ioc_usage_reg, 0);
+		bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
+	}
 
 	/*
-	 *  Hold semaphore so that nobody can access the chip during init.
+	 * Read the hw sem reg to make sure that it is locked
+	 * before we clear it. If it is not locked, writing 1
+	 * will lock it instead of clearing it.
 	 */
-	bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg);
+	bfa_reg_read(ioc->ioc_regs.ioc_sem_reg);
+	bfa_ioc_hw_sem_release(ioc);
+}
+
+
+
+/*
+ * Check the firmware state to know if pll_init has been completed already
+ */
+bfa_boolean_t
+bfa_ioc_ct_pll_init_complete(bfa_os_addr_t rb)
+{
+	if ((bfa_reg_read(rb + BFA_IOC0_STATE_REG) == BFI_IOC_OP) ||
+	  (bfa_reg_read(rb + BFA_IOC1_STATE_REG) == BFI_IOC_OP))
+		return BFA_TRUE;
+
+	return BFA_FALSE;
+}
+
+bfa_status_t
+bfa_ioc_ct_pll_init(bfa_os_addr_t rb, bfa_boolean_t fcmode)
+{
+	u32	pll_sclk, pll_fclk, r32;
 
 	pll_sclk = __APP_PLL_312_LRESETN | __APP_PLL_312_ENARST |
 		__APP_PLL_312_RSEL200500 | __APP_PLL_312_P0_1(3U) |
@@ -327,70 +353,50 @@ bfa_ioc_ct_pll_init(struct bfa_ioc_s *ioc)
 		__APP_PLL_425_RSEL200500 | __APP_PLL_425_P0_1(3U) |
 		__APP_PLL_425_JITLMT0_1(3U) |
 		__APP_PLL_425_CNTLMT0_1(1U);
-
-	/**
-	 *	For catapult, choose operational mode FC/FCoE
-	 */
-	if (ioc->fcmode) {
+	if (fcmode) {
 		bfa_reg_write((rb + OP_MODE), 0);
 		bfa_reg_write((rb + ETH_MAC_SER_REG),
 				__APP_EMS_CMLCKSEL |
 				__APP_EMS_REFCKBUFEN2 |
 				__APP_EMS_CHANNEL_SEL);
 	} else {
-		ioc->pllinit = BFA_TRUE;
 		bfa_reg_write((rb + OP_MODE), __GLOBAL_FCOE_MODE);
 		bfa_reg_write((rb + ETH_MAC_SER_REG),
-				 __APP_EMS_REFCKBUFEN1);
+				__APP_EMS_REFCKBUFEN1);
 	}
-
 	bfa_reg_write((rb + BFA_IOC0_STATE_REG), BFI_IOC_UNINIT);
 	bfa_reg_write((rb + BFA_IOC1_STATE_REG), BFI_IOC_UNINIT);
-
 	bfa_reg_write((rb + HOSTFN0_INT_MSK), 0xffffffffU);
 	bfa_reg_write((rb + HOSTFN1_INT_MSK), 0xffffffffU);
 	bfa_reg_write((rb + HOSTFN0_INT_STATUS), 0xffffffffU);
 	bfa_reg_write((rb + HOSTFN1_INT_STATUS), 0xffffffffU);
 	bfa_reg_write((rb + HOSTFN0_INT_MSK), 0xffffffffU);
 	bfa_reg_write((rb + HOSTFN1_INT_MSK), 0xffffffffU);
-
-	bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg, pll_sclk |
+	bfa_reg_write(rb + APP_PLL_312_CTL_REG, pll_sclk |
 		__APP_PLL_312_LOGIC_SOFT_RESET);
-	bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg, pll_fclk |
+	bfa_reg_write(rb + APP_PLL_425_CTL_REG, pll_fclk |
 		__APP_PLL_425_LOGIC_SOFT_RESET);
-	bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg, pll_sclk |
+	bfa_reg_write(rb + APP_PLL_312_CTL_REG, pll_sclk |
 		__APP_PLL_312_LOGIC_SOFT_RESET | __APP_PLL_312_ENABLE);
-	bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg, pll_fclk |
+	bfa_reg_write(rb + APP_PLL_425_CTL_REG, pll_fclk |
 		__APP_PLL_425_LOGIC_SOFT_RESET | __APP_PLL_425_ENABLE);
-
-	/**
-	 * Wait for PLLs to lock.
-	 */
 	bfa_reg_read(rb + HOSTFN0_INT_MSK);
 	bfa_os_udelay(2000);
 	bfa_reg_write((rb + HOSTFN0_INT_STATUS), 0xffffffffU);
 	bfa_reg_write((rb + HOSTFN1_INT_STATUS), 0xffffffffU);
-
-	bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg, pll_sclk |
+	bfa_reg_write(rb + APP_PLL_312_CTL_REG, pll_sclk |
 		__APP_PLL_312_ENABLE);
-	bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg, pll_fclk |
+	bfa_reg_write(rb + APP_PLL_425_CTL_REG, pll_fclk |
 		__APP_PLL_425_ENABLE);
-
-	/**
-	 * PSS memory reset is asserted at power-on-reset. Need to clear
-	 * this before running EDRAM BISTR
-	 */
-	if (ioc->cna) {
+	if (!fcmode) {
 		bfa_reg_write((rb + PMM_1T_RESET_REG_P0), __PMM_1T_RESET_P);
 		bfa_reg_write((rb + PMM_1T_RESET_REG_P1), __PMM_1T_RESET_P);
 	}
-
 	r32 = bfa_reg_read((rb + PSS_CTL_REG));
 	r32 &= ~__PSS_LMEM_RESET;
 	bfa_reg_write((rb + PSS_CTL_REG), r32);
 	bfa_os_udelay(1000);
-
-	if (ioc->cna) {
+	if (!fcmode) {
 		bfa_reg_write((rb + PMM_1T_RESET_REG_P0), 0);
 		bfa_reg_write((rb + PMM_1T_RESET_REG_P1), 0);
 	}
@@ -398,39 +404,6 @@ bfa_ioc_ct_pll_init(struct bfa_ioc_s *ioc)
 	bfa_reg_write((rb + MBIST_CTL_REG), __EDRAM_BISTR_START);
 	bfa_os_udelay(1000);
 	r32 = bfa_reg_read((rb + MBIST_STAT_REG));
-	bfa_trc(ioc, r32);
-
-	/**
-	 * Clear BISTR
-	 */
 	bfa_reg_write((rb + MBIST_CTL_REG), 0);
-
-	/*
-	 *  release semaphore.
-	 */
-	bfa_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg);
-
 	return BFA_STATUS_OK;
 }
-
-/**
- * Cleanup hw semaphore and usecnt registers
- */
-static void
-bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc)
-{
-
-	if (ioc->cna) {
-		bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
-		bfa_reg_write(ioc->ioc_regs.ioc_usage_reg, 0);
-		bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
-	}
-
-	/*
-	 * Read the hw sem reg to make sure that it is locked
-	 * before we clear it. If it is not locked, writing 1
-	 * will lock it instead of clearing it.
-	 */
-	bfa_reg_read(ioc->ioc_regs.ioc_sem_reg);
-	bfa_ioc_hw_sem_release(ioc);
-}

+ 0 - 927
drivers/scsi/bfa/bfa_iocfc.c

@@ -1,927 +0,0 @@
-/*
- * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
- * All rights reserved
- * www.brocade.com
- *
- * Linux driver for Brocade Fibre Channel Host Bus Adapter.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License (GPL) Version 2 as
- * published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- */
-
-#include <cs/bfa_debug.h>
-#include <bfa_priv.h>
-#include <log/bfa_log_hal.h>
-#include <bfi/bfi_boot.h>
-#include <bfi/bfi_cbreg.h>
-#include <aen/bfa_aen_ioc.h>
-#include <defs/bfa_defs_iocfc.h>
-#include <defs/bfa_defs_pci.h>
-#include "bfa_callback_priv.h"
-#include "bfad_drv.h"
-
-BFA_TRC_FILE(HAL, IOCFC);
-
-/**
- * IOC local definitions
- */
-#define BFA_IOCFC_TOV		5000	/* msecs */
-
-enum {
-	BFA_IOCFC_ACT_NONE	= 0,
-	BFA_IOCFC_ACT_INIT	= 1,
-	BFA_IOCFC_ACT_STOP	= 2,
-	BFA_IOCFC_ACT_DISABLE	= 3,
-};
-
-/*
- * forward declarations
- */
-static void bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status);
-static void bfa_iocfc_disable_cbfn(void *bfa_arg);
-static void bfa_iocfc_hbfail_cbfn(void *bfa_arg);
-static void bfa_iocfc_reset_cbfn(void *bfa_arg);
-static void bfa_iocfc_stats_clear(void *bfa_arg);
-static void bfa_iocfc_stats_swap(struct bfa_fw_stats_s *d,
-			struct bfa_fw_stats_s *s);
-static void bfa_iocfc_stats_clr_cb(void *bfa_arg, bfa_boolean_t complete);
-static void bfa_iocfc_stats_clr_timeout(void *bfa_arg);
-static void bfa_iocfc_stats_cb(void *bfa_arg, bfa_boolean_t complete);
-static void bfa_iocfc_stats_timeout(void *bfa_arg);
-
-static struct bfa_ioc_cbfn_s bfa_iocfc_cbfn;
-
-/**
- *  bfa_ioc_pvt BFA IOC private functions
- */
-
-static void
-bfa_iocfc_cqs_sz(struct bfa_iocfc_cfg_s *cfg, u32 *dm_len)
-{
-	int             i, per_reqq_sz, per_rspq_sz;
-
-	per_reqq_sz = BFA_ROUNDUP((cfg->drvcfg.num_reqq_elems * BFI_LMSG_SZ),
-							BFA_DMA_ALIGN_SZ);
-	per_rspq_sz = BFA_ROUNDUP((cfg->drvcfg.num_rspq_elems * BFI_LMSG_SZ),
-							BFA_DMA_ALIGN_SZ);
-
-	/*
-	 * Calculate CQ size
-	 */
-	for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
-		*dm_len = *dm_len + per_reqq_sz;
-		*dm_len = *dm_len + per_rspq_sz;
-	}
-
-	/*
-	 * Calculate Shadow CI/PI size
-	 */
-	for (i = 0; i < cfg->fwcfg.num_cqs; i++)
-		*dm_len += (2 * BFA_CACHELINE_SZ);
-}
-
-static void
-bfa_iocfc_fw_cfg_sz(struct bfa_iocfc_cfg_s *cfg, u32 *dm_len)
-{
-	*dm_len +=
-		BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);
-	*dm_len +=
-		BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
-			    BFA_CACHELINE_SZ);
-	*dm_len += BFA_ROUNDUP(sizeof(struct bfa_fw_stats_s), BFA_CACHELINE_SZ);
-}
-
-/**
- * Use the Mailbox interface to send BFI_IOCFC_H2I_CFG_REQ
- */
-static void
-bfa_iocfc_send_cfg(void *bfa_arg)
-{
-	struct bfa_s *bfa = bfa_arg;
-	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
-	struct bfi_iocfc_cfg_req_s cfg_req;
-	struct bfi_iocfc_cfg_s *cfg_info = iocfc->cfginfo;
-	struct bfa_iocfc_cfg_s  *cfg = &iocfc->cfg;
-	int             i;
-
-	bfa_assert(cfg->fwcfg.num_cqs <= BFI_IOC_MAX_CQS);
-	bfa_trc(bfa, cfg->fwcfg.num_cqs);
-
-	bfa_iocfc_reset_queues(bfa);
-
-	/**
-	 * initialize IOC configuration info
-	 */
-	cfg_info->endian_sig = BFI_IOC_ENDIAN_SIG;
-	cfg_info->num_cqs = cfg->fwcfg.num_cqs;
-
-	bfa_dma_be_addr_set(cfg_info->cfgrsp_addr, iocfc->cfgrsp_dma.pa);
-	bfa_dma_be_addr_set(cfg_info->stats_addr, iocfc->stats_pa);
-
-	/**
-	 * dma map REQ and RSP circular queues and shadow pointers
-	 */
-	for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
-		bfa_dma_be_addr_set(cfg_info->req_cq_ba[i],
-				       iocfc->req_cq_ba[i].pa);
-		bfa_dma_be_addr_set(cfg_info->req_shadow_ci[i],
-				       iocfc->req_cq_shadow_ci[i].pa);
-		cfg_info->req_cq_elems[i] =
-			bfa_os_htons(cfg->drvcfg.num_reqq_elems);
-
-		bfa_dma_be_addr_set(cfg_info->rsp_cq_ba[i],
-				       iocfc->rsp_cq_ba[i].pa);
-		bfa_dma_be_addr_set(cfg_info->rsp_shadow_pi[i],
-				       iocfc->rsp_cq_shadow_pi[i].pa);
-		cfg_info->rsp_cq_elems[i] =
-			bfa_os_htons(cfg->drvcfg.num_rspq_elems);
-	}
-
-	/**
-	 * Enable interrupt coalescing if it is driver init path
-	 * and not ioc disable/enable path.
-	 */
-	if (!iocfc->cfgdone)
-		cfg_info->intr_attr.coalesce = BFA_TRUE;
-
-	iocfc->cfgdone = BFA_FALSE;
-
-	/**
-	 * dma map IOC configuration itself
-	 */
-	bfi_h2i_set(cfg_req.mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_CFG_REQ,
-			bfa_lpuid(bfa));
-	bfa_dma_be_addr_set(cfg_req.ioc_cfg_dma_addr, iocfc->cfg_info.pa);
-
-	bfa_ioc_mbox_send(&bfa->ioc, &cfg_req,
-			sizeof(struct bfi_iocfc_cfg_req_s));
-}
-
-static void
-bfa_iocfc_init_mem(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
-		    struct bfa_pcidev_s *pcidev)
-{
-	struct bfa_iocfc_s	*iocfc = &bfa->iocfc;
-
-	bfa->bfad = bfad;
-	iocfc->bfa = bfa;
-	iocfc->action = BFA_IOCFC_ACT_NONE;
-
-	bfa_os_assign(iocfc->cfg, *cfg);
-
-	/**
-	 * Initialize chip specific handlers.
-	 */
-	if (bfa_asic_id_ct(bfa_ioc_devid(&bfa->ioc))) {
-		iocfc->hwif.hw_reginit = bfa_hwct_reginit;
-		iocfc->hwif.hw_reqq_ack = bfa_hwct_reqq_ack;
-		iocfc->hwif.hw_rspq_ack = bfa_hwct_rspq_ack;
-		iocfc->hwif.hw_msix_init = bfa_hwct_msix_init;
-		iocfc->hwif.hw_msix_install = bfa_hwct_msix_install;
-		iocfc->hwif.hw_msix_uninstall = bfa_hwct_msix_uninstall;
-		iocfc->hwif.hw_isr_mode_set = bfa_hwct_isr_mode_set;
-		iocfc->hwif.hw_msix_getvecs = bfa_hwct_msix_getvecs;
-		iocfc->hwif.hw_msix_get_rme_range = bfa_hwct_msix_get_rme_range;
-	} else {
-		iocfc->hwif.hw_reginit = bfa_hwcb_reginit;
-		iocfc->hwif.hw_reqq_ack = bfa_hwcb_reqq_ack;
-		iocfc->hwif.hw_rspq_ack = bfa_hwcb_rspq_ack;
-		iocfc->hwif.hw_msix_init = bfa_hwcb_msix_init;
-		iocfc->hwif.hw_msix_install = bfa_hwcb_msix_install;
-		iocfc->hwif.hw_msix_uninstall = bfa_hwcb_msix_uninstall;
-		iocfc->hwif.hw_isr_mode_set = bfa_hwcb_isr_mode_set;
-		iocfc->hwif.hw_msix_getvecs = bfa_hwcb_msix_getvecs;
-		iocfc->hwif.hw_msix_get_rme_range = bfa_hwcb_msix_get_rme_range;
-	}
-
-	iocfc->hwif.hw_reginit(bfa);
-	bfa->msix.nvecs = 0;
-}
-
-static void
-bfa_iocfc_mem_claim(struct bfa_s *bfa, struct bfa_iocfc_cfg_s *cfg,
-		      struct bfa_meminfo_s *meminfo)
-{
-	u8        *dm_kva;
-	u64        dm_pa;
-	int             i, per_reqq_sz, per_rspq_sz;
-	struct bfa_iocfc_s  *iocfc = &bfa->iocfc;
-	int		dbgsz;
-
-	dm_kva = bfa_meminfo_dma_virt(meminfo);
-	dm_pa = bfa_meminfo_dma_phys(meminfo);
-
-	/*
-	 * First allocate dma memory for IOC.
-	 */
-	bfa_ioc_mem_claim(&bfa->ioc, dm_kva, dm_pa);
-	dm_kva += bfa_ioc_meminfo();
-	dm_pa  += bfa_ioc_meminfo();
-
-	/*
-	 * Claim DMA-able memory for the request/response queues and for shadow
-	 * ci/pi registers
-	 */
-	per_reqq_sz = BFA_ROUNDUP((cfg->drvcfg.num_reqq_elems * BFI_LMSG_SZ),
-							BFA_DMA_ALIGN_SZ);
-	per_rspq_sz = BFA_ROUNDUP((cfg->drvcfg.num_rspq_elems * BFI_LMSG_SZ),
-							BFA_DMA_ALIGN_SZ);
-
-	for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
-		iocfc->req_cq_ba[i].kva = dm_kva;
-		iocfc->req_cq_ba[i].pa = dm_pa;
-		bfa_os_memset(dm_kva, 0, per_reqq_sz);
-		dm_kva += per_reqq_sz;
-		dm_pa += per_reqq_sz;
-
-		iocfc->rsp_cq_ba[i].kva = dm_kva;
-		iocfc->rsp_cq_ba[i].pa = dm_pa;
-		bfa_os_memset(dm_kva, 0, per_rspq_sz);
-		dm_kva += per_rspq_sz;
-		dm_pa += per_rspq_sz;
-	}
-
-	for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
-		iocfc->req_cq_shadow_ci[i].kva = dm_kva;
-		iocfc->req_cq_shadow_ci[i].pa = dm_pa;
-		dm_kva += BFA_CACHELINE_SZ;
-		dm_pa += BFA_CACHELINE_SZ;
-
-		iocfc->rsp_cq_shadow_pi[i].kva = dm_kva;
-		iocfc->rsp_cq_shadow_pi[i].pa = dm_pa;
-		dm_kva += BFA_CACHELINE_SZ;
-		dm_pa += BFA_CACHELINE_SZ;
-	}
-
-	/*
-	 * Claim DMA-able memory for the config info page
-	 */
-	bfa->iocfc.cfg_info.kva = dm_kva;
-	bfa->iocfc.cfg_info.pa = dm_pa;
-	bfa->iocfc.cfginfo = (struct bfi_iocfc_cfg_s *) dm_kva;
-	dm_kva += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);
-	dm_pa += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);
-
-	/*
-	 * Claim DMA-able memory for the config response
-	 */
-	bfa->iocfc.cfgrsp_dma.kva = dm_kva;
-	bfa->iocfc.cfgrsp_dma.pa = dm_pa;
-	bfa->iocfc.cfgrsp = (struct bfi_iocfc_cfgrsp_s *) dm_kva;
-
-	dm_kva +=
-		BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
-			    BFA_CACHELINE_SZ);
-	dm_pa += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
-			     BFA_CACHELINE_SZ);
-
-	/*
-	 * Claim DMA-able memory for iocfc stats
-	 */
-	bfa->iocfc.stats_kva = dm_kva;
-	bfa->iocfc.stats_pa = dm_pa;
-	bfa->iocfc.fw_stats = (struct bfa_fw_stats_s *) dm_kva;
-	dm_kva += BFA_ROUNDUP(sizeof(struct bfa_fw_stats_s), BFA_CACHELINE_SZ);
-	dm_pa += BFA_ROUNDUP(sizeof(struct bfa_fw_stats_s), BFA_CACHELINE_SZ);
-
-	bfa_meminfo_dma_virt(meminfo) = dm_kva;
-	bfa_meminfo_dma_phys(meminfo) = dm_pa;
-
-	dbgsz = bfa_ioc_debug_trcsz(bfa_auto_recover);
-	if (dbgsz > 0) {
-		bfa_ioc_debug_memclaim(&bfa->ioc, bfa_meminfo_kva(meminfo));
-		bfa_meminfo_kva(meminfo) += dbgsz;
-	}
-}
-
-/**
- * Start BFA submodules.
- */
-static void
-bfa_iocfc_start_submod(struct bfa_s *bfa)
-{
-	int             i;
-
-	bfa->rme_process = BFA_TRUE;
-
-	for (i = 0; hal_mods[i]; i++)
-		hal_mods[i]->start(bfa);
-}
-
-/**
- * Disable BFA submodules.
- */
-static void
-bfa_iocfc_disable_submod(struct bfa_s *bfa)
-{
-	int             i;
-
-	for (i = 0; hal_mods[i]; i++)
-		hal_mods[i]->iocdisable(bfa);
-}
-
-static void
-bfa_iocfc_init_cb(void *bfa_arg, bfa_boolean_t complete)
-{
-	struct bfa_s	*bfa = bfa_arg;
-
-	if (complete) {
-		if (bfa->iocfc.cfgdone)
-			bfa_cb_init(bfa->bfad, BFA_STATUS_OK);
-		else
-			bfa_cb_init(bfa->bfad, BFA_STATUS_FAILED);
-	} else {
-		if (bfa->iocfc.cfgdone)
-			bfa->iocfc.action = BFA_IOCFC_ACT_NONE;
-	}
-}
-
-static void
-bfa_iocfc_stop_cb(void *bfa_arg, bfa_boolean_t compl)
-{
-	struct bfa_s  *bfa = bfa_arg;
-	struct bfad_s *bfad = bfa->bfad;
-
-	if (compl)
-		complete(&bfad->comp);
-
-	else
-		bfa->iocfc.action = BFA_IOCFC_ACT_NONE;
-}
-
-static void
-bfa_iocfc_disable_cb(void *bfa_arg, bfa_boolean_t compl)
-{
-	struct bfa_s  *bfa = bfa_arg;
-	struct bfad_s *bfad = bfa->bfad;
-
-	if (compl)
-		complete(&bfad->disable_comp);
-}
-
-/**
- * Update BFA configuration from firmware configuration.
- */
-static void
-bfa_iocfc_cfgrsp(struct bfa_s *bfa)
-{
-	struct bfa_iocfc_s		*iocfc	 = &bfa->iocfc;
-	struct bfi_iocfc_cfgrsp_s	*cfgrsp  = iocfc->cfgrsp;
-	struct bfa_iocfc_fwcfg_s	*fwcfg   = &cfgrsp->fwcfg;
-
-	fwcfg->num_cqs        = fwcfg->num_cqs;
-	fwcfg->num_ioim_reqs  = bfa_os_ntohs(fwcfg->num_ioim_reqs);
-	fwcfg->num_tskim_reqs = bfa_os_ntohs(fwcfg->num_tskim_reqs);
-	fwcfg->num_fcxp_reqs  = bfa_os_ntohs(fwcfg->num_fcxp_reqs);
-	fwcfg->num_uf_bufs    = bfa_os_ntohs(fwcfg->num_uf_bufs);
-	fwcfg->num_rports     = bfa_os_ntohs(fwcfg->num_rports);
-
-	iocfc->cfgdone = BFA_TRUE;
-
-	/**
-	 * Configuration is complete - initialize/start submodules
-	 */
-	bfa_fcport_init(bfa);
-
-	if (iocfc->action == BFA_IOCFC_ACT_INIT)
-		bfa_cb_queue(bfa, &iocfc->init_hcb_qe, bfa_iocfc_init_cb, bfa);
-	else
-		bfa_iocfc_start_submod(bfa);
-}
-
-static void
-bfa_iocfc_stats_clear(void *bfa_arg)
-{
-	struct bfa_s		*bfa = bfa_arg;
-	struct bfa_iocfc_s	*iocfc = &bfa->iocfc;
-	struct bfi_iocfc_stats_req_s stats_req;
-
-	bfa_timer_start(bfa, &iocfc->stats_timer,
-			    bfa_iocfc_stats_clr_timeout, bfa,
-			    BFA_IOCFC_TOV);
-
-	bfi_h2i_set(stats_req.mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_CLEAR_STATS_REQ,
-		bfa_lpuid(bfa));
-	bfa_ioc_mbox_send(&bfa->ioc, &stats_req,
-		sizeof(struct bfi_iocfc_stats_req_s));
-}
-
-static void
-bfa_iocfc_stats_swap(struct bfa_fw_stats_s *d, struct bfa_fw_stats_s *s)
-{
-	u32       *dip = (u32 *) d;
-	u32       *sip = (u32 *) s;
-	int             i;
-
-	for (i = 0; i < (sizeof(struct bfa_fw_stats_s) / sizeof(u32)); i++)
-		dip[i] = bfa_os_ntohl(sip[i]);
-}
-
-static void
-bfa_iocfc_stats_clr_cb(void *bfa_arg, bfa_boolean_t complete)
-{
-	struct bfa_s *bfa = bfa_arg;
-	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
-
-	if (complete) {
-		bfa_ioc_clr_stats(&bfa->ioc);
-		iocfc->stats_cbfn(iocfc->stats_cbarg, iocfc->stats_status);
-	} else {
-		iocfc->stats_busy = BFA_FALSE;
-		iocfc->stats_status = BFA_STATUS_OK;
-	}
-}
-
-static void
-bfa_iocfc_stats_clr_timeout(void *bfa_arg)
-{
-	struct bfa_s		*bfa = bfa_arg;
-	struct bfa_iocfc_s	*iocfc = &bfa->iocfc;
-
-	bfa_trc(bfa, 0);
-
-	iocfc->stats_status = BFA_STATUS_ETIMER;
-	bfa_cb_queue(bfa, &iocfc->stats_hcb_qe, bfa_iocfc_stats_clr_cb, bfa);
-}
-
-static void
-bfa_iocfc_stats_cb(void *bfa_arg, bfa_boolean_t complete)
-{
-	struct bfa_s		*bfa = bfa_arg;
-	struct bfa_iocfc_s	*iocfc = &bfa->iocfc;
-
-	if (complete) {
-		if (iocfc->stats_status == BFA_STATUS_OK) {
-			bfa_os_memset(iocfc->stats_ret, 0,
-				sizeof(*iocfc->stats_ret));
-			bfa_iocfc_stats_swap(&iocfc->stats_ret->fw_stats,
-				iocfc->fw_stats);
-		}
-		iocfc->stats_cbfn(iocfc->stats_cbarg, iocfc->stats_status);
-	} else {
-		iocfc->stats_busy = BFA_FALSE;
-		iocfc->stats_status = BFA_STATUS_OK;
-	}
-}
-
-static void
-bfa_iocfc_stats_timeout(void *bfa_arg)
-{
-	struct bfa_s		*bfa = bfa_arg;
-	struct bfa_iocfc_s	*iocfc = &bfa->iocfc;
-
-	bfa_trc(bfa, 0);
-
-	iocfc->stats_status = BFA_STATUS_ETIMER;
-	bfa_cb_queue(bfa, &iocfc->stats_hcb_qe, bfa_iocfc_stats_cb, bfa);
-}
-
-static void
-bfa_iocfc_stats_query(struct bfa_s *bfa)
-{
-	struct bfa_iocfc_s	*iocfc = &bfa->iocfc;
-	struct bfi_iocfc_stats_req_s stats_req;
-
-	bfa_timer_start(bfa, &iocfc->stats_timer,
-			    bfa_iocfc_stats_timeout, bfa, BFA_IOCFC_TOV);
-
-	bfi_h2i_set(stats_req.mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_GET_STATS_REQ,
-			bfa_lpuid(bfa));
-	bfa_ioc_mbox_send(&bfa->ioc, &stats_req,
-		sizeof(struct bfi_iocfc_stats_req_s));
-}
-
-void
-bfa_iocfc_reset_queues(struct bfa_s *bfa)
-{
-	int             q;
-
-	for (q = 0; q < BFI_IOC_MAX_CQS; q++) {
-		bfa_reqq_ci(bfa, q) = 0;
-		bfa_reqq_pi(bfa, q) = 0;
-		bfa_rspq_ci(bfa, q) = 0;
-		bfa_rspq_pi(bfa, q) = 0;
-	}
-}
-
-/**
- * IOC enable request is complete
- */
-static void
-bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status)
-{
-	struct bfa_s	*bfa = bfa_arg;
-
-	if (status != BFA_STATUS_OK) {
-		bfa_isr_disable(bfa);
-		if (bfa->iocfc.action == BFA_IOCFC_ACT_INIT)
-			bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe,
-				     bfa_iocfc_init_cb, bfa);
-		return;
-	}
-
-	bfa_iocfc_send_cfg(bfa);
-}
-
-/**
- * IOC disable request is complete
- */
-static void
-bfa_iocfc_disable_cbfn(void *bfa_arg)
-{
-	struct bfa_s	*bfa = bfa_arg;
-
-	bfa_isr_disable(bfa);
-	bfa_iocfc_disable_submod(bfa);
-
-	if (bfa->iocfc.action == BFA_IOCFC_ACT_STOP)
-		bfa_cb_queue(bfa, &bfa->iocfc.stop_hcb_qe, bfa_iocfc_stop_cb,
-			     bfa);
-	else {
-		bfa_assert(bfa->iocfc.action == BFA_IOCFC_ACT_DISABLE);
-		bfa_cb_queue(bfa, &bfa->iocfc.dis_hcb_qe, bfa_iocfc_disable_cb,
-			     bfa);
-	}
-}
-
-/**
- * Notify sub-modules of hardware failure.
- */
-static void
-bfa_iocfc_hbfail_cbfn(void *bfa_arg)
-{
-	struct bfa_s	*bfa = bfa_arg;
-
-	bfa->rme_process = BFA_FALSE;
-
-	bfa_isr_disable(bfa);
-	bfa_iocfc_disable_submod(bfa);
-
-	if (bfa->iocfc.action == BFA_IOCFC_ACT_INIT)
-		bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe, bfa_iocfc_init_cb,
-			     bfa);
-}
-
-/**
- * Actions on chip-reset completion.
- */
-static void
-bfa_iocfc_reset_cbfn(void *bfa_arg)
-{
-	struct bfa_s	*bfa = bfa_arg;
-
-	bfa_iocfc_reset_queues(bfa);
-	bfa_isr_enable(bfa);
-}
-
-
-
-/**
- *  bfa_ioc_public
- */
-
-/**
- * Query IOC memory requirement information.
- */
-void
-bfa_iocfc_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
-		u32 *dm_len)
-{
-	/* dma memory for IOC */
-	*dm_len += bfa_ioc_meminfo();
-
-	bfa_iocfc_fw_cfg_sz(cfg, dm_len);
-	bfa_iocfc_cqs_sz(cfg, dm_len);
-	*km_len += bfa_ioc_debug_trcsz(bfa_auto_recover);
-}
-
-/**
- * Query IOC memory requirement information.
- */
-void
-bfa_iocfc_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
-		   struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
-{
-	int             i;
-
-	bfa_iocfc_cbfn.enable_cbfn = bfa_iocfc_enable_cbfn;
-	bfa_iocfc_cbfn.disable_cbfn = bfa_iocfc_disable_cbfn;
-	bfa_iocfc_cbfn.hbfail_cbfn = bfa_iocfc_hbfail_cbfn;
-	bfa_iocfc_cbfn.reset_cbfn = bfa_iocfc_reset_cbfn;
-
-	bfa_ioc_attach(&bfa->ioc, bfa, &bfa_iocfc_cbfn, &bfa->timer_mod,
-		bfa->trcmod, bfa->aen, bfa->logm);
-
-	/**
-	 * Set FC mode for BFA_PCI_DEVICE_ID_CT_FC.
-	 */
-	if (pcidev->device_id == BFA_PCI_DEVICE_ID_CT_FC)
-		bfa_ioc_set_fcmode(&bfa->ioc);
-
-	bfa_ioc_pci_init(&bfa->ioc, pcidev, BFI_MC_IOCFC);
-	bfa_ioc_mbox_register(&bfa->ioc, bfa_mbox_isrs);
-
-	bfa_iocfc_init_mem(bfa, bfad, cfg, pcidev);
-	bfa_iocfc_mem_claim(bfa, cfg, meminfo);
-	bfa_timer_init(&bfa->timer_mod);
-
-	INIT_LIST_HEAD(&bfa->comp_q);
-	for (i = 0; i < BFI_IOC_MAX_CQS; i++)
-		INIT_LIST_HEAD(&bfa->reqq_waitq[i]);
-}
-
-/**
- * Query IOC memory requirement information.
- */
-void
-bfa_iocfc_detach(struct bfa_s *bfa)
-{
-	bfa_ioc_detach(&bfa->ioc);
-}
-
-/**
- * Query IOC memory requirement information.
- */
-void
-bfa_iocfc_init(struct bfa_s *bfa)
-{
-	bfa->iocfc.action = BFA_IOCFC_ACT_INIT;
-	bfa_ioc_enable(&bfa->ioc);
-}
-
-/**
- * IOC start called from bfa_start(). Called to start IOC operations
- * at driver instantiation for this instance.
- */
-void
-bfa_iocfc_start(struct bfa_s *bfa)
-{
-	if (bfa->iocfc.cfgdone)
-		bfa_iocfc_start_submod(bfa);
-}
-
-/**
- * IOC stop called from bfa_stop(). Called only when driver is unloaded
- * for this instance.
- */
-void
-bfa_iocfc_stop(struct bfa_s *bfa)
-{
-	bfa->iocfc.action = BFA_IOCFC_ACT_STOP;
-
-	bfa->rme_process = BFA_FALSE;
-	bfa_ioc_disable(&bfa->ioc);
-}
-
-void
-bfa_iocfc_isr(void *bfaarg, struct bfi_mbmsg_s *m)
-{
-	struct bfa_s		*bfa = bfaarg;
-	struct bfa_iocfc_s	*iocfc = &bfa->iocfc;
-	union bfi_iocfc_i2h_msg_u	*msg;
-
-	msg = (union bfi_iocfc_i2h_msg_u *) m;
-	bfa_trc(bfa, msg->mh.msg_id);
-
-	switch (msg->mh.msg_id) {
-	case BFI_IOCFC_I2H_CFG_REPLY:
-		iocfc->cfg_reply = &msg->cfg_reply;
-		bfa_iocfc_cfgrsp(bfa);
-		break;
-
-	case BFI_IOCFC_I2H_GET_STATS_RSP:
-		if (iocfc->stats_busy == BFA_FALSE
-		    || iocfc->stats_status == BFA_STATUS_ETIMER)
-			break;
-
-		bfa_timer_stop(&iocfc->stats_timer);
-		iocfc->stats_status = BFA_STATUS_OK;
-		bfa_cb_queue(bfa, &iocfc->stats_hcb_qe, bfa_iocfc_stats_cb,
-			      bfa);
-		break;
-	case BFI_IOCFC_I2H_CLEAR_STATS_RSP:
-		/*
-		 * check for timer pop before processing the rsp
-		 */
-		if (iocfc->stats_busy == BFA_FALSE
-		    || iocfc->stats_status == BFA_STATUS_ETIMER)
-			break;
-
-		bfa_timer_stop(&iocfc->stats_timer);
-		iocfc->stats_status = BFA_STATUS_OK;
-		bfa_cb_queue(bfa, &iocfc->stats_hcb_qe,
-			      bfa_iocfc_stats_clr_cb, bfa);
-		break;
-	case BFI_IOCFC_I2H_UPDATEQ_RSP:
-		iocfc->updateq_cbfn(iocfc->updateq_cbarg, BFA_STATUS_OK);
-		break;
-	default:
-		bfa_assert(0);
-	}
-}
-
-#ifndef BFA_BIOS_BUILD
-void
-bfa_adapter_get_attr(struct bfa_s *bfa, struct bfa_adapter_attr_s *ad_attr)
-{
-	bfa_ioc_get_adapter_attr(&bfa->ioc, ad_attr);
-}
-
-u64
-bfa_adapter_get_id(struct bfa_s *bfa)
-{
-	return bfa_ioc_get_adid(&bfa->ioc);
-}
-
-void
-bfa_iocfc_get_attr(struct bfa_s *bfa, struct bfa_iocfc_attr_s *attr)
-{
-	struct bfa_iocfc_s      *iocfc = &bfa->iocfc;
-
-	attr->intr_attr.coalesce = iocfc->cfginfo->intr_attr.coalesce;
-
-	attr->intr_attr.delay = iocfc->cfginfo->intr_attr.delay ?
-			bfa_os_ntohs(iocfc->cfginfo->intr_attr.delay) :
-			bfa_os_ntohs(iocfc->cfgrsp->intr_attr.delay);
-
-	attr->intr_attr.latency = iocfc->cfginfo->intr_attr.latency ?
-			bfa_os_ntohs(iocfc->cfginfo->intr_attr.latency) :
-			bfa_os_ntohs(iocfc->cfgrsp->intr_attr.latency);
-
-	attr->config    = iocfc->cfg;
-
-}
-
-bfa_status_t
-bfa_iocfc_israttr_set(struct bfa_s *bfa, struct bfa_iocfc_intr_attr_s *attr)
-{
-	struct bfa_iocfc_s		*iocfc = &bfa->iocfc;
-	struct bfi_iocfc_set_intr_req_s *m;
-
-	iocfc->cfginfo->intr_attr.coalesce = attr->coalesce;
-	iocfc->cfginfo->intr_attr.delay = bfa_os_htons(attr->delay);
-	iocfc->cfginfo->intr_attr.latency = bfa_os_htons(attr->latency);
-
-	if (!bfa_iocfc_is_operational(bfa))
-		return BFA_STATUS_OK;
-
-	m = bfa_reqq_next(bfa, BFA_REQQ_IOC);
-	if (!m)
-		return BFA_STATUS_DEVBUSY;
-
-	bfi_h2i_set(m->mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_SET_INTR_REQ,
-			bfa_lpuid(bfa));
-	m->coalesce = iocfc->cfginfo->intr_attr.coalesce;
-	m->delay    = iocfc->cfginfo->intr_attr.delay;
-	m->latency  = iocfc->cfginfo->intr_attr.latency;
-
-
-	bfa_trc(bfa, attr->delay);
-	bfa_trc(bfa, attr->latency);
-
-	bfa_reqq_produce(bfa, BFA_REQQ_IOC);
-	return BFA_STATUS_OK;
-}
-
-void
-bfa_iocfc_set_snsbase(struct bfa_s *bfa, u64 snsbase_pa)
-{
-	struct bfa_iocfc_s      *iocfc = &bfa->iocfc;
-
-	iocfc->cfginfo->sense_buf_len = (BFI_IOIM_SNSLEN - 1);
-	bfa_dma_be_addr_set(iocfc->cfginfo->ioim_snsbase, snsbase_pa);
-}
-
-bfa_status_t
-bfa_iocfc_get_stats(struct bfa_s *bfa, struct bfa_iocfc_stats_s *stats,
-		      bfa_cb_ioc_t cbfn, void *cbarg)
-{
-	struct bfa_iocfc_s	*iocfc = &bfa->iocfc;
-
-	if (iocfc->stats_busy) {
-		bfa_trc(bfa, iocfc->stats_busy);
-		return BFA_STATUS_DEVBUSY;
-	}
-
-	if (!bfa_iocfc_is_operational(bfa)) {
-		bfa_trc(bfa, 0);
-		return BFA_STATUS_IOC_NON_OP;
-	}
-
-	iocfc->stats_busy = BFA_TRUE;
-	iocfc->stats_ret = stats;
-	iocfc->stats_cbfn = cbfn;
-	iocfc->stats_cbarg = cbarg;
-
-	bfa_iocfc_stats_query(bfa);
-
-	return BFA_STATUS_OK;
-}
-
-bfa_status_t
-bfa_iocfc_clear_stats(struct bfa_s *bfa, bfa_cb_ioc_t cbfn, void *cbarg)
-{
-	struct bfa_iocfc_s	*iocfc = &bfa->iocfc;
-
-	if (iocfc->stats_busy) {
-		bfa_trc(bfa, iocfc->stats_busy);
-		return BFA_STATUS_DEVBUSY;
-	}
-
-	if (!bfa_iocfc_is_operational(bfa)) {
-		bfa_trc(bfa, 0);
-		return BFA_STATUS_IOC_NON_OP;
-	}
-
-	iocfc->stats_busy = BFA_TRUE;
-	iocfc->stats_cbfn = cbfn;
-	iocfc->stats_cbarg = cbarg;
-
-	bfa_iocfc_stats_clear(bfa);
-	return BFA_STATUS_OK;
-}
-
-/**
- * Enable IOC after it is disabled.
- */
-void
-bfa_iocfc_enable(struct bfa_s *bfa)
-{
-	bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0,
-		     "IOC Enable");
-	bfa_ioc_enable(&bfa->ioc);
-}
-
-void
-bfa_iocfc_disable(struct bfa_s *bfa)
-{
-	bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0,
-		     "IOC Disable");
-	bfa->iocfc.action = BFA_IOCFC_ACT_DISABLE;
-
-	bfa->rme_process = BFA_FALSE;
-	bfa_ioc_disable(&bfa->ioc);
-}
-
-
-bfa_boolean_t
-bfa_iocfc_is_operational(struct bfa_s *bfa)
-{
-	return bfa_ioc_is_operational(&bfa->ioc) && bfa->iocfc.cfgdone;
-}
-
-/**
- * Return boot target port wwns -- read from boot information in flash.
- */
-void
-bfa_iocfc_get_bootwwns(struct bfa_s *bfa, u8 *nwwns, wwn_t *wwns)
-{
-	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
-	struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
-	int i;
-
-	if (cfgrsp->pbc_cfg.boot_enabled && cfgrsp->pbc_cfg.nbluns) {
-		bfa_trc(bfa, cfgrsp->pbc_cfg.nbluns);
-		*nwwns = cfgrsp->pbc_cfg.nbluns;
-		for (i = 0; i < cfgrsp->pbc_cfg.nbluns; i++)
-			wwns[i] = cfgrsp->pbc_cfg.blun[i].tgt_pwwn;
-
-		return;
-	}
-
-	*nwwns = cfgrsp->bootwwns.nwwns;
-	memcpy(wwns, cfgrsp->bootwwns.wwn, sizeof(cfgrsp->bootwwns.wwn));
-}
-
-void
-bfa_iocfc_get_pbc_boot_cfg(struct bfa_s *bfa, struct bfa_boot_pbc_s *pbcfg)
-{
-	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
-	struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
-
-	pbcfg->enable = cfgrsp->pbc_cfg.boot_enabled;
-	pbcfg->nbluns = cfgrsp->pbc_cfg.nbluns;
-	pbcfg->speed = cfgrsp->pbc_cfg.port_speed;
-	memcpy(pbcfg->pblun, cfgrsp->pbc_cfg.blun, sizeof(pbcfg->pblun));
-}
-
-int
-bfa_iocfc_get_pbc_vports(struct bfa_s *bfa, struct bfi_pbc_vport_s *pbc_vport)
-{
-	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
-	struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
-
-	memcpy(pbc_vport, cfgrsp->pbc_cfg.vport, sizeof(cfgrsp->pbc_cfg.vport));
-	return cfgrsp->pbc_cfg.nvports;
-}
-
-
-#endif
-
-

+ 0 - 184
drivers/scsi/bfa/bfa_iocfc.h

@@ -1,184 +0,0 @@
-/*
- * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
- * All rights reserved
- * www.brocade.com
- *
- * Linux driver for Brocade Fibre Channel Host Bus Adapter.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License (GPL) Version 2 as
- * published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- */
-
-#ifndef __BFA_IOCFC_H__
-#define __BFA_IOCFC_H__
-
-#include <bfa_ioc.h>
-#include <bfa.h>
-#include <bfi/bfi_iocfc.h>
-#include <bfi/bfi_pbc.h>
-#include <bfa_callback_priv.h>
-
-#define BFA_REQQ_NELEMS_MIN	(4)
-#define BFA_RSPQ_NELEMS_MIN	(4)
-
-struct bfa_iocfc_regs_s {
-	bfa_os_addr_t   intr_status;
-	bfa_os_addr_t   intr_mask;
-	bfa_os_addr_t   cpe_q_pi[BFI_IOC_MAX_CQS];
-	bfa_os_addr_t   cpe_q_ci[BFI_IOC_MAX_CQS];
-	bfa_os_addr_t   cpe_q_depth[BFI_IOC_MAX_CQS];
-	bfa_os_addr_t   cpe_q_ctrl[BFI_IOC_MAX_CQS];
-	bfa_os_addr_t   rme_q_ci[BFI_IOC_MAX_CQS];
-	bfa_os_addr_t   rme_q_pi[BFI_IOC_MAX_CQS];
-	bfa_os_addr_t   rme_q_depth[BFI_IOC_MAX_CQS];
-	bfa_os_addr_t   rme_q_ctrl[BFI_IOC_MAX_CQS];
-};
-
-/**
- * MSIX vector handlers
- */
-#define BFA_MSIX_MAX_VECTORS	22
-typedef void (*bfa_msix_handler_t)(struct bfa_s *bfa, int vec);
-struct bfa_msix_s {
-	int	nvecs;
-	bfa_msix_handler_t handler[BFA_MSIX_MAX_VECTORS];
-};
-
-/**
- * Chip specific interfaces
- */
-struct bfa_hwif_s {
-	void (*hw_reginit)(struct bfa_s *bfa);
-	void (*hw_reqq_ack)(struct bfa_s *bfa, int reqq);
-	void (*hw_rspq_ack)(struct bfa_s *bfa, int rspq);
-	void (*hw_msix_init)(struct bfa_s *bfa, int nvecs);
-	void (*hw_msix_install)(struct bfa_s *bfa);
-	void (*hw_msix_uninstall)(struct bfa_s *bfa);
-	void (*hw_isr_mode_set)(struct bfa_s *bfa, bfa_boolean_t msix);
-	void (*hw_msix_getvecs)(struct bfa_s *bfa, u32 *vecmap,
-			u32 *nvecs, u32 *maxvec);
-	void (*hw_msix_get_rme_range) (struct bfa_s *bfa, u32 *start,
-			u32 *end);
-};
-typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
-
-struct bfa_iocfc_s {
-	struct bfa_s 		*bfa;
-	struct bfa_iocfc_cfg_s 	cfg;
-	int			action;
-
-	u32        	req_cq_pi[BFI_IOC_MAX_CQS];
-	u32        	rsp_cq_ci[BFI_IOC_MAX_CQS];
-
-	struct bfa_cb_qe_s	init_hcb_qe;
-	struct bfa_cb_qe_s	stop_hcb_qe;
-	struct bfa_cb_qe_s	dis_hcb_qe;
-	struct bfa_cb_qe_s	stats_hcb_qe;
-	bfa_boolean_t		cfgdone;
-
-	struct bfa_dma_s	cfg_info;
-	struct bfi_iocfc_cfg_s *cfginfo;
-	struct bfa_dma_s	cfgrsp_dma;
-	struct bfi_iocfc_cfgrsp_s *cfgrsp;
-	struct bfi_iocfc_cfg_reply_s *cfg_reply;
-
-	u8			*stats_kva;
-	u64		stats_pa;
-	struct bfa_fw_stats_s 	*fw_stats;
-	struct bfa_timer_s 	stats_timer;	/*  timer */
-	struct bfa_iocfc_stats_s *stats_ret;	/*  driver stats location */
-	bfa_status_t		stats_status;	/*  stats/statsclr status */
-	bfa_boolean_t   	stats_busy;	/*  outstanding stats */
-	bfa_cb_ioc_t		stats_cbfn;	/*  driver callback function */
-	void           		*stats_cbarg;	/*  user callback arg */
-
-	struct bfa_dma_s   	req_cq_ba[BFI_IOC_MAX_CQS];
-	struct bfa_dma_s   	req_cq_shadow_ci[BFI_IOC_MAX_CQS];
-	struct bfa_dma_s   	rsp_cq_ba[BFI_IOC_MAX_CQS];
-	struct bfa_dma_s   	rsp_cq_shadow_pi[BFI_IOC_MAX_CQS];
-	struct bfa_iocfc_regs_s	bfa_regs;	/*  BFA device registers */
-	struct bfa_hwif_s	hwif;
-
-	bfa_cb_iocfc_t		updateq_cbfn; /*  bios callback function */
-	void			*updateq_cbarg;	/*  bios callback arg */
-	u32			intr_mask;
-};
-
-#define bfa_lpuid(__bfa)		bfa_ioc_portid(&(__bfa)->ioc)
-#define bfa_msix_init(__bfa, __nvecs)	\
-	((__bfa)->iocfc.hwif.hw_msix_init(__bfa, __nvecs))
-#define bfa_msix_install(__bfa)	\
-	((__bfa)->iocfc.hwif.hw_msix_install(__bfa))
-#define bfa_msix_uninstall(__bfa)	\
-	((__bfa)->iocfc.hwif.hw_msix_uninstall(__bfa))
-#define bfa_isr_mode_set(__bfa, __msix)	\
-	((__bfa)->iocfc.hwif.hw_isr_mode_set(__bfa, __msix))
-#define bfa_msix_getvecs(__bfa, __vecmap, __nvecs, __maxvec)	\
-	((__bfa)->iocfc.hwif.hw_msix_getvecs(__bfa, __vecmap,	\
-		 __nvecs, __maxvec))
-#define bfa_msix_get_rme_range(__bfa, __start, __end)   \
-	((__bfa)->iocfc.hwif.hw_msix_get_rme_range(__bfa, __start, __end))
-
-/*
- * FC specific IOC functions.
- */
-void bfa_iocfc_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
-		u32 *dm_len);
-void bfa_iocfc_attach(struct bfa_s *bfa, void *bfad,
-		struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo,
-		struct bfa_pcidev_s *pcidev);
-void bfa_iocfc_detach(struct bfa_s *bfa);
-void bfa_iocfc_init(struct bfa_s *bfa);
-void bfa_iocfc_start(struct bfa_s *bfa);
-void bfa_iocfc_stop(struct bfa_s *bfa);
-void bfa_iocfc_isr(void *bfa, struct bfi_mbmsg_s *msg);
-void bfa_iocfc_set_snsbase(struct bfa_s *bfa, u64 snsbase_pa);
-bfa_boolean_t bfa_iocfc_is_operational(struct bfa_s *bfa);
-void bfa_iocfc_reset_queues(struct bfa_s *bfa);
-void bfa_iocfc_updateq(struct bfa_s *bfa, u32 reqq_ba, u32 rspq_ba,
-			u32 reqq_sci, u32 rspq_spi,
-			bfa_cb_iocfc_t cbfn, void *cbarg);
-
-void bfa_msix_all(struct bfa_s *bfa, int vec);
-void bfa_msix_reqq(struct bfa_s *bfa, int vec);
-void bfa_msix_rspq(struct bfa_s *bfa, int vec);
-void bfa_msix_lpu_err(struct bfa_s *bfa, int vec);
-
-void bfa_hwcb_reginit(struct bfa_s *bfa);
-void bfa_hwcb_reqq_ack(struct bfa_s *bfa, int rspq);
-void bfa_hwcb_rspq_ack(struct bfa_s *bfa, int rspq);
-void bfa_hwcb_msix_init(struct bfa_s *bfa, int nvecs);
-void bfa_hwcb_msix_install(struct bfa_s *bfa);
-void bfa_hwcb_msix_uninstall(struct bfa_s *bfa);
-void bfa_hwcb_isr_mode_set(struct bfa_s *bfa, bfa_boolean_t msix);
-void bfa_hwcb_msix_getvecs(struct bfa_s *bfa, u32 *vecmap,
-			u32 *nvecs, u32 *maxvec);
-void bfa_hwcb_msix_get_rme_range(struct bfa_s *bfa, u32 *start, u32 *end);
-void bfa_hwct_reginit(struct bfa_s *bfa);
-void bfa_hwct_reqq_ack(struct bfa_s *bfa, int rspq);
-void bfa_hwct_rspq_ack(struct bfa_s *bfa, int rspq);
-void bfa_hwct_msix_init(struct bfa_s *bfa, int nvecs);
-void bfa_hwct_msix_install(struct bfa_s *bfa);
-void bfa_hwct_msix_uninstall(struct bfa_s *bfa);
-void bfa_hwct_isr_mode_set(struct bfa_s *bfa, bfa_boolean_t msix);
-void bfa_hwct_msix_getvecs(struct bfa_s *bfa, u32 *vecmap,
-			u32 *nvecs, u32 *maxvec);
-void bfa_hwct_msix_get_rme_range(struct bfa_s *bfa, u32 *start, u32 *end);
-
-void bfa_com_meminfo(bfa_boolean_t mincfg, u32 *dm_len);
-void bfa_com_attach(struct bfa_s *bfa, struct bfa_meminfo_s *mi,
-		bfa_boolean_t mincfg);
-void bfa_iocfc_get_bootwwns(struct bfa_s *bfa, u8 *nwwns, wwn_t *wwns);
-void bfa_iocfc_get_pbc_boot_cfg(struct bfa_s *bfa,
-		struct bfa_boot_pbc_s *pbcfg);
-int bfa_iocfc_get_pbc_vports(struct bfa_s *bfa,
-		struct bfi_pbc_vport_s *pbc_vport);
-
-#endif /* __BFA_IOCFC_H__ */
-

+ 0 - 44
drivers/scsi/bfa/bfa_iocfc_q.c

@@ -1,44 +0,0 @@
-/*
- * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
- * All rights reserved
- * www.brocade.com
- *
- * Linux driver for Brocade Fibre Channel Host Bus Adapter.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License (GPL) Version 2 as
- * published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- */
-
-#include <bfa.h>
-#include "bfa_intr_priv.h"
-
-BFA_TRC_FILE(HAL, IOCFC_Q);
-
-void
-bfa_iocfc_updateq(struct bfa_s *bfa, u32 reqq_ba, u32 rspq_ba,
-				u32 reqq_sci, u32 rspq_spi, bfa_cb_iocfc_t cbfn,
-				void *cbarg)
-{
-	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
-	struct bfi_iocfc_updateq_req_s updateq_req;
-
-	iocfc->updateq_cbfn = cbfn;
-	iocfc->updateq_cbarg = cbarg;
-
-	bfi_h2i_set(updateq_req.mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_UPDATEQ_REQ,
-			bfa_lpuid(bfa));
-
-	updateq_req.reqq_ba = bfa_os_htonl(reqq_ba);
-	updateq_req.rspq_ba = bfa_os_htonl(rspq_ba);
-	updateq_req.reqq_sci = bfa_os_htonl(reqq_sci);
-	updateq_req.rspq_spi = bfa_os_htonl(rspq_spi);
-
-	bfa_ioc_mbox_send(&bfa->ioc, &updateq_req,
-			sizeof(struct bfi_iocfc_updateq_req_s));
-}

+ 0 - 1364
drivers/scsi/bfa/bfa_ioim.c

@@ -1,1364 +0,0 @@
-/*
- * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
- * All rights reserved
- * www.brocade.com
- *
- * Linux driver for Brocade Fibre Channel Host Bus Adapter.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License (GPL) Version 2 as
- * published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- */
-
-#include <bfa.h>
-#include <cs/bfa_debug.h>
-#include <bfa_cb_ioim_macros.h>
-
-BFA_TRC_FILE(HAL, IOIM);
-
-/*
- * forward declarations.
- */
-static bfa_boolean_t	bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim);
-static bfa_boolean_t	bfa_ioim_sge_setup(struct bfa_ioim_s *ioim);
-static void		bfa_ioim_sgpg_setup(struct bfa_ioim_s *ioim);
-static bfa_boolean_t	bfa_ioim_send_abort(struct bfa_ioim_s *ioim);
-static void		bfa_ioim_notify_cleanup(struct bfa_ioim_s *ioim);
-static void __bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete);
-static void __bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete);
-static void __bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete);
-static void __bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete);
-static void __bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete);
-
-/**
- *  bfa_ioim_sm
- */
-
-/**
- * IO state machine events
- */
-enum bfa_ioim_event {
-	BFA_IOIM_SM_START = 1,		/*  io start request from host */
-	BFA_IOIM_SM_COMP_GOOD = 2,	/*  io good comp, resource free */
-	BFA_IOIM_SM_COMP = 3,		/*  io comp, resource is free */
-	BFA_IOIM_SM_COMP_UTAG = 4,	/*  io comp, resource is free */
-	BFA_IOIM_SM_DONE = 5,		/*  io comp, resource not free */
-	BFA_IOIM_SM_FREE = 6,		/*  io resource is freed */
-	BFA_IOIM_SM_ABORT = 7,		/*  abort request from scsi stack */
-	BFA_IOIM_SM_ABORT_COMP = 8,	/*  abort from f/w */
-	BFA_IOIM_SM_ABORT_DONE = 9,	/*  abort completion from f/w */
-	BFA_IOIM_SM_QRESUME = 10,	/*  CQ space available to queue IO */
-	BFA_IOIM_SM_SGALLOCED = 11,	/*  SG page allocation successful */
-	BFA_IOIM_SM_SQRETRY = 12,	/*  sequence recovery retry */
-	BFA_IOIM_SM_HCB	= 13,		/*  bfa callback complete */
-	BFA_IOIM_SM_CLEANUP = 14,	/*  IO cleanup from itnim */
-	BFA_IOIM_SM_TMSTART = 15,	/*  IO cleanup from tskim */
-	BFA_IOIM_SM_TMDONE = 16,	/*  IO cleanup from tskim */
-	BFA_IOIM_SM_HWFAIL = 17,	/*  IOC h/w failure event */
-	BFA_IOIM_SM_IOTOV = 18,		/*  ITN offline TOV       */
-};
-
-/*
- * forward declaration of IO state machine
- */
-static void     bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim,
-				       enum bfa_ioim_event event);
-static void     bfa_ioim_sm_sgalloc(struct bfa_ioim_s *ioim,
-					enum bfa_ioim_event event);
-static void     bfa_ioim_sm_active(struct bfa_ioim_s *ioim,
-				       enum bfa_ioim_event event);
-static void     bfa_ioim_sm_abort(struct bfa_ioim_s *ioim,
-				      enum bfa_ioim_event event);
-static void     bfa_ioim_sm_cleanup(struct bfa_ioim_s *ioim,
-					enum bfa_ioim_event event);
-static void     bfa_ioim_sm_qfull(struct bfa_ioim_s *ioim,
-				      enum bfa_ioim_event event);
-static void     bfa_ioim_sm_abort_qfull(struct bfa_ioim_s *ioim,
-					    enum bfa_ioim_event event);
-static void     bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s *ioim,
-					      enum bfa_ioim_event event);
-static void     bfa_ioim_sm_hcb(struct bfa_ioim_s *ioim,
-				    enum bfa_ioim_event event);
-static void     bfa_ioim_sm_hcb_free(struct bfa_ioim_s *ioim,
-					 enum bfa_ioim_event event);
-static void     bfa_ioim_sm_resfree(struct bfa_ioim_s *ioim,
-					enum bfa_ioim_event event);
-
-/**
- * 		IO is not started (unallocated).
- */
-static void
-bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
-{
-	bfa_trc_fp(ioim->bfa, ioim->iotag);
-	bfa_trc_fp(ioim->bfa, event);
-
-	switch (event) {
-	case BFA_IOIM_SM_START:
-		if (!bfa_itnim_is_online(ioim->itnim)) {
-			if (!bfa_itnim_hold_io(ioim->itnim)) {
-				bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
-				list_del(&ioim->qe);
-				list_add_tail(&ioim->qe,
-						&ioim->fcpim->ioim_comp_q);
-				bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
-						__bfa_cb_ioim_pathtov, ioim);
-			} else {
-				list_del(&ioim->qe);
-				list_add_tail(&ioim->qe,
-						&ioim->itnim->pending_q);
-			}
-			break;
-		}
-
-		if (ioim->nsges > BFI_SGE_INLINE) {
-			if (!bfa_ioim_sge_setup(ioim)) {
-				bfa_sm_set_state(ioim, bfa_ioim_sm_sgalloc);
-				return;
-			}
-		}
-
-		if (!bfa_ioim_send_ioreq(ioim)) {
-			bfa_sm_set_state(ioim, bfa_ioim_sm_qfull);
-			break;
-		}
-
-		bfa_sm_set_state(ioim, bfa_ioim_sm_active);
-		break;
-
-	case BFA_IOIM_SM_IOTOV:
-		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
-		list_del(&ioim->qe);
-		list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
-		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
-				__bfa_cb_ioim_pathtov, ioim);
-		break;
-
-	case BFA_IOIM_SM_ABORT:
-		/**
-		 * IO in pending queue can get abort requests. Complete abort
-		 * requests immediately.
-		 */
-		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
-		bfa_assert(bfa_q_is_on_q(&ioim->itnim->pending_q, ioim));
-		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
-				ioim);
-		break;
-
-	default:
-		bfa_sm_fault(ioim->bfa, event);
-	}
-}
-
-/**
- * 		IO is waiting for SG pages.
- */
-static void
-bfa_ioim_sm_sgalloc(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
-{
-	bfa_trc(ioim->bfa, ioim->iotag);
-	bfa_trc(ioim->bfa, event);
-
-	switch (event) {
-	case BFA_IOIM_SM_SGALLOCED:
-		if (!bfa_ioim_send_ioreq(ioim)) {
-			bfa_sm_set_state(ioim, bfa_ioim_sm_qfull);
-			break;
-		}
-		bfa_sm_set_state(ioim, bfa_ioim_sm_active);
-		break;
-
-	case BFA_IOIM_SM_CLEANUP:
-		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
-		bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
-		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
-			      ioim);
-		bfa_ioim_notify_cleanup(ioim);
-		break;
-
-	case BFA_IOIM_SM_ABORT:
-		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
-		bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
-		list_del(&ioim->qe);
-		list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
-		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
-			      ioim);
-		break;
-
-	case BFA_IOIM_SM_HWFAIL:
-		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
-		bfa_sgpg_wcancel(ioim->bfa, &ioim->iosp->sgpg_wqe);
-		list_del(&ioim->qe);
-		list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
-		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
-			      ioim);
-		break;
-
-	default:
-		bfa_sm_fault(ioim->bfa, event);
-	}
-}
-
-/**
- * 		IO is active.
- */
-static void
-bfa_ioim_sm_active(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
-{
-	bfa_trc_fp(ioim->bfa, ioim->iotag);
-	bfa_trc_fp(ioim->bfa, event);
-
-	switch (event) {
-	case BFA_IOIM_SM_COMP_GOOD:
-		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
-		list_del(&ioim->qe);
-		list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
-		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
-			      __bfa_cb_ioim_good_comp, ioim);
-		break;
-
-	case BFA_IOIM_SM_COMP:
-		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
-		list_del(&ioim->qe);
-		list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
-		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_comp,
-			      ioim);
-		break;
-
-	case BFA_IOIM_SM_DONE:
-		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
-		list_del(&ioim->qe);
-		list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
-		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_comp,
-			      ioim);
-		break;
-
-	case BFA_IOIM_SM_ABORT:
-		ioim->iosp->abort_explicit = BFA_TRUE;
-		ioim->io_cbfn = __bfa_cb_ioim_abort;
-
-		if (bfa_ioim_send_abort(ioim))
-			bfa_sm_set_state(ioim, bfa_ioim_sm_abort);
-		else {
-			bfa_sm_set_state(ioim, bfa_ioim_sm_abort_qfull);
-			bfa_reqq_wait(ioim->bfa, ioim->reqq,
-					&ioim->iosp->reqq_wait);
-		}
-		break;
-
-	case BFA_IOIM_SM_CLEANUP:
-		ioim->iosp->abort_explicit = BFA_FALSE;
-		ioim->io_cbfn = __bfa_cb_ioim_failed;
-
-		if (bfa_ioim_send_abort(ioim))
-			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
-		else {
-			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
-			bfa_reqq_wait(ioim->bfa, ioim->reqq,
-					&ioim->iosp->reqq_wait);
-		}
-		break;
-
-	case BFA_IOIM_SM_HWFAIL:
-		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
-		list_del(&ioim->qe);
-		list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
-		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
-			      ioim);
-		break;
-
-	default:
-		bfa_sm_fault(ioim->bfa, event);
-	}
-}
-
-/**
- * 		IO is being aborted, waiting for completion from firmware.
- */
-static void
-bfa_ioim_sm_abort(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
-{
-	bfa_trc(ioim->bfa, ioim->iotag);
-	bfa_trc(ioim->bfa, event);
-
-	switch (event) {
-	case BFA_IOIM_SM_COMP_GOOD:
-	case BFA_IOIM_SM_COMP:
-	case BFA_IOIM_SM_DONE:
-	case BFA_IOIM_SM_FREE:
-		break;
-
-	case BFA_IOIM_SM_ABORT_DONE:
-		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
-		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
-			      ioim);
-		break;
-
-	case BFA_IOIM_SM_ABORT_COMP:
-		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
-		list_del(&ioim->qe);
-		list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
-		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
-			      ioim);
-		break;
-
-	case BFA_IOIM_SM_COMP_UTAG:
-		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
-		list_del(&ioim->qe);
-		list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
-		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
-			      ioim);
-		break;
-
-	case BFA_IOIM_SM_CLEANUP:
-		bfa_assert(ioim->iosp->abort_explicit == BFA_TRUE);
-		ioim->iosp->abort_explicit = BFA_FALSE;
-
-		if (bfa_ioim_send_abort(ioim))
-			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
-		else {
-			bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
-			bfa_reqq_wait(ioim->bfa, ioim->reqq,
-					  &ioim->iosp->reqq_wait);
-		}
-		break;
-
-	case BFA_IOIM_SM_HWFAIL:
-		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
-		list_del(&ioim->qe);
-		list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
-		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
-			      ioim);
-		break;
-
-	default:
-		bfa_sm_fault(ioim->bfa, event);
-	}
-}
-
-/**
- * IO is being cleaned up (implicit abort), waiting for completion from
- * firmware.
- */
-static void
-bfa_ioim_sm_cleanup(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
-{
-	bfa_trc(ioim->bfa, ioim->iotag);
-	bfa_trc(ioim->bfa, event);
-
-	switch (event) {
-	case BFA_IOIM_SM_COMP_GOOD:
-	case BFA_IOIM_SM_COMP:
-	case BFA_IOIM_SM_DONE:
-	case BFA_IOIM_SM_FREE:
-		break;
-
-	case BFA_IOIM_SM_ABORT:
-		/**
-		 * IO is already being aborted implicitly
-		 */
-		ioim->io_cbfn = __bfa_cb_ioim_abort;
-		break;
-
-	case BFA_IOIM_SM_ABORT_DONE:
-		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
-		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
-		bfa_ioim_notify_cleanup(ioim);
-		break;
-
-	case BFA_IOIM_SM_ABORT_COMP:
-		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
-		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
-		bfa_ioim_notify_cleanup(ioim);
-		break;
-
-	case BFA_IOIM_SM_COMP_UTAG:
-		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
-		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
-		bfa_ioim_notify_cleanup(ioim);
-		break;
-
-	case BFA_IOIM_SM_HWFAIL:
-		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
-		list_del(&ioim->qe);
-		list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
-		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
-			      ioim);
-		break;
-
-	case BFA_IOIM_SM_CLEANUP:
-		/**
-		 * IO can be in cleanup state already due to TM command. 2nd cleanup
-		 * request comes from ITN offline event.
-		 */
-		break;
-
-	default:
-		bfa_sm_fault(ioim->bfa, event);
-	}
-}
-
-/**
- * 		IO is waiting for room in request CQ
- */
-static void
-bfa_ioim_sm_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
-{
-	bfa_trc(ioim->bfa, ioim->iotag);
-	bfa_trc(ioim->bfa, event);
-
-	switch (event) {
-	case BFA_IOIM_SM_QRESUME:
-		bfa_sm_set_state(ioim, bfa_ioim_sm_active);
-		bfa_ioim_send_ioreq(ioim);
-		break;
-
-	case BFA_IOIM_SM_ABORT:
-		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
-		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
-		list_del(&ioim->qe);
-		list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
-		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
-			      ioim);
-		break;
-
-	case BFA_IOIM_SM_CLEANUP:
-		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
-		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
-		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
-			      ioim);
-		bfa_ioim_notify_cleanup(ioim);
-		break;
-
-	case BFA_IOIM_SM_HWFAIL:
-		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
-		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
-		list_del(&ioim->qe);
-		list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
-		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
-			      ioim);
-		break;
-
-	default:
-		bfa_sm_fault(ioim->bfa, event);
-	}
-}
-
-/**
- * 		Active IO is being aborted, waiting for room in request CQ.
- */
-static void
-bfa_ioim_sm_abort_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
-{
-	bfa_trc(ioim->bfa, ioim->iotag);
-	bfa_trc(ioim->bfa, event);
-
-	switch (event) {
-	case BFA_IOIM_SM_QRESUME:
-		bfa_sm_set_state(ioim, bfa_ioim_sm_abort);
-		bfa_ioim_send_abort(ioim);
-		break;
-
-	case BFA_IOIM_SM_CLEANUP:
-		bfa_assert(ioim->iosp->abort_explicit == BFA_TRUE);
-		ioim->iosp->abort_explicit = BFA_FALSE;
-		bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup_qfull);
-		break;
-
-	case BFA_IOIM_SM_COMP_GOOD:
-	case BFA_IOIM_SM_COMP:
-		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
-		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
-		list_del(&ioim->qe);
-		list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
-		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
-			      ioim);
-		break;
-
-	case BFA_IOIM_SM_DONE:
-		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
-		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
-		list_del(&ioim->qe);
-		list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
-		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_abort,
-			      ioim);
-		break;
-
-	case BFA_IOIM_SM_HWFAIL:
-		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
-		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
-		list_del(&ioim->qe);
-		list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
-		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
-			      ioim);
-		break;
-
-	default:
-		bfa_sm_fault(ioim->bfa, event);
-	}
-}
-
-/**
- * 		Active IO is being cleaned up, waiting for room in request CQ.
- */
-static void
-bfa_ioim_sm_cleanup_qfull(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
-{
-	bfa_trc(ioim->bfa, ioim->iotag);
-	bfa_trc(ioim->bfa, event);
-
-	switch (event) {
-	case BFA_IOIM_SM_QRESUME:
-		bfa_sm_set_state(ioim, bfa_ioim_sm_cleanup);
-		bfa_ioim_send_abort(ioim);
-		break;
-
-	case BFA_IOIM_SM_ABORT:
-		/**
-		 * IO is already being cleaned up implicitly
-		 */
-		ioim->io_cbfn = __bfa_cb_ioim_abort;
-		break;
-
-	case BFA_IOIM_SM_COMP_GOOD:
-	case BFA_IOIM_SM_COMP:
-		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
-		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
-		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
-		bfa_ioim_notify_cleanup(ioim);
-		break;
-
-	case BFA_IOIM_SM_DONE:
-		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
-		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
-		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
-		bfa_ioim_notify_cleanup(ioim);
-		break;
-
-	case BFA_IOIM_SM_HWFAIL:
-		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
-		bfa_reqq_wcancel(&ioim->iosp->reqq_wait);
-		list_del(&ioim->qe);
-		list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
-		bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, __bfa_cb_ioim_failed,
-			      ioim);
-		break;
-
-	default:
-		bfa_sm_fault(ioim->bfa, event);
-	}
-}
-
-/**
- * IO bfa callback is pending.
- */
-static void
-bfa_ioim_sm_hcb(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
-{
-	bfa_trc_fp(ioim->bfa, ioim->iotag);
-	bfa_trc_fp(ioim->bfa, event);
-
-	switch (event) {
-	case BFA_IOIM_SM_HCB:
-		bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);
-		bfa_ioim_free(ioim);
-		bfa_cb_ioim_resfree(ioim->bfa->bfad);
-		break;
-
-	case BFA_IOIM_SM_CLEANUP:
-		bfa_ioim_notify_cleanup(ioim);
-		break;
-
-	case BFA_IOIM_SM_HWFAIL:
-		break;
-
-	default:
-		bfa_sm_fault(ioim->bfa, event);
-	}
-}
-
-/**
- * IO bfa callback is pending. IO resource cannot be freed.
- */
-static void
-bfa_ioim_sm_hcb_free(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
-{
-	bfa_trc(ioim->bfa, ioim->iotag);
-	bfa_trc(ioim->bfa, event);
-
-	switch (event) {
-	case BFA_IOIM_SM_HCB:
-		bfa_sm_set_state(ioim, bfa_ioim_sm_resfree);
-		list_del(&ioim->qe);
-		list_add_tail(&ioim->qe, &ioim->fcpim->ioim_resfree_q);
-		break;
-
-	case BFA_IOIM_SM_FREE:
-		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
-		break;
-
-	case BFA_IOIM_SM_CLEANUP:
-		bfa_ioim_notify_cleanup(ioim);
-		break;
-
-	case BFA_IOIM_SM_HWFAIL:
-		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
-		break;
-
-	default:
-		bfa_sm_fault(ioim->bfa, event);
-	}
-}
-
-/**
- * IO is completed, waiting resource free from firmware.
- */
-static void
-bfa_ioim_sm_resfree(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
-{
-	bfa_trc(ioim->bfa, ioim->iotag);
-	bfa_trc(ioim->bfa, event);
-
-	switch (event) {
-	case BFA_IOIM_SM_FREE:
-		bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);
-		bfa_ioim_free(ioim);
-		bfa_cb_ioim_resfree(ioim->bfa->bfad);
-		break;
-
-	case BFA_IOIM_SM_CLEANUP:
-		bfa_ioim_notify_cleanup(ioim);
-		break;
-
-	case BFA_IOIM_SM_HWFAIL:
-		break;
-
-	default:
-		bfa_sm_fault(ioim->bfa, event);
-	}
-}
-
-
-
-/**
- *  bfa_ioim_private
- */
-
-static void
-__bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete)
-{
-	struct bfa_ioim_s *ioim = cbarg;
-
-	if (!complete) {
-		bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
-		return;
-	}
-
-	bfa_cb_ioim_good_comp(ioim->bfa->bfad, ioim->dio);
-}
-
-static void
-__bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete)
-{
-	struct bfa_ioim_s	*ioim = cbarg;
-	struct bfi_ioim_rsp_s *m;
-	u8		*snsinfo = NULL;
-	u8         sns_len = 0;
-	s32         residue = 0;
-
-	if (!complete) {
-		bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
-		return;
-	}
-
-	m = (struct bfi_ioim_rsp_s *) &ioim->iosp->comp_rspmsg;
-	if (m->io_status == BFI_IOIM_STS_OK) {
-		/**
-		 * setup sense information, if present
-		 */
-		if (m->scsi_status == SCSI_STATUS_CHECK_CONDITION
-					&& m->sns_len) {
-			sns_len = m->sns_len;
-			snsinfo = ioim->iosp->snsinfo;
-		}
-
-		/**
-		 * setup residue value correctly for normal completions
-		 */
-		if (m->resid_flags == FCP_RESID_UNDER)
-			residue = bfa_os_ntohl(m->residue);
-		if (m->resid_flags == FCP_RESID_OVER) {
-			residue = bfa_os_ntohl(m->residue);
-			residue = -residue;
-		}
-	}
-
-	bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, m->io_status,
-			  m->scsi_status, sns_len, snsinfo, residue);
-}
-
-static void
-__bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete)
-{
-	struct bfa_ioim_s *ioim = cbarg;
-
-	if (!complete) {
-		bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
-		return;
-	}
-
-	bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_ABORTED,
-			  0, 0, NULL, 0);
-}
-
-static void
-__bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete)
-{
-	struct bfa_ioim_s *ioim = cbarg;
-
-	if (!complete) {
-		bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
-		return;
-	}
-
-	bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_PATHTOV,
-			  0, 0, NULL, 0);
-}
-
-static void
-__bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete)
-{
-	struct bfa_ioim_s *ioim = cbarg;
-
-	if (!complete) {
-		bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
-		return;
-	}
-
-	bfa_cb_ioim_abort(ioim->bfa->bfad, ioim->dio);
-}
-
-static void
-bfa_ioim_sgpg_alloced(void *cbarg)
-{
-	struct bfa_ioim_s *ioim = cbarg;
-
-	ioim->nsgpgs = BFA_SGPG_NPAGE(ioim->nsges);
-	list_splice_tail_init(&ioim->iosp->sgpg_wqe.sgpg_q, &ioim->sgpg_q);
-	bfa_ioim_sgpg_setup(ioim);
-	bfa_sm_send_event(ioim, BFA_IOIM_SM_SGALLOCED);
-}
-
-/**
- * Send I/O request to firmware.
- */
-static          bfa_boolean_t
-bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim)
-{
-	struct bfa_itnim_s *itnim = ioim->itnim;
-	struct bfi_ioim_req_s *m;
-	static struct fcp_cmnd_s cmnd_z0 = { 0 };
-	struct bfi_sge_s      *sge;
-	u32        pgdlen = 0;
-	u64 addr;
-	struct scatterlist *sg;
-	struct scsi_cmnd *cmnd = (struct scsi_cmnd *) ioim->dio;
-
-	/**
-	 * check for room in queue to send request now
-	 */
-	m = bfa_reqq_next(ioim->bfa, ioim->reqq);
-	if (!m) {
-		bfa_reqq_wait(ioim->bfa, ioim->reqq,
-				  &ioim->iosp->reqq_wait);
-		return BFA_FALSE;
-	}
-
-	/**
-	 * build i/o request message next
-	 */
-	m->io_tag = bfa_os_htons(ioim->iotag);
-	m->rport_hdl = ioim->itnim->rport->fw_handle;
-	m->io_timeout = bfa_cb_ioim_get_timeout(ioim->dio);
-
-	/**
-	 * build inline IO SG element here
-	 */
-	sge = &m->sges[0];
-	if (ioim->nsges) {
-		sg = (struct scatterlist *)scsi_sglist(cmnd);
-		addr = bfa_os_sgaddr(sg_dma_address(sg));
-		sge->sga = *(union bfi_addr_u *) &addr;
-		pgdlen = sg_dma_len(sg);
-		sge->sg_len = pgdlen;
-		sge->flags = (ioim->nsges > BFI_SGE_INLINE) ?
-					BFI_SGE_DATA_CPL : BFI_SGE_DATA_LAST;
-		bfa_sge_to_be(sge);
-		sge++;
-	}
-
-	if (ioim->nsges > BFI_SGE_INLINE) {
-		sge->sga = ioim->sgpg->sgpg_pa;
-	} else {
-		sge->sga.a32.addr_lo = 0;
-		sge->sga.a32.addr_hi = 0;
-	}
-	sge->sg_len = pgdlen;
-	sge->flags = BFI_SGE_PGDLEN;
-	bfa_sge_to_be(sge);
-
-	/**
-	 * set up I/O command parameters
-	 */
-	bfa_os_assign(m->cmnd, cmnd_z0);
-	m->cmnd.lun = bfa_cb_ioim_get_lun(ioim->dio);
-	m->cmnd.iodir = bfa_cb_ioim_get_iodir(ioim->dio);
-	bfa_os_assign(m->cmnd.cdb,
-			*(struct scsi_cdb_s *)bfa_cb_ioim_get_cdb(ioim->dio));
-	m->cmnd.fcp_dl = bfa_os_htonl(bfa_cb_ioim_get_size(ioim->dio));
-
-	/**
-	 * set up I/O message header
-	 */
-	switch (m->cmnd.iodir) {
-	case FCP_IODIR_READ:
-		bfi_h2i_set(m->mh, BFI_MC_IOIM_READ, 0, bfa_lpuid(ioim->bfa));
-		bfa_stats(itnim, input_reqs);
-		break;
-	case FCP_IODIR_WRITE:
-		bfi_h2i_set(m->mh, BFI_MC_IOIM_WRITE, 0, bfa_lpuid(ioim->bfa));
-		bfa_stats(itnim, output_reqs);
-		break;
-	case FCP_IODIR_RW:
-		bfa_stats(itnim, input_reqs);
-		bfa_stats(itnim, output_reqs);
-	default:
-		bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_lpuid(ioim->bfa));
-	}
-	if (itnim->seq_rec ||
-	    (bfa_cb_ioim_get_size(ioim->dio) & (sizeof(u32) - 1)))
-		bfi_h2i_set(m->mh, BFI_MC_IOIM_IO, 0, bfa_lpuid(ioim->bfa));
-
-#ifdef IOIM_ADVANCED
-	m->cmnd.crn = bfa_cb_ioim_get_crn(ioim->dio);
-	m->cmnd.priority = bfa_cb_ioim_get_priority(ioim->dio);
-	m->cmnd.taskattr = bfa_cb_ioim_get_taskattr(ioim->dio);
-
-	/**
-	 * Handle large CDB (>16 bytes).
-	 */
-	m->cmnd.addl_cdb_len = (bfa_cb_ioim_get_cdblen(ioim->dio) -
-					FCP_CMND_CDB_LEN) / sizeof(u32);
-	if (m->cmnd.addl_cdb_len) {
-		bfa_os_memcpy(&m->cmnd.cdb + 1, (struct scsi_cdb_s *)
-				bfa_cb_ioim_get_cdb(ioim->dio) + 1,
-				m->cmnd.addl_cdb_len * sizeof(u32));
-		fcp_cmnd_fcpdl(&m->cmnd) =
-				bfa_os_htonl(bfa_cb_ioim_get_size(ioim->dio));
-	}
-#endif
-
-	/**
-	 * queue I/O message to firmware
-	 */
-	bfa_reqq_produce(ioim->bfa, ioim->reqq);
-	return BFA_TRUE;
-}
-
-/**
- * Setup any additional SG pages needed.Inline SG element is setup
- * at queuing time.
- */
-static bfa_boolean_t
-bfa_ioim_sge_setup(struct bfa_ioim_s *ioim)
-{
-	u16        nsgpgs;
-
-	bfa_assert(ioim->nsges > BFI_SGE_INLINE);
-
-	/**
-	 * allocate SG pages needed
-	 */
-	nsgpgs = BFA_SGPG_NPAGE(ioim->nsges);
-	if (!nsgpgs)
-		return BFA_TRUE;
-
-	if (bfa_sgpg_malloc(ioim->bfa, &ioim->sgpg_q, nsgpgs)
-	    != BFA_STATUS_OK) {
-		bfa_sgpg_wait(ioim->bfa, &ioim->iosp->sgpg_wqe, nsgpgs);
-		return BFA_FALSE;
-	}
-
-	ioim->nsgpgs = nsgpgs;
-	bfa_ioim_sgpg_setup(ioim);
-
-	return BFA_TRUE;
-}
-
-static void
-bfa_ioim_sgpg_setup(struct bfa_ioim_s *ioim)
-{
-	int             sgeid, nsges, i;
-	struct bfi_sge_s      *sge;
-	struct bfa_sgpg_s *sgpg;
-	u32        pgcumsz;
-	u64        addr;
-	struct scatterlist *sg;
-	struct scsi_cmnd *cmnd = (struct scsi_cmnd *) ioim->dio;
-
-	sgeid = BFI_SGE_INLINE;
-	ioim->sgpg = sgpg = bfa_q_first(&ioim->sgpg_q);
-
-	sg = scsi_sglist(cmnd);
-	sg = sg_next(sg);
-
-	do {
-		sge = sgpg->sgpg->sges;
-		nsges = ioim->nsges - sgeid;
-		if (nsges > BFI_SGPG_DATA_SGES)
-			nsges = BFI_SGPG_DATA_SGES;
-
-		pgcumsz = 0;
-		for (i = 0; i < nsges; i++, sge++, sgeid++, sg = sg_next(sg)) {
-			addr = bfa_os_sgaddr(sg_dma_address(sg));
-			sge->sga = *(union bfi_addr_u *) &addr;
-			sge->sg_len = sg_dma_len(sg);
-			pgcumsz += sge->sg_len;
-
-			/**
-			 * set flags
-			 */
-			if (i < (nsges - 1))
-				sge->flags = BFI_SGE_DATA;
-			else if (sgeid < (ioim->nsges - 1))
-				sge->flags = BFI_SGE_DATA_CPL;
-			else
-				sge->flags = BFI_SGE_DATA_LAST;
-		}
-
-		sgpg = (struct bfa_sgpg_s *) bfa_q_next(sgpg);
-
-		/**
-		 * set the link element of each page
-		 */
-		if (sgeid == ioim->nsges) {
-			sge->flags = BFI_SGE_PGDLEN;
-			sge->sga.a32.addr_lo = 0;
-			sge->sga.a32.addr_hi = 0;
-		} else {
-			sge->flags = BFI_SGE_LINK;
-			sge->sga = sgpg->sgpg_pa;
-		}
-		sge->sg_len = pgcumsz;
-	} while (sgeid < ioim->nsges);
-}
-
-/**
- * Send I/O abort request to firmware.
- */
-static          bfa_boolean_t
-bfa_ioim_send_abort(struct bfa_ioim_s *ioim)
-{
-	struct bfi_ioim_abort_req_s *m;
-	enum bfi_ioim_h2i       msgop;
-
-	/**
-	 * check for room in queue to send request now
-	 */
-	m = bfa_reqq_next(ioim->bfa, ioim->reqq);
-	if (!m)
-		return BFA_FALSE;
-
-	/**
-	 * build i/o request message next
-	 */
-	if (ioim->iosp->abort_explicit)
-		msgop = BFI_IOIM_H2I_IOABORT_REQ;
-	else
-		msgop = BFI_IOIM_H2I_IOCLEANUP_REQ;
-
-	bfi_h2i_set(m->mh, BFI_MC_IOIM, msgop, bfa_lpuid(ioim->bfa));
-	m->io_tag    = bfa_os_htons(ioim->iotag);
-	m->abort_tag = ++ioim->abort_tag;
-
-	/**
-	 * queue I/O message to firmware
-	 */
-	bfa_reqq_produce(ioim->bfa, ioim->reqq);
-	return BFA_TRUE;
-}
-
-/**
- * Call to resume any I/O requests waiting for room in request queue.
- */
-static void
-bfa_ioim_qresume(void *cbarg)
-{
-	struct bfa_ioim_s *ioim = cbarg;
-
-	bfa_fcpim_stats(ioim->fcpim, qresumes);
-	bfa_sm_send_event(ioim, BFA_IOIM_SM_QRESUME);
-}
-
-
-static void
-bfa_ioim_notify_cleanup(struct bfa_ioim_s *ioim)
-{
-	/**
-	 * Move IO from itnim queue to fcpim global queue since itnim will be
-	 * freed.
-	 */
-	list_del(&ioim->qe);
-	list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
-
-	if (!ioim->iosp->tskim) {
-		if (ioim->fcpim->delay_comp && ioim->itnim->iotov_active) {
-			bfa_cb_dequeue(&ioim->hcb_qe);
-			list_del(&ioim->qe);
-			list_add_tail(&ioim->qe, &ioim->itnim->delay_comp_q);
-		}
-		bfa_itnim_iodone(ioim->itnim);
-	} else
-		bfa_tskim_iodone(ioim->iosp->tskim);
-}
-
-/**
- * Called when the IO TOV timer pops, or after the link comes back.
- */
-void
-bfa_ioim_delayed_comp(struct bfa_ioim_s *ioim, bfa_boolean_t iotov)
-{
-	/**
-	 * If the path TOV timer expired, fail back with PATHTOV status - these
-	 * IO requests are not normally retried by the IO stack.
-	 *
-	 * Otherwise the device came back online; fail the IO with normal failed
-	 * status so that the IO stack retries these failed IO requests.
-	 */
-	if (iotov)
-		ioim->io_cbfn = __bfa_cb_ioim_pathtov;
-	else
-		ioim->io_cbfn = __bfa_cb_ioim_failed;
-
-	bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
-
-	/**
-	 * Move IO to fcpim global queue since itnim will be
-	 * freed.
-	 */
-	list_del(&ioim->qe);
-	list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
-}
-
-
-
-/**
- *  bfa_ioim_friend
- */
-
-/**
- * Memory allocation and initialization.
- */
-void
-bfa_ioim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo)
-{
-	struct bfa_ioim_s		*ioim;
-	struct bfa_ioim_sp_s	*iosp;
-	u16		i;
-	u8			*snsinfo;
-	u32		snsbufsz;
-
-	/**
-	 * claim memory first
-	 */
-	ioim = (struct bfa_ioim_s *) bfa_meminfo_kva(minfo);
-	fcpim->ioim_arr = ioim;
-	bfa_meminfo_kva(minfo) = (u8 *) (ioim + fcpim->num_ioim_reqs);
-
-	iosp = (struct bfa_ioim_sp_s *) bfa_meminfo_kva(minfo);
-	fcpim->ioim_sp_arr = iosp;
-	bfa_meminfo_kva(minfo) = (u8 *) (iosp + fcpim->num_ioim_reqs);
-
-	/**
-	 * Claim DMA memory for per IO sense data.
-	 */
-	snsbufsz = fcpim->num_ioim_reqs * BFI_IOIM_SNSLEN;
-	fcpim->snsbase.pa  = bfa_meminfo_dma_phys(minfo);
-	bfa_meminfo_dma_phys(minfo) += snsbufsz;
-
-	fcpim->snsbase.kva = bfa_meminfo_dma_virt(minfo);
-	bfa_meminfo_dma_virt(minfo) += snsbufsz;
-	snsinfo = fcpim->snsbase.kva;
-	bfa_iocfc_set_snsbase(fcpim->bfa, fcpim->snsbase.pa);
-
-	/**
-	 * Initialize ioim free queues
-	 */
-	INIT_LIST_HEAD(&fcpim->ioim_free_q);
-	INIT_LIST_HEAD(&fcpim->ioim_resfree_q);
-	INIT_LIST_HEAD(&fcpim->ioim_comp_q);
-
-	for (i = 0; i < fcpim->num_ioim_reqs;
-	     i++, ioim++, iosp++, snsinfo += BFI_IOIM_SNSLEN) {
-		/*
-		 * initialize IOIM
-		 */
-		bfa_os_memset(ioim, 0, sizeof(struct bfa_ioim_s));
-		ioim->iotag   = i;
-		ioim->bfa     = fcpim->bfa;
-		ioim->fcpim   = fcpim;
-		ioim->iosp    = iosp;
-		iosp->snsinfo = snsinfo;
-		INIT_LIST_HEAD(&ioim->sgpg_q);
-		bfa_reqq_winit(&ioim->iosp->reqq_wait,
-				   bfa_ioim_qresume, ioim);
-		bfa_sgpg_winit(&ioim->iosp->sgpg_wqe,
-				   bfa_ioim_sgpg_alloced, ioim);
-		bfa_sm_set_state(ioim, bfa_ioim_sm_uninit);
-
-		list_add_tail(&ioim->qe, &fcpim->ioim_free_q);
-	}
-}
-
-/**
- * Driver detach time call.
- */
-void
-bfa_ioim_detach(struct bfa_fcpim_mod_s *fcpim)
-{
-}
-
-void
-bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
-{
-	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
-	struct bfi_ioim_rsp_s *rsp = (struct bfi_ioim_rsp_s *) m;
-	struct bfa_ioim_s *ioim;
-	u16        iotag;
-	enum bfa_ioim_event evt = BFA_IOIM_SM_COMP;
-
-	iotag = bfa_os_ntohs(rsp->io_tag);
-
-	ioim = BFA_IOIM_FROM_TAG(fcpim, iotag);
-	bfa_assert(ioim->iotag == iotag);
-
-	bfa_trc(ioim->bfa, ioim->iotag);
-	bfa_trc(ioim->bfa, rsp->io_status);
-	bfa_trc(ioim->bfa, rsp->reuse_io_tag);
-
-	if (bfa_sm_cmp_state(ioim, bfa_ioim_sm_active))
-		bfa_os_assign(ioim->iosp->comp_rspmsg, *m);
-
-	switch (rsp->io_status) {
-	case BFI_IOIM_STS_OK:
-		bfa_fcpim_stats(fcpim, iocomp_ok);
-		if (rsp->reuse_io_tag == 0)
-			evt = BFA_IOIM_SM_DONE;
-		else
-			evt = BFA_IOIM_SM_COMP;
-		break;
-
-	case BFI_IOIM_STS_TIMEDOUT:
-	case BFI_IOIM_STS_ABORTED:
-		rsp->io_status = BFI_IOIM_STS_ABORTED;
-		bfa_fcpim_stats(fcpim, iocomp_aborted);
-		if (rsp->reuse_io_tag == 0)
-			evt = BFA_IOIM_SM_DONE;
-		else
-			evt = BFA_IOIM_SM_COMP;
-		break;
-
-	case BFI_IOIM_STS_PROTO_ERR:
-		bfa_fcpim_stats(fcpim, iocom_proto_err);
-		bfa_assert(rsp->reuse_io_tag);
-		evt = BFA_IOIM_SM_COMP;
-		break;
-
-	case BFI_IOIM_STS_SQER_NEEDED:
-		bfa_fcpim_stats(fcpim, iocom_sqer_needed);
-		bfa_assert(rsp->reuse_io_tag == 0);
-		evt = BFA_IOIM_SM_SQRETRY;
-		break;
-
-	case BFI_IOIM_STS_RES_FREE:
-		bfa_fcpim_stats(fcpim, iocom_res_free);
-		evt = BFA_IOIM_SM_FREE;
-		break;
-
-	case BFI_IOIM_STS_HOST_ABORTED:
-		bfa_fcpim_stats(fcpim, iocom_hostabrts);
-		if (rsp->abort_tag != ioim->abort_tag) {
-			bfa_trc(ioim->bfa, rsp->abort_tag);
-			bfa_trc(ioim->bfa, ioim->abort_tag);
-			return;
-		}
-
-		if (rsp->reuse_io_tag)
-			evt = BFA_IOIM_SM_ABORT_COMP;
-		else
-			evt = BFA_IOIM_SM_ABORT_DONE;
-		break;
-
-	case BFI_IOIM_STS_UTAG:
-		bfa_fcpim_stats(fcpim, iocom_utags);
-		evt = BFA_IOIM_SM_COMP_UTAG;
-		break;
-
-	default:
-		bfa_assert(0);
-	}
-
-	bfa_sm_send_event(ioim, evt);
-}
-
-void
-bfa_ioim_good_comp_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
-{
-	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
-	struct bfi_ioim_rsp_s *rsp = (struct bfi_ioim_rsp_s *) m;
-	struct bfa_ioim_s *ioim;
-	u16        iotag;
-
-	iotag = bfa_os_ntohs(rsp->io_tag);
-
-	ioim = BFA_IOIM_FROM_TAG(fcpim, iotag);
-	bfa_assert(ioim->iotag == iotag);
-
-	bfa_trc_fp(ioim->bfa, ioim->iotag);
-	bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP_GOOD);
-}
-
-/**
- * Called by itnim to clean up IO while going offline.
- */
-void
-bfa_ioim_cleanup(struct bfa_ioim_s *ioim)
-{
-	bfa_trc(ioim->bfa, ioim->iotag);
-	bfa_fcpim_stats(ioim->fcpim, io_cleanups);
-
-	ioim->iosp->tskim = NULL;
-	bfa_sm_send_event(ioim, BFA_IOIM_SM_CLEANUP);
-}
-
-void
-bfa_ioim_cleanup_tm(struct bfa_ioim_s *ioim, struct bfa_tskim_s *tskim)
-{
-	bfa_trc(ioim->bfa, ioim->iotag);
-	bfa_fcpim_stats(ioim->fcpim, io_tmaborts);
-
-	ioim->iosp->tskim = tskim;
-	bfa_sm_send_event(ioim, BFA_IOIM_SM_CLEANUP);
-}
-
-/**
- * IOC failure handling.
- */
-void
-bfa_ioim_iocdisable(struct bfa_ioim_s *ioim)
-{
-	bfa_sm_send_event(ioim, BFA_IOIM_SM_HWFAIL);
-}
-
-/**
- * IO offline TOV popped. Fail the pending IO.
- */
-void
-bfa_ioim_tov(struct bfa_ioim_s *ioim)
-{
-	bfa_sm_send_event(ioim, BFA_IOIM_SM_IOTOV);
-}
-
-
-
-/**
- *  bfa_ioim_api
- */
-
-/**
- * Allocate IOIM resource for initiator mode I/O request.
- */
-struct bfa_ioim_s *
-bfa_ioim_alloc(struct bfa_s *bfa, struct bfad_ioim_s *dio,
-		struct bfa_itnim_s *itnim, u16 nsges)
-{
-	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
-	struct bfa_ioim_s *ioim;
-
-	/**
-	 * allocate IOIM resource
-	 */
-	bfa_q_deq(&fcpim->ioim_free_q, &ioim);
-	if (!ioim) {
-		bfa_fcpim_stats(fcpim, no_iotags);
-		return NULL;
-	}
-
-	ioim->dio = dio;
-	ioim->itnim = itnim;
-	ioim->nsges = nsges;
-	ioim->nsgpgs = 0;
-
-	bfa_stats(fcpim, total_ios);
-	bfa_stats(itnim, ios);
-	fcpim->ios_active++;
-
-	list_add_tail(&ioim->qe, &itnim->io_q);
-	bfa_trc_fp(ioim->bfa, ioim->iotag);
-
-	return ioim;
-}
-
-void
-bfa_ioim_free(struct bfa_ioim_s *ioim)
-{
-	struct bfa_fcpim_mod_s *fcpim = ioim->fcpim;
-
-	bfa_trc_fp(ioim->bfa, ioim->iotag);
-	bfa_assert_fp(bfa_sm_cmp_state(ioim, bfa_ioim_sm_uninit));
-
-	bfa_assert_fp(list_empty(&ioim->sgpg_q)
-		   || (ioim->nsges > BFI_SGE_INLINE));
-
-	if (ioim->nsgpgs > 0)
-		bfa_sgpg_mfree(ioim->bfa, &ioim->sgpg_q, ioim->nsgpgs);
-
-	bfa_stats(ioim->itnim, io_comps);
-	fcpim->ios_active--;
-
-	list_del(&ioim->qe);
-	list_add_tail(&ioim->qe, &fcpim->ioim_free_q);
-}
-
-void
-bfa_ioim_start(struct bfa_ioim_s *ioim)
-{
-	bfa_trc_fp(ioim->bfa, ioim->iotag);
-
-	/**
-	 * Obtain the queue over which this request has to be issued
-	 */
-	ioim->reqq = bfa_fcpim_ioredirect_enabled(ioim->bfa) ?
-			bfa_cb_ioim_get_reqq(ioim->dio) :
-			bfa_itnim_get_reqq(ioim);
-
-	bfa_sm_send_event(ioim, BFA_IOIM_SM_START);
-}
-
-/**
- * Driver I/O abort request.
- */
-void
-bfa_ioim_abort(struct bfa_ioim_s *ioim)
-{
-	bfa_trc(ioim->bfa, ioim->iotag);
-	bfa_fcpim_stats(ioim->fcpim, io_aborts);
-	bfa_sm_send_event(ioim, BFA_IOIM_SM_ABORT);
-}
-
-

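Both the ioim code above and the itnim code that follows (bfa_itnim.c) are organized around the same event-driven state-machine pattern: the current state is a handler function, bfa_sm_set_state() stores the handler, and bfa_sm_send_event() dispatches an event to whichever handler is current, which switches on the event and may move to a new state. A minimal, generic sketch of that pattern is shown below; the names (sm_set_state, sm_send_event, demo_*) are hypothetical placeholders, not the actual bfa_sm macro definitions.

/*
 * Minimal sketch of an event-driven state machine where the state is a
 * function pointer.  All names here are hypothetical illustrations.
 */
#include <stdio.h>

struct demo_obj;
typedef void (*sm_fsm_t)(struct demo_obj *obj, int event);	/* a state is a handler */

struct demo_obj {
	sm_fsm_t sm;			/* current state */
};

#define sm_set_state(_obj, _state)	((_obj)->sm = (_state))
#define sm_send_event(_obj, _event)	((_obj)->sm((_obj), (_event)))

enum { DEMO_SM_START = 1, DEMO_SM_DONE = 2 };

static void demo_sm_active(struct demo_obj *obj, int event);

/* Each state function switches on the incoming event. */
static void demo_sm_uninit(struct demo_obj *obj, int event)
{
	switch (event) {
	case DEMO_SM_START:
		sm_set_state(obj, demo_sm_active);
		printf("uninit -> active\n");
		break;
	default:
		printf("uninit: unexpected event %d\n", event);
	}
}

static void demo_sm_active(struct demo_obj *obj, int event)
{
	switch (event) {
	case DEMO_SM_DONE:
		sm_set_state(obj, demo_sm_uninit);
		printf("active -> uninit\n");
		break;
	default:
		printf("active: unexpected event %d\n", event);
	}
}

int main(void)
{
	struct demo_obj obj;

	sm_set_state(&obj, demo_sm_uninit);
	sm_send_event(&obj, DEMO_SM_START);	/* mirrors the send-event usage above */
	sm_send_event(&obj, DEMO_SM_DONE);
	return 0;
}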
+ 0 - 1088
drivers/scsi/bfa/bfa_itnim.c

@@ -1,1088 +0,0 @@
-/*
- * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
- * All rights reserved
- * www.brocade.com
- *
- * Linux driver for Brocade Fibre Channel Host Bus Adapter.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License (GPL) Version 2 as
- * published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- */
-
-#include <bfa.h>
-#include <bfa_fcpim.h>
-#include "bfa_fcpim_priv.h"
-
-BFA_TRC_FILE(HAL, ITNIM);
-
-#define BFA_ITNIM_FROM_TAG(_fcpim, _tag)				\
-	((_fcpim)->itnim_arr + ((_tag) & ((_fcpim)->num_itnims - 1)))
-
-#define bfa_fcpim_additn(__itnim)					\
-	list_add_tail(&(__itnim)->qe, &(__itnim)->fcpim->itnim_q)
-#define bfa_fcpim_delitn(__itnim)	do {				\
-	bfa_assert(bfa_q_is_on_q(&(__itnim)->fcpim->itnim_q, __itnim));      \
-	list_del(&(__itnim)->qe);      \
-	bfa_assert(list_empty(&(__itnim)->io_q));      \
-	bfa_assert(list_empty(&(__itnim)->io_cleanup_q));      \
-	bfa_assert(list_empty(&(__itnim)->pending_q));      \
-} while (0)
-
-#define bfa_itnim_online_cb(__itnim) do {				\
-	if ((__itnim)->bfa->fcs)					\
-		bfa_cb_itnim_online((__itnim)->ditn);      \
-	else {								\
-		bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe,	\
-		__bfa_cb_itnim_online, (__itnim));      \
-	}								\
-} while (0)
-
-#define bfa_itnim_offline_cb(__itnim) do {				\
-	if ((__itnim)->bfa->fcs)					\
-		bfa_cb_itnim_offline((__itnim)->ditn);      \
-	else {								\
-		bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe,	\
-		__bfa_cb_itnim_offline, (__itnim));      \
-	}								\
-} while (0)
-
-#define bfa_itnim_sler_cb(__itnim) do {					\
-	if ((__itnim)->bfa->fcs)					\
-		bfa_cb_itnim_sler((__itnim)->ditn);      \
-	else {								\
-		bfa_cb_queue((__itnim)->bfa, &(__itnim)->hcb_qe,	\
-		__bfa_cb_itnim_sler, (__itnim));      \
-	}								\
-} while (0)
-
-/*
- * forward declarations
- */
-static void     bfa_itnim_iocdisable_cleanup(struct bfa_itnim_s *itnim);
-static bfa_boolean_t bfa_itnim_send_fwcreate(struct bfa_itnim_s *itnim);
-static bfa_boolean_t bfa_itnim_send_fwdelete(struct bfa_itnim_s *itnim);
-static void     bfa_itnim_cleanp_comp(void *itnim_cbarg);
-static void     bfa_itnim_cleanup(struct bfa_itnim_s *itnim);
-static void     __bfa_cb_itnim_online(void *cbarg, bfa_boolean_t complete);
-static void     __bfa_cb_itnim_offline(void *cbarg, bfa_boolean_t complete);
-static void     __bfa_cb_itnim_sler(void *cbarg, bfa_boolean_t complete);
-static void     bfa_itnim_iotov_online(struct bfa_itnim_s *itnim);
-static void     bfa_itnim_iotov_cleanup(struct bfa_itnim_s *itnim);
-static void     bfa_itnim_iotov(void *itnim_arg);
-static void     bfa_itnim_iotov_start(struct bfa_itnim_s *itnim);
-static void     bfa_itnim_iotov_stop(struct bfa_itnim_s *itnim);
-static void     bfa_itnim_iotov_delete(struct bfa_itnim_s *itnim);
-
-/**
- *  bfa_itnim_sm BFA itnim state machine
- */
-
-
-enum bfa_itnim_event {
-	BFA_ITNIM_SM_CREATE = 1,	/*  itnim is created */
-	BFA_ITNIM_SM_ONLINE = 2,	/*  itnim is online */
-	BFA_ITNIM_SM_OFFLINE = 3,	/*  itnim is offline */
-	BFA_ITNIM_SM_FWRSP = 4,		/*  firmware response */
-	BFA_ITNIM_SM_DELETE = 5,	/*  deleting an existing itnim */
-	BFA_ITNIM_SM_CLEANUP = 6,	/*  IO cleanup completion */
-	BFA_ITNIM_SM_SLER = 7,		/*  second level error recovery */
-	BFA_ITNIM_SM_HWFAIL = 8,	/*  IOC h/w failure event */
-	BFA_ITNIM_SM_QRESUME = 9,	/*  queue space available */
-};
-
-static void     bfa_itnim_sm_uninit(struct bfa_itnim_s *itnim,
-					enum bfa_itnim_event event);
-static void     bfa_itnim_sm_created(struct bfa_itnim_s *itnim,
-					 enum bfa_itnim_event event);
-static void     bfa_itnim_sm_fwcreate(struct bfa_itnim_s *itnim,
-					  enum bfa_itnim_event event);
-static void	bfa_itnim_sm_delete_pending(struct bfa_itnim_s *itnim,
-				enum bfa_itnim_event event);
-static void     bfa_itnim_sm_online(struct bfa_itnim_s *itnim,
-					enum bfa_itnim_event event);
-static void     bfa_itnim_sm_sler(struct bfa_itnim_s *itnim,
-				      enum bfa_itnim_event event);
-static void     bfa_itnim_sm_cleanup_offline(struct bfa_itnim_s *itnim,
-						 enum bfa_itnim_event event);
-static void     bfa_itnim_sm_cleanup_delete(struct bfa_itnim_s *itnim,
-						enum bfa_itnim_event event);
-static void     bfa_itnim_sm_fwdelete(struct bfa_itnim_s *itnim,
-					  enum bfa_itnim_event event);
-static void     bfa_itnim_sm_offline(struct bfa_itnim_s *itnim,
-					 enum bfa_itnim_event event);
-static void     bfa_itnim_sm_iocdisable(struct bfa_itnim_s *itnim,
-					    enum bfa_itnim_event event);
-static void     bfa_itnim_sm_deleting(struct bfa_itnim_s *itnim,
-					  enum bfa_itnim_event event);
-static void     bfa_itnim_sm_fwcreate_qfull(struct bfa_itnim_s *itnim,
-					  enum bfa_itnim_event event);
-static void     bfa_itnim_sm_fwdelete_qfull(struct bfa_itnim_s *itnim,
-					  enum bfa_itnim_event event);
-static void     bfa_itnim_sm_deleting_qfull(struct bfa_itnim_s *itnim,
-					  enum bfa_itnim_event event);
-
-/**
- * 		Beginning/unallocated state - no events expected.
- */
-static void
-bfa_itnim_sm_uninit(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
-{
-	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
-	bfa_trc(itnim->bfa, event);
-
-	switch (event) {
-	case BFA_ITNIM_SM_CREATE:
-		bfa_sm_set_state(itnim, bfa_itnim_sm_created);
-		itnim->is_online = BFA_FALSE;
-		bfa_fcpim_additn(itnim);
-		break;
-
-	default:
-		bfa_sm_fault(itnim->bfa, event);
-	}
-}
-
-/**
- * 		Beginning state, only online event expected.
- */
-static void
-bfa_itnim_sm_created(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
-{
-	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
-	bfa_trc(itnim->bfa, event);
-
-	switch (event) {
-	case BFA_ITNIM_SM_ONLINE:
-		if (bfa_itnim_send_fwcreate(itnim))
-			bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
-		else
-			bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull);
-		break;
-
-	case BFA_ITNIM_SM_DELETE:
-		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
-		bfa_fcpim_delitn(itnim);
-		break;
-
-	case BFA_ITNIM_SM_HWFAIL:
-		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
-		break;
-
-	default:
-		bfa_sm_fault(itnim->bfa, event);
-	}
-}
-
-/**
- * 		Waiting for itnim create response from firmware.
- */
-static void
-bfa_itnim_sm_fwcreate(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
-{
-	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
-	bfa_trc(itnim->bfa, event);
-
-	switch (event) {
-	case BFA_ITNIM_SM_FWRSP:
-		bfa_sm_set_state(itnim, bfa_itnim_sm_online);
-		itnim->is_online = BFA_TRUE;
-		bfa_itnim_iotov_online(itnim);
-		bfa_itnim_online_cb(itnim);
-		break;
-
-	case BFA_ITNIM_SM_DELETE:
-		bfa_sm_set_state(itnim, bfa_itnim_sm_delete_pending);
-		break;
-
-	case BFA_ITNIM_SM_OFFLINE:
-		if (bfa_itnim_send_fwdelete(itnim))
-			bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete);
-		else
-			bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete_qfull);
-		break;
-
-	case BFA_ITNIM_SM_HWFAIL:
-		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
-		break;
-
-	default:
-		bfa_sm_fault(itnim->bfa, event);
-	}
-}
-
-static void
-bfa_itnim_sm_fwcreate_qfull(struct bfa_itnim_s *itnim,
-			enum bfa_itnim_event event)
-{
-	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
-	bfa_trc(itnim->bfa, event);
-
-	switch (event) {
-	case BFA_ITNIM_SM_QRESUME:
-		bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
-		bfa_itnim_send_fwcreate(itnim);
-		break;
-
-	case BFA_ITNIM_SM_DELETE:
-		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
-		bfa_reqq_wcancel(&itnim->reqq_wait);
-		bfa_fcpim_delitn(itnim);
-		break;
-
-	case BFA_ITNIM_SM_OFFLINE:
-		bfa_sm_set_state(itnim, bfa_itnim_sm_offline);
-		bfa_reqq_wcancel(&itnim->reqq_wait);
-		bfa_itnim_offline_cb(itnim);
-		break;
-
-	case BFA_ITNIM_SM_HWFAIL:
-		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
-		bfa_reqq_wcancel(&itnim->reqq_wait);
-		break;
-
-	default:
-		bfa_sm_fault(itnim->bfa, event);
-	}
-}
-
-/**
- * 	Waiting for itnim create response from firmware, a delete is pending.
- */
-static void
-bfa_itnim_sm_delete_pending(struct bfa_itnim_s *itnim,
-				enum bfa_itnim_event event)
-{
-	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
-	bfa_trc(itnim->bfa, event);
-
-	switch (event) {
-	case BFA_ITNIM_SM_FWRSP:
-		if (bfa_itnim_send_fwdelete(itnim))
-			bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
-		else
-			bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull);
-		break;
-
-	case BFA_ITNIM_SM_HWFAIL:
-		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
-		bfa_fcpim_delitn(itnim);
-		break;
-
-	default:
-		bfa_sm_fault(itnim->bfa, event);
-	}
-}
-
-/**
- * 		Online state - normal parking state.
- */
-static void
-bfa_itnim_sm_online(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
-{
-	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
-	bfa_trc(itnim->bfa, event);
-
-	switch (event) {
-	case BFA_ITNIM_SM_OFFLINE:
-		bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_offline);
-		itnim->is_online = BFA_FALSE;
-		bfa_itnim_iotov_start(itnim);
-		bfa_itnim_cleanup(itnim);
-		break;
-
-	case BFA_ITNIM_SM_DELETE:
-		bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete);
-		itnim->is_online = BFA_FALSE;
-		bfa_itnim_cleanup(itnim);
-		break;
-
-	case BFA_ITNIM_SM_SLER:
-		bfa_sm_set_state(itnim, bfa_itnim_sm_sler);
-		itnim->is_online = BFA_FALSE;
-		bfa_itnim_iotov_start(itnim);
-		bfa_itnim_sler_cb(itnim);
-		break;
-
-	case BFA_ITNIM_SM_HWFAIL:
-		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
-		itnim->is_online = BFA_FALSE;
-		bfa_itnim_iotov_start(itnim);
-		bfa_itnim_iocdisable_cleanup(itnim);
-		break;
-
-	default:
-		bfa_sm_fault(itnim->bfa, event);
-	}
-}
-
-/**
- * 		Second level error recovery needed.
- */
-static void
-bfa_itnim_sm_sler(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
-{
-	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
-	bfa_trc(itnim->bfa, event);
-
-	switch (event) {
-	case BFA_ITNIM_SM_OFFLINE:
-		bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_offline);
-		bfa_itnim_cleanup(itnim);
-		break;
-
-	case BFA_ITNIM_SM_DELETE:
-		bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete);
-		bfa_itnim_cleanup(itnim);
-		bfa_itnim_iotov_delete(itnim);
-		break;
-
-	case BFA_ITNIM_SM_HWFAIL:
-		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
-		bfa_itnim_iocdisable_cleanup(itnim);
-		break;
-
-	default:
-		bfa_sm_fault(itnim->bfa, event);
-	}
-}
-
-/**
- * 		Going offline. Waiting for active IO cleanup.
- */
-static void
-bfa_itnim_sm_cleanup_offline(struct bfa_itnim_s *itnim,
-				 enum bfa_itnim_event event)
-{
-	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
-	bfa_trc(itnim->bfa, event);
-
-	switch (event) {
-	case BFA_ITNIM_SM_CLEANUP:
-		if (bfa_itnim_send_fwdelete(itnim))
-			bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete);
-		else
-			bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete_qfull);
-		break;
-
-	case BFA_ITNIM_SM_DELETE:
-		bfa_sm_set_state(itnim, bfa_itnim_sm_cleanup_delete);
-		bfa_itnim_iotov_delete(itnim);
-		break;
-
-	case BFA_ITNIM_SM_HWFAIL:
-		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
-		bfa_itnim_iocdisable_cleanup(itnim);
-		bfa_itnim_offline_cb(itnim);
-		break;
-
-	case BFA_ITNIM_SM_SLER:
-		break;
-
-	default:
-		bfa_sm_fault(itnim->bfa, event);
-	}
-}
-
-/**
- * 		Deleting itnim. Waiting for active IO cleanup.
- */
-static void
-bfa_itnim_sm_cleanup_delete(struct bfa_itnim_s *itnim,
-				enum bfa_itnim_event event)
-{
-	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
-	bfa_trc(itnim->bfa, event);
-
-	switch (event) {
-	case BFA_ITNIM_SM_CLEANUP:
-		if (bfa_itnim_send_fwdelete(itnim))
-			bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
-		else
-			bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull);
-		break;
-
-	case BFA_ITNIM_SM_HWFAIL:
-		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
-		bfa_itnim_iocdisable_cleanup(itnim);
-		break;
-
-	default:
-		bfa_sm_fault(itnim->bfa, event);
-	}
-}
-
-/**
- * Rport offline. Firmware itnim is being deleted - awaiting f/w response.
- */
-static void
-bfa_itnim_sm_fwdelete(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
-{
-	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
-	bfa_trc(itnim->bfa, event);
-
-	switch (event) {
-	case BFA_ITNIM_SM_FWRSP:
-		bfa_sm_set_state(itnim, bfa_itnim_sm_offline);
-		bfa_itnim_offline_cb(itnim);
-		break;
-
-	case BFA_ITNIM_SM_DELETE:
-		bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
-		break;
-
-	case BFA_ITNIM_SM_HWFAIL:
-		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
-		bfa_itnim_offline_cb(itnim);
-		break;
-
-	default:
-		bfa_sm_fault(itnim->bfa, event);
-	}
-}
-
-static void
-bfa_itnim_sm_fwdelete_qfull(struct bfa_itnim_s *itnim,
-			enum bfa_itnim_event event)
-{
-	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
-	bfa_trc(itnim->bfa, event);
-
-	switch (event) {
-	case BFA_ITNIM_SM_QRESUME:
-		bfa_sm_set_state(itnim, bfa_itnim_sm_fwdelete);
-		bfa_itnim_send_fwdelete(itnim);
-		break;
-
-	case BFA_ITNIM_SM_DELETE:
-		bfa_sm_set_state(itnim, bfa_itnim_sm_deleting_qfull);
-		break;
-
-	case BFA_ITNIM_SM_HWFAIL:
-		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
-		bfa_reqq_wcancel(&itnim->reqq_wait);
-		bfa_itnim_offline_cb(itnim);
-		break;
-
-	default:
-		bfa_sm_fault(itnim->bfa, event);
-	}
-}
-
-/**
- * 		Offline state.
- */
-static void
-bfa_itnim_sm_offline(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
-{
-	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
-	bfa_trc(itnim->bfa, event);
-
-	switch (event) {
-	case BFA_ITNIM_SM_DELETE:
-		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
-		bfa_itnim_iotov_delete(itnim);
-		bfa_fcpim_delitn(itnim);
-		break;
-
-	case BFA_ITNIM_SM_ONLINE:
-		if (bfa_itnim_send_fwcreate(itnim))
-			bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
-		else
-			bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull);
-		break;
-
-	case BFA_ITNIM_SM_HWFAIL:
-		bfa_sm_set_state(itnim, bfa_itnim_sm_iocdisable);
-		break;
-
-	default:
-		bfa_sm_fault(itnim->bfa, event);
-	}
-}
-
-/**
- * 		IOC h/w failed state.
- */
-static void
-bfa_itnim_sm_iocdisable(struct bfa_itnim_s *itnim,
-			    enum bfa_itnim_event event)
-{
-	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
-	bfa_trc(itnim->bfa, event);
-
-	switch (event) {
-	case BFA_ITNIM_SM_DELETE:
-		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
-		bfa_itnim_iotov_delete(itnim);
-		bfa_fcpim_delitn(itnim);
-		break;
-
-	case BFA_ITNIM_SM_OFFLINE:
-		bfa_itnim_offline_cb(itnim);
-		break;
-
-	case BFA_ITNIM_SM_ONLINE:
-		if (bfa_itnim_send_fwcreate(itnim))
-			bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate);
-		else
-			bfa_sm_set_state(itnim, bfa_itnim_sm_fwcreate_qfull);
-		break;
-
-	case BFA_ITNIM_SM_HWFAIL:
-		break;
-
-	default:
-		bfa_sm_fault(itnim->bfa, event);
-	}
-}
-
-/**
- * 		Itnim is deleted, waiting for firmware response to delete.
- */
-static void
-bfa_itnim_sm_deleting(struct bfa_itnim_s *itnim, enum bfa_itnim_event event)
-{
-	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
-	bfa_trc(itnim->bfa, event);
-
-	switch (event) {
-	case BFA_ITNIM_SM_FWRSP:
-	case BFA_ITNIM_SM_HWFAIL:
-		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
-		bfa_fcpim_delitn(itnim);
-		break;
-
-	default:
-		bfa_sm_fault(itnim->bfa, event);
-	}
-}
-
-static void
-bfa_itnim_sm_deleting_qfull(struct bfa_itnim_s *itnim,
-			enum bfa_itnim_event event)
-{
-	bfa_trc(itnim->bfa, itnim->rport->rport_tag);
-	bfa_trc(itnim->bfa, event);
-
-	switch (event) {
-	case BFA_ITNIM_SM_QRESUME:
-		bfa_sm_set_state(itnim, bfa_itnim_sm_deleting);
-		bfa_itnim_send_fwdelete(itnim);
-		break;
-
-	case BFA_ITNIM_SM_HWFAIL:
-		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
-		bfa_reqq_wcancel(&itnim->reqq_wait);
-		bfa_fcpim_delitn(itnim);
-		break;
-
-	default:
-		bfa_sm_fault(itnim->bfa, event);
-	}
-}
-
-
-
-/**
- *  bfa_itnim_private
- */
-
-/**
- * 		Initiate cleanup of all IOs on an IOC failure.
- */
-static void
-bfa_itnim_iocdisable_cleanup(struct bfa_itnim_s *itnim)
-{
-	struct bfa_tskim_s *tskim;
-	struct bfa_ioim_s *ioim;
-	struct list_head        *qe, *qen;
-
-	list_for_each_safe(qe, qen, &itnim->tsk_q) {
-		tskim = (struct bfa_tskim_s *) qe;
-		bfa_tskim_iocdisable(tskim);
-	}
-
-	list_for_each_safe(qe, qen, &itnim->io_q) {
-		ioim = (struct bfa_ioim_s *) qe;
-		bfa_ioim_iocdisable(ioim);
-	}
-
-	/**
-	 * For IO requests in the pending queue, we pretend an early timeout.
-	 */
-	list_for_each_safe(qe, qen, &itnim->pending_q) {
-		ioim = (struct bfa_ioim_s *) qe;
-		bfa_ioim_tov(ioim);
-	}
-
-	list_for_each_safe(qe, qen, &itnim->io_cleanup_q) {
-		ioim = (struct bfa_ioim_s *) qe;
-		bfa_ioim_iocdisable(ioim);
-	}
-}
-
-/**
- * 		IO cleanup completion
- */
-static void
-bfa_itnim_cleanp_comp(void *itnim_cbarg)
-{
-	struct bfa_itnim_s *itnim = itnim_cbarg;
-
-	bfa_stats(itnim, cleanup_comps);
-	bfa_sm_send_event(itnim, BFA_ITNIM_SM_CLEANUP);
-}
-
-/**
- * 		Initiate cleanup of all IOs.
- */
-static void
-bfa_itnim_cleanup(struct bfa_itnim_s *itnim)
-{
-	struct bfa_ioim_s  *ioim;
-	struct bfa_tskim_s *tskim;
-	struct list_head         *qe, *qen;
-
-	bfa_wc_init(&itnim->wc, bfa_itnim_cleanp_comp, itnim);
-
-	list_for_each_safe(qe, qen, &itnim->io_q) {
-		ioim = (struct bfa_ioim_s *) qe;
-
-		/**
-		 * Move IO from the active queue to a cleanup queue so that a
-		 * later TM will not pick up this IO.
-		 */
-		list_del(&ioim->qe);
-		list_add_tail(&ioim->qe, &itnim->io_cleanup_q);
-
-		bfa_wc_up(&itnim->wc);
-		bfa_ioim_cleanup(ioim);
-	}
-
-	list_for_each_safe(qe, qen, &itnim->tsk_q) {
-		tskim = (struct bfa_tskim_s *) qe;
-		bfa_wc_up(&itnim->wc);
-		bfa_tskim_cleanup(tskim);
-	}
-
-	bfa_wc_wait(&itnim->wc);
-}
-
-static void
-__bfa_cb_itnim_online(void *cbarg, bfa_boolean_t complete)
-{
-	struct bfa_itnim_s *itnim = cbarg;
-
-	if (complete)
-		bfa_cb_itnim_online(itnim->ditn);
-}
-
-static void
-__bfa_cb_itnim_offline(void *cbarg, bfa_boolean_t complete)
-{
-	struct bfa_itnim_s *itnim = cbarg;
-
-	if (complete)
-		bfa_cb_itnim_offline(itnim->ditn);
-}
-
-static void
-__bfa_cb_itnim_sler(void *cbarg, bfa_boolean_t complete)
-{
-	struct bfa_itnim_s *itnim = cbarg;
-
-	if (complete)
-		bfa_cb_itnim_sler(itnim->ditn);
-}
-
-/**
- * Call to resume any I/O requests waiting for room in request queue.
- */
-static void
-bfa_itnim_qresume(void *cbarg)
-{
-	struct bfa_itnim_s *itnim = cbarg;
-
-	bfa_sm_send_event(itnim, BFA_ITNIM_SM_QRESUME);
-}
-
-
-
-
-/**
- *  bfa_itnim_public
- */
-
-void
-bfa_itnim_iodone(struct bfa_itnim_s *itnim)
-{
-	bfa_wc_down(&itnim->wc);
-}
-
-void
-bfa_itnim_tskdone(struct bfa_itnim_s *itnim)
-{
-	bfa_wc_down(&itnim->wc);
-}
-
-void
-bfa_itnim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
-		u32 *dm_len)
-{
-	/**
-	 * ITN memory
-	 */
-	*km_len += cfg->fwcfg.num_rports * sizeof(struct bfa_itnim_s);
-}
-
-void
-bfa_itnim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo)
-{
-	struct bfa_s      *bfa = fcpim->bfa;
-	struct bfa_itnim_s *itnim;
-	int             i;
-
-	INIT_LIST_HEAD(&fcpim->itnim_q);
-
-	itnim = (struct bfa_itnim_s *) bfa_meminfo_kva(minfo);
-	fcpim->itnim_arr = itnim;
-
-	for (i = 0; i < fcpim->num_itnims; i++, itnim++) {
-		bfa_os_memset(itnim, 0, sizeof(struct bfa_itnim_s));
-		itnim->bfa = bfa;
-		itnim->fcpim = fcpim;
-		itnim->reqq = BFA_REQQ_QOS_LO;
-		itnim->rport = BFA_RPORT_FROM_TAG(bfa, i);
-		itnim->iotov_active = BFA_FALSE;
-		bfa_reqq_winit(&itnim->reqq_wait, bfa_itnim_qresume, itnim);
-
-		INIT_LIST_HEAD(&itnim->io_q);
-		INIT_LIST_HEAD(&itnim->io_cleanup_q);
-		INIT_LIST_HEAD(&itnim->pending_q);
-		INIT_LIST_HEAD(&itnim->tsk_q);
-		INIT_LIST_HEAD(&itnim->delay_comp_q);
-		bfa_sm_set_state(itnim, bfa_itnim_sm_uninit);
-	}
-
-	bfa_meminfo_kva(minfo) = (u8 *) itnim;
-}
-
-void
-bfa_itnim_iocdisable(struct bfa_itnim_s *itnim)
-{
-	bfa_stats(itnim, ioc_disabled);
-	bfa_sm_send_event(itnim, BFA_ITNIM_SM_HWFAIL);
-}
-
-static bfa_boolean_t
-bfa_itnim_send_fwcreate(struct bfa_itnim_s *itnim)
-{
-	struct bfi_itnim_create_req_s *m;
-
-	itnim->msg_no++;
-
-	/**
-	 * check for room in queue to send request now
-	 */
-	m = bfa_reqq_next(itnim->bfa, itnim->reqq);
-	if (!m) {
-		bfa_reqq_wait(itnim->bfa, itnim->reqq, &itnim->reqq_wait);
-		return BFA_FALSE;
-	}
-
-	bfi_h2i_set(m->mh, BFI_MC_ITNIM, BFI_ITNIM_H2I_CREATE_REQ,
-			bfa_lpuid(itnim->bfa));
-	m->fw_handle = itnim->rport->fw_handle;
-	m->class = FC_CLASS_3;
-	m->seq_rec = itnim->seq_rec;
-	m->msg_no = itnim->msg_no;
-
-	/**
-	 * queue I/O message to firmware
-	 */
-	bfa_reqq_produce(itnim->bfa, itnim->reqq);
-	return BFA_TRUE;
-}
-
-static bfa_boolean_t
-bfa_itnim_send_fwdelete(struct bfa_itnim_s *itnim)
-{
-	struct bfi_itnim_delete_req_s *m;
-
-	/**
-	 * check for room in queue to send request now
-	 */
-	m = bfa_reqq_next(itnim->bfa, itnim->reqq);
-	if (!m) {
-		bfa_reqq_wait(itnim->bfa, itnim->reqq, &itnim->reqq_wait);
-		return BFA_FALSE;
-	}
-
-	bfi_h2i_set(m->mh, BFI_MC_ITNIM, BFI_ITNIM_H2I_DELETE_REQ,
-			bfa_lpuid(itnim->bfa));
-	m->fw_handle = itnim->rport->fw_handle;
-
-	/**
-	 * queue I/O message to firmware
-	 */
-	bfa_reqq_produce(itnim->bfa, itnim->reqq);
-	return BFA_TRUE;
-}
-
-/**
- * Clean up all pending failed inflight requests.
- */
-static void
-bfa_itnim_delayed_comp(struct bfa_itnim_s *itnim, bfa_boolean_t iotov)
-{
-	struct bfa_ioim_s *ioim;
-	struct list_head *qe, *qen;
-
-	list_for_each_safe(qe, qen, &itnim->delay_comp_q) {
-		ioim = (struct bfa_ioim_s *)qe;
-		bfa_ioim_delayed_comp(ioim, iotov);
-	}
-}
-
-/**
- * Start all pending IO requests.
- */
-static void
-bfa_itnim_iotov_online(struct bfa_itnim_s *itnim)
-{
-	struct bfa_ioim_s *ioim;
-
-	bfa_itnim_iotov_stop(itnim);
-
-	/**
-	 * Abort all inflight IO requests in the queue
-	 */
-	bfa_itnim_delayed_comp(itnim, BFA_FALSE);
-
-	/**
-	 * Start all pending IO requests.
-	 */
-	while (!list_empty(&itnim->pending_q)) {
-		bfa_q_deq(&itnim->pending_q, &ioim);
-		list_add_tail(&ioim->qe, &itnim->io_q);
-		bfa_ioim_start(ioim);
-	}
-}
-
-/**
- * Fail all pending IO requests
- */
-static void
-bfa_itnim_iotov_cleanup(struct bfa_itnim_s *itnim)
-{
-	struct bfa_ioim_s *ioim;
-
-	/**
-	 * Fail all inflight IO requests in the queue
-	 */
-	bfa_itnim_delayed_comp(itnim, BFA_TRUE);
-
-	/**
-	 * Fail any pending IO requests.
-	 */
-	while (!list_empty(&itnim->pending_q)) {
-		bfa_q_deq(&itnim->pending_q, &ioim);
-		list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
-		bfa_ioim_tov(ioim);
-	}
-}
-
-/**
- * IO TOV timer callback. Fail any pending IO requests.
- */
-static void
-bfa_itnim_iotov(void *itnim_arg)
-{
-	struct bfa_itnim_s *itnim = itnim_arg;
-
-	itnim->iotov_active = BFA_FALSE;
-
-	bfa_cb_itnim_tov_begin(itnim->ditn);
-	bfa_itnim_iotov_cleanup(itnim);
-	bfa_cb_itnim_tov(itnim->ditn);
-}
-
-/**
- * Start IO TOV timer for failing back pending IO requests in offline state.
- */
-static void
-bfa_itnim_iotov_start(struct bfa_itnim_s *itnim)
-{
-	if (itnim->fcpim->path_tov > 0) {
-
-		itnim->iotov_active = BFA_TRUE;
-		bfa_assert(bfa_itnim_hold_io(itnim));
-		bfa_timer_start(itnim->bfa, &itnim->timer,
-			bfa_itnim_iotov, itnim, itnim->fcpim->path_tov);
-	}
-}
-
-/**
- * Stop IO TOV timer.
- */
-static void
-bfa_itnim_iotov_stop(struct bfa_itnim_s *itnim)
-{
-	if (itnim->iotov_active) {
-		itnim->iotov_active = BFA_FALSE;
-		bfa_timer_stop(&itnim->timer);
-	}
-}
-
-/**
- * Stop the IO TOV timer and fail any pending IO requests.
- */
-static void
-bfa_itnim_iotov_delete(struct bfa_itnim_s *itnim)
-{
-	bfa_boolean_t pathtov_active = BFA_FALSE;
-
-	if (itnim->iotov_active)
-		pathtov_active = BFA_TRUE;
-
-	bfa_itnim_iotov_stop(itnim);
-	if (pathtov_active)
-		bfa_cb_itnim_tov_begin(itnim->ditn);
-	bfa_itnim_iotov_cleanup(itnim);
-	if (pathtov_active)
-		bfa_cb_itnim_tov(itnim->ditn);
-}
-
-
-
-/**
- *  bfa_itnim_public
- */
-
-/**
- * 		Itnim interrupt processing.
- */
-void
-bfa_itnim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
-{
-	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
-	union bfi_itnim_i2h_msg_u msg;
-	struct bfa_itnim_s *itnim;
-
-	bfa_trc(bfa, m->mhdr.msg_id);
-
-	msg.msg = m;
-
-	switch (m->mhdr.msg_id) {
-	case BFI_ITNIM_I2H_CREATE_RSP:
-		itnim = BFA_ITNIM_FROM_TAG(fcpim,
-					       msg.create_rsp->bfa_handle);
-		bfa_assert(msg.create_rsp->status == BFA_STATUS_OK);
-		bfa_stats(itnim, create_comps);
-		bfa_sm_send_event(itnim, BFA_ITNIM_SM_FWRSP);
-		break;
-
-	case BFI_ITNIM_I2H_DELETE_RSP:
-		itnim = BFA_ITNIM_FROM_TAG(fcpim,
-					       msg.delete_rsp->bfa_handle);
-		bfa_assert(msg.delete_rsp->status == BFA_STATUS_OK);
-		bfa_stats(itnim, delete_comps);
-		bfa_sm_send_event(itnim, BFA_ITNIM_SM_FWRSP);
-		break;
-
-	case BFI_ITNIM_I2H_SLER_EVENT:
-		itnim = BFA_ITNIM_FROM_TAG(fcpim,
-					       msg.sler_event->bfa_handle);
-		bfa_stats(itnim, sler_events);
-		bfa_sm_send_event(itnim, BFA_ITNIM_SM_SLER);
-		break;
-
-	default:
-		bfa_trc(bfa, m->mhdr.msg_id);
-		bfa_assert(0);
-	}
-}
-
-
-
-/**
- *  bfa_itnim_api
- */
-
-struct bfa_itnim_s *
-bfa_itnim_create(struct bfa_s *bfa, struct bfa_rport_s *rport, void *ditn)
-{
-	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
-	struct bfa_itnim_s *itnim;
-
-	itnim = BFA_ITNIM_FROM_TAG(fcpim, rport->rport_tag);
-	bfa_assert(itnim->rport == rport);
-
-	itnim->ditn = ditn;
-
-	bfa_stats(itnim, creates);
-	bfa_sm_send_event(itnim, BFA_ITNIM_SM_CREATE);
-
-	return itnim;
-}
-
-void
-bfa_itnim_delete(struct bfa_itnim_s *itnim)
-{
-	bfa_stats(itnim, deletes);
-	bfa_sm_send_event(itnim, BFA_ITNIM_SM_DELETE);
-}
-
-void
-bfa_itnim_online(struct bfa_itnim_s *itnim, bfa_boolean_t seq_rec)
-{
-	itnim->seq_rec = seq_rec;
-	bfa_stats(itnim, onlines);
-	bfa_sm_send_event(itnim, BFA_ITNIM_SM_ONLINE);
-}
-
-void
-bfa_itnim_offline(struct bfa_itnim_s *itnim)
-{
-	bfa_stats(itnim, offlines);
-	bfa_sm_send_event(itnim, BFA_ITNIM_SM_OFFLINE);
-}
-
-/**
- * Return true if itnim is considered offline for holding off IO request.
- * IO is not held if itnim is being deleted.
- */
-bfa_boolean_t
-bfa_itnim_hold_io(struct bfa_itnim_s *itnim)
-{
-	return
-		itnim->fcpim->path_tov && itnim->iotov_active &&
-		(bfa_sm_cmp_state(itnim, bfa_itnim_sm_fwcreate) ||
-		 bfa_sm_cmp_state(itnim, bfa_itnim_sm_sler) ||
-		 bfa_sm_cmp_state(itnim, bfa_itnim_sm_cleanup_offline) ||
-		 bfa_sm_cmp_state(itnim, bfa_itnim_sm_fwdelete) ||
-		 bfa_sm_cmp_state(itnim, bfa_itnim_sm_offline) ||
-		 bfa_sm_cmp_state(itnim, bfa_itnim_sm_iocdisable))
-	;
-}
-
-void
-bfa_itnim_get_stats(struct bfa_itnim_s *itnim,
-	struct bfa_itnim_hal_stats_s *stats)
-{
-	*stats = itnim->stats;
-}
-
-void
-bfa_itnim_clear_stats(struct bfa_itnim_s *itnim)
-{
-	bfa_os_memset(&itnim->stats, 0, sizeof(itnim->stats));
-}
-
-

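A second pattern that repeats in the removed code above (bfa_ioim_send_abort(), bfa_itnim_send_fwcreate(), bfa_itnim_send_fwdelete()) is request-queue flow control: the sender asks for the next free slot, and if the queue is full it parks a wait element whose callback later raises a QRESUME event so the send can be retried. The sketch below illustrates that idea in isolation; all names (reqq_*, demo_*) are hypothetical and do not reproduce the real bfa_reqq API.

/*
 * Generic sketch of "try to queue, otherwise wait for QRESUME" flow control.
 * Names are hypothetical placeholders, not the bfa_reqq interface.
 */
#include <stdbool.h>
#include <stdio.h>

#define REQQ_DEPTH 4

struct reqq {
	int   used;				/* slots currently in use */
	void (*resume_cb)(void *arg);		/* parked waiter, if any */
	void *resume_arg;
};

/* Returns true if a slot was taken; otherwise parks the waiter. */
static bool reqq_try_or_wait(struct reqq *q, void (*cb)(void *), void *arg)
{
	if (q->used < REQQ_DEPTH) {
		q->used++;			/* slot claimed: build and post the message */
		return true;
	}
	q->resume_cb = cb;			/* queue full: wait for space */
	q->resume_arg = arg;
	return false;
}

/* Called when the firmware consumes an entry and space frees up. */
static void reqq_complete_one(struct reqq *q)
{
	q->used--;
	if (q->resume_cb) {
		void (*cb)(void *) = q->resume_cb;

		q->resume_cb = NULL;
		cb(q->resume_arg);		/* e.g. send a QRESUME event to the waiter */
	}
}

static void demo_qresume(void *arg)
{
	printf("%s: queue space available, retry send\n", (const char *)arg);
}

int main(void)
{
	struct reqq q = { .used = REQQ_DEPTH };	/* start out full */

	if (!reqq_try_or_wait(&q, demo_qresume, "itnim"))
		printf("queue full, waiter parked\n");
	reqq_complete_one(&q);			/* frees space and fires the waiter */
	return 0;
}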
+ 0 - 346
drivers/scsi/bfa/bfa_log.c

@@ -1,346 +0,0 @@
-/*
- * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
- * All rights reserved
- * www.brocade.com
- *
- * Linux driver for Brocade Fibre Channel Host Bus Adapter.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License (GPL) Version 2 as
- * published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- */
-
-/**
- *  bfa_log.c BFA log library
- */
-
-#include <bfa_os_inc.h>
-#include <cs/bfa_log.h>
-
-/*
- * global log info structure
- */
-struct bfa_log_info_s {
-	u32        start_idx;	/*  start index for a module */
-	u32        total_count;	/*  total count for a module */
-	enum bfa_log_severity level;	/*  global log level */
-	bfa_log_cb_t	cbfn;		/*  callback function */
-};
-
-static struct bfa_log_info_s bfa_log_info[BFA_LOG_MODULE_ID_MAX + 1];
-static u32 bfa_log_msg_total_count;
-static int      bfa_log_initialized;
-
-static char    *bfa_log_severity[] =
-	{ "[none]", "[critical]", "[error]", "[warn]", "[info]", "" };
-
-/**
- * BFA log library initialization
- *
- * The log library initialization includes the following:
- *    - set log instance name and callback function
- *    - read the message array generated from xml files
- *    - calculate start index for each module
- *    - calculate message count for each module
- *    - perform error checking
- *
- * @param[in] log_mod - log module info
- * @param[in] instance_name - instance name
- * @param[in] cbfn - callback function
- *
- * It returns 0 on success, or -1 on failure.
- */
-int
-bfa_log_init(struct bfa_log_mod_s *log_mod, char *instance_name,
-			bfa_log_cb_t cbfn)
-{
-	struct bfa_log_msgdef_s *msg;
-	u32        pre_mod_id = 0;
-	u32        cur_mod_id = 0;
-	u32        i, pre_idx, idx, msg_id;
-
-	/*
-	 * set instance name
-	 */
-	if (log_mod) {
-		strncpy(log_mod->instance_info, instance_name,
-			sizeof(log_mod->instance_info));
-		log_mod->cbfn = cbfn;
-		for (i = 0; i <= BFA_LOG_MODULE_ID_MAX; i++)
-			log_mod->log_level[i] = BFA_LOG_WARNING;
-	}
-
-	if (bfa_log_initialized)
-		return 0;
-
-	for (i = 0; i <= BFA_LOG_MODULE_ID_MAX; i++) {
-		bfa_log_info[i].start_idx = 0;
-		bfa_log_info[i].total_count = 0;
-		bfa_log_info[i].level = BFA_LOG_WARNING;
-		bfa_log_info[i].cbfn = cbfn;
-	}
-
-	pre_idx = 0;
-	idx = 0;
-	msg = bfa_log_msg_array;
-	msg_id = BFA_LOG_GET_MSG_ID(msg);
-	pre_mod_id = BFA_LOG_GET_MOD_ID(msg_id);
-	while (msg_id != 0) {
-		cur_mod_id = BFA_LOG_GET_MOD_ID(msg_id);
-
-		if (cur_mod_id > BFA_LOG_MODULE_ID_MAX) {
-			cbfn(log_mod, msg_id,
-				"%s%s log: module id %u out of range\n",
-				BFA_LOG_CAT_NAME,
-				bfa_log_severity[BFA_LOG_ERROR],
-				cur_mod_id);
-			return -1;
-		}
-
-		if (pre_mod_id > BFA_LOG_MODULE_ID_MAX) {
-			cbfn(log_mod, msg_id,
-				"%s%s log: module id %u out of range\n",
-				BFA_LOG_CAT_NAME,
-				bfa_log_severity[BFA_LOG_ERROR],
-				pre_mod_id);
-			return -1;
-		}
-
-		if (cur_mod_id != pre_mod_id) {
-			bfa_log_info[pre_mod_id].start_idx = pre_idx;
-			bfa_log_info[pre_mod_id].total_count = idx - pre_idx;
-			pre_mod_id = cur_mod_id;
-			pre_idx = idx;
-		}
-
-		idx++;
-		msg++;
-		msg_id = BFA_LOG_GET_MSG_ID(msg);
-	}
-
-	bfa_log_info[cur_mod_id].start_idx = pre_idx;
-	bfa_log_info[cur_mod_id].total_count = idx - pre_idx;
-	bfa_log_msg_total_count = idx;
-
-	cbfn(log_mod, msg_id, "%s%s log: init OK, msg total count %u\n",
-		BFA_LOG_CAT_NAME,
-		bfa_log_severity[BFA_LOG_INFO], bfa_log_msg_total_count);
-
-	bfa_log_initialized = 1;
-
-	return 0;
-}
-
-/**
- * BFA log set log level for a module
- *
- * @param[in] log_mod - log module info
- * @param[in] mod_id - module id
- * @param[in] log_level - log severity level
- *
- * It returns BFA_STATUS_OK on success, or > 0 on failure.
- */
-bfa_status_t
-bfa_log_set_level(struct bfa_log_mod_s *log_mod, int mod_id,
-		  enum bfa_log_severity log_level)
-{
-	if (mod_id <= BFA_LOG_UNUSED_ID || mod_id > BFA_LOG_MODULE_ID_MAX)
-		return BFA_STATUS_EINVAL;
-
-	if (log_level <= BFA_LOG_INVALID || log_level > BFA_LOG_LEVEL_MAX)
-		return BFA_STATUS_EINVAL;
-
-	if (log_mod)
-		log_mod->log_level[mod_id] = log_level;
-	else
-		bfa_log_info[mod_id].level = log_level;
-
-	return BFA_STATUS_OK;
-}
-
-/**
- * BFA log set log level for all modules
- *
- * @param[in] log_mod - log module info
- * @param[in] log_level - log severity level
- *
- * It returns BFA_STATUS_OK on success, or > 0 on failure.
- */
-bfa_status_t
-bfa_log_set_level_all(struct bfa_log_mod_s *log_mod,
-		  enum bfa_log_severity log_level)
-{
-	int mod_id = BFA_LOG_UNUSED_ID + 1;
-
-	if (log_level <= BFA_LOG_INVALID || log_level > BFA_LOG_LEVEL_MAX)
-		return BFA_STATUS_EINVAL;
-
-	if (log_mod) {
-		for (; mod_id <= BFA_LOG_MODULE_ID_MAX; mod_id++)
-			log_mod->log_level[mod_id] = log_level;
-	} else {
-		for (; mod_id <= BFA_LOG_MODULE_ID_MAX; mod_id++)
-			bfa_log_info[mod_id].level = log_level;
-	}
-
-	return BFA_STATUS_OK;
-}
-
-/**
- * BFA log set log level for all aen sub-modules
- *
- * @param[in] log_mod - log module info
- * @param[in] log_level - log severity level
- *
- * It returns BFA_STATUS_OK on success, or > 0 on failure.
- */
-bfa_status_t
-bfa_log_set_level_aen(struct bfa_log_mod_s *log_mod,
-		  enum bfa_log_severity log_level)
-{
-	int mod_id = BFA_LOG_AEN_MIN + 1;
-
-	if (log_mod) {
-		for (; mod_id <= BFA_LOG_AEN_MAX; mod_id++)
-			log_mod->log_level[mod_id] = log_level;
-	} else {
-		for (; mod_id <= BFA_LOG_AEN_MAX; mod_id++)
-			bfa_log_info[mod_id].level = log_level;
-	}
-
-	return BFA_STATUS_OK;
-}
-
-/**
- * BFA log get log level for a module
- *
- * @param[in] log_mod - log module info
- * @param[in] mod_id - module id
- *
- * It returns log level or -1 on error
- */
-enum bfa_log_severity
-bfa_log_get_level(struct bfa_log_mod_s *log_mod, int mod_id)
-{
-	if (mod_id <= BFA_LOG_UNUSED_ID || mod_id > BFA_LOG_MODULE_ID_MAX)
-		return BFA_LOG_INVALID;
-
-	if (log_mod)
-		return log_mod->log_level[mod_id];
-	else
-		return bfa_log_info[mod_id].level;
-}
-
-enum bfa_log_severity
-bfa_log_get_msg_level(struct bfa_log_mod_s *log_mod, u32 msg_id)
-{
-	struct bfa_log_msgdef_s *msg;
-	u32        mod = BFA_LOG_GET_MOD_ID(msg_id);
-	u32        idx = BFA_LOG_GET_MSG_IDX(msg_id) - 1;
-
-	if (!bfa_log_initialized)
-		return BFA_LOG_INVALID;
-
-	if (mod > BFA_LOG_MODULE_ID_MAX)
-		return BFA_LOG_INVALID;
-
-	if (idx >= bfa_log_info[mod].total_count) {
-		bfa_log_info[mod].cbfn(log_mod, msg_id,
-			"%s%s log: inconsistent idx %u vs. total count %u\n",
-			BFA_LOG_CAT_NAME, bfa_log_severity[BFA_LOG_ERROR], idx,
-			bfa_log_info[mod].total_count);
-		return BFA_LOG_INVALID;
-	}
-
-	msg = bfa_log_msg_array + bfa_log_info[mod].start_idx + idx;
-	if (msg_id != BFA_LOG_GET_MSG_ID(msg)) {
-		bfa_log_info[mod].cbfn(log_mod, msg_id,
-			"%s%s log: inconsistent msg id %u array msg id %u\n",
-			BFA_LOG_CAT_NAME, bfa_log_severity[BFA_LOG_ERROR],
-			msg_id, BFA_LOG_GET_MSG_ID(msg));
-		return BFA_LOG_INVALID;
-	}
-
-	return BFA_LOG_GET_SEVERITY(msg);
-}
-
-/**
- * BFA log message handling
- *
- * BFA log message handling finds the message based on message id and prints
- * out the message based on its format and arguments. It also prefixes
- * the message with the severity and other attributes.
- *
- * @param[in] log_mod - log module info
- * @param[in] msg_id - message id
- * @param[in] ... - message arguments
- *
- * It returns 0 on success, or -1 on error.
- */
-int
-bfa_log(struct bfa_log_mod_s *log_mod, u32 msg_id, ...)
-{
-	va_list         ap;
-	char            buf[256];
-	struct bfa_log_msgdef_s *msg;
-	int             log_level;
-	u32        mod = BFA_LOG_GET_MOD_ID(msg_id);
-	u32        idx = BFA_LOG_GET_MSG_IDX(msg_id) - 1;
-
-	if (!bfa_log_initialized)
-		return -1;
-
-	if (mod > BFA_LOG_MODULE_ID_MAX)
-		return -1;
-
-	if (idx >= bfa_log_info[mod].total_count) {
-		bfa_log_info[mod].cbfn(log_mod, msg_id,
-			"%s%s log: inconsistent idx %u vs. total count %u\n",
-			BFA_LOG_CAT_NAME, bfa_log_severity[BFA_LOG_ERROR], idx,
-			bfa_log_info[mod].total_count);
-		return -1;
-	}
-
-	msg = bfa_log_msg_array + bfa_log_info[mod].start_idx + idx;
-	if (msg_id != BFA_LOG_GET_MSG_ID(msg)) {
-		bfa_log_info[mod].cbfn(log_mod, msg_id,
-			"%s%s log: inconsistent msg id %u array msg id %u\n",
-			BFA_LOG_CAT_NAME, bfa_log_severity[BFA_LOG_ERROR],
-			msg_id, BFA_LOG_GET_MSG_ID(msg));
-		return -1;
-	}
-
-	log_level = log_mod ? log_mod->log_level[mod] : bfa_log_info[mod].level;
-	if ((BFA_LOG_GET_SEVERITY(msg) > log_level) &&
-			(msg->attributes != BFA_LOG_ATTR_NONE))
-		return 0;
-
-	va_start(ap, msg_id);
-	bfa_os_vsprintf(buf, BFA_LOG_GET_MSG_FMT_STRING(msg), ap);
-	va_end(ap);
-
-	if (log_mod)
-		log_mod->cbfn(log_mod, msg_id, "%s[%s]%s%s %s: %s\n",
-				BFA_LOG_CAT_NAME, log_mod->instance_info,
-				bfa_log_severity[BFA_LOG_GET_SEVERITY(msg)],
-				(msg->attributes & BFA_LOG_ATTR_AUDIT)
-				? " (audit) " : "", msg->msg_value, buf);
-	else
-		bfa_log_info[mod].cbfn(log_mod, msg_id, "%s%s%s %s: %s\n",
-				BFA_LOG_CAT_NAME,
-				bfa_log_severity[BFA_LOG_GET_SEVERITY(msg)],
-				(msg->attributes & BFA_LOG_ATTR_AUDIT) ?
-				" (audit) " : "", msg->msg_value, buf);
-
-	return 0;
-}
-

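bfa_log() above locates a message definition by unpacking a module id and a 1-based per-module index from the 32-bit message id, then indexing a flat message array via the module's start_idx computed at init time; the flat array itself (bfa_log_msg_array) is defined in bfa_log_module.c, which follows. The sketch below shows only the lookup idea; the bit layout and all names (DEMO_*, demo_*) are hypothetical and are not the real BFA_LOG_GET_* macros from cs/bfa_log.h.

/*
 * Simplified sketch of a "module id + per-module index packed into one
 * message id" lookup.  Layout and names are hypothetical illustrations.
 */
#include <stdio.h>

#define DEMO_MOD_SHIFT	16
#define DEMO_IDX_MASK	0xffffu

#define DEMO_GET_MOD_ID(msg_id)		((msg_id) >> DEMO_MOD_SHIFT)
#define DEMO_GET_MSG_IDX(msg_id)	((msg_id) & DEMO_IDX_MASK)

struct demo_msgdef {
	unsigned int msg_id;
	const char  *fmt;
};

/* Flat array of message definitions, grouped by module. */
static const struct demo_msgdef demo_msgs[] = {
	{ (1u << DEMO_MOD_SHIFT) | 1, "module 1, message 1: %s" },
	{ (1u << DEMO_MOD_SHIFT) | 2, "module 1, message 2: %s" },
	{ (2u << DEMO_MOD_SHIFT) | 1, "module 2, message 1: %s" },
};

/* Per-module start index into demo_msgs[], as computed at init time. */
static const unsigned int demo_start_idx[] = { 0, 0, 2 };

static const struct demo_msgdef *demo_lookup(unsigned int msg_id)
{
	unsigned int mod = DEMO_GET_MOD_ID(msg_id);
	unsigned int idx = DEMO_GET_MSG_IDX(msg_id) - 1;	/* indices are 1-based */

	return &demo_msgs[demo_start_idx[mod] + idx];
}

int main(void)
{
	const struct demo_msgdef *msg = demo_lookup((2u << DEMO_MOD_SHIFT) | 1);

	printf(msg->fmt, "hello");
	printf("\n");
	return 0;
}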
+ 0 - 537
drivers/scsi/bfa/bfa_log_module.c

@@ -1,537 +0,0 @@
-/*
- * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
- * All rights reserved
- * www.brocade.com
- *
- * Linux driver for Brocade Fibre Channel Host Bus Adapter.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License (GPL) Version 2 as
- * published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- */
-
-#include <cs/bfa_log.h>
-#include <aen/bfa_aen_adapter.h>
-#include <aen/bfa_aen_audit.h>
-#include <aen/bfa_aen_ethport.h>
-#include <aen/bfa_aen_ioc.h>
-#include <aen/bfa_aen_itnim.h>
-#include <aen/bfa_aen_lport.h>
-#include <aen/bfa_aen_port.h>
-#include <aen/bfa_aen_rport.h>
-#include <log/bfa_log_fcs.h>
-#include <log/bfa_log_hal.h>
-#include <log/bfa_log_linux.h>
-#include <log/bfa_log_wdrv.h>
-
-struct bfa_log_msgdef_s bfa_log_msg_array[] = {
-
-
-/* messages define for BFA_AEN_CAT_ADAPTER Module */
-{BFA_AEN_ADAPTER_ADD, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
- "BFA_AEN_ADAPTER_ADD",
- "New adapter found: SN = %s, base port WWN = %s.",
- ((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_S << BFA_LOG_ARG1) | 0), 2},
-
-{BFA_AEN_ADAPTER_REMOVE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
- BFA_LOG_WARNING, "BFA_AEN_ADAPTER_REMOVE",
- "Adapter removed: SN = %s.",
- ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
-
-
-
-
-/* messages define for BFA_AEN_CAT_AUDIT Module */
-{BFA_AEN_AUDIT_AUTH_ENABLE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
- BFA_LOG_INFO, "BFA_AEN_AUDIT_AUTH_ENABLE",
- "Authentication enabled for base port: WWN = %s.",
- ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
-
-{BFA_AEN_AUDIT_AUTH_DISABLE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
- BFA_LOG_INFO, "BFA_AEN_AUDIT_AUTH_DISABLE",
- "Authentication disabled for base port: WWN = %s.",
- ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
-
-
-
-
-/* messages define for BFA_AEN_CAT_ETHPORT Module */
-{BFA_AEN_ETHPORT_LINKUP, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
- "BFA_AEN_ETHPORT_LINKUP",
- "Base port ethernet linkup: mac = %s.",
- ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
-
-{BFA_AEN_ETHPORT_LINKDOWN, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
- "BFA_AEN_ETHPORT_LINKDOWN",
- "Base port ethernet linkdown: mac = %s.",
- ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
-
-{BFA_AEN_ETHPORT_ENABLE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
- "BFA_AEN_ETHPORT_ENABLE",
- "Base port ethernet interface enabled: mac = %s.",
- ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
-
-{BFA_AEN_ETHPORT_DISABLE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
- "BFA_AEN_ETHPORT_DISABLE",
- "Base port ethernet interface disabled: mac = %s.",
- ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
-
-
-
-
-/* messages define for BFA_AEN_CAT_IOC Module */
-{BFA_AEN_IOC_HBGOOD, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
- "BFA_AEN_IOC_HBGOOD",
- "Heart Beat of IOC %d is good.",
- ((BFA_LOG_D << BFA_LOG_ARG0) | 0), 1},
-
-{BFA_AEN_IOC_HBFAIL, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_CRITICAL,
- "BFA_AEN_IOC_HBFAIL",
- "Heart Beat of IOC %d has failed.",
- ((BFA_LOG_D << BFA_LOG_ARG0) | 0), 1},
-
-{BFA_AEN_IOC_ENABLE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
- "BFA_AEN_IOC_ENABLE",
- "IOC %d is enabled.",
- ((BFA_LOG_D << BFA_LOG_ARG0) | 0), 1},
-
-{BFA_AEN_IOC_DISABLE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
- "BFA_AEN_IOC_DISABLE",
- "IOC %d is disabled.",
- ((BFA_LOG_D << BFA_LOG_ARG0) | 0), 1},
-
-{BFA_AEN_IOC_FWMISMATCH, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
- BFA_LOG_CRITICAL, "BFA_AEN_IOC_FWMISMATCH",
- "Running firmware version is incompatible with the driver version.",
- (0), 0},
-
-{BFA_AEN_IOC_FWCFG_ERROR, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
- BFA_LOG_CRITICAL, "BFA_AEN_IOC_FWCFG_ERROR",
- "Link initialization failed due to firmware configuration read error:"
- " WWN = %s.",
- ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
-
-{BFA_AEN_IOC_INVALID_VENDOR, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
- BFA_LOG_ERROR, "BFA_AEN_IOC_INVALID_VENDOR",
- "Unsupported switch vendor. Link initialization failed: WWN = %s.",
- ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
-
-{BFA_AEN_IOC_INVALID_NWWN, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
- BFA_LOG_ERROR, "BFA_AEN_IOC_INVALID_NWWN",
- "Invalid NWWN. Link initialization failed: NWWN = 00:00:00:00:00:00:00:00.",
- (0), 0},
-
-{BFA_AEN_IOC_INVALID_PWWN, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
- BFA_LOG_ERROR, "BFA_AEN_IOC_INVALID_PWWN",
- "Invalid PWWN. Link initialization failed: PWWN = 00:00:00:00:00:00:00:00.",
- (0), 0},
-
-
-
-
-/* messages define for BFA_AEN_CAT_ITNIM Module */
-{BFA_AEN_ITNIM_ONLINE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
- "BFA_AEN_ITNIM_ONLINE",
- "Target (WWN = %s) is online for initiator (WWN = %s).",
- ((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_S << BFA_LOG_ARG1) | 0), 2},
-
-{BFA_AEN_ITNIM_OFFLINE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
- "BFA_AEN_ITNIM_OFFLINE",
- "Target (WWN = %s) offlined by initiator (WWN = %s).",
- ((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_S << BFA_LOG_ARG1) | 0), 2},
-
-{BFA_AEN_ITNIM_DISCONNECT, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
- BFA_LOG_ERROR, "BFA_AEN_ITNIM_DISCONNECT",
- "Target (WWN = %s) connectivity lost for initiator (WWN = %s).",
- ((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_S << BFA_LOG_ARG1) | 0), 2},
-
-
-
-
-/* messages define for BFA_AEN_CAT_LPORT Module */
-{BFA_AEN_LPORT_NEW, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
- "BFA_AEN_LPORT_NEW",
- "New logical port created: WWN = %s, Role = %s.",
- ((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_S << BFA_LOG_ARG1) | 0), 2},
-
-{BFA_AEN_LPORT_DELETE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
- "BFA_AEN_LPORT_DELETE",
- "Logical port deleted: WWN = %s, Role = %s.",
- ((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_S << BFA_LOG_ARG1) | 0), 2},
-
-{BFA_AEN_LPORT_ONLINE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
- "BFA_AEN_LPORT_ONLINE",
- "Logical port online: WWN = %s, Role = %s.",
- ((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_S << BFA_LOG_ARG1) | 0), 2},
-
-{BFA_AEN_LPORT_OFFLINE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
- "BFA_AEN_LPORT_OFFLINE",
- "Logical port taken offline: WWN = %s, Role = %s.",
- ((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_S << BFA_LOG_ARG1) | 0), 2},
-
-{BFA_AEN_LPORT_DISCONNECT, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
- BFA_LOG_ERROR, "BFA_AEN_LPORT_DISCONNECT",
- "Logical port lost fabric connectivity: WWN = %s, Role = %s.",
- ((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_S << BFA_LOG_ARG1) | 0), 2},
-
-{BFA_AEN_LPORT_NEW_PROP, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
- "BFA_AEN_LPORT_NEW_PROP",
- "New virtual port created using proprietary interface: WWN = %s, Role = %s.",
- ((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_S << BFA_LOG_ARG1) | 0), 2},
-
-{BFA_AEN_LPORT_DELETE_PROP, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
- BFA_LOG_INFO, "BFA_AEN_LPORT_DELETE_PROP",
- "Virtual port deleted using proprietary interface: WWN = %s, Role = %s.",
- ((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_S << BFA_LOG_ARG1) | 0), 2},
-
-{BFA_AEN_LPORT_NEW_STANDARD, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
- BFA_LOG_INFO, "BFA_AEN_LPORT_NEW_STANDARD",
- "New virtual port created using standard interface: WWN = %s, Role = %s.",
- ((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_S << BFA_LOG_ARG1) | 0), 2},
-
-{BFA_AEN_LPORT_DELETE_STANDARD, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
- BFA_LOG_INFO, "BFA_AEN_LPORT_DELETE_STANDARD",
- "Virtual port deleted using standard interface: WWN = %s, Role = %s.",
- ((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_S << BFA_LOG_ARG1) | 0), 2},
-
-{BFA_AEN_LPORT_NPIV_DUP_WWN, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
- BFA_LOG_WARNING, "BFA_AEN_LPORT_NPIV_DUP_WWN",
- "Virtual port login failed. Duplicate WWN = %s reported by fabric.",
- ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
-
-{BFA_AEN_LPORT_NPIV_FABRIC_MAX, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
- BFA_LOG_WARNING, "BFA_AEN_LPORT_NPIV_FABRIC_MAX",
- "Virtual port (WWN = %s) login failed. Max NPIV ports already exist in"
- " fabric/fport.",
- ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
-
-{BFA_AEN_LPORT_NPIV_UNKNOWN, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
- BFA_LOG_WARNING, "BFA_AEN_LPORT_NPIV_UNKNOWN",
- "Virtual port (WWN = %s) login failed.",
- ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
-
-
-
-
-/* messages define for BFA_AEN_CAT_PORT Module */
-{BFA_AEN_PORT_ONLINE, BFA_LOG_ATTR_NONE, BFA_LOG_INFO, "BFA_AEN_PORT_ONLINE",
- "Base port online: WWN = %s.",
- ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
-
-{BFA_AEN_PORT_OFFLINE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_WARNING,
- "BFA_AEN_PORT_OFFLINE",
- "Base port offline: WWN = %s.",
- ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
-
-{BFA_AEN_PORT_RLIR, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
- "BFA_AEN_PORT_RLIR",
- "RLIR event not supported.",
- (0), 0},
-
-{BFA_AEN_PORT_SFP_INSERT, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
- "BFA_AEN_PORT_SFP_INSERT",
- "New SFP found: WWN/MAC = %s.",
- ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
-
-{BFA_AEN_PORT_SFP_REMOVE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
- BFA_LOG_WARNING, "BFA_AEN_PORT_SFP_REMOVE",
- "SFP removed: WWN/MAC = %s.",
- ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
-
-{BFA_AEN_PORT_SFP_POM, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_WARNING,
- "BFA_AEN_PORT_SFP_POM",
- "SFP POM level to %s: WWN/MAC = %s.",
- ((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_S << BFA_LOG_ARG1) | 0), 2},
-
-{BFA_AEN_PORT_ENABLE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
- "BFA_AEN_PORT_ENABLE",
- "Base port enabled: WWN = %s.",
- ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
-
-{BFA_AEN_PORT_DISABLE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
- "BFA_AEN_PORT_DISABLE",
- "Base port disabled: WWN = %s.",
- ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
-
-{BFA_AEN_PORT_AUTH_ON, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
- "BFA_AEN_PORT_AUTH_ON",
- "Authentication successful for base port: WWN = %s.",
- ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
-
-{BFA_AEN_PORT_AUTH_OFF, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_ERROR,
- "BFA_AEN_PORT_AUTH_OFF",
- "Authentication unsuccessful for base port: WWN = %s.",
- ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
-
-{BFA_AEN_PORT_DISCONNECT, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_ERROR,
- "BFA_AEN_PORT_DISCONNECT",
- "Base port (WWN = %s) lost fabric connectivity.",
- ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
-
-{BFA_AEN_PORT_QOS_NEG, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_WARNING,
- "BFA_AEN_PORT_QOS_NEG",
- "QOS negotiation failed for base port: WWN = %s.",
- ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
-
-{BFA_AEN_PORT_FABRIC_NAME_CHANGE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
- BFA_LOG_WARNING, "BFA_AEN_PORT_FABRIC_NAME_CHANGE",
- "Base port WWN = %s, Fabric WWN = %s.",
- ((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_S << BFA_LOG_ARG1) | 0), 2},
-
-{BFA_AEN_PORT_SFP_ACCESS_ERROR, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
- BFA_LOG_WARNING, "BFA_AEN_PORT_SFP_ACCESS_ERROR",
- "SFP access error: WWN/MAC = %s.",
- ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
-
-{BFA_AEN_PORT_SFP_UNSUPPORT, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
- BFA_LOG_WARNING, "BFA_AEN_PORT_SFP_UNSUPPORT",
- "Unsupported SFP found: WWN/MAC = %s.",
- ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
-
-
-
-
-/* messages define for BFA_AEN_CAT_RPORT Module */
-{BFA_AEN_RPORT_ONLINE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
- "BFA_AEN_RPORT_ONLINE",
- "Remote port (WWN = %s) online for logical port (WWN = %s).",
- ((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_S << BFA_LOG_ARG1) | 0), 2},
-
-{BFA_AEN_RPORT_OFFLINE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
- "BFA_AEN_RPORT_OFFLINE",
- "Remote port (WWN = %s) offlined by logical port (WWN = %s).",
- ((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_S << BFA_LOG_ARG1) | 0), 2},
-
-{BFA_AEN_RPORT_DISCONNECT, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
- BFA_LOG_ERROR, "BFA_AEN_RPORT_DISCONNECT",
- "Remote port (WWN = %s) connectivity lost for logical port (WWN = %s).",
- ((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_S << BFA_LOG_ARG1) | 0), 2},
-
-{BFA_AEN_RPORT_QOS_PRIO, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
- "BFA_AEN_RPORT_QOS_PRIO",
- "QOS priority changed to %s: RPWWN = %s and LPWWN = %s.",
- ((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_S << BFA_LOG_ARG1) |
-  (BFA_LOG_S << BFA_LOG_ARG2) | 0), 3},
-
-{BFA_AEN_RPORT_QOS_FLOWID, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
- "BFA_AEN_RPORT_QOS_FLOWID",
- "QOS flow ID changed to %d: RPWWN = %s and LPWWN = %s.",
- ((BFA_LOG_D << BFA_LOG_ARG0) | (BFA_LOG_S << BFA_LOG_ARG1) |
-  (BFA_LOG_S << BFA_LOG_ARG2) | 0), 3},
-
-
-
-
-/* messages define for FCS Module */
-{BFA_LOG_FCS_FABRIC_NOSWITCH, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
- BFA_LOG_INFO, "FCS_FABRIC_NOSWITCH",
- "No switched fabric presence is detected.",
- (0), 0},
-
-{BFA_LOG_FCS_FABRIC_ISOLATED, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
- BFA_LOG_INFO, "FCS_FABRIC_ISOLATED",
- "Port is isolated due to VF_ID mismatch. PWWN: %s, Port VF_ID: %04x and"
- " switch port VF_ID: %04x.",
- ((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_X << BFA_LOG_ARG1) |
-  (BFA_LOG_X << BFA_LOG_ARG2) | 0), 3},
-
-
-
-
-/* messages define for HAL Module */
-{BFA_LOG_HAL_ASSERT, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_ERROR,
- "HAL_ASSERT",
- "Assertion failure: %s:%d: %s",
- ((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_D << BFA_LOG_ARG1) |
-  (BFA_LOG_S << BFA_LOG_ARG2) | 0), 3},
-
-{BFA_LOG_HAL_HEARTBEAT_FAILURE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
- BFA_LOG_CRITICAL, "HAL_HEARTBEAT_FAILURE",
- "Firmware heartbeat failure at %d",
- ((BFA_LOG_D << BFA_LOG_ARG0) | 0), 1},
-
-{BFA_LOG_HAL_FCPIM_PARM_INVALID, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
- BFA_LOG_INFO, "HAL_FCPIM_PARM_INVALID",
- "Driver configuration %s value %d is invalid. Value should be within"
- " %d and %d.",
- ((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_D << BFA_LOG_ARG1) |
-  (BFA_LOG_D << BFA_LOG_ARG2) | (BFA_LOG_D << BFA_LOG_ARG3) | 0), 4},
-
-{BFA_LOG_HAL_SM_ASSERT, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_ERROR,
- "HAL_SM_ASSERT",
- "SM Assertion failure: %s:%d: event = %d",
- ((BFA_LOG_S << BFA_LOG_ARG0) | (BFA_LOG_D << BFA_LOG_ARG1) |
-  (BFA_LOG_D << BFA_LOG_ARG2) | 0), 3},
-
-{BFA_LOG_HAL_DRIVER_ERROR, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
- BFA_LOG_INFO, "HAL_DRIVER_ERROR",
- "%s",
- ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
-
-{BFA_LOG_HAL_DRIVER_CONFIG_ERROR,
- BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
- "HAL_DRIVER_CONFIG_ERROR",
- "%s",
- ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
-
-{BFA_LOG_HAL_MBOX_ERROR, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
- BFA_LOG_INFO, "HAL_MBOX_ERROR",
- "%s",
- ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
-
-
-
-
-/* messages define for LINUX Module */
-{BFA_LOG_LINUX_DEVICE_CLAIMED, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
- BFA_LOG_INFO, "LINUX_DEVICE_CLAIMED",
- "bfa device at %s claimed.",
- ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
-
-{BFA_LOG_LINUX_HASH_INIT_FAILED, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
- BFA_LOG_INFO, "LINUX_HASH_INIT_FAILED",
- "Hash table initialization failure for the port %s.",
- ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
-
-{BFA_LOG_LINUX_SYSFS_FAILED, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
- BFA_LOG_INFO, "LINUX_SYSFS_FAILED",
- "sysfs file creation failure for the port %s.",
- ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
-
-{BFA_LOG_LINUX_MEM_ALLOC_FAILED, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
- BFA_LOG_INFO, "LINUX_MEM_ALLOC_FAILED",
- "Memory allocation failed: %s.  ",
- ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
-
-{BFA_LOG_LINUX_DRIVER_REGISTRATION_FAILED,
- BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
- "LINUX_DRIVER_REGISTRATION_FAILED",
- "%s.  ",
- ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
-
-{BFA_LOG_LINUX_ITNIM_FREE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
- "LINUX_ITNIM_FREE",
- "scsi%d: FCID: %s WWPN: %s",
- ((BFA_LOG_D << BFA_LOG_ARG0) | (BFA_LOG_S << BFA_LOG_ARG1) |
-  (BFA_LOG_S << BFA_LOG_ARG2) | 0), 3},
-
-{BFA_LOG_LINUX_ITNIM_ONLINE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
- BFA_LOG_INFO, "LINUX_ITNIM_ONLINE",
- "Target: %d:0:%d FCID: %s WWPN: %s",
- ((BFA_LOG_D << BFA_LOG_ARG0) | (BFA_LOG_D << BFA_LOG_ARG1) |
-  (BFA_LOG_S << BFA_LOG_ARG2) | (BFA_LOG_S << BFA_LOG_ARG3) | 0), 4},
-
-{BFA_LOG_LINUX_ITNIM_OFFLINE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
- BFA_LOG_INFO, "LINUX_ITNIM_OFFLINE",
- "Target: %d:0:%d FCID: %s WWPN: %s",
- ((BFA_LOG_D << BFA_LOG_ARG0) | (BFA_LOG_D << BFA_LOG_ARG1) |
-  (BFA_LOG_S << BFA_LOG_ARG2) | (BFA_LOG_S << BFA_LOG_ARG3) | 0), 4},
-
-{BFA_LOG_LINUX_SCSI_HOST_FREE, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
- BFA_LOG_INFO, "LINUX_SCSI_HOST_FREE",
- "Free scsi%d",
- ((BFA_LOG_D << BFA_LOG_ARG0) | 0), 1},
-
-{BFA_LOG_LINUX_SCSI_ABORT, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
- "LINUX_SCSI_ABORT",
- "scsi%d: abort cmnd %p, iotag %x",
- ((BFA_LOG_D << BFA_LOG_ARG0) | (BFA_LOG_P << BFA_LOG_ARG1) |
-  (BFA_LOG_X << BFA_LOG_ARG2) | 0), 3},
-
-{BFA_LOG_LINUX_SCSI_ABORT_COMP, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
- BFA_LOG_INFO, "LINUX_SCSI_ABORT_COMP",
- "scsi%d: complete abort 0x%p, iotag 0x%x",
- ((BFA_LOG_D << BFA_LOG_ARG0) | (BFA_LOG_P << BFA_LOG_ARG1) |
-  (BFA_LOG_X << BFA_LOG_ARG2) | 0), 3},
-
-{BFA_LOG_LINUX_DRIVER_CONFIG_ERROR,
- BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
- "LINUX_DRIVER_CONFIG_ERROR",
- "%s",
- ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
-
-{BFA_LOG_LINUX_BNA_STATE_MACHINE,
- BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
- "LINUX_BNA_STATE_MACHINE",
- "%s",
- ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
-
-{BFA_LOG_LINUX_IOC_ERROR, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
- BFA_LOG_INFO, "LINUX_IOC_ERROR",
- "%s",
- ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
-
-{BFA_LOG_LINUX_RESOURCE_ALLOC_ERROR,
- BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
- "LINUX_RESOURCE_ALLOC_ERROR",
- "%s",
- ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
-
-{BFA_LOG_LINUX_RING_BUFFER_ERROR,
- BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG, BFA_LOG_INFO,
- "LINUX_RING_BUFFER_ERROR",
- "%s",
- ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
-
-{BFA_LOG_LINUX_DRIVER_ERROR, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
- BFA_LOG_ERROR, "LINUX_DRIVER_ERROR",
- "%s",
- ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
-
-{BFA_LOG_LINUX_DRIVER_INFO, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
- BFA_LOG_INFO, "LINUX_DRIVER_INFO",
- "%s",
- ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
-
-{BFA_LOG_LINUX_DRIVER_DIAG, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
- BFA_LOG_INFO, "LINUX_DRIVER_DIAG",
- "%s",
- ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
-
-{BFA_LOG_LINUX_DRIVER_AEN, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
- BFA_LOG_INFO, "LINUX_DRIVER_AEN",
- "%s",
- ((BFA_LOG_S << BFA_LOG_ARG0) | 0), 1},
-
-
-
-
-/* messages define for WDRV Module */
-{BFA_LOG_WDRV_IOC_INIT_ERROR, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
- BFA_LOG_INFO, "WDRV_IOC_INIT_ERROR",
- "IOC initialization has failed.",
- (0), 0},
-
-{BFA_LOG_WDRV_IOC_INTERNAL_ERROR, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
- BFA_LOG_INFO, "WDRV_IOC_INTERNAL_ERROR",
- "IOC internal error.  ",
- (0), 0},
-
-{BFA_LOG_WDRV_IOC_START_ERROR, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
- BFA_LOG_INFO, "WDRV_IOC_START_ERROR",
- "IOC could not be started.  ",
- (0), 0},
-
-{BFA_LOG_WDRV_IOC_STOP_ERROR, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
- BFA_LOG_INFO, "WDRV_IOC_STOP_ERROR",
- "IOC could not be stopped.  ",
- (0), 0},
-
-{BFA_LOG_WDRV_INSUFFICIENT_RESOURCES, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
- BFA_LOG_INFO, "WDRV_INSUFFICIENT_RESOURCES",
- "Insufficient memory.  ",
- (0), 0},
-
-{BFA_LOG_WDRV_BASE_ADDRESS_MAP_ERROR, BFA_LOG_ATTR_NONE | BFA_LOG_ATTR_LOG,
- BFA_LOG_INFO, "WDRV_BASE_ADDRESS_MAP_ERROR",
- "Unable to map the IOC onto the system address space.  ",
- (0), 0},
-
-
-{0, 0, 0, "", "", 0, 0},
-};

+ 0 - 892
drivers/scsi/bfa/bfa_lps.c

@@ -1,892 +0,0 @@
-/*
- * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
- * All rights reserved
- * www.brocade.com
- *
- * Linux driver for Brocade Fibre Channel Host Bus Adapter.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License (GPL) Version 2 as
- * published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- */
-
-#include <bfa.h>
-#include <bfi/bfi_lps.h>
-#include <cs/bfa_debug.h>
-#include <defs/bfa_defs_pci.h>
-
-BFA_TRC_FILE(HAL, LPS);
-BFA_MODULE(lps);
-
-#define BFA_LPS_MIN_LPORTS	(1)
-#define BFA_LPS_MAX_LPORTS	(256)
-
-/*
- * Maximum Vports supported per physical port or vf.
- */
-#define BFA_LPS_MAX_VPORTS_SUPP_CB  255
-#define BFA_LPS_MAX_VPORTS_SUPP_CT  190
-
-/**
- * forward declarations
- */
-static void bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len,
-			    u32 *dm_len);
-static void bfa_lps_attach(struct bfa_s *bfa, void *bfad,
-			   struct bfa_iocfc_cfg_s *cfg,
-			   struct bfa_meminfo_s *meminfo,
-			   struct bfa_pcidev_s *pcidev);
-static void bfa_lps_detach(struct bfa_s *bfa);
-static void bfa_lps_start(struct bfa_s *bfa);
-static void bfa_lps_stop(struct bfa_s *bfa);
-static void bfa_lps_iocdisable(struct bfa_s *bfa);
-static void bfa_lps_login_rsp(struct bfa_s *bfa,
-			      struct bfi_lps_login_rsp_s *rsp);
-static void bfa_lps_logout_rsp(struct bfa_s *bfa,
-			       struct bfi_lps_logout_rsp_s *rsp);
-static void bfa_lps_reqq_resume(void *lps_arg);
-static void bfa_lps_free(struct bfa_lps_s *lps);
-static void bfa_lps_send_login(struct bfa_lps_s *lps);
-static void bfa_lps_send_logout(struct bfa_lps_s *lps);
-static void bfa_lps_login_comp(struct bfa_lps_s *lps);
-static void bfa_lps_logout_comp(struct bfa_lps_s *lps);
-static void bfa_lps_cvl_event(struct bfa_lps_s *lps);
-
-/**
- *  lps_pvt BFA LPS private functions
- */
-
-enum bfa_lps_event {
-	BFA_LPS_SM_LOGIN	= 1,	/* login request from user	*/
-	BFA_LPS_SM_LOGOUT	= 2,	/* logout request from user	*/
-	BFA_LPS_SM_FWRSP	= 3,	/* f/w response to login/logout	*/
-	BFA_LPS_SM_RESUME	= 4,	/* space present in reqq queue	*/
-	BFA_LPS_SM_DELETE	= 5,	/* lps delete from user		*/
-	BFA_LPS_SM_OFFLINE	= 6,	/* Link is offline		*/
-	BFA_LPS_SM_RX_CVL       = 7,	/* Rx clear virtual link        */
-};
-
-static void bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event);
-static void bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event);
-static void bfa_lps_sm_loginwait(struct bfa_lps_s *lps,
-			enum bfa_lps_event event);
-static void bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event);
-static void bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event);
-static void bfa_lps_sm_logowait(struct bfa_lps_s *lps,
-			enum bfa_lps_event event);
-
-/**
- * Init state -- no login
- */
-static void
-bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event)
-{
-	bfa_trc(lps->bfa, lps->lp_tag);
-	bfa_trc(lps->bfa, event);
-
-	switch (event) {
-	case BFA_LPS_SM_LOGIN:
-		if (bfa_reqq_full(lps->bfa, lps->reqq)) {
-			bfa_sm_set_state(lps, bfa_lps_sm_loginwait);
-			bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe);
-		} else {
-			bfa_sm_set_state(lps, bfa_lps_sm_login);
-			bfa_lps_send_login(lps);
-		}
-		if (lps->fdisc)
-			bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
-			BFA_PL_EID_LOGIN, 0, "FDISC Request");
-		else
-			bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
-			BFA_PL_EID_LOGIN, 0, "FLOGI Request");
-		break;
-
-	case BFA_LPS_SM_LOGOUT:
-		bfa_lps_logout_comp(lps);
-		break;
-
-	case BFA_LPS_SM_DELETE:
-		bfa_lps_free(lps);
-		break;
-
-	case BFA_LPS_SM_RX_CVL:
-	case BFA_LPS_SM_OFFLINE:
-		break;
-
-	case BFA_LPS_SM_FWRSP:
-		/* Could happen when fabric detects loopback and discards
-		 * the lps request. Fw will eventually sent out the timeout
-		 * Just ignore
-		 */
-		break;
-
-	default:
-		bfa_sm_fault(lps->bfa, event);
-	}
-}
-
-/**
- * login is in progress -- awaiting response from firmware
- */
-static void
-bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event)
-{
-	bfa_trc(lps->bfa, lps->lp_tag);
-	bfa_trc(lps->bfa, event);
-
-	switch (event) {
-	case BFA_LPS_SM_FWRSP:
-		if (lps->status == BFA_STATUS_OK) {
-			bfa_sm_set_state(lps, bfa_lps_sm_online);
-			if (lps->fdisc)
-				bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
-				BFA_PL_EID_LOGIN, 0, "FDISC Accept");
-			else
-				bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
-				BFA_PL_EID_LOGIN, 0, "FLOGI Accept");
-		} else {
-			bfa_sm_set_state(lps, bfa_lps_sm_init);
-			if (lps->fdisc)
-				bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
-				BFA_PL_EID_LOGIN, 0,
-				"FDISC Fail (RJT or timeout)");
-			else
-				bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
-				BFA_PL_EID_LOGIN, 0,
-				"FLOGI Fail (RJT or timeout)");
-		}
-		bfa_lps_login_comp(lps);
-		break;
-
-	case BFA_LPS_SM_OFFLINE:
-		bfa_sm_set_state(lps, bfa_lps_sm_init);
-		break;
-
-	default:
-		bfa_sm_fault(lps->bfa, event);
-	}
-}
-
-/**
- * login pending - awaiting space in request queue
- */
-static void
-bfa_lps_sm_loginwait(struct bfa_lps_s *lps, enum bfa_lps_event event)
-{
-	bfa_trc(lps->bfa, lps->lp_tag);
-	bfa_trc(lps->bfa, event);
-
-	switch (event) {
-	case BFA_LPS_SM_RESUME:
-		bfa_sm_set_state(lps, bfa_lps_sm_login);
-		break;
-
-	case BFA_LPS_SM_OFFLINE:
-		bfa_sm_set_state(lps, bfa_lps_sm_init);
-		bfa_reqq_wcancel(&lps->wqe);
-		break;
-
-	case BFA_LPS_SM_RX_CVL:
-		/*
-		 * Login was not even sent out; so when getting out
-		 * of this state, it will appear like a login retry
-		 * after Clear virtual link
-		 */
-		break;
-
-	default:
-		bfa_sm_fault(lps->bfa, event);
-	}
-}
-
-/**
- * login complete
- */
-static void
-bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event)
-{
-	bfa_trc(lps->bfa, lps->lp_tag);
-	bfa_trc(lps->bfa, event);
-
-	switch (event) {
-	case BFA_LPS_SM_LOGOUT:
-		if (bfa_reqq_full(lps->bfa, lps->reqq)) {
-			bfa_sm_set_state(lps, bfa_lps_sm_logowait);
-			bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe);
-		} else {
-			bfa_sm_set_state(lps, bfa_lps_sm_logout);
-			bfa_lps_send_logout(lps);
-		}
-		bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
-			BFA_PL_EID_LOGO, 0, "Logout");
-		break;
-
-	case BFA_LPS_SM_RX_CVL:
-		bfa_sm_set_state(lps, bfa_lps_sm_init);
-
-		/* Let the vport module know about this event */
-		bfa_lps_cvl_event(lps);
-		bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
-			BFA_PL_EID_FIP_FCF_CVL, 0, "FCF Clear Virt. Link Rx");
-		break;
-
-	case BFA_LPS_SM_OFFLINE:
-	case BFA_LPS_SM_DELETE:
-		bfa_sm_set_state(lps, bfa_lps_sm_init);
-		break;
-
-	default:
-		bfa_sm_fault(lps->bfa, event);
-	}
-}
-
-/**
- * logout in progress - awaiting firmware response
- */
-static void
-bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event)
-{
-	bfa_trc(lps->bfa, lps->lp_tag);
-	bfa_trc(lps->bfa, event);
-
-	switch (event) {
-	case BFA_LPS_SM_FWRSP:
-		bfa_sm_set_state(lps, bfa_lps_sm_init);
-		bfa_lps_logout_comp(lps);
-		break;
-
-	case BFA_LPS_SM_OFFLINE:
-		bfa_sm_set_state(lps, bfa_lps_sm_init);
-		break;
-
-	default:
-		bfa_sm_fault(lps->bfa, event);
-	}
-}
-
-/**
- * logout pending -- awaiting space in request queue
- */
-static void
-bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event event)
-{
-	bfa_trc(lps->bfa, lps->lp_tag);
-	bfa_trc(lps->bfa, event);
-
-	switch (event) {
-	case BFA_LPS_SM_RESUME:
-		bfa_sm_set_state(lps, bfa_lps_sm_logout);
-		bfa_lps_send_logout(lps);
-		break;
-
-	case BFA_LPS_SM_OFFLINE:
-		bfa_sm_set_state(lps, bfa_lps_sm_init);
-		bfa_reqq_wcancel(&lps->wqe);
-		break;
-
-	default:
-		bfa_sm_fault(lps->bfa, event);
-	}
-}
-
-
-
-/**
- *  lps_pvt BFA LPS private functions
- */
-
-/**
- * return memory requirement
- */
-static void
-bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len, u32 *dm_len)
-{
-	if (cfg->drvcfg.min_cfg)
-		*ndm_len += sizeof(struct bfa_lps_s) * BFA_LPS_MIN_LPORTS;
-	else
-		*ndm_len += sizeof(struct bfa_lps_s) * BFA_LPS_MAX_LPORTS;
-}
-
-/**
- * bfa module attach at initialization time
- */
-static void
-bfa_lps_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
-		struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
-{
-	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
-	struct bfa_lps_s	*lps;
-	int			i;
-
-	bfa_os_memset(mod, 0, sizeof(struct bfa_lps_mod_s));
-	mod->num_lps = BFA_LPS_MAX_LPORTS;
-	if (cfg->drvcfg.min_cfg)
-		mod->num_lps = BFA_LPS_MIN_LPORTS;
-	else
-		mod->num_lps = BFA_LPS_MAX_LPORTS;
-	mod->lps_arr = lps = (struct bfa_lps_s *) bfa_meminfo_kva(meminfo);
-
-	bfa_meminfo_kva(meminfo) += mod->num_lps * sizeof(struct bfa_lps_s);
-
-	INIT_LIST_HEAD(&mod->lps_free_q);
-	INIT_LIST_HEAD(&mod->lps_active_q);
-
-	for (i = 0; i < mod->num_lps; i++, lps++) {
-		lps->bfa	= bfa;
-		lps->lp_tag	= (u8) i;
-		lps->reqq	= BFA_REQQ_LPS;
-		bfa_reqq_winit(&lps->wqe, bfa_lps_reqq_resume, lps);
-		list_add_tail(&lps->qe, &mod->lps_free_q);
-	}
-}
-
-static void
-bfa_lps_detach(struct bfa_s *bfa)
-{
-}
-
-static void
-bfa_lps_start(struct bfa_s *bfa)
-{
-}
-
-static void
-bfa_lps_stop(struct bfa_s *bfa)
-{
-}
-
-/**
- * IOC in disabled state -- consider all lps offline
- */
-static void
-bfa_lps_iocdisable(struct bfa_s *bfa)
-{
-	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
-	struct bfa_lps_s	*lps;
-	struct list_head		*qe, *qen;
-
-	list_for_each_safe(qe, qen, &mod->lps_active_q) {
-		lps = (struct bfa_lps_s *) qe;
-		bfa_sm_send_event(lps, BFA_LPS_SM_OFFLINE);
-	}
-}
-
-/**
- * Firmware login response
- */
-static void
-bfa_lps_login_rsp(struct bfa_s *bfa, struct bfi_lps_login_rsp_s *rsp)
-{
-	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
-	struct bfa_lps_s	*lps;
-
-	bfa_assert(rsp->lp_tag < mod->num_lps);
-	lps = BFA_LPS_FROM_TAG(mod, rsp->lp_tag);
-
-	lps->status = rsp->status;
-	switch (rsp->status) {
-	case BFA_STATUS_OK:
-		lps->fport	= rsp->f_port;
-		lps->npiv_en	= rsp->npiv_en;
-		lps->lp_pid	= rsp->lp_pid;
-		lps->pr_bbcred	= bfa_os_ntohs(rsp->bb_credit);
-		lps->pr_pwwn	= rsp->port_name;
-		lps->pr_nwwn	= rsp->node_name;
-		lps->auth_req	= rsp->auth_req;
-		lps->lp_mac	= rsp->lp_mac;
-		lps->brcd_switch = rsp->brcd_switch;
-		lps->fcf_mac	= rsp->fcf_mac;
-
-		break;
-
-	case BFA_STATUS_FABRIC_RJT:
-		lps->lsrjt_rsn = rsp->lsrjt_rsn;
-		lps->lsrjt_expl = rsp->lsrjt_expl;
-
-		break;
-
-	case BFA_STATUS_EPROTOCOL:
-		lps->ext_status = rsp->ext_status;
-
-		break;
-
-	default:
-		/* Nothing to do with other status */
-		break;
-	}
-
-	bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
-}
-
-/**
- * Firmware logout response
- */
-static void
-bfa_lps_logout_rsp(struct bfa_s *bfa, struct bfi_lps_logout_rsp_s *rsp)
-{
-	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
-	struct bfa_lps_s	*lps;
-
-	bfa_assert(rsp->lp_tag < mod->num_lps);
-	lps = BFA_LPS_FROM_TAG(mod, rsp->lp_tag);
-
-	bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
-}
-
-/**
- * Firmware received a Clear virtual link request (for FCoE)
- */
-static void
-bfa_lps_rx_cvl_event(struct bfa_s *bfa, struct bfi_lps_cvl_event_s *cvl)
-{
-	struct bfa_lps_mod_s    *mod = BFA_LPS_MOD(bfa);
-	struct bfa_lps_s        *lps;
-
-	lps = BFA_LPS_FROM_TAG(mod, cvl->lp_tag);
-
-	bfa_sm_send_event(lps, BFA_LPS_SM_RX_CVL);
-}
-
-/**
- * Space is available in request queue, resume queueing request to firmware.
- */
-static void
-bfa_lps_reqq_resume(void *lps_arg)
-{
-	struct bfa_lps_s	*lps = lps_arg;
-
-	bfa_sm_send_event(lps, BFA_LPS_SM_RESUME);
-}
-
-/**
- * lps is freed -- triggered by vport delete
- */
-static void
-bfa_lps_free(struct bfa_lps_s *lps)
-{
-	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(lps->bfa);
-
-	list_del(&lps->qe);
-	list_add_tail(&lps->qe, &mod->lps_free_q);
-}
-
-/**
- * send login request to firmware
- */
-static void
-bfa_lps_send_login(struct bfa_lps_s *lps)
-{
-	struct bfi_lps_login_req_s	*m;
-
-	m = bfa_reqq_next(lps->bfa, lps->reqq);
-	bfa_assert(m);
-
-	bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_LOGIN_REQ,
-			bfa_lpuid(lps->bfa));
-
-	m->lp_tag	= lps->lp_tag;
-	m->alpa		= lps->alpa;
-	m->pdu_size	= bfa_os_htons(lps->pdusz);
-	m->pwwn		= lps->pwwn;
-	m->nwwn		= lps->nwwn;
-	m->fdisc	= lps->fdisc;
-	m->auth_en	= lps->auth_en;
-
-	bfa_reqq_produce(lps->bfa, lps->reqq);
-}
-
-/**
- * send logout request to firmware
- */
-static void
-bfa_lps_send_logout(struct bfa_lps_s *lps)
-{
-	struct bfi_lps_logout_req_s *m;
-
-	m = bfa_reqq_next(lps->bfa, lps->reqq);
-	bfa_assert(m);
-
-	bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_LOGOUT_REQ,
-			bfa_lpuid(lps->bfa));
-
-	m->lp_tag    = lps->lp_tag;
-	m->port_name = lps->pwwn;
-	bfa_reqq_produce(lps->bfa, lps->reqq);
-}
-
-/**
- * Indirect login completion handler for non-fcs
- */
-static void
-bfa_lps_login_comp_cb(void *arg, bfa_boolean_t complete)
-{
-	struct bfa_lps_s *lps	= arg;
-
-	if (!complete)
-		return;
-
-	if (lps->fdisc)
-		bfa_cb_lps_fdisc_comp(lps->bfa->bfad, lps->uarg, lps->status);
-	else
-		bfa_cb_lps_flogi_comp(lps->bfa->bfad, lps->uarg, lps->status);
-}
-
-/**
- * Login completion handler -- direct call for fcs, queue for others
- */
-static void
-bfa_lps_login_comp(struct bfa_lps_s *lps)
-{
-	if (!lps->bfa->fcs) {
-		bfa_cb_queue(lps->bfa, &lps->hcb_qe,
-				bfa_lps_login_comp_cb, lps);
-		return;
-	}
-
-	if (lps->fdisc)
-		bfa_cb_lps_fdisc_comp(lps->bfa->bfad, lps->uarg, lps->status);
-	else
-		bfa_cb_lps_flogi_comp(lps->bfa->bfad, lps->uarg, lps->status);
-}
-
-/**
- * Indirect logout completion handler for non-fcs
- */
-static void
-bfa_lps_logout_comp_cb(void *arg, bfa_boolean_t complete)
-{
-	struct bfa_lps_s *lps	= arg;
-
-	if (!complete)
-		return;
-
-	if (lps->fdisc)
-		bfa_cb_lps_fdisclogo_comp(lps->bfa->bfad, lps->uarg);
-	else
-		bfa_cb_lps_flogo_comp(lps->bfa->bfad, lps->uarg);
-}
-
-/**
- * Logout completion handler -- direct call for fcs, queue for others
- */
-static void
-bfa_lps_logout_comp(struct bfa_lps_s *lps)
-{
-	if (!lps->bfa->fcs) {
-		bfa_cb_queue(lps->bfa, &lps->hcb_qe,
-				bfa_lps_logout_comp_cb, lps);
-		return;
-	}
-	if (lps->fdisc)
-		bfa_cb_lps_fdisclogo_comp(lps->bfa->bfad, lps->uarg);
-	else
-		bfa_cb_lps_flogo_comp(lps->bfa->bfad, lps->uarg);
-}
-
-/**
- * Clear virtual link completion handler for non-fcs
- */
-static void
-bfa_lps_cvl_event_cb(void *arg, bfa_boolean_t complete)
-{
-	struct bfa_lps_s *lps   = arg;
-
-	if (!complete)
-		return;
-
-	/* Clear virtual link to base port will result in link down */
-	if (lps->fdisc)
-		bfa_cb_lps_cvl_event(lps->bfa->bfad, lps->uarg);
-}
-
-/**
- * Received Clear virtual link event --direct call for fcs,
- * queue for others
- */
-static void
-bfa_lps_cvl_event(struct bfa_lps_s *lps)
-{
-	if (!lps->bfa->fcs) {
-		bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_cvl_event_cb,
-				lps);
-		return;
-	}
-
-	/* Clear virtual link to base port will result in link down */
-	if (lps->fdisc)
-		bfa_cb_lps_cvl_event(lps->bfa->bfad, lps->uarg);
-}
-
-u32
-bfa_lps_get_max_vport(struct bfa_s *bfa)
-{
-	if (bfa_ioc_devid(&bfa->ioc) == BFA_PCI_DEVICE_ID_CT)
-		return BFA_LPS_MAX_VPORTS_SUPP_CT;
-	else
-		return BFA_LPS_MAX_VPORTS_SUPP_CB;
-}
-
-/**
- *  lps_public BFA LPS public functions
- */
-
-/**
- * Allocate a lport srvice tag.
- */
-struct bfa_lps_s  *
-bfa_lps_alloc(struct bfa_s *bfa)
-{
-	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
-	struct bfa_lps_s	*lps = NULL;
-
-	bfa_q_deq(&mod->lps_free_q, &lps);
-
-	if (lps == NULL)
-		return NULL;
-
-	list_add_tail(&lps->qe, &mod->lps_active_q);
-
-	bfa_sm_set_state(lps, bfa_lps_sm_init);
-	return lps;
-}
-
-/**
- * Free lport service tag. This can be called anytime after an alloc.
- * No need to wait for any pending login/logout completions.
- */
-void
-bfa_lps_delete(struct bfa_lps_s *lps)
-{
-	bfa_sm_send_event(lps, BFA_LPS_SM_DELETE);
-}
-
-/**
- * Initiate a lport login.
- */
-void
-bfa_lps_flogi(struct bfa_lps_s *lps, void *uarg, u8 alpa, u16 pdusz,
-	wwn_t pwwn, wwn_t nwwn, bfa_boolean_t auth_en)
-{
-	lps->uarg	= uarg;
-	lps->alpa	= alpa;
-	lps->pdusz	= pdusz;
-	lps->pwwn	= pwwn;
-	lps->nwwn	= nwwn;
-	lps->fdisc	= BFA_FALSE;
-	lps->auth_en	= auth_en;
-	bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN);
-}
-
-/**
- * Initiate a lport fdisc login.
- */
-void
-bfa_lps_fdisc(struct bfa_lps_s *lps, void *uarg, u16 pdusz, wwn_t pwwn,
-	wwn_t nwwn)
-{
-	lps->uarg	= uarg;
-	lps->alpa	= 0;
-	lps->pdusz	= pdusz;
-	lps->pwwn	= pwwn;
-	lps->nwwn	= nwwn;
-	lps->fdisc	= BFA_TRUE;
-	lps->auth_en	= BFA_FALSE;
-	bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN);
-}
-
-/**
- * Initiate a lport logout (flogi).
- */
-void
-bfa_lps_flogo(struct bfa_lps_s *lps)
-{
-	bfa_sm_send_event(lps, BFA_LPS_SM_LOGOUT);
-}
-
-/**
- * Initiate a lport FDSIC logout.
- */
-void
-bfa_lps_fdisclogo(struct bfa_lps_s *lps)
-{
-	bfa_sm_send_event(lps, BFA_LPS_SM_LOGOUT);
-}
-
-/**
- * Discard a pending login request -- should be called only for
- * link down handling.
- */
-void
-bfa_lps_discard(struct bfa_lps_s *lps)
-{
-	bfa_sm_send_event(lps, BFA_LPS_SM_OFFLINE);
-}
-
-/**
- * Return lport services tag
- */
-u8
-bfa_lps_get_tag(struct bfa_lps_s *lps)
-{
-	return lps->lp_tag;
-}
-
-/**
- * Return lport services tag given the pid
- */
-u8
-bfa_lps_get_tag_from_pid(struct bfa_s *bfa, u32 pid)
-{
-	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
-	struct bfa_lps_s	*lps;
-	int			i;
-
-	for (i = 0, lps = mod->lps_arr; i < mod->num_lps; i++, lps++) {
-		if (lps->lp_pid == pid)
-			return lps->lp_tag;
-	}
-
-	/* Return base port tag anyway */
-	return 0;
-}
-
-/**
- * return if fabric login indicates support for NPIV
- */
-bfa_boolean_t
-bfa_lps_is_npiv_en(struct bfa_lps_s *lps)
-{
-	return lps->npiv_en;
-}
-
-/**
- * Return TRUE if attached to F-Port, else return FALSE
- */
-bfa_boolean_t
-bfa_lps_is_fport(struct bfa_lps_s *lps)
-{
-	return lps->fport;
-}
-
-/**
- * Return TRUE if attached to a Brocade Fabric
- */
-bfa_boolean_t
-bfa_lps_is_brcd_fabric(struct bfa_lps_s *lps)
-{
-	return lps->brcd_switch;
-}
-/**
- * return TRUE if authentication is required
- */
-bfa_boolean_t
-bfa_lps_is_authreq(struct bfa_lps_s *lps)
-{
-	return lps->auth_req;
-}
-
-bfa_eproto_status_t
-bfa_lps_get_extstatus(struct bfa_lps_s *lps)
-{
-	return lps->ext_status;
-}
-
-/**
- * return port id assigned to the lport
- */
-u32
-bfa_lps_get_pid(struct bfa_lps_s *lps)
-{
-	return lps->lp_pid;
-}
-
-/**
- * Return bb_credit assigned in FLOGI response
- */
-u16
-bfa_lps_get_peer_bbcredit(struct bfa_lps_s *lps)
-{
-	return lps->pr_bbcred;
-}
-
-/**
- * Return peer port name
- */
-wwn_t
-bfa_lps_get_peer_pwwn(struct bfa_lps_s *lps)
-{
-	return lps->pr_pwwn;
-}
-
-/**
- * Return peer node name
- */
-wwn_t
-bfa_lps_get_peer_nwwn(struct bfa_lps_s *lps)
-{
-	return lps->pr_nwwn;
-}
-
-/**
- * return reason code if login request is rejected
- */
-u8
-bfa_lps_get_lsrjt_rsn(struct bfa_lps_s *lps)
-{
-	return lps->lsrjt_rsn;
-}
-
-/**
- * return explanation code if login request is rejected
- */
-u8
-bfa_lps_get_lsrjt_expl(struct bfa_lps_s *lps)
-{
-	return lps->lsrjt_expl;
-}
-
-/**
- * Return fpma/spma MAC for lport
- */
-struct mac_s
-bfa_lps_get_lp_mac(struct bfa_lps_s *lps)
-{
-	return lps->lp_mac;
-}
-
-/**
- * LPS firmware message class handler.
- */
-void
-bfa_lps_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
-{
-	union bfi_lps_i2h_msg_u	msg;
-
-	bfa_trc(bfa, m->mhdr.msg_id);
-	msg.msg = m;
-
-	switch (m->mhdr.msg_id) {
-	case BFI_LPS_H2I_LOGIN_RSP:
-		bfa_lps_login_rsp(bfa, msg.login_rsp);
-		break;
-
-	case BFI_LPS_H2I_LOGOUT_RSP:
-		bfa_lps_logout_rsp(bfa, msg.logout_rsp);
-		break;
-
-	case BFI_LPS_H2I_CVL_EVENT:
-		bfa_lps_rx_cvl_event(bfa, msg.cvl_event);
-		break;
-
-	default:
-		bfa_trc(bfa, m->mhdr.msg_id);
-		bfa_assert(0);
-	}
-}
-
-

+ 0 - 38
drivers/scsi/bfa/bfa_lps_priv.h

@@ -1,38 +0,0 @@
-/*
- * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
- * All rights reserved
- * www.brocade.com
- *
- * Linux driver for Brocade Fibre Channel Host Bus Adapter.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License (GPL) Version 2 as
- * published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- */
-
-#ifndef __BFA_LPS_PRIV_H__
-#define __BFA_LPS_PRIV_H__
-
-#include <bfa_svc.h>
-
-struct bfa_lps_mod_s {
-	struct list_head		lps_free_q;
-	struct list_head		lps_active_q;
-	struct bfa_lps_s	*lps_arr;
-	int			num_lps;
-};
-
-#define BFA_LPS_MOD(__bfa)		(&(__bfa)->modules.lps_mod)
-#define BFA_LPS_FROM_TAG(__mod, __tag)	(&(__mod)->lps_arr[__tag])
-
-/*
- * external functions
- */
-void	bfa_lps_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
-
-#endif /* __BFA_LPS_PRIV_H__ */

+ 42 - 22
drivers/scsi/bfa/bfa_priv.h → drivers/scsi/bfa/bfa_modules.h

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
  * All rights reserved
  * www.brocade.com
  *
@@ -15,26 +15,52 @@
  * General Public License for more details.
  */

-#ifndef __BFA_PRIV_H__
-#define __BFA_PRIV_H__
+/**
+ *  bfa_modules.h BFA modules
+ */
+
+#ifndef __BFA_MODULES_H__
+#define __BFA_MODULES_H__
+
+#include "bfa_cs.h"
+#include "bfa.h"
+#include "bfa_svc.h"
+#include "bfa_fcpim.h"
+#include "bfa_port.h"
+
+struct bfa_modules_s {
+	struct bfa_fcport_s	fcport;		/*  fc port module	      */
+	struct bfa_fcxp_mod_s	fcxp_mod;	/*  fcxp module	      */
+	struct bfa_lps_mod_s	lps_mod;	/*  fcxp module	      */
+	struct bfa_uf_mod_s	uf_mod;		/*  unsolicited frame module */
+	struct bfa_rport_mod_s	rport_mod;	/*  remote port module	      */
+	struct bfa_fcpim_mod_s	fcpim_mod;	/*  FCP initiator module     */
+	struct bfa_sgpg_mod_s	sgpg_mod;	/*  SG page module	      */
+	struct bfa_port_s	port;		/*  Physical port module     */
+};
+
+/*
+ * !!! Only append to the enums defined here to avoid any versioning
+ * !!! needed between trace utility and driver version
+ */
+enum {
+	BFA_TRC_HAL_CORE	= 1,
+	BFA_TRC_HAL_FCXP	= 2,
+	BFA_TRC_HAL_FCPIM	= 3,
+	BFA_TRC_HAL_IOCFC_CT	= 4,
+	BFA_TRC_HAL_IOCFC_CB	= 5,
+};
 
-#include "bfa_iocfc.h"
-#include "bfa_intr_priv.h"
-#include "bfa_trcmod_priv.h"
-#include "bfa_modules_priv.h"
-#include "bfa_fwimg_priv.h"
-#include <cs/bfa_log.h>
-#include <bfa_timer.h>
 
 /**
  * Macro to define a new BFA module
  */
-#define BFA_MODULE(__mod) 						\
+#define BFA_MODULE(__mod)						\
 	static void bfa_ ## __mod ## _meminfo(				\
 			struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len,	\
 			u32 *dm_len);      \
 	static void bfa_ ## __mod ## _attach(struct bfa_s *bfa,		\
-			void *bfad, struct bfa_iocfc_cfg_s *cfg, 	\
+			void *bfad, struct bfa_iocfc_cfg_s *cfg,	\
 			struct bfa_meminfo_s *meminfo,			\
 			struct bfa_pcidev_s *pcidev);      \
 	static void bfa_ ## __mod ## _detach(struct bfa_s *bfa);      \
@@ -77,17 +103,15 @@ extern struct bfa_module_s *hal_mods[];
 
 struct bfa_s {
 	void			*bfad;		/*  BFA driver instance    */
-	struct bfa_aen_s	*aen;		/*  AEN module		    */
 	struct bfa_plog_s	*plog;		/*  portlog buffer	    */
-	struct bfa_log_mod_s	*logm;		/*  driver logging modulen */
 	struct bfa_trc_mod_s	*trcmod;	/*  driver tracing	    */
 	struct bfa_ioc_s	ioc;		/*  IOC module		    */
 	struct bfa_iocfc_s	iocfc;		/*  IOCFC module	    */
 	struct bfa_timer_mod_s	timer_mod;	/*  timer module	    */
 	struct bfa_modules_s	modules;	/*  BFA modules	    */
-	struct list_head	comp_q;		/*  pending completions    */
-	bfa_boolean_t		rme_process;	/*  RME processing enabled */
-	struct list_head		reqq_waitq[BFI_IOC_MAX_CQS];
+	struct list_head	comp_q;		/*  pending completions     */
+	bfa_boolean_t		rme_process;	/*  RME processing enabled  */
+	struct list_head	reqq_waitq[BFI_IOC_MAX_CQS];
 	bfa_boolean_t		fcs;		/*  FCS is attached to BFA */
 	struct bfa_msix_s	msix;
 };
@@ -95,8 +119,6 @@ struct bfa_s {
 extern bfa_isr_func_t bfa_isrs[BFI_MC_MAX];
 extern bfa_ioc_mbox_mcfunc_t  bfa_mbox_isrs[];
 extern bfa_boolean_t bfa_auto_recover;
-extern struct bfa_module_s hal_mod_flash;
-extern struct bfa_module_s hal_mod_fcdiag;
 extern struct bfa_module_s hal_mod_sgpg;
 extern struct bfa_module_s hal_mod_fcport;
 extern struct bfa_module_s hal_mod_fcxp;
@@ -104,7 +126,5 @@ extern struct bfa_module_s hal_mod_lps;
 extern struct bfa_module_s hal_mod_uf;
 extern struct bfa_module_s hal_mod_rport;
 extern struct bfa_module_s hal_mod_fcpim;
-extern struct bfa_module_s hal_mod_pbind;
-
-#endif /* __BFA_PRIV_H__ */
 
+#endif /* __BFA_MODULES_H__ */

+ 0 - 43
drivers/scsi/bfa/bfa_modules_priv.h

@@ -1,43 +0,0 @@
-/*
- * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
- * All rights reserved
- * www.brocade.com
- *
- * Linux driver for Brocade Fibre Channel Host Bus Adapter.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License (GPL) Version 2 as
- * published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- */
-
-#ifndef __BFA_MODULES_PRIV_H__
-#define __BFA_MODULES_PRIV_H__
-
-#include "bfa_uf_priv.h"
-#include "bfa_port_priv.h"
-#include "bfa_rport_priv.h"
-#include "bfa_fcxp_priv.h"
-#include "bfa_lps_priv.h"
-#include "bfa_fcpim_priv.h"
-#include <cee/bfa_cee.h>
-#include <port/bfa_port.h>
-
-
-struct bfa_modules_s {
-	struct bfa_fcport_s	fcport;	/*  fc port module	*/
-	struct bfa_fcxp_mod_s fcxp_mod; /*  fcxp module		*/
-	struct bfa_lps_mod_s lps_mod;   /*  fcxp module		*/
-	struct bfa_uf_mod_s uf_mod;	/*  unsolicited frame module	*/
-	struct bfa_rport_mod_s rport_mod; /*  remote port module	*/
-	struct bfa_fcpim_mod_s fcpim_mod; /*  FCP initiator module	*/
-	struct bfa_sgpg_mod_s sgpg_mod; /*  SG page module		*/
-	struct bfa_cee_s cee;   	/*  CEE Module                 */
-	struct bfa_port_s port;		/*  Physical port module	*/
-};
-
-#endif /* __BFA_MODULES_PRIV_H__ */

+ 57 - 87
drivers/scsi/bfa/bfa_os_inc.h

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
  * All rights reserved
  * www.brocade.com
  *
@@ -22,30 +22,20 @@
 #ifndef __BFA_OS_INC_H__
 #define __BFA_OS_INC_H__

-#ifndef __KERNEL__
-#include <stdint.h>
-#else
 #include <linux/types.h>
-
 #include <linux/version.h>
 #include <linux/pci.h>
-
 #include <linux/dma-mapping.h>
-#define SET_MODULE_VERSION(VER)
-
 #include <linux/idr.h>
-
 #include <linux/interrupt.h>
 #include <linux/cdev.h>
 #include <linux/fs.h>
 #include <linux/delay.h>
 #include <linux/vmalloc.h>
-
 #include <linux/workqueue.h>
-
+#include <linux/bitops.h>
 #include <scsi/scsi.h>
 #include <scsi/scsi_host.h>
-
 #include <scsi/scsi_tcq.h>
 #include <scsi/scsi_transport_fc.h>
 #include <scsi/scsi_transport.h>
@@ -54,97 +44,75 @@
 #define __BIGENDIAN
 #endif

-#define BFA_ERR			KERN_ERR
-#define BFA_WARNING		KERN_WARNING
-#define BFA_NOTICE		KERN_NOTICE
-#define BFA_INFO		KERN_INFO
-#define BFA_DEBUG		KERN_DEBUG
-
-#define LOG_BFAD_INIT		0x00000001
-#define LOG_FCP_IO		0x00000002
-
-#ifdef DEBUG
-#define BFA_LOG_TRACE(bfad, level, mask, fmt, arg...)			\
-		BFA_LOG(bfad, level, mask, fmt, ## arg)
-#define BFA_DEV_TRACE(bfad, level, fmt, arg...)				\
-		BFA_DEV_PRINTF(bfad, level, fmt, ## arg)
-#define BFA_TRACE(level, fmt, arg...)					\
-		BFA_PRINTF(level, fmt, ## arg)
-#else
-#define BFA_LOG_TRACE(bfad, level, mask, fmt, arg...)
-#define BFA_DEV_TRACE(bfad, level, fmt, arg...)
-#define BFA_TRACE(level, fmt, arg...)
-#endif
+static inline u64 bfa_os_get_clock(void)
+{
+	return jiffies;
+}
+
+static inline u64 bfa_os_get_log_time(void)
+{
+	u64 system_time = 0;
+	struct timeval tv;
+	do_gettimeofday(&tv);
+
+	/* We are interested in seconds only. */
+	system_time = tv.tv_sec;
+	return system_time;
+}
+
+#define bfa_io_lat_clock_res_div HZ
+#define bfa_io_lat_clock_res_mul 1000
 
 #define BFA_ASSERT(p) do {						\
 	if (!(p)) {      \
 		printk(KERN_ERR "assert(%s) failed at %s:%d\n",		\
 		#p, __FILE__, __LINE__);      \
-		BUG();      \
 	}								\
 } while (0)

-
-#define BFA_LOG(bfad, level, mask, fmt, arg...)				\
-do { 									\
-	if (((mask) & (((struct bfad_s *)(bfad))->			\
-		cfg_data[cfg_log_mask])) || (level[1] <= '3'))		\
-		dev_printk(level, &(((struct bfad_s *)			\
-			(bfad))->pcidev->dev), fmt, ##arg);      \
+#define BFA_LOG(level, bfad, mask, fmt, arg...)				\
+do {									\
+	if (((mask) == 4) || (level[1] <= '4'))				\
+		dev_printk(level, &((bfad)->pcidev)->dev, fmt, ##arg);	\
 } while (0)
 
-#ifndef BFA_DEV_PRINTF
-#define BFA_DEV_PRINTF(bfad, level, fmt, arg...)			\
-		dev_printk(level, &(((struct bfad_s *)			\
-			(bfad))->pcidev->dev), fmt, ##arg);
-#endif
-
-#define BFA_PRINTF(level, fmt, arg...)					\
-		printk(level fmt, ##arg);
-
-int bfa_os_MWB(void *);
-
-#define bfa_os_mmiowb()		mmiowb()
-
 #define bfa_swap_3b(_x)				\
 	((((_x) & 0xff) << 16) |		\
 	((_x) & 0x00ff00) |			\
 	(((_x) & 0xff0000) >> 16))

-#define bfa_swap_8b(_x) 				\
-     ((((_x) & 0xff00000000000000ull) >> 56)		\
-      | (((_x) & 0x00ff000000000000ull) >> 40)		\
-      | (((_x) & 0x0000ff0000000000ull) >> 24)		\
-      | (((_x) & 0x000000ff00000000ull) >> 8)		\
-      | (((_x) & 0x00000000ff000000ull) << 8)		\
-      | (((_x) & 0x0000000000ff0000ull) << 24)		\
-      | (((_x) & 0x000000000000ff00ull) << 40)		\
-      | (((_x) & 0x00000000000000ffull) << 56))
-
-#define bfa_os_swap32(_x) 			\
-	((((_x) & 0xff) << 24) 		|	\
+#define bfa_swap_8b(_x)					\
+	((((_x) & 0xff00000000000000ull) >> 56)		\
+	 | (((_x) & 0x00ff000000000000ull) >> 40)	\
+	 | (((_x) & 0x0000ff0000000000ull) >> 24)	\
+	 | (((_x) & 0x000000ff00000000ull) >> 8)	\
+	 | (((_x) & 0x00000000ff000000ull) << 8)	\
+	 | (((_x) & 0x0000000000ff0000ull) << 24)	\
+	 | (((_x) & 0x000000000000ff00ull) << 40)	\
+	 | (((_x) & 0x00000000000000ffull) << 56))
+
+#define bfa_os_swap32(_x)			\
+	((((_x) & 0xff) << 24)		|	\
 	(((_x) & 0x0000ff00) << 8)	|	\
 	(((_x) & 0x0000ff00) << 8)	|	\
 	(((_x) & 0x00ff0000) >> 8)	|	\
 	(((_x) & 0x00ff0000) >> 8)	|	\
 	(((_x) & 0xff000000) >> 24))
 	(((_x) & 0xff000000) >> 24))
 
 
-#define bfa_os_swap_sgaddr(_x)	((u64)(					\
-	(((u64)(_x) & (u64)0x00000000000000ffull) << 32)	|	\
-	(((u64)(_x) & (u64)0x000000000000ff00ull) << 32)	|	\
-	(((u64)(_x) & (u64)0x0000000000ff0000ull) << 32)	|	\
-	(((u64)(_x) & (u64)0x00000000ff000000ull) << 32)	|	\
-	(((u64)(_x) & (u64)0x000000ff00000000ull) >> 32)	|	\
-	(((u64)(_x) & (u64)0x0000ff0000000000ull) >> 32)	|	\
-	(((u64)(_x) & (u64)0x00ff000000000000ull) >> 32)	|	\
+#define bfa_os_swap_sgaddr(_x)  ((u64)(                                 \
+	(((u64)(_x) & (u64)0x00000000000000ffull) << 32)        |       \
+	(((u64)(_x) & (u64)0x000000000000ff00ull) << 32)        |       \
+	(((u64)(_x) & (u64)0x0000000000ff0000ull) << 32)        |       \
+	(((u64)(_x) & (u64)0x00000000ff000000ull) << 32)        |       \
+	(((u64)(_x) & (u64)0x000000ff00000000ull) >> 32)        |       \
+	(((u64)(_x) & (u64)0x0000ff0000000000ull) >> 32)        |       \
+	(((u64)(_x) & (u64)0x00ff000000000000ull) >> 32)        |       \
 	(((u64)(_x) & (u64)0xff00000000000000ull) >> 32)))

 #ifndef __BIGENDIAN
 #define bfa_os_htons(_x) ((u16)((((_x) & 0xff00) >> 8) | \
 				 (((_x) & 0x00ff) << 8)))
-
 #define bfa_os_htonl(_x)	bfa_os_swap32(_x)
 #define bfa_os_htonll(_x)	bfa_swap_8b(_x)
 #define bfa_os_hton3b(_x)	bfa_swap_3b(_x)
-
 #define bfa_os_wtole(_x)   (_x)
 #define bfa_os_sgaddr(_x)  (_x)

@@ -170,17 +138,16 @@ int bfa_os_MWB(void *);
 #define bfa_os_memcpy	memcpy
 #define bfa_os_udelay	udelay
 #define bfa_os_vsprintf vsprintf
+#define bfa_os_snprintf snprintf
 
 #define bfa_os_assign(__t, __s) __t = __s
-
-#define bfa_os_addr_t char __iomem *
-#define bfa_os_panic()
+#define bfa_os_addr_t void __iomem *
 
 #define bfa_os_reg_read(_raddr) readl(_raddr)
 #define bfa_os_reg_write(_raddr, _val) writel((_val), (_raddr))
-#define bfa_os_mem_read(_raddr, _off)                                   \
+#define bfa_os_mem_read(_raddr, _off)					\
 	bfa_os_swap32(readl(((_raddr) + (_off))))
-#define bfa_os_mem_write(_raddr, _off, _val)                            \
+#define bfa_os_mem_write(_raddr, _off, _val)				\
 	writel(bfa_os_swap32((_val)), ((_raddr) + (_off)))

 #define BFA_TRC_TS(_trcm)						\
@@ -191,11 +158,6 @@ int bfa_os_MWB(void *);
 				(tv.tv_sec*1000000+tv.tv_usec);      \
 			 })

-struct bfa_log_mod_s;
-void bfa_os_printf(struct bfa_log_mod_s *log_mod, u32 msg_id,
-			const char *fmt, ...);
-#endif
-
 #define boolean_t int

 /**
@@ -206,7 +168,15 @@ struct bfa_timeval_s {
 	u32	tv_usec;	/*  microseconds   */
 };

-void bfa_os_gettimeofday(struct bfa_timeval_s *tv);
+static inline void
+bfa_os_gettimeofday(struct bfa_timeval_s *tv)
+{
+	struct timeval  tmp_tv;
+
+	do_gettimeofday(&tmp_tv);
+	tv->tv_sec = (u32) tmp_tv.tv_sec;
+	tv->tv_usec = (u32) tmp_tv.tv_usec;
+}
 
 static inline void
 wwn2str(char *wwn_str, u64 wwn)

+ 56 - 64
drivers/scsi/bfa/include/cs/bfa_plog.h → drivers/scsi/bfa/bfa_plog.h

@@ -1,5 +1,5 @@
 /*
 /*
- * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
  * All rights reserved
  * All rights reserved
  * www.brocade.com
  *
 #ifndef __BFA_PORTLOG_H__
 #ifndef __BFA_PORTLOG_H__
 #define __BFA_PORTLOG_H__

-#include <defs/bfa_defs_types.h>
+#include "bfa_fc.h"
+#include "bfa_defs.h"
 
 
 #define BFA_PL_NLOG_ENTS 256
 #define BFA_PL_LOG_REC_INCR(_x) ((_x)++, (_x) %= BFA_PL_NLOG_ENTS)
 #define BFA_PL_INT_LOG_SZ      8    /* number of integers in the integer log */
 #define BFA_PL_INT_LOG_SZ      8    /* number of integers in the integer log */

 enum bfa_plog_log_type {
-	BFA_PL_LOG_TYPE_INT 	= 1,
-	BFA_PL_LOG_TYPE_STRING 	= 2,
+	BFA_PL_LOG_TYPE_INVALID	= 0,
+	BFA_PL_LOG_TYPE_INT	= 1,
+	BFA_PL_LOG_TYPE_STRING	= 2,
 };
 };

 /*
  * the (fixed size) record format for each entry in the portlog
  */
 struct bfa_plog_rec_s {
-				 * entry is added to the circular log.   */
-	u8         port;	/* Source port that logged this entry. CM
-				 * entities will use 0xFF */
-	u8         mid;	/* Integer value to be used by all entities *
-				 * while logging. The module id to string   *
-				 * conversion will be done by BFAL. See
-				 * enum bfa_plog_mid */
-	u8         eid;	/* indicates Rx, Tx, IOCTL, etc. See
-				 * enum bfa_plog_eid */
-	u8         log_type; /* indicates string log or integer log.
-				   * see bfa_plog_log_type_t */
-	u8         log_num_ints;
+	u64	tv;	/* timestamp */
+	u8	 port;	/* Source port that logged this entry */
+	u8	 mid;	/* module id */
+	u8	 eid;	/* indicates Rx, Tx, IOCTL, etc.  bfa_plog_eid */
+	u8	 log_type; /* string/integer log, bfa_plog_log_type_t */
+	u8	 log_num_ints;
 	/*
 	 * interpreted only if log_type is INT_LOG. indicates number of
 	 * integers in the int_log[] (0-PL_INT_LOG_SZ).
 	 */
-	u8         rsvd;
-	u16        misc;	/* can be used to indicate fc frame length,
-				 *etc.. */
+	u8	 rsvd;
+	u16	misc;	/* can be used to indicate fc frame length */
 	union {
-		char            string_log[BFA_PL_STRING_LOG_SZ];
-		u32        int_log[BFA_PL_INT_LOG_SZ];
+		char	    string_log[BFA_PL_STRING_LOG_SZ];
+		u32	int_log[BFA_PL_INT_LOG_SZ];
 	} log_entry;

 };
@@ -73,20 +65,20 @@ struct bfa_plog_rec_s {
  *  - Do not remove any entry or rearrange the order.
  */
 enum bfa_plog_mid {
-	BFA_PL_MID_INVALID 	= 0,
-	BFA_PL_MID_DEBUG 	= 1,
-	BFA_PL_MID_DRVR 	= 2,
-	BFA_PL_MID_HAL 		= 3,
-	BFA_PL_MID_HAL_FCXP 	= 4,
-	BFA_PL_MID_HAL_UF 	= 5,
-	BFA_PL_MID_FCS 		= 6,
+	BFA_PL_MID_INVALID	= 0,
+	BFA_PL_MID_DEBUG	= 1,
+	BFA_PL_MID_DRVR		= 2,
+	BFA_PL_MID_HAL		= 3,
+	BFA_PL_MID_HAL_FCXP	= 4,
+	BFA_PL_MID_HAL_UF	= 5,
+	BFA_PL_MID_FCS		= 6,
 	BFA_PL_MID_LPS		= 7,
-	BFA_PL_MID_MAX 		= 8
+	BFA_PL_MID_MAX		= 8
 };

 #define BFA_PL_MID_STRLEN    8
 struct bfa_plog_mid_strings_s {
-	char            m_str[BFA_PL_MID_STRLEN];
+	char	    m_str[BFA_PL_MID_STRLEN];
 };

 /*
@@ -99,36 +91,37 @@ struct bfa_plog_mid_strings_s {
  *  - Do not remove any entry or rearrange the order.
  */
 enum bfa_plog_eid {
-	BFA_PL_EID_INVALID 		= 0,
-	BFA_PL_EID_IOC_DISABLE 		= 1,
-	BFA_PL_EID_IOC_ENABLE 		= 2,
-	BFA_PL_EID_PORT_DISABLE 	= 3,
-	BFA_PL_EID_PORT_ENABLE 		= 4,
-	BFA_PL_EID_PORT_ST_CHANGE 	= 5,
-	BFA_PL_EID_TX 			= 6,
-	BFA_PL_EID_TX_ACK1 		= 7,
-	BFA_PL_EID_TX_RJT 		= 8,
-	BFA_PL_EID_TX_BSY 		= 9,
-	BFA_PL_EID_RX 			= 10,
-	BFA_PL_EID_RX_ACK1 		= 11,
-	BFA_PL_EID_RX_RJT 		= 12,
-	BFA_PL_EID_RX_BSY 		= 13,
-	BFA_PL_EID_CT_IN 		= 14,
-	BFA_PL_EID_CT_OUT 		= 15,
-	BFA_PL_EID_DRIVER_START 	= 16,
-	BFA_PL_EID_RSCN 		= 17,
-	BFA_PL_EID_DEBUG 		= 18,
-	BFA_PL_EID_MISC 		= 19,
+	BFA_PL_EID_INVALID		= 0,
+	BFA_PL_EID_IOC_DISABLE		= 1,
+	BFA_PL_EID_IOC_ENABLE		= 2,
+	BFA_PL_EID_PORT_DISABLE		= 3,
+	BFA_PL_EID_PORT_ENABLE		= 4,
+	BFA_PL_EID_PORT_ST_CHANGE	= 5,
+	BFA_PL_EID_TX			= 6,
+	BFA_PL_EID_TX_ACK1		= 7,
+	BFA_PL_EID_TX_RJT		= 8,
+	BFA_PL_EID_TX_BSY		= 9,
+	BFA_PL_EID_RX			= 10,
+	BFA_PL_EID_RX_ACK1		= 11,
+	BFA_PL_EID_RX_RJT		= 12,
+	BFA_PL_EID_RX_BSY		= 13,
+	BFA_PL_EID_CT_IN		= 14,
+	BFA_PL_EID_CT_OUT		= 15,
+	BFA_PL_EID_DRIVER_START		= 16,
+	BFA_PL_EID_RSCN			= 17,
+	BFA_PL_EID_DEBUG		= 18,
+	BFA_PL_EID_MISC			= 19,
 	BFA_PL_EID_FIP_FCF_DISC		= 20,
 	BFA_PL_EID_FIP_FCF_CVL		= 21,
 	BFA_PL_EID_LOGIN		= 22,
 	BFA_PL_EID_LOGO			= 23,
-	BFA_PL_EID_MAX			= 24
+	BFA_PL_EID_TRUNK_SCN		= 24,
+	BFA_PL_EID_MAX
 };

-#define BFA_PL_ENAME_STRLEN    	8
+#define BFA_PL_ENAME_STRLEN	8
 struct bfa_plog_eid_strings_s {
-	char            e_str[BFA_PL_ENAME_STRLEN];
+	char	    e_str[BFA_PL_ENAME_STRLEN];
 };

 #define BFA_PL_SIG_LEN	8
@@ -138,12 +131,12 @@ struct bfa_plog_eid_strings_s {
  * per port circular log buffer
  */
 struct bfa_plog_s {
-	char            plog_sig[BFA_PL_SIG_LEN];	/* Start signature */
-	u8         plog_enabled;
-	u8         rsvd[7];
-	u32        ticks;
-	u16        head;
-	u16        tail;
+	char	    plog_sig[BFA_PL_SIG_LEN];	/* Start signature */
+	u8	 plog_enabled;
+	u8	 rsvd[7];
+	u32	ticks;
+	u16	head;
+	u16	tail;
 	struct bfa_plog_rec_s  plog_recs[BFA_PL_NLOG_ENTS];
 };

@@ -154,8 +147,7 @@ void bfa_plog_intarr(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
 			enum bfa_plog_eid event, u16 misc,
 			u32 *intarr, u32 num_ints);
 void bfa_plog_fchdr(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
-			enum bfa_plog_eid event, u16 misc,
-			struct fchs_s *fchdr);
+		enum bfa_plog_eid event, u16 misc, struct fchs_s *fchdr);
 void bfa_plog_fchdr_and_pl(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
 			enum bfa_plog_eid event, u16 misc,
 			struct fchs_s *fchdr, u32 pld_w0);

+ 62 - 72
drivers/scsi/bfa/bfa_port.c

@@ -1,5 +1,5 @@
 /*
 /*
- * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
  * All rights reserved
  * All rights reserved
  * www.brocade.com
  *
  * General Public License for more details.
  * General Public License for more details.
  */

-#include <cs/bfa_trc.h>
-#include <cs/bfa_log.h>
-#include <cs/bfa_debug.h>
-#include <port/bfa_port.h>
-#include <bfi/bfi.h>
-#include <bfi/bfi_port.h>
-#include <bfa_ioc.h>
-#include <cna/bfa_cna_trcmod.h>
+#include "bfa_defs_svc.h"
+#include "bfa_port.h"
+#include "bfi.h"
+#include "bfa_ioc.h"
+
 
 
 BFA_TRC_FILE(CNA, PORT);

 #define bfa_ioc_portid(__ioc) ((__ioc)->port_id)
 
 
 static void
-bfa_port_stats_swap(struct bfa_port_s *port, union bfa_pport_stats_u *stats)
+bfa_port_stats_swap(struct bfa_port_s *port, union bfa_port_stats_u *stats)
 {
 {
-	u32       *dip = (u32 *) stats;
-	u32        t0, t1;
-	int             i;
+	u32    *dip = (u32 *) stats;
+	u32    t0, t1;
+	int	    i;
 
 
-	     i += 2) {
+	for (i = 0; i < sizeof(union bfa_port_stats_u)/sizeof(u32);
+		i += 2) {
 		t0 = dip[i];
 		t0 = dip[i];
 		t1 = dip[i + 1];
 #ifdef __BIGENDIAN
 		dip[i + 1] = bfa_os_ntohl(t0);
 		dip[i + 1] = bfa_os_ntohl(t0);
 #endif
 	}
-    /** todo
-     * QoS stats r also swapped as 64bit; that structure also
-     * has to use 64 bit counters
-     */
 }
 }

 /**
 static void
 static void
 bfa_port_enable_isr(struct bfa_port_s *port, bfa_status_t status)
 {
+	bfa_trc(port, status);
+	port->endis_pending = BFA_FALSE;
+	port->endis_cbfn(port->endis_cbarg, status);
 }
 }

 /**
 static void
 static void
 bfa_port_disable_isr(struct bfa_port_s *port, bfa_status_t status)
 {
+	bfa_trc(port, status);
+	port->endis_pending = BFA_FALSE;
+	port->endis_cbfn(port->endis_cbarg, status);
 }
 }

 /**
 		struct bfa_timeval_s tv;
 		struct bfa_timeval_s tv;

 		memcpy(port->stats, port->stats_dma.kva,
+		       sizeof(union bfa_port_stats_u));
 		bfa_port_stats_swap(port, port->stats);
 		bfa_port_stats_swap(port, port->stats);

 		bfa_os_gettimeofday(&tv);
 	struct bfa_timeval_s tv;
 	struct bfa_timeval_s tv;

 	port->stats_status = status;
+	port->stats_busy   = BFA_FALSE;
 
 
 	/**
-	 * re-initialize time stamp for stats reset
-	 */
+	* re-initialize time stamp for stats reset
+	*/
 	bfa_os_gettimeofday(&tv);
 	bfa_os_gettimeofday(&tv);
 	port->stats_reset_time = tv.tv_sec;

 static void
 static void
 bfa_port_isr(void *cbarg, struct bfi_mbmsg_s *m)
 {
+	struct bfa_port_s *port = (struct bfa_port_s *) cbarg;
 	union bfi_port_i2h_msg_u *i2hmsg;
 	union bfi_port_i2h_msg_u *i2hmsg;

+	i2hmsg = (union bfi_port_i2h_msg_u *) m;
 	bfa_trc(port, m->mh.msg_id);
 	bfa_trc(port, m->mh.msg_id);

 	switch (m->mh.msg_id) {
 		break;
 		break;

 	case BFI_PORT_I2H_GET_STATS_RSP:
-		 * Stats busy flag is still set? (may be cmd timed out)
-		 */
+		/* Stats busy flag is still set? (may be cmd timed out) */
 		if (port->stats_busy == BFA_FALSE)
 		if (port->stats_busy == BFA_FALSE)
 			break;
 		bfa_port_get_stats_isr(port, i2hmsg->getstats_rsp.status);
 u32
 u32
 bfa_port_meminfo(void)
 {
+	return BFA_ROUNDUP(sizeof(union bfa_port_stats_u), BFA_DMA_ALIGN_SZ);
 }
 }

 /**
  *
  *
  *
  * @param[in] port Port module pointer
- * 	      dma_pa  Physical Address of Port DMA Memory
+ *	      dma_kva Kernel Virtual Address of Port DMA Memory
+ *	      dma_pa  Physical Address of Port DMA Memory
  *
  *
  * @return void
  */
 bfa_port_mem_claim(struct bfa_port_s *port, u8 *dma_kva, u64 dma_pa)
 bfa_port_mem_claim(struct bfa_port_s *port, u8 *dma_kva, u64 dma_pa)
 {
 	port->stats_dma.kva = dma_kva;
+	port->stats_dma.pa  = dma_pa;
 }
 }

 /**
  */
  */
 bfa_status_t
 bfa_port_enable(struct bfa_port_s *port, bfa_port_endis_cbfn_t cbfn,
+		 void *cbarg)
 {
 {
 	struct bfi_port_generic_req_s *m;
 	struct bfi_port_generic_req_s *m;
 
 
-	/** todo Not implemented */
-	bfa_assert(0);
+	if (bfa_ioc_is_disabled(port->ioc)) {
+		bfa_trc(port, BFA_STATUS_IOC_DISABLED);
+		return BFA_STATUS_IOC_DISABLED;
+	}
 
 
 	if (!bfa_ioc_is_operational(port->ioc)) {
 	if (!bfa_ioc_is_operational(port->ioc)) {
 		bfa_trc(port, BFA_STATUS_IOC_FAILURE);
 		bfa_trc(port, BFA_STATUS_IOC_FAILURE);
@@ -256,11 +250,11 @@ bfa_port_enable(struct bfa_port_s *port, bfa_port_endis_cbfn_t cbfn,
 		return BFA_STATUS_DEVBUSY;
 	}

-	m = (struct bfi_port_generic_req_s *)port->endis_mb.msg;
+	m = (struct bfi_port_generic_req_s *) port->endis_mb.msg;

 	port->msgtag++;
-	port->endis_cbfn = cbfn;
-	port->endis_cbarg = cbarg;
+	port->endis_cbfn    = cbfn;
+	port->endis_cbarg   = cbarg;
 	port->endis_pending = BFA_TRUE;

 	bfi_h2i_set(m->mh, BFI_MC_PORT, BFI_PORT_H2I_ENABLE_REQ,
@@ -281,12 +275,14 @@ bfa_port_enable(struct bfa_port_s *port, bfa_port_endis_cbfn_t cbfn,
  */
 bfa_status_t
 bfa_port_disable(struct bfa_port_s *port, bfa_port_endis_cbfn_t cbfn,
-		 void *cbarg)
+		  void *cbarg)
 {
 	struct bfi_port_generic_req_s *m;

-	/** todo Not implemented */
-	bfa_assert(0);
+	if (bfa_ioc_is_disabled(port->ioc)) {
+		bfa_trc(port, BFA_STATUS_IOC_DISABLED);
+		return BFA_STATUS_IOC_DISABLED;
+	}

 	if (!bfa_ioc_is_operational(port->ioc)) {
 		bfa_trc(port, BFA_STATUS_IOC_FAILURE);
@@ -298,11 +294,11 @@ bfa_port_disable(struct bfa_port_s *port, bfa_port_endis_cbfn_t cbfn,
 		return BFA_STATUS_DEVBUSY;
 	}

-	m = (struct bfi_port_generic_req_s *)port->endis_mb.msg;
+	m = (struct bfi_port_generic_req_s *) port->endis_mb.msg;

 	port->msgtag++;
-	port->endis_cbfn = cbfn;
-	port->endis_cbarg = cbarg;
+	port->endis_cbfn    = cbfn;
+	port->endis_cbarg   = cbarg;
 	port->endis_pending = BFA_TRUE;

 	bfi_h2i_set(m->mh, BFI_MC_PORT, BFI_PORT_H2I_DISABLE_REQ,
@@ -322,8 +318,8 @@ bfa_port_disable(struct bfa_port_s *port, bfa_port_endis_cbfn_t cbfn,
  * @return Status
  */
 bfa_status_t
-bfa_port_get_stats(struct bfa_port_s *port, union bfa_pport_stats_u *stats,
-		   bfa_port_stats_cbfn_t cbfn, void *cbarg)
+bfa_port_get_stats(struct bfa_port_s *port, union bfa_port_stats_u *stats,
+		    bfa_port_stats_cbfn_t cbfn, void *cbarg)
 {
 	struct bfi_port_get_stats_req_s *m;

@@ -337,12 +333,12 @@ bfa_port_get_stats(struct bfa_port_s *port, union bfa_pport_stats_u *stats,
 		return BFA_STATUS_DEVBUSY;
 	}

-	m = (struct bfi_port_get_stats_req_s *)port->stats_mb.msg;
+	m = (struct bfi_port_get_stats_req_s *) port->stats_mb.msg;

-	port->stats = stats;
-	port->stats_cbfn = cbfn;
+	port->stats	  = stats;
+	port->stats_cbfn  = cbfn;
 	port->stats_cbarg = cbarg;
-	port->stats_busy = BFA_TRUE;
+	port->stats_busy  = BFA_TRUE;
 	bfa_dma_be_addr_set(m->dma_addr, port->stats_dma.pa);

 	bfi_h2i_set(m->mh, BFI_MC_PORT, BFI_PORT_H2I_GET_STATS_REQ,
@@ -362,7 +358,7 @@ bfa_port_get_stats(struct bfa_port_s *port, union bfa_pport_stats_u *stats,
  */
 bfa_status_t
 bfa_port_clear_stats(struct bfa_port_s *port, bfa_port_stats_cbfn_t cbfn,
-		     void *cbarg)
+		      void *cbarg)
 {
 	struct bfi_port_generic_req_s *m;

@@ -376,11 +372,11 @@ bfa_port_clear_stats(struct bfa_port_s *port, bfa_port_stats_cbfn_t cbfn,
 		return BFA_STATUS_DEVBUSY;
 	}

-	m = (struct bfi_port_generic_req_s *)port->stats_mb.msg;
+	m = (struct bfi_port_generic_req_s *) port->stats_mb.msg;

-	port->stats_cbfn = cbfn;
+	port->stats_cbfn  = cbfn;
 	port->stats_cbarg = cbarg;
-	port->stats_busy = BFA_TRUE;
+	port->stats_busy  = BFA_TRUE;

 	bfi_h2i_set(m->mh, BFI_MC_PORT, BFI_PORT_H2I_CLEAR_STATS_REQ,
 		    bfa_ioc_portid(port->ioc));
@@ -400,11 +396,9 @@ bfa_port_clear_stats(struct bfa_port_s *port, bfa_port_stats_cbfn_t cbfn,
 void
 bfa_port_hbfail(void *arg)
 {
-	struct bfa_port_s *port = (struct bfa_port_s *)arg;
+	struct bfa_port_s *port = (struct bfa_port_s *) arg;

-	/*
-	 * Fail any pending get_stats/clear_stats requests
-	 */
+	/* Fail any pending get_stats/clear_stats requests */
 	if (port->stats_busy) {
 		if (port->stats_cbfn)
 			port->stats_cbfn(port->stats_cbarg, BFA_STATUS_FAILED);
@@ -412,9 +406,7 @@ bfa_port_hbfail(void *arg)
 		port->stats_busy = BFA_FALSE;
 	}

-	/*
-	 * Clear any enable/disable is pending
-	 */
+	/* Clear any enable/disable is pending */
 	if (port->endis_pending) {
 		if (port->endis_cbfn)
 			port->endis_cbfn(port->endis_cbarg, BFA_STATUS_FAILED);
@@ -433,22 +425,20 @@ bfa_port_hbfail(void *arg)
  *                   The device driver specific mbox ISR functions have
  *                   this pointer as one of the parameters.
  *            trcmod -
- *            logmod -
  *
  * @return void
  */
 void
-bfa_port_attach(struct bfa_port_s *port, struct bfa_ioc_s *ioc, void *dev,
-		struct bfa_trc_mod_s *trcmod, struct bfa_log_mod_s *logmod)
+bfa_port_attach(struct bfa_port_s *port, struct bfa_ioc_s *ioc,
+		 void *dev, struct bfa_trc_mod_s *trcmod)
 {
 	struct bfa_timeval_s tv;

 	bfa_assert(port);

-	port->dev = dev;
-	port->ioc = ioc;
+	port->dev    = dev;
+	port->ioc    = ioc;
 	port->trcmod = trcmod;
-	port->logmod = logmod;

 	port->stats_busy = BFA_FALSE;
 	port->endis_pending = BFA_FALSE;

+ 66 - 0
drivers/scsi/bfa/bfa_port.h

@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ *
+ * Linux driver for Brocade Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef __BFA_PORT_H__
+#define __BFA_PORT_H__
+
+#include "bfa_defs_svc.h"
+#include "bfa_ioc.h"
+#include "bfa_cs.h"
+
+typedef void (*bfa_port_stats_cbfn_t) (void *dev, bfa_status_t status);
+typedef void (*bfa_port_endis_cbfn_t) (void *dev, bfa_status_t status);
+
+struct bfa_port_s {
+	void				*dev;
+	struct bfa_ioc_s		*ioc;
+	struct bfa_trc_mod_s		*trcmod;
+	u32			msgtag;
+	bfa_boolean_t			stats_busy;
+	struct bfa_mbox_cmd_s		stats_mb;
+	bfa_port_stats_cbfn_t		stats_cbfn;
+	void				*stats_cbarg;
+	bfa_status_t			stats_status;
+	u32			stats_reset_time;
+	union bfa_port_stats_u		*stats;
+	struct bfa_dma_s		stats_dma;
+	bfa_boolean_t			endis_pending;
+	struct bfa_mbox_cmd_s		endis_mb;
+	bfa_port_endis_cbfn_t		endis_cbfn;
+	void				*endis_cbarg;
+	bfa_status_t			endis_status;
+	struct bfa_ioc_hbfail_notify_s	hbfail;
+};
+
+void	     bfa_port_attach(struct bfa_port_s *port, struct bfa_ioc_s *ioc,
+				void *dev, struct bfa_trc_mod_s *trcmod);
+void	     bfa_port_detach(struct bfa_port_s *port);
+void	     bfa_port_hbfail(void *arg);
+
+bfa_status_t bfa_port_get_stats(struct bfa_port_s *port,
+				 union bfa_port_stats_u *stats,
+				 bfa_port_stats_cbfn_t cbfn, void *cbarg);
+bfa_status_t bfa_port_clear_stats(struct bfa_port_s *port,
+				   bfa_port_stats_cbfn_t cbfn, void *cbarg);
+bfa_status_t bfa_port_enable(struct bfa_port_s *port,
+			      bfa_port_endis_cbfn_t cbfn, void *cbarg);
+bfa_status_t bfa_port_disable(struct bfa_port_s *port,
+			       bfa_port_endis_cbfn_t cbfn, void *cbarg);
+u32     bfa_port_meminfo(void);
+void	     bfa_port_mem_claim(struct bfa_port_s *port,
+				 u8 *dma_kva, u64 dma_pa);
+#endif	/* __BFA_PORT_H__ */

+ 0 - 94
drivers/scsi/bfa/bfa_port_priv.h

@@ -1,94 +0,0 @@
-/*
- * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
- * All rights reserved
- * www.brocade.com
- *
- * Linux driver for Brocade Fibre Channel Host Bus Adapter.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License (GPL) Version 2 as
- * published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- */
-
-#ifndef __BFA_PORT_PRIV_H__
-#define __BFA_PORT_PRIV_H__
-
-#include <defs/bfa_defs_pport.h>
-#include <bfi/bfi_pport.h>
-#include "bfa_intr_priv.h"
-
-/**
- * Link notification data structure
- */
-struct bfa_fcport_ln_s {
-	struct bfa_fcport_s     *fcport;
-	bfa_sm_t                sm;
-	struct bfa_cb_qe_s      ln_qe;  /*  BFA callback queue elem for ln */
-	enum bfa_pport_linkstate ln_event; /*  ln event for callback */
-};
-
-/**
- * BFA FC port data structure
- */
-struct bfa_fcport_s {
-	struct bfa_s 		*bfa;	/*  parent BFA instance */
-	bfa_sm_t		sm;	/*  port state machine */
-	wwn_t			nwwn;	/*  node wwn of physical port */
-	wwn_t			pwwn;	/*  port wwn of physical port */
-	enum bfa_pport_speed speed_sup;
-					/*  supported speeds */
-	enum bfa_pport_speed speed;	/*  current speed */
-	enum bfa_pport_topology topology;	/*  current topology */
-	u8			myalpa;	/*  my ALPA in LOOP topology */
-	u8			rsvd[3];
-	u32             mypid:24;
-	u32             rsvd_b:8;
-	struct bfa_pport_cfg_s	cfg;	/*  current port configuration */
-	struct bfa_qos_attr_s  qos_attr;   /* QoS Attributes */
-	struct bfa_qos_vc_attr_s qos_vc_attr;  /*  VC info from ELP */
-	struct bfa_reqq_wait_s	reqq_wait;
-					/*  to wait for room in reqq */
-	struct bfa_reqq_wait_s	svcreq_wait;
-					/*  to wait for room in reqq */
-	struct bfa_reqq_wait_s	stats_reqq_wait;
-					/*  to wait for room in reqq (stats) */
-	void			*event_cbarg;
-	void			(*event_cbfn) (void *cbarg,
-						bfa_pport_event_t event);
-	union {
-		union bfi_fcport_i2h_msg_u i2hmsg;
-	} event_arg;
-	void			*bfad;	/*  BFA driver handle */
-	struct bfa_fcport_ln_s   ln; /* Link Notification */
-	struct bfa_cb_qe_s		hcb_qe;	/*  BFA callback queue elem */
-	struct bfa_timer_s      timer;  /*  timer */
-	u32		msgtag;	/*  firmware msg tag for reply */
-	u8			*stats_kva;
-	u64		stats_pa;
-	union bfa_fcport_stats_u *stats;
-	union bfa_fcport_stats_u *stats_ret; /*  driver stats location */
-	bfa_status_t            stats_status; /*  stats/statsclr status */
-	bfa_boolean_t           stats_busy; /*  outstanding stats/statsclr */
-	bfa_boolean_t           stats_qfull;
-	u32                	stats_reset_time; /* stats reset time stamp */
-	bfa_cb_pport_t          stats_cbfn; /*  driver callback function */
-	void                    *stats_cbarg; /* user callback arg */
-	bfa_boolean_t           diag_busy; /*  diag busy status */
-	bfa_boolean_t           beacon; /*  port beacon status */
-	bfa_boolean_t           link_e2e_beacon; /*  link beacon status */
-};
-
-#define BFA_FCPORT_MOD(__bfa)	(&(__bfa)->modules.fcport)
-
-/*
- * public functions
- */
-void bfa_fcport_init(struct bfa_s *bfa);
-void bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
-
-#endif /* __BFA_PORT_PRIV_H__ */

+ 0 - 906
drivers/scsi/bfa/bfa_rport.c

@@ -1,906 +0,0 @@
-/*
- * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
- * All rights reserved
- * www.brocade.com
- *
- * Linux driver for Brocade Fibre Channel Host Bus Adapter.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License (GPL) Version 2 as
- * published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- */
-
-#include <bfa.h>
-#include <bfa_svc.h>
-#include <cs/bfa_debug.h>
-#include <bfi/bfi_rport.h>
-#include "bfa_intr_priv.h"
-
-BFA_TRC_FILE(HAL, RPORT);
-BFA_MODULE(rport);
-
-#define bfa_rport_offline_cb(__rp) do {				\
-	if ((__rp)->bfa->fcs)						\
-		bfa_cb_rport_offline((__rp)->rport_drv);      \
-	else {								\
-		bfa_cb_queue((__rp)->bfa, &(__rp)->hcb_qe,		\
-				__bfa_cb_rport_offline, (__rp));      \
-	}								\
-} while (0)
-
-#define bfa_rport_online_cb(__rp) do {				\
-	if ((__rp)->bfa->fcs)						\
-		bfa_cb_rport_online((__rp)->rport_drv);      \
-	else {								\
-		bfa_cb_queue((__rp)->bfa, &(__rp)->hcb_qe,		\
-				  __bfa_cb_rport_online, (__rp));      \
-		}							\
-} while (0)
-
-/*
- * forward declarations
- */
-static struct bfa_rport_s *bfa_rport_alloc(struct bfa_rport_mod_s *rp_mod);
-static void bfa_rport_free(struct bfa_rport_s *rport);
-static bfa_boolean_t bfa_rport_send_fwcreate(struct bfa_rport_s *rp);
-static bfa_boolean_t bfa_rport_send_fwdelete(struct bfa_rport_s *rp);
-static bfa_boolean_t bfa_rport_send_fwspeed(struct bfa_rport_s *rp);
-static void __bfa_cb_rport_online(void *cbarg, bfa_boolean_t complete);
-static void __bfa_cb_rport_offline(void *cbarg, bfa_boolean_t complete);
-
-/**
- *  bfa_rport_sm BFA rport state machine
- */
-
-
-enum bfa_rport_event {
-	BFA_RPORT_SM_CREATE	= 1,	/*  rport create event		*/
-	BFA_RPORT_SM_DELETE	= 2,	/*  deleting an existing rport */
-	BFA_RPORT_SM_ONLINE	= 3,	/*  rport is online		*/
-	BFA_RPORT_SM_OFFLINE	= 4,	/*  rport is offline		*/
-	BFA_RPORT_SM_FWRSP	= 5,	/*  firmware response		*/
-	BFA_RPORT_SM_HWFAIL	= 6,	/*  IOC h/w failure		*/
-	BFA_RPORT_SM_QOS_SCN	= 7,	/*  QoS SCN from firmware	*/
-	BFA_RPORT_SM_SET_SPEED	= 8,	/*  Set Rport Speed 		*/
-	BFA_RPORT_SM_QRESUME	= 9,	/*  space in requeue queue	*/
-};
-
-static void	bfa_rport_sm_uninit(struct bfa_rport_s *rp,
-					enum bfa_rport_event event);
-static void	bfa_rport_sm_created(struct bfa_rport_s *rp,
-					 enum bfa_rport_event event);
-static void	bfa_rport_sm_fwcreate(struct bfa_rport_s *rp,
-					  enum bfa_rport_event event);
-static void	bfa_rport_sm_online(struct bfa_rport_s *rp,
-					enum bfa_rport_event event);
-static void	bfa_rport_sm_fwdelete(struct bfa_rport_s *rp,
-					  enum bfa_rport_event event);
-static void	bfa_rport_sm_offline(struct bfa_rport_s *rp,
-					 enum bfa_rport_event event);
-static void	bfa_rport_sm_deleting(struct bfa_rport_s *rp,
-					  enum bfa_rport_event event);
-static void	bfa_rport_sm_offline_pending(struct bfa_rport_s *rp,
-					  enum bfa_rport_event event);
-static void	bfa_rport_sm_delete_pending(struct bfa_rport_s *rp,
-					  enum bfa_rport_event event);
-static void	bfa_rport_sm_iocdisable(struct bfa_rport_s *rp,
-					    enum bfa_rport_event event);
-static void	bfa_rport_sm_fwcreate_qfull(struct bfa_rport_s *rp,
-					  enum bfa_rport_event event);
-static void	bfa_rport_sm_fwdelete_qfull(struct bfa_rport_s *rp,
-					  enum bfa_rport_event event);
-static void	bfa_rport_sm_deleting_qfull(struct bfa_rport_s *rp,
-					  enum bfa_rport_event event);
-
-/**
- * Beginning state, only online event expected.
- */
-static void
-bfa_rport_sm_uninit(struct bfa_rport_s *rp, enum bfa_rport_event event)
-{
-	bfa_trc(rp->bfa, rp->rport_tag);
-	bfa_trc(rp->bfa, event);
-
-	switch (event) {
-	case BFA_RPORT_SM_CREATE:
-		bfa_stats(rp, sm_un_cr);
-		bfa_sm_set_state(rp, bfa_rport_sm_created);
-		break;
-
-	default:
-		bfa_stats(rp, sm_un_unexp);
-		bfa_sm_fault(rp->bfa, event);
-	}
-}
-
-static void
-bfa_rport_sm_created(struct bfa_rport_s *rp, enum bfa_rport_event event)
-{
-	bfa_trc(rp->bfa, rp->rport_tag);
-	bfa_trc(rp->bfa, event);
-
-	switch (event) {
-	case BFA_RPORT_SM_ONLINE:
-		bfa_stats(rp, sm_cr_on);
-		if (bfa_rport_send_fwcreate(rp))
-			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
-		else
-			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
-		break;
-
-	case BFA_RPORT_SM_DELETE:
-		bfa_stats(rp, sm_cr_del);
-		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
-		bfa_rport_free(rp);
-		break;
-
-	case BFA_RPORT_SM_HWFAIL:
-		bfa_stats(rp, sm_cr_hwf);
-		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
-		break;
-
-	default:
-		bfa_stats(rp, sm_cr_unexp);
-		bfa_sm_fault(rp->bfa, event);
-	}
-}
-
-/**
- * Waiting for rport create response from firmware.
- */
-static void
-bfa_rport_sm_fwcreate(struct bfa_rport_s *rp, enum bfa_rport_event event)
-{
-	bfa_trc(rp->bfa, rp->rport_tag);
-	bfa_trc(rp->bfa, event);
-
-	switch (event) {
-	case BFA_RPORT_SM_FWRSP:
-		bfa_stats(rp, sm_fwc_rsp);
-		bfa_sm_set_state(rp, bfa_rport_sm_online);
-		bfa_rport_online_cb(rp);
-		break;
-
-	case BFA_RPORT_SM_DELETE:
-		bfa_stats(rp, sm_fwc_del);
-		bfa_sm_set_state(rp, bfa_rport_sm_delete_pending);
-		break;
-
-	case BFA_RPORT_SM_OFFLINE:
-		bfa_stats(rp, sm_fwc_off);
-		bfa_sm_set_state(rp, bfa_rport_sm_offline_pending);
-		break;
-
-	case BFA_RPORT_SM_HWFAIL:
-		bfa_stats(rp, sm_fwc_hwf);
-		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
-		break;
-
-	default:
-		bfa_stats(rp, sm_fwc_unexp);
-		bfa_sm_fault(rp->bfa, event);
-	}
-}
-
-/**
- * Request queue is full, awaiting queue resume to send create request.
- */
-static void
-bfa_rport_sm_fwcreate_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
-{
-	bfa_trc(rp->bfa, rp->rport_tag);
-	bfa_trc(rp->bfa, event);
-
-	switch (event) {
-	case BFA_RPORT_SM_QRESUME:
-		bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
-		bfa_rport_send_fwcreate(rp);
-		break;
-
-	case BFA_RPORT_SM_DELETE:
-		bfa_stats(rp, sm_fwc_del);
-		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
-		bfa_reqq_wcancel(&rp->reqq_wait);
-		bfa_rport_free(rp);
-		break;
-
-	case BFA_RPORT_SM_OFFLINE:
-		bfa_stats(rp, sm_fwc_off);
-		bfa_sm_set_state(rp, bfa_rport_sm_offline);
-		bfa_reqq_wcancel(&rp->reqq_wait);
-		bfa_rport_offline_cb(rp);
-		break;
-
-	case BFA_RPORT_SM_HWFAIL:
-		bfa_stats(rp, sm_fwc_hwf);
-		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
-		bfa_reqq_wcancel(&rp->reqq_wait);
-		break;
-
-	default:
-		bfa_stats(rp, sm_fwc_unexp);
-		bfa_sm_fault(rp->bfa, event);
-	}
-}
-
-/**
- * Online state - normal parking state.
- */
-static void
-bfa_rport_sm_online(struct bfa_rport_s *rp, enum bfa_rport_event event)
-{
-	struct bfi_rport_qos_scn_s *qos_scn;
-
-	bfa_trc(rp->bfa, rp->rport_tag);
-	bfa_trc(rp->bfa, event);
-
-	switch (event) {
-	case BFA_RPORT_SM_OFFLINE:
-		bfa_stats(rp, sm_on_off);
-		if (bfa_rport_send_fwdelete(rp))
-			bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
-		else
-			bfa_sm_set_state(rp, bfa_rport_sm_fwdelete_qfull);
-		break;
-
-	case BFA_RPORT_SM_DELETE:
-		bfa_stats(rp, sm_on_del);
-		if (bfa_rport_send_fwdelete(rp))
-			bfa_sm_set_state(rp, bfa_rport_sm_deleting);
-		else
-			bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
-		break;
-
-	case BFA_RPORT_SM_HWFAIL:
-		bfa_stats(rp, sm_on_hwf);
-		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
-		break;
-
-	case BFA_RPORT_SM_SET_SPEED:
-		bfa_rport_send_fwspeed(rp);
-		break;
-
-	case BFA_RPORT_SM_QOS_SCN:
-		qos_scn = (struct bfi_rport_qos_scn_s *) rp->event_arg.fw_msg;
-		rp->qos_attr = qos_scn->new_qos_attr;
-		bfa_trc(rp->bfa, qos_scn->old_qos_attr.qos_flow_id);
-		bfa_trc(rp->bfa, qos_scn->new_qos_attr.qos_flow_id);
-		bfa_trc(rp->bfa, qos_scn->old_qos_attr.qos_priority);
-		bfa_trc(rp->bfa, qos_scn->new_qos_attr.qos_priority);
-
-		qos_scn->old_qos_attr.qos_flow_id  =
-			bfa_os_ntohl(qos_scn->old_qos_attr.qos_flow_id);
-		qos_scn->new_qos_attr.qos_flow_id  =
-			bfa_os_ntohl(qos_scn->new_qos_attr.qos_flow_id);
-		qos_scn->old_qos_attr.qos_priority =
-			bfa_os_ntohl(qos_scn->old_qos_attr.qos_priority);
-		qos_scn->new_qos_attr.qos_priority =
-			bfa_os_ntohl(qos_scn->new_qos_attr.qos_priority);
-
-		if (qos_scn->old_qos_attr.qos_flow_id !=
-			qos_scn->new_qos_attr.qos_flow_id)
-			bfa_cb_rport_qos_scn_flowid(rp->rport_drv,
-						    qos_scn->old_qos_attr,
-						    qos_scn->new_qos_attr);
-		if (qos_scn->old_qos_attr.qos_priority !=
-			qos_scn->new_qos_attr.qos_priority)
-			bfa_cb_rport_qos_scn_prio(rp->rport_drv,
-						  qos_scn->old_qos_attr,
-						  qos_scn->new_qos_attr);
-		break;
-
-	default:
-		bfa_stats(rp, sm_on_unexp);
-		bfa_sm_fault(rp->bfa, event);
-	}
-}
-
-/**
- * Firmware rport is being deleted - awaiting f/w response.
- */
-static void
-bfa_rport_sm_fwdelete(struct bfa_rport_s *rp, enum bfa_rport_event event)
-{
-	bfa_trc(rp->bfa, rp->rport_tag);
-	bfa_trc(rp->bfa, event);
-
-	switch (event) {
-	case BFA_RPORT_SM_FWRSP:
-		bfa_stats(rp, sm_fwd_rsp);
-		bfa_sm_set_state(rp, bfa_rport_sm_offline);
-		bfa_rport_offline_cb(rp);
-		break;
-
-	case BFA_RPORT_SM_DELETE:
-		bfa_stats(rp, sm_fwd_del);
-		bfa_sm_set_state(rp, bfa_rport_sm_deleting);
-		break;
-
-	case BFA_RPORT_SM_HWFAIL:
-		bfa_stats(rp, sm_fwd_hwf);
-		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
-		bfa_rport_offline_cb(rp);
-		break;
-
-	default:
-		bfa_stats(rp, sm_fwd_unexp);
-		bfa_sm_fault(rp->bfa, event);
-	}
-}
-
-static void
-bfa_rport_sm_fwdelete_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
-{
-	bfa_trc(rp->bfa, rp->rport_tag);
-	bfa_trc(rp->bfa, event);
-
-	switch (event) {
-	case BFA_RPORT_SM_QRESUME:
-		bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
-		bfa_rport_send_fwdelete(rp);
-		break;
-
-	case BFA_RPORT_SM_DELETE:
-		bfa_stats(rp, sm_fwd_del);
-		bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
-		break;
-
-	case BFA_RPORT_SM_HWFAIL:
-		bfa_stats(rp, sm_fwd_hwf);
-		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
-		bfa_reqq_wcancel(&rp->reqq_wait);
-		bfa_rport_offline_cb(rp);
-		break;
-
-	default:
-		bfa_stats(rp, sm_fwd_unexp);
-		bfa_sm_fault(rp->bfa, event);
-	}
-}
-
-/**
- * Offline state.
- */
-static void
-bfa_rport_sm_offline(struct bfa_rport_s *rp, enum bfa_rport_event event)
-{
-	bfa_trc(rp->bfa, rp->rport_tag);
-	bfa_trc(rp->bfa, event);
-
-	switch (event) {
-	case BFA_RPORT_SM_DELETE:
-		bfa_stats(rp, sm_off_del);
-		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
-		bfa_rport_free(rp);
-		break;
-
-	case BFA_RPORT_SM_ONLINE:
-		bfa_stats(rp, sm_off_on);
-		if (bfa_rport_send_fwcreate(rp))
-			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
-		else
-			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
-		break;
-
-	case BFA_RPORT_SM_HWFAIL:
-		bfa_stats(rp, sm_off_hwf);
-		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
-		break;
-
-	default:
-		bfa_stats(rp, sm_off_unexp);
-		bfa_sm_fault(rp->bfa, event);
-	}
-}
-
-/**
- * Rport is deleted, waiting for firmware response to delete.
- */
-static void
-bfa_rport_sm_deleting(struct bfa_rport_s *rp, enum bfa_rport_event event)
-{
-	bfa_trc(rp->bfa, rp->rport_tag);
-	bfa_trc(rp->bfa, event);
-
-	switch (event) {
-	case BFA_RPORT_SM_FWRSP:
-		bfa_stats(rp, sm_del_fwrsp);
-		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
-		bfa_rport_free(rp);
-		break;
-
-	case BFA_RPORT_SM_HWFAIL:
-		bfa_stats(rp, sm_del_hwf);
-		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
-		bfa_rport_free(rp);
-		break;
-
-	default:
-		bfa_sm_fault(rp->bfa, event);
-	}
-}
-
-static void
-bfa_rport_sm_deleting_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
-{
-	bfa_trc(rp->bfa, rp->rport_tag);
-	bfa_trc(rp->bfa, event);
-
-	switch (event) {
-	case BFA_RPORT_SM_QRESUME:
-		bfa_stats(rp, sm_del_fwrsp);
-		bfa_sm_set_state(rp, bfa_rport_sm_deleting);
-		bfa_rport_send_fwdelete(rp);
-		break;
-
-	case BFA_RPORT_SM_HWFAIL:
-		bfa_stats(rp, sm_del_hwf);
-		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
-		bfa_reqq_wcancel(&rp->reqq_wait);
-		bfa_rport_free(rp);
-		break;
-
-	default:
-		bfa_sm_fault(rp->bfa, event);
-	}
-}
-
-/**
- * Waiting for rport create response from firmware. A delete is pending.
- */
-static void
-bfa_rport_sm_delete_pending(struct bfa_rport_s *rp,
-				enum bfa_rport_event event)
-{
-	bfa_trc(rp->bfa, rp->rport_tag);
-	bfa_trc(rp->bfa, event);
-
-	switch (event) {
-	case BFA_RPORT_SM_FWRSP:
-		bfa_stats(rp, sm_delp_fwrsp);
-		if (bfa_rport_send_fwdelete(rp))
-			bfa_sm_set_state(rp, bfa_rport_sm_deleting);
-		else
-			bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
-		break;
-
-	case BFA_RPORT_SM_HWFAIL:
-		bfa_stats(rp, sm_delp_hwf);
-		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
-		bfa_rport_free(rp);
-		break;
-
-	default:
-		bfa_stats(rp, sm_delp_unexp);
-		bfa_sm_fault(rp->bfa, event);
-	}
-}
-
-/**
- * Waiting for rport create response from firmware. Rport offline is pending.
- */
-static void
-bfa_rport_sm_offline_pending(struct bfa_rport_s *rp,
-				 enum bfa_rport_event event)
-{
-	bfa_trc(rp->bfa, rp->rport_tag);
-	bfa_trc(rp->bfa, event);
-
-	switch (event) {
-	case BFA_RPORT_SM_FWRSP:
-		bfa_stats(rp, sm_offp_fwrsp);
-		if (bfa_rport_send_fwdelete(rp))
-			bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
-		else
-			bfa_sm_set_state(rp, bfa_rport_sm_fwdelete_qfull);
-		break;
-
-	case BFA_RPORT_SM_DELETE:
-		bfa_stats(rp, sm_offp_del);
-		bfa_sm_set_state(rp, bfa_rport_sm_delete_pending);
-		break;
-
-	case BFA_RPORT_SM_HWFAIL:
-		bfa_stats(rp, sm_offp_hwf);
-		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
-		break;
-
-	default:
-		bfa_stats(rp, sm_offp_unexp);
-		bfa_sm_fault(rp->bfa, event);
-	}
-}
-
-/**
- * IOC h/w failed.
- */
-static void
-bfa_rport_sm_iocdisable(struct bfa_rport_s *rp, enum bfa_rport_event event)
-{
-	bfa_trc(rp->bfa, rp->rport_tag);
-	bfa_trc(rp->bfa, event);
-
-	switch (event) {
-	case BFA_RPORT_SM_OFFLINE:
-		bfa_stats(rp, sm_iocd_off);
-		bfa_rport_offline_cb(rp);
-		break;
-
-	case BFA_RPORT_SM_DELETE:
-		bfa_stats(rp, sm_iocd_del);
-		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
-		bfa_rport_free(rp);
-		break;
-
-	case BFA_RPORT_SM_ONLINE:
-		bfa_stats(rp, sm_iocd_on);
-		if (bfa_rport_send_fwcreate(rp))
-			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
-		else
-			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
-		break;
-
-	case BFA_RPORT_SM_HWFAIL:
-		break;
-
-	default:
-		bfa_stats(rp, sm_iocd_unexp);
-		bfa_sm_fault(rp->bfa, event);
-	}
-}
-
-
-
-/**
- *  bfa_rport_private BFA rport private functions
- */
-
-static void
-__bfa_cb_rport_online(void *cbarg, bfa_boolean_t complete)
-{
-	struct bfa_rport_s *rp = cbarg;
-
-	if (complete)
-		bfa_cb_rport_online(rp->rport_drv);
-}
-
-static void
-__bfa_cb_rport_offline(void *cbarg, bfa_boolean_t complete)
-{
-	struct bfa_rport_s *rp = cbarg;
-
-	if (complete)
-		bfa_cb_rport_offline(rp->rport_drv);
-}
-
-static void
-bfa_rport_qresume(void *cbarg)
-{
-	struct bfa_rport_s	*rp = cbarg;
-
-	bfa_sm_send_event(rp, BFA_RPORT_SM_QRESUME);
-}
-
-static void
-bfa_rport_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
-		u32 *dm_len)
-{
-	if (cfg->fwcfg.num_rports < BFA_RPORT_MIN)
-		cfg->fwcfg.num_rports = BFA_RPORT_MIN;
-
-	*km_len += cfg->fwcfg.num_rports * sizeof(struct bfa_rport_s);
-}
-
-static void
-bfa_rport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
-		     struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
-{
-	struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(bfa);
-	struct bfa_rport_s *rp;
-	u16        i;
-
-	INIT_LIST_HEAD(&mod->rp_free_q);
-	INIT_LIST_HEAD(&mod->rp_active_q);
-
-	rp = (struct bfa_rport_s *) bfa_meminfo_kva(meminfo);
-	mod->rps_list = rp;
-	mod->num_rports = cfg->fwcfg.num_rports;
-
-	bfa_assert(mod->num_rports
-		   && !(mod->num_rports & (mod->num_rports - 1)));
-
-	for (i = 0; i < mod->num_rports; i++, rp++) {
-		bfa_os_memset(rp, 0, sizeof(struct bfa_rport_s));
-		rp->bfa = bfa;
-		rp->rport_tag = i;
-		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
-
-		/**
-		 *  - is unused
-		 */
-		if (i)
-			list_add_tail(&rp->qe, &mod->rp_free_q);
-
-		bfa_reqq_winit(&rp->reqq_wait, bfa_rport_qresume, rp);
-	}
-
-	/**
-	 * consume memory
-	 */
-	bfa_meminfo_kva(meminfo) = (u8 *) rp;
-}
-
-static void
-bfa_rport_detach(struct bfa_s *bfa)
-{
-}
-
-static void
-bfa_rport_start(struct bfa_s *bfa)
-{
-}
-
-static void
-bfa_rport_stop(struct bfa_s *bfa)
-{
-}
-
-static void
-bfa_rport_iocdisable(struct bfa_s *bfa)
-{
-	struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(bfa);
-	struct bfa_rport_s *rport;
-	struct list_head        *qe, *qen;
-
-	list_for_each_safe(qe, qen, &mod->rp_active_q) {
-		rport = (struct bfa_rport_s *) qe;
-		bfa_sm_send_event(rport, BFA_RPORT_SM_HWFAIL);
-	}
-}
-
-static struct bfa_rport_s *
-bfa_rport_alloc(struct bfa_rport_mod_s *mod)
-{
-	struct bfa_rport_s *rport;
-
-	bfa_q_deq(&mod->rp_free_q, &rport);
-	if (rport)
-		list_add_tail(&rport->qe, &mod->rp_active_q);
-
-	return rport;
-}
-
-static void
-bfa_rport_free(struct bfa_rport_s *rport)
-{
-	struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(rport->bfa);
-
-	bfa_assert(bfa_q_is_on_q(&mod->rp_active_q, rport));
-	list_del(&rport->qe);
-	list_add_tail(&rport->qe, &mod->rp_free_q);
-}
-
-static bfa_boolean_t
-bfa_rport_send_fwcreate(struct bfa_rport_s *rp)
-{
-	struct bfi_rport_create_req_s *m;
-
-	/**
-	 * check for room in queue to send request now
-	 */
-	m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
-	if (!m) {
-		bfa_reqq_wait(rp->bfa, BFA_REQQ_RPORT, &rp->reqq_wait);
-		return BFA_FALSE;
-	}
-
-	bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_CREATE_REQ,
-			bfa_lpuid(rp->bfa));
-	m->bfa_handle = rp->rport_tag;
-	m->max_frmsz = bfa_os_htons(rp->rport_info.max_frmsz);
-	m->pid = rp->rport_info.pid;
-	m->lp_tag = rp->rport_info.lp_tag;
-	m->local_pid = rp->rport_info.local_pid;
-	m->fc_class = rp->rport_info.fc_class;
-	m->vf_en = rp->rport_info.vf_en;
-	m->vf_id = rp->rport_info.vf_id;
-	m->cisc = rp->rport_info.cisc;
-
-	/**
-	 * queue I/O message to firmware
-	 */
-	bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT);
-	return BFA_TRUE;
-}
-
-static bfa_boolean_t
-bfa_rport_send_fwdelete(struct bfa_rport_s *rp)
-{
-	struct bfi_rport_delete_req_s *m;
-
-	/**
-	 * check for room in queue to send request now
-	 */
-	m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
-	if (!m) {
-		bfa_reqq_wait(rp->bfa, BFA_REQQ_RPORT, &rp->reqq_wait);
-		return BFA_FALSE;
-	}
-
-	bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_DELETE_REQ,
-			bfa_lpuid(rp->bfa));
-	m->fw_handle = rp->fw_handle;
-
-	/**
-	 * queue I/O message to firmware
-	 */
-	bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT);
-	return BFA_TRUE;
-}
-
-static bfa_boolean_t
-bfa_rport_send_fwspeed(struct bfa_rport_s *rp)
-{
-	struct bfa_rport_speed_req_s *m;
-
-	/**
-	 * check for room in queue to send request now
-	 */
-	m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
-	if (!m) {
-		bfa_trc(rp->bfa, rp->rport_info.speed);
-		return BFA_FALSE;
-	}
-
-	bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_SET_SPEED_REQ,
-			bfa_lpuid(rp->bfa));
-	m->fw_handle = rp->fw_handle;
-	m->speed = (u8)rp->rport_info.speed;
-
-	/**
-	 * queue I/O message to firmware
-	 */
-	bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT);
-	return BFA_TRUE;
-}
-
-
-
-/**
- *  bfa_rport_public
- */
-
-/**
- * 		Rport interrupt processing.
- */
-void
-bfa_rport_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
-{
-	union bfi_rport_i2h_msg_u msg;
-	struct bfa_rport_s *rp;
-
-	bfa_trc(bfa, m->mhdr.msg_id);
-
-	msg.msg = m;
-
-	switch (m->mhdr.msg_id) {
-	case BFI_RPORT_I2H_CREATE_RSP:
-		rp = BFA_RPORT_FROM_TAG(bfa, msg.create_rsp->bfa_handle);
-		rp->fw_handle = msg.create_rsp->fw_handle;
-		rp->qos_attr = msg.create_rsp->qos_attr;
-		bfa_assert(msg.create_rsp->status == BFA_STATUS_OK);
-		bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP);
-		break;
-
-	case BFI_RPORT_I2H_DELETE_RSP:
-		rp = BFA_RPORT_FROM_TAG(bfa, msg.delete_rsp->bfa_handle);
-		bfa_assert(msg.delete_rsp->status == BFA_STATUS_OK);
-		bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP);
-		break;
-
-	case BFI_RPORT_I2H_QOS_SCN:
-		rp = BFA_RPORT_FROM_TAG(bfa, msg.qos_scn_evt->bfa_handle);
-		rp->event_arg.fw_msg = msg.qos_scn_evt;
-		bfa_sm_send_event(rp, BFA_RPORT_SM_QOS_SCN);
-		break;
-
-	default:
-		bfa_trc(bfa, m->mhdr.msg_id);
-		bfa_assert(0);
-	}
-}
-
-
-
-/**
- *  bfa_rport_api
- */
-
-struct bfa_rport_s *
-bfa_rport_create(struct bfa_s *bfa, void *rport_drv)
-{
-	struct bfa_rport_s *rp;
-
-	rp = bfa_rport_alloc(BFA_RPORT_MOD(bfa));
-
-	if (rp == NULL)
-		return NULL;
-
-	rp->bfa = bfa;
-	rp->rport_drv = rport_drv;
-	bfa_rport_clear_stats(rp);
-
-	bfa_assert(bfa_sm_cmp_state(rp, bfa_rport_sm_uninit));
-	bfa_sm_send_event(rp, BFA_RPORT_SM_CREATE);
-
-	return rp;
-}
-
-void
-bfa_rport_delete(struct bfa_rport_s *rport)
-{
-	bfa_sm_send_event(rport, BFA_RPORT_SM_DELETE);
-}
-
-void
-bfa_rport_online(struct bfa_rport_s *rport, struct bfa_rport_info_s *rport_info)
-{
-	bfa_assert(rport_info->max_frmsz != 0);
-
-	/**
-	 * Some JBODs are seen to be not setting PDU size correctly in PLOGI
-	 * responses. Default to minimum size.
-	 */
-	if (rport_info->max_frmsz == 0) {
-		bfa_trc(rport->bfa, rport->rport_tag);
-		rport_info->max_frmsz = FC_MIN_PDUSZ;
-	}
-
-	bfa_os_assign(rport->rport_info, *rport_info);
-	bfa_sm_send_event(rport, BFA_RPORT_SM_ONLINE);
-}
-
-void
-bfa_rport_offline(struct bfa_rport_s *rport)
-{
-	bfa_sm_send_event(rport, BFA_RPORT_SM_OFFLINE);
-}
-
-void
-bfa_rport_speed(struct bfa_rport_s *rport, enum bfa_pport_speed speed)
-{
-	bfa_assert(speed != 0);
-	bfa_assert(speed != BFA_PPORT_SPEED_AUTO);
-
-	rport->rport_info.speed = speed;
-	bfa_sm_send_event(rport, BFA_RPORT_SM_SET_SPEED);
-}
-
-void
-bfa_rport_get_stats(struct bfa_rport_s *rport,
-	struct bfa_rport_hal_stats_s *stats)
-{
-	*stats = rport->stats;
-}
-
-void
-bfa_rport_get_qos_attr(struct bfa_rport_s *rport,
-					struct bfa_rport_qos_attr_s *qos_attr)
-{
-	qos_attr->qos_priority  = bfa_os_ntohl(rport->qos_attr.qos_priority);
-	qos_attr->qos_flow_id  = bfa_os_ntohl(rport->qos_attr.qos_flow_id);
-
-}
-
-void
-bfa_rport_clear_stats(struct bfa_rport_s *rport)
-{
-	bfa_os_memset(&rport->stats, 0, sizeof(rport->stats));
-}
-
-

+ 0 - 45
drivers/scsi/bfa/bfa_rport_priv.h

@@ -1,45 +0,0 @@
-/*
- * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
- * All rights reserved
- * www.brocade.com
- *
- * Linux driver for Brocade Fibre Channel Host Bus Adapter.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License (GPL) Version 2 as
- * published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- */
-
-#ifndef __BFA_RPORT_PRIV_H__
-#define __BFA_RPORT_PRIV_H__
-
-#include <bfa_svc.h>
-
-#define BFA_RPORT_MIN	4
-
-struct bfa_rport_mod_s {
-	struct bfa_rport_s *rps_list;	/*  list of rports	*/
-	struct list_head 	rp_free_q;	/*  free bfa_rports	*/
-	struct list_head 	rp_active_q;	/*  active bfa_rports	*/
-	u16	num_rports;	/*  number of rports	*/
-};
-
-#define BFA_RPORT_MOD(__bfa)	(&(__bfa)->modules.rport_mod)
-
-/**
- * Convert rport tag to RPORT
- */
-#define BFA_RPORT_FROM_TAG(__bfa, _tag)				\
-	(BFA_RPORT_MOD(__bfa)->rps_list +				\
-	 ((_tag) & (BFA_RPORT_MOD(__bfa)->num_rports - 1)))
-
-/*
- * external functions
- */
-void	bfa_rport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
-#endif /* __BFA_RPORT_PRIV_H__ */

+ 0 - 226
drivers/scsi/bfa/bfa_sgpg.c

@@ -1,226 +0,0 @@
-/*
- * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
- * All rights reserved
- * www.brocade.com
- *
- * Linux driver for Brocade Fibre Channel Host Bus Adapter.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License (GPL) Version 2 as
- * published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- */
-
-#include <bfa.h>
-
-BFA_TRC_FILE(HAL, SGPG);
-BFA_MODULE(sgpg);
-
-/**
- *  bfa_sgpg_mod BFA SGPG Mode module
- */
-
-/**
- * Compute and return memory needed by FCP(im) module.
- */
-static void
-bfa_sgpg_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
-		u32 *dm_len)
-{
-	if (cfg->drvcfg.num_sgpgs < BFA_SGPG_MIN)
-		cfg->drvcfg.num_sgpgs = BFA_SGPG_MIN;
-
-	*km_len += (cfg->drvcfg.num_sgpgs + 1) * sizeof(struct bfa_sgpg_s);
-	*dm_len += (cfg->drvcfg.num_sgpgs + 1) * sizeof(struct bfi_sgpg_s);
-}
-
-
-static void
-bfa_sgpg_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
-		    struct bfa_meminfo_s *minfo, struct bfa_pcidev_s *pcidev)
-{
-	struct bfa_sgpg_mod_s	*mod = BFA_SGPG_MOD(bfa);
-	int				i;
-	struct bfa_sgpg_s		*hsgpg;
-	struct bfi_sgpg_s 	*sgpg;
-	u64		align_len;
-
-	union {
-		u64        pa;
-		union bfi_addr_u      addr;
-	} sgpg_pa;
-
-	INIT_LIST_HEAD(&mod->sgpg_q);
-	INIT_LIST_HEAD(&mod->sgpg_wait_q);
-
-	bfa_trc(bfa, cfg->drvcfg.num_sgpgs);
-
-	mod->num_sgpgs = cfg->drvcfg.num_sgpgs;
-	mod->sgpg_arr_pa = bfa_meminfo_dma_phys(minfo);
-	align_len = (BFA_SGPG_ROUNDUP(mod->sgpg_arr_pa) - mod->sgpg_arr_pa);
-	mod->sgpg_arr_pa += align_len;
-	mod->hsgpg_arr = (struct bfa_sgpg_s *) (bfa_meminfo_kva(minfo) +
-						align_len);
-	mod->sgpg_arr = (struct bfi_sgpg_s *) (bfa_meminfo_dma_virt(minfo) +
-						align_len);
-
-	hsgpg = mod->hsgpg_arr;
-	sgpg = mod->sgpg_arr;
-	sgpg_pa.pa = mod->sgpg_arr_pa;
-	mod->free_sgpgs = mod->num_sgpgs;
-
-	bfa_assert(!(sgpg_pa.pa & (sizeof(struct bfi_sgpg_s) - 1)));
-
-	for (i = 0; i < mod->num_sgpgs; i++) {
-		bfa_os_memset(hsgpg, 0, sizeof(*hsgpg));
-		bfa_os_memset(sgpg, 0, sizeof(*sgpg));
-
-		hsgpg->sgpg = sgpg;
-		hsgpg->sgpg_pa = sgpg_pa.addr;
-		list_add_tail(&hsgpg->qe, &mod->sgpg_q);
-
-		hsgpg++;
-		sgpg++;
-		sgpg_pa.pa += sizeof(struct bfi_sgpg_s);
-	}
-
-	bfa_meminfo_kva(minfo) = (u8 *) hsgpg;
-	bfa_meminfo_dma_virt(minfo) = (u8 *) sgpg;
-	bfa_meminfo_dma_phys(minfo) = sgpg_pa.pa;
-}
-
-static void
-bfa_sgpg_detach(struct bfa_s *bfa)
-{
-}
-
-static void
-bfa_sgpg_start(struct bfa_s *bfa)
-{
-}
-
-static void
-bfa_sgpg_stop(struct bfa_s *bfa)
-{
-}
-
-static void
-bfa_sgpg_iocdisable(struct bfa_s *bfa)
-{
-}
-
-
-
-/**
- *  bfa_sgpg_public BFA SGPG public functions
- */
-
-bfa_status_t
-bfa_sgpg_malloc(struct bfa_s *bfa, struct list_head *sgpg_q, int nsgpgs)
-{
-	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
-	struct bfa_sgpg_s *hsgpg;
-	int             i;
-
-	bfa_trc_fp(bfa, nsgpgs);
-
-	if (mod->free_sgpgs < nsgpgs)
-		return BFA_STATUS_ENOMEM;
-
-	for (i = 0; i < nsgpgs; i++) {
-		bfa_q_deq(&mod->sgpg_q, &hsgpg);
-		bfa_assert(hsgpg);
-		list_add_tail(&hsgpg->qe, sgpg_q);
-	}
-
-	mod->free_sgpgs -= nsgpgs;
-	return BFA_STATUS_OK;
-}
-
-void
-bfa_sgpg_mfree(struct bfa_s *bfa, struct list_head *sgpg_q, int nsgpg)
-{
-	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
-	struct bfa_sgpg_wqe_s *wqe;
-
-	bfa_trc_fp(bfa, nsgpg);
-
-	mod->free_sgpgs += nsgpg;
-	bfa_assert(mod->free_sgpgs <= mod->num_sgpgs);
-
-	list_splice_tail_init(sgpg_q, &mod->sgpg_q);
-
-	if (list_empty(&mod->sgpg_wait_q))
-		return;
-
-	/**
-	 * satisfy as many waiting requests as possible
-	 */
-	do {
-		wqe = bfa_q_first(&mod->sgpg_wait_q);
-		if (mod->free_sgpgs < wqe->nsgpg)
-			nsgpg = mod->free_sgpgs;
-		else
-			nsgpg = wqe->nsgpg;
-		bfa_sgpg_malloc(bfa, &wqe->sgpg_q, nsgpg);
-		wqe->nsgpg -= nsgpg;
-		if (wqe->nsgpg == 0) {
-			list_del(&wqe->qe);
-			wqe->cbfn(wqe->cbarg);
-		}
-	} while (mod->free_sgpgs && !list_empty(&mod->sgpg_wait_q));
-}
-
-void
-bfa_sgpg_wait(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe, int nsgpg)
-{
-	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
-
-	bfa_assert(nsgpg > 0);
-	bfa_assert(nsgpg > mod->free_sgpgs);
-
-	wqe->nsgpg_total = wqe->nsgpg = nsgpg;
-
-	/**
-	 * allocate any left to this one first
-	 */
-	if (mod->free_sgpgs) {
-		/**
-		 * no one else is waiting for SGPG
-		 */
-		bfa_assert(list_empty(&mod->sgpg_wait_q));
-		list_splice_tail_init(&mod->sgpg_q, &wqe->sgpg_q);
-		wqe->nsgpg -= mod->free_sgpgs;
-		mod->free_sgpgs = 0;
-	}
-
-	list_add_tail(&wqe->qe, &mod->sgpg_wait_q);
-}
-
-void
-bfa_sgpg_wcancel(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe)
-{
-	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
-
-	bfa_assert(bfa_q_is_on_q(&mod->sgpg_wait_q, wqe));
-	list_del(&wqe->qe);
-
-	if (wqe->nsgpg_total != wqe->nsgpg)
-		bfa_sgpg_mfree(bfa, &wqe->sgpg_q,
-				   wqe->nsgpg_total - wqe->nsgpg);
-}
-
-void
-bfa_sgpg_winit(struct bfa_sgpg_wqe_s *wqe, void (*cbfn) (void *cbarg),
-		   void *cbarg)
-{
-	INIT_LIST_HEAD(&wqe->sgpg_q);
-	wqe->cbfn = cbfn;
-	wqe->cbarg = cbarg;
-}
-
-

+ 0 - 79
drivers/scsi/bfa/bfa_sgpg_priv.h

@@ -1,79 +0,0 @@
-/*
- * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
- * All rights reserved
- * www.brocade.com
- *
- * Linux driver for Brocade Fibre Channel Host Bus Adapter.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License (GPL) Version 2 as
- * published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- */
-
-/**
- *  hal_sgpg.h BFA SG page module
- */
-
-#ifndef __BFA_SGPG_PRIV_H__
-#define __BFA_SGPG_PRIV_H__
-
-#include <cs/bfa_q.h>
-
-#define BFA_SGPG_MIN	(16)
-
-/**
- * Alignment macro for SG page allocation
- */
-#define BFA_SGPG_ROUNDUP(_l) (((_l) + (sizeof(struct bfi_sgpg_s) - 1)) \
-			& ~(sizeof(struct bfi_sgpg_s) - 1))
-
-struct bfa_sgpg_wqe_s {
-	struct list_head qe;	/*  queue sg page element	*/
-	int	nsgpg;		/*  pages to be allocated	*/
-	int	nsgpg_total;	/*  total pages required	*/
-	void	(*cbfn) (void *cbarg);
-				/*  callback function		*/
-	void	*cbarg;		/*  callback arg		*/
-	struct list_head sgpg_q;	/*  queue of alloced sgpgs	*/
-};
-
-struct bfa_sgpg_s {
-	struct list_head 	qe;	/*  queue sg page element	*/
-	struct bfi_sgpg_s *sgpg; /*  va of SG page		*/
-	union bfi_addr_u sgpg_pa;/*  pa of SG page		*/
-};
-
-/**
- * Given number of SG elements, BFA_SGPG_NPAGE() returns the number of
- * SG pages required.
- */
-#define BFA_SGPG_NPAGE(_nsges)  (((_nsges) / BFI_SGPG_DATA_SGES) + 1)
-
-struct bfa_sgpg_mod_s {
-	struct bfa_s *bfa;
-	int		num_sgpgs;	/*  number of SG pages		*/
-	int		free_sgpgs;	/*  number of free SG pages	*/
-	struct bfa_sgpg_s *hsgpg_arr;	/*  BFA SG page array	*/
-	struct bfi_sgpg_s *sgpg_arr;	/*  actual SG page array	*/
-	u64	sgpg_arr_pa;	/*  SG page array DMA addr	*/
-	struct list_head sgpg_q;	/*  queue of free SG pages	*/
-	struct list_head sgpg_wait_q; /*  wait queue for SG pages	*/
-};
-#define BFA_SGPG_MOD(__bfa)	(&(__bfa)->modules.sgpg_mod)
-
-bfa_status_t	bfa_sgpg_malloc(struct bfa_s *bfa, struct list_head *sgpg_q,
-								int nsgpgs);
-void		bfa_sgpg_mfree(struct bfa_s *bfa, struct list_head *sgpg_q,
-								int nsgpgs);
-void		bfa_sgpg_winit(struct bfa_sgpg_wqe_s *wqe,
-				   void (*cbfn) (void *cbarg), void *cbarg);
-void		bfa_sgpg_wait(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe,
-								int nsgpgs);
-void		bfa_sgpg_wcancel(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe);
-
-#endif /* __BFA_SGPG_PRIV_H__ */

+ 0 - 38
drivers/scsi/bfa/bfa_sm.c

@@ -1,38 +0,0 @@
-/*
- * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
- * All rights reserved
- * www.brocade.com
- *
- * Linux driver for Brocade Fibre Channel Host Bus Adapter.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License (GPL) Version 2 as
- * published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- */
-
-/**
- *  bfasm.c BFA State machine utility functions
- */
-
-#include <cs/bfa_sm.h>
-
-/**
- *  cs_sm_api
- */
-
-int
-bfa_sm_to_state(struct bfa_sm_table_s *smt, bfa_sm_t sm)
-{
-	int             i = 0;
-
-	while (smt[i].sm && smt[i].sm != sm)
-		i++;
-	return smt[i].state;
-}
-
-

+ 5423 - 0
drivers/scsi/bfa/bfa_svc.c

@@ -0,0 +1,5423 @@
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ *
+ * Linux driver for Brocade Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+
+#include "bfa_os_inc.h"
+#include "bfa_plog.h"
+#include "bfa_cs.h"
+#include "bfa_modules.h"
+#include "bfad_drv.h"
+
+BFA_TRC_FILE(HAL, FCXP);
+BFA_MODULE(fcxp);
+BFA_MODULE(sgpg);
+BFA_MODULE(lps);
+BFA_MODULE(fcport);
+BFA_MODULE(rport);
+BFA_MODULE(uf);
+
+/**
+ * LPS related definitions
+ */
+#define BFA_LPS_MIN_LPORTS      (1)
+#define BFA_LPS_MAX_LPORTS      (256)
+
+/*
+ * Maximum Vports supported per physical port or vf.
+ */
+#define BFA_LPS_MAX_VPORTS_SUPP_CB  255
+#define BFA_LPS_MAX_VPORTS_SUPP_CT  190
+
+/**
+ *  lps_pvt BFA LPS private functions
+ */
+
+enum bfa_lps_event {
+	BFA_LPS_SM_LOGIN	= 1,	/* login request from user	*/
+	BFA_LPS_SM_LOGOUT	= 2,	/* logout request from user	*/
+	BFA_LPS_SM_FWRSP	= 3,	/* f/w response to login/logout	*/
+	BFA_LPS_SM_RESUME	= 4,	/* space present in reqq queue	*/
+	BFA_LPS_SM_DELETE	= 5,	/* lps delete from user		*/
+	BFA_LPS_SM_OFFLINE	= 6,	/* Link is offline		*/
+	BFA_LPS_SM_RX_CVL	= 7,	/* Rx clear virtual link	*/
+};
+
+/**
+ * FC PORT related definitions
+ */
+/*
+ * The port is considered disabled if corresponding physical port or IOC are
+ * disabled explicitly
+ */
+#define BFA_PORT_IS_DISABLED(bfa) \
+	((bfa_fcport_is_disabled(bfa) == BFA_TRUE) || \
+	(bfa_ioc_is_disabled(&bfa->ioc) == BFA_TRUE))
+
+
+/**
+ * BFA port state machine events
+ */
+enum bfa_fcport_sm_event {
+	BFA_FCPORT_SM_START	= 1,	/*  start port state machine	*/
+	BFA_FCPORT_SM_STOP	= 2,	/*  stop port state machine	*/
+	BFA_FCPORT_SM_ENABLE	= 3,	/*  enable port		*/
+	BFA_FCPORT_SM_DISABLE	= 4,	/*  disable port state machine */
+	BFA_FCPORT_SM_FWRSP	= 5,	/*  firmware enable/disable rsp */
+	BFA_FCPORT_SM_LINKUP	= 6,	/*  firmware linkup event	*/
+	BFA_FCPORT_SM_LINKDOWN	= 7,	/*  firmware link down	*/
+	BFA_FCPORT_SM_QRESUME	= 8,	/*  CQ space available	*/
+	BFA_FCPORT_SM_HWFAIL	= 9,	/*  IOC h/w failure		*/
+};
+
+/**
+ * BFA port link notification state machine events
+ */
+
+enum bfa_fcport_ln_sm_event {
+	BFA_FCPORT_LN_SM_LINKUP		= 1,	/*  linkup event	*/
+	BFA_FCPORT_LN_SM_LINKDOWN	= 2,	/*  linkdown event	*/
+	BFA_FCPORT_LN_SM_NOTIFICATION	= 3	/*  done notification	*/
+};
+
+/**
+ * RPORT related definitions
+ */
+#define bfa_rport_offline_cb(__rp) do {					\
+	if ((__rp)->bfa->fcs)						\
+		bfa_cb_rport_offline((__rp)->rport_drv);      \
+	else {								\
+		bfa_cb_queue((__rp)->bfa, &(__rp)->hcb_qe,		\
+				__bfa_cb_rport_offline, (__rp));      \
+	}								\
+} while (0)
+
+#define bfa_rport_online_cb(__rp) do {					\
+	if ((__rp)->bfa->fcs)						\
+		bfa_cb_rport_online((__rp)->rport_drv);      \
+	else {								\
+		bfa_cb_queue((__rp)->bfa, &(__rp)->hcb_qe,		\
+				  __bfa_cb_rport_online, (__rp));      \
+		}							\
+} while (0)
+
+
+enum bfa_rport_event {
+	BFA_RPORT_SM_CREATE	= 1,	/*  rport create event		*/
+	BFA_RPORT_SM_DELETE	= 2,	/*  deleting an existing rport	*/
+	BFA_RPORT_SM_ONLINE	= 3,	/*  rport is online		*/
+	BFA_RPORT_SM_OFFLINE	= 4,	/*  rport is offline		*/
+	BFA_RPORT_SM_FWRSP	= 5,	/*  firmware response		*/
+	BFA_RPORT_SM_HWFAIL	= 6,	/*  IOC h/w failure		*/
+	BFA_RPORT_SM_QOS_SCN	= 7,	/*  QoS SCN from firmware	*/
+	BFA_RPORT_SM_SET_SPEED	= 8,	/*  Set Rport Speed		*/
+	BFA_RPORT_SM_QRESUME	= 9,	/*  space in requeue queue	*/
+};
+
+/**
+ * forward declarations FCXP related functions
+ */
+static void	__bfa_fcxp_send_cbfn(void *cbarg, bfa_boolean_t complete);
+static void	hal_fcxp_rx_plog(struct bfa_s *bfa, struct bfa_fcxp_s *fcxp,
+				struct bfi_fcxp_send_rsp_s *fcxp_rsp);
+static void	hal_fcxp_tx_plog(struct bfa_s *bfa, u32 reqlen,
+				struct bfa_fcxp_s *fcxp, struct fchs_s *fchs);
+static void	bfa_fcxp_qresume(void *cbarg);
+static void	bfa_fcxp_queue(struct bfa_fcxp_s *fcxp,
+				struct bfi_fcxp_send_req_s *send_req);
+
+/**
+ * forward declarations for LPS functions
+ */
+static void bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len,
+				u32 *dm_len);
+static void bfa_lps_attach(struct bfa_s *bfa, void *bfad,
+				struct bfa_iocfc_cfg_s *cfg,
+				struct bfa_meminfo_s *meminfo,
+				struct bfa_pcidev_s *pcidev);
+static void bfa_lps_detach(struct bfa_s *bfa);
+static void bfa_lps_start(struct bfa_s *bfa);
+static void bfa_lps_stop(struct bfa_s *bfa);
+static void bfa_lps_iocdisable(struct bfa_s *bfa);
+static void bfa_lps_login_rsp(struct bfa_s *bfa,
+				struct bfi_lps_login_rsp_s *rsp);
+static void bfa_lps_logout_rsp(struct bfa_s *bfa,
+				struct bfi_lps_logout_rsp_s *rsp);
+static void bfa_lps_reqq_resume(void *lps_arg);
+static void bfa_lps_free(struct bfa_lps_s *lps);
+static void bfa_lps_send_login(struct bfa_lps_s *lps);
+static void bfa_lps_send_logout(struct bfa_lps_s *lps);
+static void bfa_lps_login_comp(struct bfa_lps_s *lps);
+static void bfa_lps_logout_comp(struct bfa_lps_s *lps);
+static void bfa_lps_cvl_event(struct bfa_lps_s *lps);
+
+/**
+ * forward declaration for LPS state machine
+ */
+static void bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event);
+static void bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event);
+static void bfa_lps_sm_loginwait(struct bfa_lps_s *lps, enum bfa_lps_event
+					event);
+static void bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event);
+static void bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event);
+static void bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event
+					event);
+
+/**
+ * forward declaration for FC Port functions
+ */
+static bfa_boolean_t bfa_fcport_send_enable(struct bfa_fcport_s *fcport);
+static bfa_boolean_t bfa_fcport_send_disable(struct bfa_fcport_s *fcport);
+static void bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport);
+static void bfa_fcport_reset_linkinfo(struct bfa_fcport_s *fcport);
+static void bfa_fcport_set_wwns(struct bfa_fcport_s *fcport);
+static void __bfa_cb_fcport_event(void *cbarg, bfa_boolean_t complete);
+static void bfa_fcport_scn(struct bfa_fcport_s *fcport,
+			enum bfa_port_linkstate event, bfa_boolean_t trunk);
+static void bfa_fcport_queue_cb(struct bfa_fcport_ln_s *ln,
+				enum bfa_port_linkstate event);
+static void __bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete);
+static void bfa_fcport_stats_get_timeout(void *cbarg);
+static void bfa_fcport_stats_clr_timeout(void *cbarg);
+static void bfa_trunk_iocdisable(struct bfa_s *bfa);
+
+/**
+ * forward declaration for FC PORT state machine
+ */
+static void     bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport,
+					enum bfa_fcport_sm_event event);
+static void     bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport,
+					enum bfa_fcport_sm_event event);
+static void     bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport,
+					enum bfa_fcport_sm_event event);
+static void     bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport,
+					enum bfa_fcport_sm_event event);
+static void     bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport,
+					enum bfa_fcport_sm_event event);
+static void     bfa_fcport_sm_disabling(struct bfa_fcport_s *fcport,
+					enum bfa_fcport_sm_event event);
+static void     bfa_fcport_sm_disabling_qwait(struct bfa_fcport_s *fcport,
+					enum bfa_fcport_sm_event event);
+static void     bfa_fcport_sm_toggling_qwait(struct bfa_fcport_s *fcport,
+					enum bfa_fcport_sm_event event);
+static void     bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport,
+					enum bfa_fcport_sm_event event);
+static void     bfa_fcport_sm_stopped(struct bfa_fcport_s *fcport,
+					enum bfa_fcport_sm_event event);
+static void     bfa_fcport_sm_iocdown(struct bfa_fcport_s *fcport,
+					enum bfa_fcport_sm_event event);
+static void     bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport,
+					enum bfa_fcport_sm_event event);
+
+static void     bfa_fcport_ln_sm_dn(struct bfa_fcport_ln_s *ln,
+					enum bfa_fcport_ln_sm_event event);
+static void     bfa_fcport_ln_sm_dn_nf(struct bfa_fcport_ln_s *ln,
+					enum bfa_fcport_ln_sm_event event);
+static void     bfa_fcport_ln_sm_dn_up_nf(struct bfa_fcport_ln_s *ln,
+					enum bfa_fcport_ln_sm_event event);
+static void     bfa_fcport_ln_sm_up(struct bfa_fcport_ln_s *ln,
+					enum bfa_fcport_ln_sm_event event);
+static void     bfa_fcport_ln_sm_up_nf(struct bfa_fcport_ln_s *ln,
+					enum bfa_fcport_ln_sm_event event);
+static void     bfa_fcport_ln_sm_up_dn_nf(struct bfa_fcport_ln_s *ln,
+					enum bfa_fcport_ln_sm_event event);
+static void     bfa_fcport_ln_sm_up_dn_up_nf(struct bfa_fcport_ln_s *ln,
+					enum bfa_fcport_ln_sm_event event);
+
+static struct bfa_sm_table_s hal_port_sm_table[] = {
+	{BFA_SM(bfa_fcport_sm_uninit), BFA_PORT_ST_UNINIT},
+	{BFA_SM(bfa_fcport_sm_enabling_qwait), BFA_PORT_ST_ENABLING_QWAIT},
+	{BFA_SM(bfa_fcport_sm_enabling), BFA_PORT_ST_ENABLING},
+	{BFA_SM(bfa_fcport_sm_linkdown), BFA_PORT_ST_LINKDOWN},
+	{BFA_SM(bfa_fcport_sm_linkup), BFA_PORT_ST_LINKUP},
+	{BFA_SM(bfa_fcport_sm_disabling_qwait), BFA_PORT_ST_DISABLING_QWAIT},
+	{BFA_SM(bfa_fcport_sm_toggling_qwait), BFA_PORT_ST_TOGGLING_QWAIT},
+	{BFA_SM(bfa_fcport_sm_disabling), BFA_PORT_ST_DISABLING},
+	{BFA_SM(bfa_fcport_sm_disabled), BFA_PORT_ST_DISABLED},
+	{BFA_SM(bfa_fcport_sm_stopped), BFA_PORT_ST_STOPPED},
+	{BFA_SM(bfa_fcport_sm_iocdown), BFA_PORT_ST_IOCDOWN},
+	{BFA_SM(bfa_fcport_sm_iocfail), BFA_PORT_ST_IOCDOWN},
+};
+
+
+/**
+ * forward declaration for RPORT related functions
+ */
+static struct bfa_rport_s *bfa_rport_alloc(struct bfa_rport_mod_s *rp_mod);
+static void		bfa_rport_free(struct bfa_rport_s *rport);
+static bfa_boolean_t	bfa_rport_send_fwcreate(struct bfa_rport_s *rp);
+static bfa_boolean_t	bfa_rport_send_fwdelete(struct bfa_rport_s *rp);
+static bfa_boolean_t	bfa_rport_send_fwspeed(struct bfa_rport_s *rp);
+static void		__bfa_cb_rport_online(void *cbarg,
+						bfa_boolean_t complete);
+static void		__bfa_cb_rport_offline(void *cbarg,
+						bfa_boolean_t complete);
+
+/**
+ * forward declaration for RPORT state machine
+ */
+static void     bfa_rport_sm_uninit(struct bfa_rport_s *rp,
+					enum bfa_rport_event event);
+static void     bfa_rport_sm_created(struct bfa_rport_s *rp,
+					enum bfa_rport_event event);
+static void     bfa_rport_sm_fwcreate(struct bfa_rport_s *rp,
+					enum bfa_rport_event event);
+static void     bfa_rport_sm_online(struct bfa_rport_s *rp,
+					enum bfa_rport_event event);
+static void     bfa_rport_sm_fwdelete(struct bfa_rport_s *rp,
+					enum bfa_rport_event event);
+static void     bfa_rport_sm_offline(struct bfa_rport_s *rp,
+					enum bfa_rport_event event);
+static void     bfa_rport_sm_deleting(struct bfa_rport_s *rp,
+					enum bfa_rport_event event);
+static void     bfa_rport_sm_offline_pending(struct bfa_rport_s *rp,
+					enum bfa_rport_event event);
+static void     bfa_rport_sm_delete_pending(struct bfa_rport_s *rp,
+					enum bfa_rport_event event);
+static void     bfa_rport_sm_iocdisable(struct bfa_rport_s *rp,
+					enum bfa_rport_event event);
+static void     bfa_rport_sm_fwcreate_qfull(struct bfa_rport_s *rp,
+					enum bfa_rport_event event);
+static void     bfa_rport_sm_fwdelete_qfull(struct bfa_rport_s *rp,
+					enum bfa_rport_event event);
+static void     bfa_rport_sm_deleting_qfull(struct bfa_rport_s *rp,
+					enum bfa_rport_event event);
+
+/**
+ * PLOG related definitions
+ */
+static int
+plkd_validate_logrec(struct bfa_plog_rec_s *pl_rec)
+{
+	if ((pl_rec->log_type != BFA_PL_LOG_TYPE_INT) &&
+		(pl_rec->log_type != BFA_PL_LOG_TYPE_STRING))
+		return 1;
+
+	if ((pl_rec->log_type != BFA_PL_LOG_TYPE_INT) &&
+		(pl_rec->log_num_ints > BFA_PL_INT_LOG_SZ))
+		return 1;
+
+	return 0;
+}
+
+static void
+bfa_plog_add(struct bfa_plog_s *plog, struct bfa_plog_rec_s *pl_rec)
+{
+	u16 tail;
+	struct bfa_plog_rec_s *pl_recp;
+
+	if (plog->plog_enabled == 0)
+		return;
+
+	if (plkd_validate_logrec(pl_rec)) {
+		bfa_assert(0);
+		return;
+	}
+
+	tail = plog->tail;
+
+	pl_recp = &(plog->plog_recs[tail]);
+
+	bfa_os_memcpy(pl_recp, pl_rec, sizeof(struct bfa_plog_rec_s));
+
+	pl_recp->tv = bfa_os_get_log_time();
+	BFA_PL_LOG_REC_INCR(plog->tail);
+
+	if (plog->head == plog->tail)
+		BFA_PL_LOG_REC_INCR(plog->head);
+}
+
+void
+bfa_plog_init(struct bfa_plog_s *plog)
+{
+	bfa_os_memset((char *)plog, 0, sizeof(struct bfa_plog_s));
+
+	bfa_os_memcpy(plog->plog_sig, BFA_PL_SIG_STR, BFA_PL_SIG_LEN);
+	plog->head = plog->tail = 0;
+	plog->plog_enabled = 1;
+}
+
+void
+bfa_plog_str(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
+		enum bfa_plog_eid event,
+		u16 misc, char *log_str)
+{
+	struct bfa_plog_rec_s  lp;
+
+	if (plog->plog_enabled) {
+		bfa_os_memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
+		lp.mid = mid;
+		lp.eid = event;
+		lp.log_type = BFA_PL_LOG_TYPE_STRING;
+		lp.misc = misc;
+		strncpy(lp.log_entry.string_log, log_str,
+			BFA_PL_STRING_LOG_SZ - 1);
+		lp.log_entry.string_log[BFA_PL_STRING_LOG_SZ - 1] = '\0';
+		bfa_plog_add(plog, &lp);
+	}
+}
+
+void
+bfa_plog_intarr(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
+		enum bfa_plog_eid event,
+		u16 misc, u32 *intarr, u32 num_ints)
+{
+	struct bfa_plog_rec_s  lp;
+	u32 i;
+
+	if (num_ints > BFA_PL_INT_LOG_SZ)
+		num_ints = BFA_PL_INT_LOG_SZ;
+
+	if (plog->plog_enabled) {
+		bfa_os_memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
+		lp.mid = mid;
+		lp.eid = event;
+		lp.log_type = BFA_PL_LOG_TYPE_INT;
+		lp.misc = misc;
+
+		for (i = 0; i < num_ints; i++)
+			bfa_os_assign(lp.log_entry.int_log[i],
+					intarr[i]);
+
+		lp.log_num_ints = (u8) num_ints;
+
+		bfa_plog_add(plog, &lp);
+	}
+}
+
+void
+bfa_plog_fchdr(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
+			enum bfa_plog_eid event,
+			u16 misc, struct fchs_s *fchdr)
+{
+	struct bfa_plog_rec_s  lp;
+	u32	*tmp_int = (u32 *) fchdr;
+	u32	ints[BFA_PL_INT_LOG_SZ];
+
+	if (plog->plog_enabled) {
+		bfa_os_memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
+
+		ints[0] = tmp_int[0];
+		ints[1] = tmp_int[1];
+		ints[2] = tmp_int[4];
+
+		bfa_plog_intarr(plog, mid, event, misc, ints, 3);
+	}
+}
+
+void
+bfa_plog_fchdr_and_pl(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
+		      enum bfa_plog_eid event, u16 misc, struct fchs_s *fchdr,
+		      u32 pld_w0)
+{
+	struct bfa_plog_rec_s  lp;
+	u32	*tmp_int = (u32 *) fchdr;
+	u32	ints[BFA_PL_INT_LOG_SZ];
+
+	if (plog->plog_enabled) {
+		bfa_os_memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
+
+		ints[0] = tmp_int[0];
+		ints[1] = tmp_int[1];
+		ints[2] = tmp_int[4];
+		ints[3] = pld_w0;
+
+		bfa_plog_intarr(plog, mid, event, misc, ints, 4);
+	}
+}
+
+void
+bfa_plog_clear(struct bfa_plog_s *plog)
+{
+	plog->head = plog->tail = 0;
+}
+
+void
+bfa_plog_enable(struct bfa_plog_s *plog)
+{
+	plog->plog_enabled = 1;
+}
+
+void
+bfa_plog_disable(struct bfa_plog_s *plog)
+{
+	plog->plog_enabled = 0;
+}
+
+bfa_boolean_t
+bfa_plog_get_setting(struct bfa_plog_s *plog)
+{
+	return (bfa_boolean_t)plog->plog_enabled;
+}
+
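Editorial note, not part of the patch: the port log implemented above is a small wrap-around record buffer; bfa_plog_add() validates the record, copies it to the tail slot, timestamps it, advances the tail and, once the ring wraps, pushes the head forward as well. A minimal usage sketch, using only the helpers defined above (the module/event IDs are ones already used later in this file; the logged values are placeholders):

/* Editorial sketch, not part of the patch: typical plog usage. */
static void
plog_usage_example(struct bfa_plog_s *plog)
{
	u32 vals[3] = { 0x11, 0x22, 0x33 };	/* arbitrary example data */

	bfa_plog_init(plog);		/* clear the ring and enable logging */

	/* string record; truncated to BFA_PL_STRING_LOG_SZ - 1 characters */
	bfa_plog_str(plog, BFA_PL_MID_HAL, BFA_PL_EID_PORT_ST_CHANGE, 0,
		     "example state change");

	/* integer-array record; capped at BFA_PL_INT_LOG_SZ entries */
	bfa_plog_intarr(plog, BFA_PL_MID_HAL, BFA_PL_EID_PORT_ST_CHANGE, 0,
			vals, 3);

	if (!bfa_plog_get_setting(plog))	/* logging can be toggled */
		bfa_plog_enable(plog);
}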
+/**
+ *  fcxp_pvt BFA FCXP private functions
+ */
+
+static void
+claim_fcxp_req_rsp_mem(struct bfa_fcxp_mod_s *mod, struct bfa_meminfo_s *mi)
+{
+	u8	       *dm_kva = NULL;
+	u64	dm_pa;
+	u32	buf_pool_sz;
+
+	dm_kva = bfa_meminfo_dma_virt(mi);
+	dm_pa = bfa_meminfo_dma_phys(mi);
+
+	buf_pool_sz = mod->req_pld_sz * mod->num_fcxps;
+
+	/*
+	 * Initialize the fcxp req payload list
+	 */
+	mod->req_pld_list_kva = dm_kva;
+	mod->req_pld_list_pa = dm_pa;
+	dm_kva += buf_pool_sz;
+	dm_pa += buf_pool_sz;
+	bfa_os_memset(mod->req_pld_list_kva, 0, buf_pool_sz);
+
+	/*
+	 * Initialize the fcxp rsp payload list
+	 */
+	buf_pool_sz = mod->rsp_pld_sz * mod->num_fcxps;
+	mod->rsp_pld_list_kva = dm_kva;
+	mod->rsp_pld_list_pa = dm_pa;
+	dm_kva += buf_pool_sz;
+	dm_pa += buf_pool_sz;
+	bfa_os_memset(mod->rsp_pld_list_kva, 0, buf_pool_sz);
+
+	bfa_meminfo_dma_virt(mi) = dm_kva;
+	bfa_meminfo_dma_phys(mi) = dm_pa;
+}
+
+static void
+claim_fcxps_mem(struct bfa_fcxp_mod_s *mod, struct bfa_meminfo_s *mi)
+{
+	u16	i;
+	struct bfa_fcxp_s *fcxp;
+
+	fcxp = (struct bfa_fcxp_s *) bfa_meminfo_kva(mi);
+	bfa_os_memset(fcxp, 0, sizeof(struct bfa_fcxp_s) * mod->num_fcxps);
+
+	INIT_LIST_HEAD(&mod->fcxp_free_q);
+	INIT_LIST_HEAD(&mod->fcxp_active_q);
+
+	mod->fcxp_list = fcxp;
+
+	for (i = 0; i < mod->num_fcxps; i++) {
+		fcxp->fcxp_mod = mod;
+		fcxp->fcxp_tag = i;
+
+		list_add_tail(&fcxp->qe, &mod->fcxp_free_q);
+		bfa_reqq_winit(&fcxp->reqq_wqe, bfa_fcxp_qresume, fcxp);
+		fcxp->reqq_waiting = BFA_FALSE;
+
+		fcxp = fcxp + 1;
+	}
+
+	bfa_meminfo_kva(mi) = (void *)fcxp;
+}
+
+static void
+bfa_fcxp_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len,
+		 u32 *dm_len)
+{
+	u16	num_fcxp_reqs = cfg->fwcfg.num_fcxp_reqs;
+
+	if (num_fcxp_reqs == 0)
+		return;
+
+	/*
+	 * Account for req/rsp payload
+	 */
+	*dm_len += BFA_FCXP_MAX_IBUF_SZ * num_fcxp_reqs;
+	if (cfg->drvcfg.min_cfg)
+		*dm_len += BFA_FCXP_MAX_IBUF_SZ * num_fcxp_reqs;
+	else
+		*dm_len += BFA_FCXP_MAX_LBUF_SZ * num_fcxp_reqs;
+
+	/*
+	 * Account for fcxp structs
+	 */
+	*ndm_len += sizeof(struct bfa_fcxp_s) * num_fcxp_reqs;
+}
+
+static void
+bfa_fcxp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
+		struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
+{
+	struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
+
+	bfa_os_memset(mod, 0, sizeof(struct bfa_fcxp_mod_s));
+	mod->bfa = bfa;
+	mod->num_fcxps = cfg->fwcfg.num_fcxp_reqs;
+
+	/**
+	 * Initialize FCXP request and response payload sizes.
+	 */
+	mod->req_pld_sz = mod->rsp_pld_sz = BFA_FCXP_MAX_IBUF_SZ;
+	if (!cfg->drvcfg.min_cfg)
+		mod->rsp_pld_sz = BFA_FCXP_MAX_LBUF_SZ;
+
+	INIT_LIST_HEAD(&mod->wait_q);
+
+	claim_fcxp_req_rsp_mem(mod, meminfo);
+	claim_fcxps_mem(mod, meminfo);
+}
+
+static void
+bfa_fcxp_detach(struct bfa_s *bfa)
+{
+}
+
+static void
+bfa_fcxp_start(struct bfa_s *bfa)
+{
+}
+
+static void
+bfa_fcxp_stop(struct bfa_s *bfa)
+{
+}
+
+static void
+bfa_fcxp_iocdisable(struct bfa_s *bfa)
+{
+	struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
+	struct bfa_fcxp_s *fcxp;
+	struct list_head	      *qe, *qen;
+
+	list_for_each_safe(qe, qen, &mod->fcxp_active_q) {
+		fcxp = (struct bfa_fcxp_s *) qe;
+		if (fcxp->caller == NULL) {
+			fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
+					BFA_STATUS_IOC_FAILURE, 0, 0, NULL);
+			bfa_fcxp_free(fcxp);
+		} else {
+			fcxp->rsp_status = BFA_STATUS_IOC_FAILURE;
+			bfa_cb_queue(bfa, &fcxp->hcb_qe,
+				     __bfa_fcxp_send_cbfn, fcxp);
+		}
+	}
+}
+
+static struct bfa_fcxp_s *
+bfa_fcxp_get(struct bfa_fcxp_mod_s *fm)
+{
+	struct bfa_fcxp_s *fcxp;
+
+	bfa_q_deq(&fm->fcxp_free_q, &fcxp);
+
+	if (fcxp)
+		list_add_tail(&fcxp->qe, &fm->fcxp_active_q);
+
+	return fcxp;
+}
+
+static void
+bfa_fcxp_init_reqrsp(struct bfa_fcxp_s *fcxp,
+	       struct bfa_s *bfa,
+	       u8 *use_ibuf,
+	       u32 *nr_sgles,
+	       bfa_fcxp_get_sgaddr_t *r_sga_cbfn,
+	       bfa_fcxp_get_sglen_t *r_sglen_cbfn,
+	       struct list_head *r_sgpg_q,
+	       int n_sgles,
+	       bfa_fcxp_get_sgaddr_t sga_cbfn,
+	       bfa_fcxp_get_sglen_t sglen_cbfn)
+{
+
+	bfa_assert(bfa != NULL);
+
+	bfa_trc(bfa, fcxp->fcxp_tag);
+
+	if (n_sgles == 0) {
+		*use_ibuf = 1;
+	} else {
+		bfa_assert(*sga_cbfn != NULL);
+		bfa_assert(*sglen_cbfn != NULL);
+
+		*use_ibuf = 0;
+		*r_sga_cbfn = sga_cbfn;
+		*r_sglen_cbfn = sglen_cbfn;
+
+		*nr_sgles = n_sgles;
+
+		/*
+		 * alloc required sgpgs
+		 */
+		if (n_sgles > BFI_SGE_INLINE)
+			bfa_assert(0);
+	}
+
+}
+
+static void
+bfa_fcxp_init(struct bfa_fcxp_s *fcxp,
+	       void *caller, struct bfa_s *bfa, int nreq_sgles,
+	       int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
+	       bfa_fcxp_get_sglen_t req_sglen_cbfn,
+	       bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
+	       bfa_fcxp_get_sglen_t rsp_sglen_cbfn)
+{
+
+	bfa_assert(bfa != NULL);
+
+	bfa_trc(bfa, fcxp->fcxp_tag);
+
+	fcxp->caller = caller;
+
+	bfa_fcxp_init_reqrsp(fcxp, bfa,
+		&fcxp->use_ireqbuf, &fcxp->nreq_sgles, &fcxp->req_sga_cbfn,
+		&fcxp->req_sglen_cbfn, &fcxp->req_sgpg_q,
+		nreq_sgles, req_sga_cbfn, req_sglen_cbfn);
+
+	bfa_fcxp_init_reqrsp(fcxp, bfa,
+		&fcxp->use_irspbuf, &fcxp->nrsp_sgles, &fcxp->rsp_sga_cbfn,
+		&fcxp->rsp_sglen_cbfn, &fcxp->rsp_sgpg_q,
+		nrsp_sgles, rsp_sga_cbfn, rsp_sglen_cbfn);
+
+}
+
+static void
+bfa_fcxp_put(struct bfa_fcxp_s *fcxp)
+{
+	struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
+	struct bfa_fcxp_wqe_s *wqe;
+
+	bfa_q_deq(&mod->wait_q, &wqe);
+	if (wqe) {
+		bfa_trc(mod->bfa, fcxp->fcxp_tag);
+
+		bfa_fcxp_init(fcxp, wqe->caller, wqe->bfa, wqe->nreq_sgles,
+			wqe->nrsp_sgles, wqe->req_sga_cbfn,
+			wqe->req_sglen_cbfn, wqe->rsp_sga_cbfn,
+			wqe->rsp_sglen_cbfn);
+
+		wqe->alloc_cbfn(wqe->alloc_cbarg, fcxp);
+		return;
+	}
+
+	bfa_assert(bfa_q_is_on_q(&mod->fcxp_active_q, fcxp));
+	list_del(&fcxp->qe);
+	list_add_tail(&fcxp->qe, &mod->fcxp_free_q);
+}
+
+static void
+bfa_fcxp_null_comp(void *bfad_fcxp, struct bfa_fcxp_s *fcxp, void *cbarg,
+		   bfa_status_t req_status, u32 rsp_len,
+		   u32 resid_len, struct fchs_s *rsp_fchs)
+{
+	/* discarded fcxp completion */
+}
+
+static void
+__bfa_fcxp_send_cbfn(void *cbarg, bfa_boolean_t complete)
+{
+	struct bfa_fcxp_s *fcxp = cbarg;
+
+	if (complete) {
+		fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
+				fcxp->rsp_status, fcxp->rsp_len,
+				fcxp->residue_len, &fcxp->rsp_fchs);
+	} else {
+		bfa_fcxp_free(fcxp);
+	}
+}
+
+static void
+hal_fcxp_send_comp(struct bfa_s *bfa, struct bfi_fcxp_send_rsp_s *fcxp_rsp)
+{
+	struct bfa_fcxp_mod_s	*mod = BFA_FCXP_MOD(bfa);
+	struct bfa_fcxp_s	*fcxp;
+	u16		fcxp_tag = bfa_os_ntohs(fcxp_rsp->fcxp_tag);
+
+	bfa_trc(bfa, fcxp_tag);
+
+	fcxp_rsp->rsp_len = bfa_os_ntohl(fcxp_rsp->rsp_len);
+
+	/**
+	 * @todo f/w should not set residue to non-0 when everything
+	 *	 is received.
+	 */
+	if (fcxp_rsp->req_status == BFA_STATUS_OK)
+		fcxp_rsp->residue_len = 0;
+	else
+		fcxp_rsp->residue_len = bfa_os_ntohl(fcxp_rsp->residue_len);
+
+	fcxp = BFA_FCXP_FROM_TAG(mod, fcxp_tag);
+
+	bfa_assert(fcxp->send_cbfn != NULL);
+
+	hal_fcxp_rx_plog(mod->bfa, fcxp, fcxp_rsp);
+
+	if (fcxp->send_cbfn != NULL) {
+		bfa_trc(mod->bfa, (NULL == fcxp->caller));
+		if (fcxp->caller == NULL) {
+			fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
+					fcxp_rsp->req_status, fcxp_rsp->rsp_len,
+					fcxp_rsp->residue_len, &fcxp_rsp->fchs);
+			/*
+			 * fcxp automatically freed on return from the callback
+			 */
+			bfa_fcxp_free(fcxp);
+		} else {
+			fcxp->rsp_status = fcxp_rsp->req_status;
+			fcxp->rsp_len = fcxp_rsp->rsp_len;
+			fcxp->residue_len = fcxp_rsp->residue_len;
+			fcxp->rsp_fchs = fcxp_rsp->fchs;
+
+			bfa_cb_queue(bfa, &fcxp->hcb_qe,
+					__bfa_fcxp_send_cbfn, fcxp);
+		}
+	} else {
+		bfa_trc(bfa, (NULL == fcxp->send_cbfn));
+	}
+}
+
+static void
+hal_fcxp_set_local_sges(struct bfi_sge_s *sge, u32 reqlen, u64 req_pa)
+{
+	union bfi_addr_u      sga_zero = { {0} };
+
+	sge->sg_len = reqlen;
+	sge->flags = BFI_SGE_DATA_LAST;
+	bfa_dma_addr_set(sge[0].sga, req_pa);
+	bfa_sge_to_be(sge);
+	sge++;
+
+	sge->sga = sga_zero;
+	sge->sg_len = reqlen;
+	sge->flags = BFI_SGE_PGDLEN;
+	bfa_sge_to_be(sge);
+}
+
+static void
+hal_fcxp_tx_plog(struct bfa_s *bfa, u32 reqlen, struct bfa_fcxp_s *fcxp,
+		 struct fchs_s *fchs)
+{
+	/*
+	 * TODO: TX ox_id
+	 */
+	if (reqlen > 0) {
+		if (fcxp->use_ireqbuf) {
+			u32	pld_w0 =
+				*((u32 *) BFA_FCXP_REQ_PLD(fcxp));
+
+			bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_FCXP,
+					BFA_PL_EID_TX,
+					reqlen + sizeof(struct fchs_s), fchs,
+					pld_w0);
+		} else {
+			bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP,
+					BFA_PL_EID_TX,
+					reqlen + sizeof(struct fchs_s),
+					fchs);
+		}
+	} else {
+		bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP, BFA_PL_EID_TX,
+			       reqlen + sizeof(struct fchs_s), fchs);
+	}
+}
+
+static void
+hal_fcxp_rx_plog(struct bfa_s *bfa, struct bfa_fcxp_s *fcxp,
+		 struct bfi_fcxp_send_rsp_s *fcxp_rsp)
+{
+	if (fcxp_rsp->rsp_len > 0) {
+		if (fcxp->use_irspbuf) {
+			u32	pld_w0 =
+				*((u32 *) BFA_FCXP_RSP_PLD(fcxp));
+
+			bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_FCXP,
+					      BFA_PL_EID_RX,
+					      (u16) fcxp_rsp->rsp_len,
+					      &fcxp_rsp->fchs, pld_w0);
+		} else {
+			bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP,
+				       BFA_PL_EID_RX,
+				       (u16) fcxp_rsp->rsp_len,
+				       &fcxp_rsp->fchs);
+		}
+	} else {
+		bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP, BFA_PL_EID_RX,
+			       (u16) fcxp_rsp->rsp_len, &fcxp_rsp->fchs);
+	}
+}
+
+/**
+ * Handler to resume sending fcxp when space is available in the CPE queue.
+ */
+static void
+bfa_fcxp_qresume(void *cbarg)
+{
+	struct bfa_fcxp_s		*fcxp = cbarg;
+	struct bfa_s			*bfa = fcxp->fcxp_mod->bfa;
+	struct bfi_fcxp_send_req_s	*send_req;
+
+	fcxp->reqq_waiting = BFA_FALSE;
+	send_req = bfa_reqq_next(bfa, BFA_REQQ_FCXP);
+	bfa_fcxp_queue(fcxp, send_req);
+}
+
+/**
+ * Queue fcxp send request to firmware.
+ */
+static void
+bfa_fcxp_queue(struct bfa_fcxp_s *fcxp, struct bfi_fcxp_send_req_s *send_req)
+{
+	struct bfa_s			*bfa = fcxp->fcxp_mod->bfa;
+	struct bfa_fcxp_req_info_s	*reqi = &fcxp->req_info;
+	struct bfa_fcxp_rsp_info_s	*rspi = &fcxp->rsp_info;
+	struct bfa_rport_s		*rport = reqi->bfa_rport;
+
+	bfi_h2i_set(send_req->mh, BFI_MC_FCXP, BFI_FCXP_H2I_SEND_REQ,
+		    bfa_lpuid(bfa));
+
+	send_req->fcxp_tag = bfa_os_htons(fcxp->fcxp_tag);
+	if (rport) {
+		send_req->rport_fw_hndl = rport->fw_handle;
+		send_req->max_frmsz = bfa_os_htons(rport->rport_info.max_frmsz);
+		if (send_req->max_frmsz == 0)
+			send_req->max_frmsz = bfa_os_htons(FC_MAX_PDUSZ);
+	} else {
+		send_req->rport_fw_hndl = 0;
+		send_req->max_frmsz = bfa_os_htons(FC_MAX_PDUSZ);
+	}
+
+	send_req->vf_id = bfa_os_htons(reqi->vf_id);
+	send_req->lp_tag = reqi->lp_tag;
+	send_req->class = reqi->class;
+	send_req->rsp_timeout = rspi->rsp_timeout;
+	send_req->cts = reqi->cts;
+	send_req->fchs = reqi->fchs;
+
+	send_req->req_len = bfa_os_htonl(reqi->req_tot_len);
+	send_req->rsp_maxlen = bfa_os_htonl(rspi->rsp_maxlen);
+
+	/*
+	 * setup req sgles
+	 */
+	if (fcxp->use_ireqbuf == 1) {
+		hal_fcxp_set_local_sges(send_req->req_sge, reqi->req_tot_len,
+					BFA_FCXP_REQ_PLD_PA(fcxp));
+	} else {
+		if (fcxp->nreq_sgles > 0) {
+			bfa_assert(fcxp->nreq_sgles == 1);
+			hal_fcxp_set_local_sges(send_req->req_sge,
+						reqi->req_tot_len,
+						fcxp->req_sga_cbfn(fcxp->caller,
+								   0));
+		} else {
+			bfa_assert(reqi->req_tot_len == 0);
+			hal_fcxp_set_local_sges(send_req->rsp_sge, 0, 0);
+		}
+	}
+
+	/*
+	 * setup rsp sgles
+	 */
+	if (fcxp->use_irspbuf == 1) {
+		bfa_assert(rspi->rsp_maxlen <= BFA_FCXP_MAX_LBUF_SZ);
+
+		hal_fcxp_set_local_sges(send_req->rsp_sge, rspi->rsp_maxlen,
+					BFA_FCXP_RSP_PLD_PA(fcxp));
+
+	} else {
+		if (fcxp->nrsp_sgles > 0) {
+			bfa_assert(fcxp->nrsp_sgles == 1);
+			hal_fcxp_set_local_sges(send_req->rsp_sge,
+						rspi->rsp_maxlen,
+						fcxp->rsp_sga_cbfn(fcxp->caller,
+								   0));
+		} else {
+			bfa_assert(rspi->rsp_maxlen == 0);
+			hal_fcxp_set_local_sges(send_req->rsp_sge, 0, 0);
+		}
+	}
+
+	hal_fcxp_tx_plog(bfa, reqi->req_tot_len, fcxp, &reqi->fchs);
+
+	bfa_reqq_produce(bfa, BFA_REQQ_FCXP);
+
+	bfa_trc(bfa, bfa_reqq_pi(bfa, BFA_REQQ_FCXP));
+	bfa_trc(bfa, bfa_reqq_ci(bfa, BFA_REQQ_FCXP));
+}
+
+/**
+ *  hal_fcxp_api BFA FCXP API
+ */
+
+/**
+ * Allocate an FCXP instance to send a response or to send a request
+ * that has a response. Request/response buffers are allocated by caller.
+ *
+ * @param[in]	bfa		BFA instance
+ * @param[in]	nreq_sgles	Number of SG elements required for request
+ *				buffer. 0, if fcxp internal buffers are	used.
+ *				Use bfa_fcxp_get_reqbuf() to get the
+ *				internal req buffer.
+ * @param[in]	req_sgles	SG elements describing request buffer. Will be
+ *				copied in by BFA and hence can be freed on
+ *				return from this function.
+ * @param[in]	get_req_sga	function ptr to be called to get a request SG
+ *				Address (given the sge index).
+ * @param[in]	get_req_sglen	function ptr to be called to get a request SG
+ *				len (given the sge index).
+ * @param[in]	get_rsp_sga	function ptr to be called to get a response SG
+ *				Address (given the sge index).
+ * @param[in]	get_rsp_sglen	function ptr to be called to get a response SG
+ *				len (given the sge index).
+ *
+ * @return FCXP instance. NULL on failure.
+ */
+struct bfa_fcxp_s *
+bfa_fcxp_alloc(void *caller, struct bfa_s *bfa, int nreq_sgles,
+	       int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
+	       bfa_fcxp_get_sglen_t req_sglen_cbfn,
+	       bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
+	       bfa_fcxp_get_sglen_t rsp_sglen_cbfn)
+{
+	struct bfa_fcxp_s *fcxp = NULL;
+
+	bfa_assert(bfa != NULL);
+
+	fcxp = bfa_fcxp_get(BFA_FCXP_MOD(bfa));
+	if (fcxp == NULL)
+		return NULL;
+
+	bfa_trc(bfa, fcxp->fcxp_tag);
+
+	bfa_fcxp_init(fcxp, caller, bfa, nreq_sgles, nrsp_sgles, req_sga_cbfn,
+			req_sglen_cbfn, rsp_sga_cbfn, rsp_sglen_cbfn);
+
+	return fcxp;
+}
+
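Editorial note, not part of the patch: for callers that do not supply scatter-gather callbacks, passing nreq_sgles and nrsp_sgles of 0 makes the FCXP use the internal request/response payload buffers claimed in claim_fcxp_req_rsp_mem(). A minimal sketch of such an allocation (the helper name is made up):

/* Editorial sketch, not part of the patch: allocate an FCXP that uses
 * the module's internal payload buffers (no caller-provided SGEs).
 * Pass a non-NULL caller so the completion callback owns the free
 * (see hal_fcxp_send_comp() below).
 */
static struct bfa_fcxp_s *
fcxp_alloc_internal(struct bfa_s *bfa, void *caller)
{
	struct bfa_fcxp_s *fcxp;
	void *req;

	fcxp = bfa_fcxp_alloc(caller, bfa, 0, 0, NULL, NULL, NULL, NULL);
	if (fcxp == NULL)
		return NULL;	/* free list empty; see bfa_fcxp_alloc_wait() */

	req = bfa_fcxp_get_reqbuf(fcxp);	/* internal request payload */
	bfa_os_memset(req, 0, bfa_fcxp_get_reqbufsz(fcxp));

	return fcxp;
}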
+/**
+ * Get the internal request buffer pointer
+ *
+ * @param[in]	fcxp	BFA fcxp pointer
+ *
+ * @return		pointer to the internal request buffer
+ */
+void *
+bfa_fcxp_get_reqbuf(struct bfa_fcxp_s *fcxp)
+{
+	struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
+	void	*reqbuf;
+
+	bfa_assert(fcxp->use_ireqbuf == 1);
+	reqbuf = ((u8 *)mod->req_pld_list_kva) +
+		fcxp->fcxp_tag * mod->req_pld_sz;
+	return reqbuf;
+}
+
+u32
+bfa_fcxp_get_reqbufsz(struct bfa_fcxp_s *fcxp)
+{
+	struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
+
+	return mod->req_pld_sz;
+}
+
+/**
+ * Get the internal response buffer pointer
+ *
+ * @param[in]	fcxp	BFA fcxp pointer
+ *
+ * @return		pointer to the internal response buffer
+ */
+void *
+bfa_fcxp_get_rspbuf(struct bfa_fcxp_s *fcxp)
+{
+	struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
+	void	*rspbuf;
+
+	bfa_assert(fcxp->use_irspbuf == 1);
+
+	rspbuf = ((u8 *)mod->rsp_pld_list_kva) +
+		fcxp->fcxp_tag * mod->rsp_pld_sz;
+	return rspbuf;
+}
+
+/**
+ *		Free the BFA FCXP
+ *
+ * @param[in]	fcxp			BFA fcxp pointer
+ *
+ * @return		void
+ */
+void
+bfa_fcxp_free(struct bfa_fcxp_s *fcxp)
+{
+	struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
+
+	bfa_assert(fcxp != NULL);
+	bfa_trc(mod->bfa, fcxp->fcxp_tag);
+	bfa_fcxp_put(fcxp);
+}
+
+/**
+ * Send a FCXP request
+ *
+ * @param[in]	fcxp	BFA fcxp pointer
+ * @param[in]	rport	BFA rport pointer. Could be left NULL for WKA rports
+ * @param[in]	vf_id	virtual Fabric ID
+ * @param[in]	lp_tag	lport tag
+ * @param[in]	cts	use Continuous sequence
+ * @param[in]	cos	fc Class of Service
+ * @param[in]	reqlen	request length, does not include FCHS length
+ * @param[in]	fchs	fc Header Pointer. The header content will be copied
+ *			in by BFA.
+ *
+ * @param[in]	cbfn	call back function to be called on receiving
+ *								the response
+ * @param[in]	cbarg	arg for cbfn
+ * @param[in]	rsp_timeout
+ *			response timeout
+ *
+ * @return		void
+ */
+void
+bfa_fcxp_send(struct bfa_fcxp_s *fcxp, struct bfa_rport_s *rport,
+	      u16 vf_id, u8 lp_tag, bfa_boolean_t cts, enum fc_cos cos,
+	      u32 reqlen, struct fchs_s *fchs, bfa_cb_fcxp_send_t cbfn,
+	      void *cbarg, u32 rsp_maxlen, u8 rsp_timeout)
+{
+	struct bfa_s			*bfa  = fcxp->fcxp_mod->bfa;
+	struct bfa_fcxp_req_info_s	*reqi = &fcxp->req_info;
+	struct bfa_fcxp_rsp_info_s	*rspi = &fcxp->rsp_info;
+	struct bfi_fcxp_send_req_s	*send_req;
+
+	bfa_trc(bfa, fcxp->fcxp_tag);
+
+	/**
+	 * setup request/response info
+	 */
+	reqi->bfa_rport = rport;
+	reqi->vf_id = vf_id;
+	reqi->lp_tag = lp_tag;
+	reqi->class = cos;
+	rspi->rsp_timeout = rsp_timeout;
+	reqi->cts = cts;
+	reqi->fchs = *fchs;
+	reqi->req_tot_len = reqlen;
+	rspi->rsp_maxlen = rsp_maxlen;
+	fcxp->send_cbfn = cbfn ? cbfn : bfa_fcxp_null_comp;
+	fcxp->send_cbarg = cbarg;
+
+	/**
+	 * If no room in CPE queue, wait for space in request queue
+	 */
+	send_req = bfa_reqq_next(bfa, BFA_REQQ_FCXP);
+	if (!send_req) {
+		bfa_trc(bfa, fcxp->fcxp_tag);
+		fcxp->reqq_waiting = BFA_TRUE;
+		bfa_reqq_wait(bfa, BFA_REQQ_FCXP, &fcxp->reqq_wqe);
+		return;
+	}
+
+	bfa_fcxp_queue(fcxp, send_req);
+}
+
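Editorial note, not part of the patch: if the request queue is full, bfa_fcxp_send() parks the FCXP on the request-queue wait list and bfa_fcxp_qresume() re-issues it once space frees up, so the caller never sees a failure here. A hedged sketch of issuing a request and handling its completion; the FC header, class-of-service constant, timeout and tag values are placeholders, and the FCXP is assumed to have been allocated with internal buffers and a non-NULL caller so the callback owns the free:

/* Editorial sketch, not part of the patch. */
static void
example_fcxp_done(void *caller, struct bfa_fcxp_s *fcxp, void *cbarg,
		  bfa_status_t req_status, u32 rsp_len, u32 resid_len,
		  struct fchs_s *rsp_fchs)
{
	if (req_status == BFA_STATUS_OK)
		(void) bfa_fcxp_get_rspbuf(fcxp);	/* parse response here */

	bfa_fcxp_free(fcxp);	/* return the FCXP to the free list */
}

static void
example_fcxp_send(struct bfa_s *bfa, struct bfa_fcxp_s *fcxp,
		  struct bfa_rport_s *rport, struct fchs_s *fchs, u32 reqlen)
{
	bfa_fcxp_send(fcxp, rport, 0 /* vf_id */, 0 /* lp_tag */,
		      BFA_FALSE /* cts */, FC_CLASS_3 /* assumed constant */,
		      reqlen, fchs, example_fcxp_done, NULL /* cbarg */,
		      bfa_fcxp_get_maxrsp(bfa), 8 /* rsp_timeout, seconds */);
}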
+/**
+ * Abort a BFA FCXP
+ *
+ * @param[in]	fcxp	BFA fcxp pointer
+ *
+ * @return		bfa_status_t
+ */
+bfa_status_t
+bfa_fcxp_abort(struct bfa_fcxp_s *fcxp)
+{
+	bfa_trc(fcxp->fcxp_mod->bfa, fcxp->fcxp_tag);
+	bfa_assert(0);
+	return BFA_STATUS_OK;
+}
+
+void
+bfa_fcxp_alloc_wait(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe,
+	       bfa_fcxp_alloc_cbfn_t alloc_cbfn, void *alloc_cbarg,
+	       void *caller, int nreq_sgles,
+	       int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
+	       bfa_fcxp_get_sglen_t req_sglen_cbfn,
+	       bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
+	       bfa_fcxp_get_sglen_t rsp_sglen_cbfn)
+{
+	struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
+
+	bfa_assert(list_empty(&mod->fcxp_free_q));
+
+	wqe->alloc_cbfn = alloc_cbfn;
+	wqe->alloc_cbarg = alloc_cbarg;
+	wqe->caller = caller;
+	wqe->bfa = bfa;
+	wqe->nreq_sgles = nreq_sgles;
+	wqe->nrsp_sgles = nrsp_sgles;
+	wqe->req_sga_cbfn = req_sga_cbfn;
+	wqe->req_sglen_cbfn = req_sglen_cbfn;
+	wqe->rsp_sga_cbfn = rsp_sga_cbfn;
+	wqe->rsp_sglen_cbfn = rsp_sglen_cbfn;
+
+	list_add_tail(&wqe->qe, &mod->wait_q);
+}
+
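Editorial note, not part of the patch: bfa_fcxp_alloc_wait() is the overflow path for the allocator; it may only be called when the free list is empty, and bfa_fcxp_put() hands the next freed FCXP directly to the oldest waiter through its alloc_cbfn. A hedged sketch of the pattern (the callback signature is assumed from the wqe usage in bfa_fcxp_put()):

/* Editorial sketch, not part of the patch. */
static void
example_fcxp_ready(void *cbarg, struct bfa_fcxp_s *fcxp)
{
	/* an FCXP became available; continue with bfa_fcxp_send() here */
}

static void
example_fcxp_get(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe, void *cbarg)
{
	struct bfa_fcxp_s *fcxp;

	fcxp = bfa_fcxp_alloc(cbarg, bfa, 0, 0, NULL, NULL, NULL, NULL);
	if (fcxp) {
		example_fcxp_ready(cbarg, fcxp);
		return;
	}

	/* free list empty: queue a wait entry; use bfa_fcxp_walloc_cancel()
	 * if the caller goes away before an FCXP becomes available.
	 */
	bfa_fcxp_alloc_wait(bfa, wqe, example_fcxp_ready, cbarg,
			    cbarg, 0, 0, NULL, NULL, NULL, NULL);
}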
+void
+bfa_fcxp_walloc_cancel(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe)
+{
+	struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
+
+	bfa_assert(bfa_q_is_on_q(&mod->wait_q, wqe));
+	list_del(&wqe->qe);
+}
+
+void
+bfa_fcxp_discard(struct bfa_fcxp_s *fcxp)
+{
+	/**
+	 * If waiting for room in request queue, cancel reqq wait
+	 * and free fcxp.
+	 */
+	if (fcxp->reqq_waiting) {
+		fcxp->reqq_waiting = BFA_FALSE;
+		bfa_reqq_wcancel(&fcxp->reqq_wqe);
+		bfa_fcxp_free(fcxp);
+		return;
+	}
+
+	fcxp->send_cbfn = bfa_fcxp_null_comp;
+}
+
+
+
+/**
+ *  hal_fcxp_public BFA FCXP public functions
+ */
+
+void
+bfa_fcxp_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
+{
+	switch (msg->mhdr.msg_id) {
+	case BFI_FCXP_I2H_SEND_RSP:
+		hal_fcxp_send_comp(bfa, (struct bfi_fcxp_send_rsp_s *) msg);
+		break;
+
+	default:
+		bfa_trc(bfa, msg->mhdr.msg_id);
+		bfa_assert(0);
+	}
+}
+
+u32
+bfa_fcxp_get_maxrsp(struct bfa_s *bfa)
+{
+	struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
+
+	return mod->rsp_pld_sz;
+}
+
+
+/**
+ *  BFA LPS state machine functions
+ */
+
+/**
+ * Init state -- no login
+ */
+static void
+bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event)
+{
+	bfa_trc(lps->bfa, lps->lp_tag);
+	bfa_trc(lps->bfa, event);
+
+	switch (event) {
+	case BFA_LPS_SM_LOGIN:
+		if (bfa_reqq_full(lps->bfa, lps->reqq)) {
+			bfa_sm_set_state(lps, bfa_lps_sm_loginwait);
+			bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe);
+		} else {
+			bfa_sm_set_state(lps, bfa_lps_sm_login);
+			bfa_lps_send_login(lps);
+		}
+
+		if (lps->fdisc)
+			bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
+				BFA_PL_EID_LOGIN, 0, "FDISC Request");
+		else
+			bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
+				BFA_PL_EID_LOGIN, 0, "FLOGI Request");
+		break;
+
+	case BFA_LPS_SM_LOGOUT:
+		bfa_lps_logout_comp(lps);
+		break;
+
+	case BFA_LPS_SM_DELETE:
+		bfa_lps_free(lps);
+		break;
+
+	case BFA_LPS_SM_RX_CVL:
+	case BFA_LPS_SM_OFFLINE:
+		break;
+
+	case BFA_LPS_SM_FWRSP:
+		/*
+		 * Could happen when fabric detects loopback and discards
+		 * the lps request. FW will eventually send out the timeout.
+		 * Just ignore it.
+		 */
+		break;
+
+	default:
+		bfa_sm_fault(lps->bfa, event);
+	}
+}
+
+/**
+ * login is in progress -- awaiting response from firmware
+ */
+static void
+bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event)
+{
+	bfa_trc(lps->bfa, lps->lp_tag);
+	bfa_trc(lps->bfa, event);
+
+	switch (event) {
+	case BFA_LPS_SM_FWRSP:
+		if (lps->status == BFA_STATUS_OK) {
+			bfa_sm_set_state(lps, bfa_lps_sm_online);
+			if (lps->fdisc)
+				bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
+					BFA_PL_EID_LOGIN, 0, "FDISC Accept");
+			else
+				bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
+					BFA_PL_EID_LOGIN, 0, "FLOGI Accept");
+		} else {
+			bfa_sm_set_state(lps, bfa_lps_sm_init);
+			if (lps->fdisc)
+				bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
+					BFA_PL_EID_LOGIN, 0,
+					"FDISC Fail (RJT or timeout)");
+			else
+				bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
+					BFA_PL_EID_LOGIN, 0,
+					"FLOGI Fail (RJT or timeout)");
+		}
+		bfa_lps_login_comp(lps);
+		break;
+
+	case BFA_LPS_SM_OFFLINE:
+		bfa_sm_set_state(lps, bfa_lps_sm_init);
+		break;
+
+	default:
+		bfa_sm_fault(lps->bfa, event);
+	}
+}
+
+/**
+ * login pending - awaiting space in request queue
+ */
+static void
+bfa_lps_sm_loginwait(struct bfa_lps_s *lps, enum bfa_lps_event event)
+{
+	bfa_trc(lps->bfa, lps->lp_tag);
+	bfa_trc(lps->bfa, event);
+
+	switch (event) {
+	case BFA_LPS_SM_RESUME:
+		bfa_sm_set_state(lps, bfa_lps_sm_login);
+		break;
+
+	case BFA_LPS_SM_OFFLINE:
+		bfa_sm_set_state(lps, bfa_lps_sm_init);
+		bfa_reqq_wcancel(&lps->wqe);
+		break;
+
+	case BFA_LPS_SM_RX_CVL:
+		/*
+		 * Login was not even sent out; when leaving this state
+		 * it will appear like a login retry after a
+		 * Clear Virtual Link.
+		 */
+		break;
+
+	default:
+		bfa_sm_fault(lps->bfa, event);
+	}
+}
+
+/**
+ * login complete
+ */
+static void
+bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event)
+{
+	bfa_trc(lps->bfa, lps->lp_tag);
+	bfa_trc(lps->bfa, event);
+
+	switch (event) {
+	case BFA_LPS_SM_LOGOUT:
+		if (bfa_reqq_full(lps->bfa, lps->reqq)) {
+			bfa_sm_set_state(lps, bfa_lps_sm_logowait);
+			bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe);
+		} else {
+			bfa_sm_set_state(lps, bfa_lps_sm_logout);
+			bfa_lps_send_logout(lps);
+		}
+		bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
+			BFA_PL_EID_LOGO, 0, "Logout");
+		break;
+
+	case BFA_LPS_SM_RX_CVL:
+		bfa_sm_set_state(lps, bfa_lps_sm_init);
+
+		/* Let the vport module know about this event */
+		bfa_lps_cvl_event(lps);
+		bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
+			BFA_PL_EID_FIP_FCF_CVL, 0, "FCF Clear Virt. Link Rx");
+		break;
+
+	case BFA_LPS_SM_OFFLINE:
+	case BFA_LPS_SM_DELETE:
+		bfa_sm_set_state(lps, bfa_lps_sm_init);
+		break;
+
+	default:
+		bfa_sm_fault(lps->bfa, event);
+	}
+}
+
+/**
+ * logout in progress - awaiting firmware response
+ */
+static void
+bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event)
+{
+	bfa_trc(lps->bfa, lps->lp_tag);
+	bfa_trc(lps->bfa, event);
+
+	switch (event) {
+	case BFA_LPS_SM_FWRSP:
+		bfa_sm_set_state(lps, bfa_lps_sm_init);
+		bfa_lps_logout_comp(lps);
+		break;
+
+	case BFA_LPS_SM_OFFLINE:
+		bfa_sm_set_state(lps, bfa_lps_sm_init);
+		break;
+
+	default:
+		bfa_sm_fault(lps->bfa, event);
+	}
+}
+
+/**
+ * logout pending -- awaiting space in request queue
+ */
+static void
+bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event event)
+{
+	bfa_trc(lps->bfa, lps->lp_tag);
+	bfa_trc(lps->bfa, event);
+
+	switch (event) {
+	case BFA_LPS_SM_RESUME:
+		bfa_sm_set_state(lps, bfa_lps_sm_logout);
+		bfa_lps_send_logout(lps);
+		break;
+
+	case BFA_LPS_SM_OFFLINE:
+		bfa_sm_set_state(lps, bfa_lps_sm_init);
+		bfa_reqq_wcancel(&lps->wqe);
+		break;
+
+	default:
+		bfa_sm_fault(lps->bfa, event);
+	}
+}
+
+
+
+/**
+ *  lps_pvt BFA LPS private functions
+ */
+
+/**
+ * return memory requirement
+ */
+static void
+bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len,
+	u32 *dm_len)
+{
+	if (cfg->drvcfg.min_cfg)
+		*ndm_len += sizeof(struct bfa_lps_s) * BFA_LPS_MIN_LPORTS;
+	else
+		*ndm_len += sizeof(struct bfa_lps_s) * BFA_LPS_MAX_LPORTS;
+}
+
+/**
+ * bfa module attach at initialization time
+ */
+static void
+bfa_lps_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
+	struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
+{
+	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
+	struct bfa_lps_s	*lps;
+	int			i;
+
+	bfa_os_memset(mod, 0, sizeof(struct bfa_lps_mod_s));
+	if (cfg->drvcfg.min_cfg)
+		mod->num_lps = BFA_LPS_MIN_LPORTS;
+	else
+		mod->num_lps = BFA_LPS_MAX_LPORTS;
+	mod->lps_arr = lps = (struct bfa_lps_s *) bfa_meminfo_kva(meminfo);
+
+	bfa_meminfo_kva(meminfo) += mod->num_lps * sizeof(struct bfa_lps_s);
+
+	INIT_LIST_HEAD(&mod->lps_free_q);
+	INIT_LIST_HEAD(&mod->lps_active_q);
+
+	for (i = 0; i < mod->num_lps; i++, lps++) {
+		lps->bfa	= bfa;
+		lps->lp_tag	= (u8) i;
+		lps->reqq	= BFA_REQQ_LPS;
+		bfa_reqq_winit(&lps->wqe, bfa_lps_reqq_resume, lps);
+		list_add_tail(&lps->qe, &mod->lps_free_q);
+	}
+}
+
+static void
+bfa_lps_detach(struct bfa_s *bfa)
+{
+}
+
+static void
+bfa_lps_start(struct bfa_s *bfa)
+{
+}
+
+static void
+bfa_lps_stop(struct bfa_s *bfa)
+{
+}
+
+/**
+ * IOC in disabled state -- consider all lps offline
+ */
+static void
+bfa_lps_iocdisable(struct bfa_s *bfa)
+{
+	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
+	struct bfa_lps_s	*lps;
+	struct list_head		*qe, *qen;
+
+	list_for_each_safe(qe, qen, &mod->lps_active_q) {
+		lps = (struct bfa_lps_s *) qe;
+		bfa_sm_send_event(lps, BFA_LPS_SM_OFFLINE);
+	}
+}
+
+/**
+ * Firmware login response
+ */
+static void
+bfa_lps_login_rsp(struct bfa_s *bfa, struct bfi_lps_login_rsp_s *rsp)
+{
+	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
+	struct bfa_lps_s	*lps;
+
+	bfa_assert(rsp->lp_tag < mod->num_lps);
+	lps = BFA_LPS_FROM_TAG(mod, rsp->lp_tag);
+
+	lps->status = rsp->status;
+	switch (rsp->status) {
+	case BFA_STATUS_OK:
+		lps->fport	= rsp->f_port;
+		lps->npiv_en	= rsp->npiv_en;
+		lps->lp_pid	= rsp->lp_pid;
+		lps->pr_bbcred	= bfa_os_ntohs(rsp->bb_credit);
+		lps->pr_pwwn	= rsp->port_name;
+		lps->pr_nwwn	= rsp->node_name;
+		lps->auth_req	= rsp->auth_req;
+		lps->lp_mac	= rsp->lp_mac;
+		lps->brcd_switch = rsp->brcd_switch;
+		lps->fcf_mac	= rsp->fcf_mac;
+
+		break;
+
+	case BFA_STATUS_FABRIC_RJT:
+		lps->lsrjt_rsn = rsp->lsrjt_rsn;
+		lps->lsrjt_expl = rsp->lsrjt_expl;
+
+		break;
+
+	case BFA_STATUS_EPROTOCOL:
+		lps->ext_status = rsp->ext_status;
+
+		break;
+
+	default:
+		/* Nothing to do with other status */
+		break;
+	}
+
+	bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
+}
+
+/**
+ * Firmware logout response
+ */
+static void
+bfa_lps_logout_rsp(struct bfa_s *bfa, struct bfi_lps_logout_rsp_s *rsp)
+{
+	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
+	struct bfa_lps_s	*lps;
+
+	bfa_assert(rsp->lp_tag < mod->num_lps);
+	lps = BFA_LPS_FROM_TAG(mod, rsp->lp_tag);
+
+	bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
+}
+
+/**
+ * Firmware received a Clear virtual link request (for FCoE)
+ */
+static void
+bfa_lps_rx_cvl_event(struct bfa_s *bfa, struct bfi_lps_cvl_event_s *cvl)
+{
+	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
+	struct bfa_lps_s	*lps;
+
+	lps = BFA_LPS_FROM_TAG(mod, cvl->lp_tag);
+
+	bfa_sm_send_event(lps, BFA_LPS_SM_RX_CVL);
+}
+
+/**
+ * Space is available in request queue, resume queueing request to firmware.
+ */
+static void
+bfa_lps_reqq_resume(void *lps_arg)
+{
+	struct bfa_lps_s	*lps = lps_arg;
+
+	bfa_sm_send_event(lps, BFA_LPS_SM_RESUME);
+}
+
+/**
+ * lps is freed -- triggered by vport delete
+ */
+static void
+bfa_lps_free(struct bfa_lps_s *lps)
+{
+	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(lps->bfa);
+
+	lps->lp_pid = 0;
+	list_del(&lps->qe);
+	list_add_tail(&lps->qe, &mod->lps_free_q);
+}
+
+/**
+ * send login request to firmware
+ */
+static void
+bfa_lps_send_login(struct bfa_lps_s *lps)
+{
+	struct bfi_lps_login_req_s	*m;
+
+	m = bfa_reqq_next(lps->bfa, lps->reqq);
+	bfa_assert(m);
+
+	bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_LOGIN_REQ,
+		bfa_lpuid(lps->bfa));
+
+	m->lp_tag	= lps->lp_tag;
+	m->alpa		= lps->alpa;
+	m->pdu_size	= bfa_os_htons(lps->pdusz);
+	m->pwwn		= lps->pwwn;
+	m->nwwn		= lps->nwwn;
+	m->fdisc	= lps->fdisc;
+	m->auth_en	= lps->auth_en;
+
+	bfa_reqq_produce(lps->bfa, lps->reqq);
+}
+
+/**
+ * send logout request to firmware
+ */
+static void
+bfa_lps_send_logout(struct bfa_lps_s *lps)
+{
+	struct bfi_lps_logout_req_s *m;
+
+	m = bfa_reqq_next(lps->bfa, lps->reqq);
+	bfa_assert(m);
+
+	bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_LOGOUT_REQ,
+		bfa_lpuid(lps->bfa));
+
+	m->lp_tag    = lps->lp_tag;
+	m->port_name = lps->pwwn;
+	bfa_reqq_produce(lps->bfa, lps->reqq);
+}
+
+/**
+ * Indirect login completion handler for non-fcs
+ */
+static void
+bfa_lps_login_comp_cb(void *arg, bfa_boolean_t complete)
+{
+	struct bfa_lps_s *lps	= arg;
+
+	if (!complete)
+		return;
+
+	if (lps->fdisc)
+		bfa_cb_lps_fdisc_comp(lps->bfa->bfad, lps->uarg, lps->status);
+	else
+		bfa_cb_lps_flogi_comp(lps->bfa->bfad, lps->uarg, lps->status);
+}
+
+/**
+ * Login completion handler -- direct call for fcs, queue for others
+ */
+static void
+bfa_lps_login_comp(struct bfa_lps_s *lps)
+{
+	if (!lps->bfa->fcs) {
+		bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_login_comp_cb,
+			lps);
+		return;
+	}
+
+	if (lps->fdisc)
+		bfa_cb_lps_fdisc_comp(lps->bfa->bfad, lps->uarg, lps->status);
+	else
+		bfa_cb_lps_flogi_comp(lps->bfa->bfad, lps->uarg, lps->status);
+}
+
+/**
+ * Indirect logout completion handler for non-fcs
+ */
+static void
+bfa_lps_logout_comp_cb(void *arg, bfa_boolean_t complete)
+{
+	struct bfa_lps_s *lps	= arg;
+
+	if (!complete)
+		return;
+
+	if (lps->fdisc)
+		bfa_cb_lps_fdisclogo_comp(lps->bfa->bfad, lps->uarg);
+}
+
+/**
+ * Logout completion handler -- direct call for fcs, queue for others
+ */
+static void
+bfa_lps_logout_comp(struct bfa_lps_s *lps)
+{
+	if (!lps->bfa->fcs) {
+		bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_logout_comp_cb,
+			lps);
+		return;
+	}
+	if (lps->fdisc)
+		bfa_cb_lps_fdisclogo_comp(lps->bfa->bfad, lps->uarg);
+}
+
+/**
+ * Clear virtual link completion handler for non-fcs
+ */
+static void
+bfa_lps_cvl_event_cb(void *arg, bfa_boolean_t complete)
+{
+	struct bfa_lps_s *lps	= arg;
+
+	if (!complete)
+		return;
+
+	/* Clear virtual link to base port will result in link down */
+	if (lps->fdisc)
+		bfa_cb_lps_cvl_event(lps->bfa->bfad, lps->uarg);
+}
+
+/**
+ * Received Clear virtual link event -- direct call for fcs,
+ * queue for others
+ */
+static void
+bfa_lps_cvl_event(struct bfa_lps_s *lps)
+{
+	if (!lps->bfa->fcs) {
+		bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_cvl_event_cb,
+			lps);
+		return;
+	}
+
+	/* Clear virtual link to base port will result in link down */
+	if (lps->fdisc)
+		bfa_cb_lps_cvl_event(lps->bfa->bfad, lps->uarg);
+}
+
+
+
+/**
+ *  lps_public BFA LPS public functions
+ */
+
+u32
+bfa_lps_get_max_vport(struct bfa_s *bfa)
+{
+	if (bfa_ioc_devid(&bfa->ioc) == BFA_PCI_DEVICE_ID_CT)
+		return BFA_LPS_MAX_VPORTS_SUPP_CT;
+	else
+		return BFA_LPS_MAX_VPORTS_SUPP_CB;
+}
+
+/**
+ * Allocate a lport service tag.
+ */
+struct bfa_lps_s  *
+bfa_lps_alloc(struct bfa_s *bfa)
+{
+	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
+	struct bfa_lps_s	*lps = NULL;
+
+	bfa_q_deq(&mod->lps_free_q, &lps);
+
+	if (lps == NULL)
+		return NULL;
+
+	list_add_tail(&lps->qe, &mod->lps_active_q);
+
+	bfa_sm_set_state(lps, bfa_lps_sm_init);
+	return lps;
+}
+
+/**
+ * Free lport service tag. This can be called anytime after an alloc.
+ * No need to wait for any pending login/logout completions.
+ */
+void
+bfa_lps_delete(struct bfa_lps_s *lps)
+{
+	bfa_sm_send_event(lps, BFA_LPS_SM_DELETE);
+}
+
+/**
+ * Initiate a lport login.
+ */
+void
+bfa_lps_flogi(struct bfa_lps_s *lps, void *uarg, u8 alpa, u16 pdusz,
+	wwn_t pwwn, wwn_t nwwn, bfa_boolean_t auth_en)
+{
+	lps->uarg	= uarg;
+	lps->alpa	= alpa;
+	lps->pdusz	= pdusz;
+	lps->pwwn	= pwwn;
+	lps->nwwn	= nwwn;
+	lps->fdisc	= BFA_FALSE;
+	lps->auth_en	= auth_en;
+	bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN);
+}
+
+/**
+ * Initiate a lport fdisc login.
+ */
+void
+bfa_lps_fdisc(struct bfa_lps_s *lps, void *uarg, u16 pdusz, wwn_t pwwn,
+	wwn_t nwwn)
+{
+	lps->uarg	= uarg;
+	lps->alpa	= 0;
+	lps->pdusz	= pdusz;
+	lps->pwwn	= pwwn;
+	lps->nwwn	= nwwn;
+	lps->fdisc	= BFA_TRUE;
+	lps->auth_en	= BFA_FALSE;
+	bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN);
+}
+
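Editorial note, not part of the patch: the public LPS calls above follow an alloc, login, optional logout, delete life cycle, with completions reported back through the bfa_cb_lps_*() callbacks for non-FCS callers. A hedged sketch of starting an FDISC (NPIV) login; the WWNs come from the caller and the PDU size constant is the one already used earlier in this file:

/* Editorial sketch, not part of the patch: FDISC login for a vport. */
static struct bfa_lps_s *
example_vport_login(struct bfa_s *bfa, void *uarg, wwn_t pwwn, wwn_t nwwn)
{
	struct bfa_lps_s *lps;

	lps = bfa_lps_alloc(bfa);
	if (lps == NULL)
		return NULL;	/* all lport service tags are in use */

	/* completion arrives via bfa_cb_lps_fdisc_comp(bfad, uarg, status) */
	bfa_lps_fdisc(lps, uarg, FC_MAX_PDUSZ, pwwn, nwwn);
	return lps;
}

/* teardown: bfa_lps_fdisclogo(lps) to log out, then bfa_lps_delete(lps)
 * to return the tag; delete is legal at any time after the alloc.
 */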
+/**
+ * Initiate a lport logout (of the FLOGI session).
+ */
+void
+bfa_lps_flogo(struct bfa_lps_s *lps)
+{
+	bfa_sm_send_event(lps, BFA_LPS_SM_LOGOUT);
+}
+
+/**
+ * Initiate a lport FDISC logout.
+ */
+void
+bfa_lps_fdisclogo(struct bfa_lps_s *lps)
+{
+	bfa_sm_send_event(lps, BFA_LPS_SM_LOGOUT);
+}
+
+/**
+ * Discard a pending login request -- should be called only for
+ * link down handling.
+ */
+void
+bfa_lps_discard(struct bfa_lps_s *lps)
+{
+	bfa_sm_send_event(lps, BFA_LPS_SM_OFFLINE);
+}
+
+/**
+ * Return lport services tag
+ */
+u8
+bfa_lps_get_tag(struct bfa_lps_s *lps)
+{
+	return lps->lp_tag;
+}
+
+/**
+ * Return lport services tag given the pid
+ */
+u8
+bfa_lps_get_tag_from_pid(struct bfa_s *bfa, u32 pid)
+{
+	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
+	struct bfa_lps_s	*lps;
+	int			i;
+
+	for (i = 0, lps = mod->lps_arr; i < mod->num_lps; i++, lps++) {
+		if (lps->lp_pid == pid)
+			return lps->lp_tag;
+	}
+
+	/* Return base port tag anyway */
+	return 0;
+}
+
+/**
+ * return if fabric login indicates support for NPIV
+ */
+bfa_boolean_t
+bfa_lps_is_npiv_en(struct bfa_lps_s *lps)
+{
+	return lps->npiv_en;
+}
+
+/**
+ * Return TRUE if attached to F-Port, else return FALSE
+ */
+bfa_boolean_t
+bfa_lps_is_fport(struct bfa_lps_s *lps)
+{
+	return lps->fport;
+}
+
+/**
+ * Return TRUE if attached to a Brocade Fabric
+ */
+bfa_boolean_t
+bfa_lps_is_brcd_fabric(struct bfa_lps_s *lps)
+{
+	return lps->brcd_switch;
+}
+/**
+ * return TRUE if authentication is required
+ */
+bfa_boolean_t
+bfa_lps_is_authreq(struct bfa_lps_s *lps)
+{
+	return lps->auth_req;
+}
+
+bfa_eproto_status_t
+bfa_lps_get_extstatus(struct bfa_lps_s *lps)
+{
+	return lps->ext_status;
+}
+
+/**
+ * return port id assigned to the lport
+ */
+u32
+bfa_lps_get_pid(struct bfa_lps_s *lps)
+{
+	return lps->lp_pid;
+}
+
+/**
+ * return port id assigned to the base lport
+ */
+u32
+bfa_lps_get_base_pid(struct bfa_s *bfa)
+{
+	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
+
+	return BFA_LPS_FROM_TAG(mod, 0)->lp_pid;
+}
+
+/**
+ * Return bb_credit assigned in FLOGI response
+ */
+u16
+bfa_lps_get_peer_bbcredit(struct bfa_lps_s *lps)
+{
+	return lps->pr_bbcred;
+}
+
+/**
+ * Return peer port name
+ */
+wwn_t
+bfa_lps_get_peer_pwwn(struct bfa_lps_s *lps)
+{
+	return lps->pr_pwwn;
+}
+
+/**
+ * Return peer node name
+ */
+wwn_t
+bfa_lps_get_peer_nwwn(struct bfa_lps_s *lps)
+{
+	return lps->pr_nwwn;
+}
+
+/**
+ * return reason code if login request is rejected
+ */
+u8
+bfa_lps_get_lsrjt_rsn(struct bfa_lps_s *lps)
+{
+	return lps->lsrjt_rsn;
+}
+
+/**
+ * return explanation code if login request is rejected
+ */
+u8
+bfa_lps_get_lsrjt_expl(struct bfa_lps_s *lps)
+{
+	return lps->lsrjt_expl;
+}
+
+/**
+ * Return fpma/spma MAC for lport
+ */
+mac_t
+bfa_lps_get_lp_mac(struct bfa_lps_s *lps)
+{
+	return lps->lp_mac;
+}
+
+/**
+ * LPS firmware message class handler.
+ */
+void
+bfa_lps_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
+{
+	union bfi_lps_i2h_msg_u	msg;
+
+	bfa_trc(bfa, m->mhdr.msg_id);
+	msg.msg = m;
+
+	switch (m->mhdr.msg_id) {
+	case BFI_LPS_H2I_LOGIN_RSP:
+		bfa_lps_login_rsp(bfa, msg.login_rsp);
+		break;
+
+	case BFI_LPS_H2I_LOGOUT_RSP:
+		bfa_lps_logout_rsp(bfa, msg.logout_rsp);
+		break;
+
+	case BFI_LPS_H2I_CVL_EVENT:
+		bfa_lps_rx_cvl_event(bfa, msg.cvl_event);
+		break;
+
+	default:
+		bfa_trc(bfa, m->mhdr.msg_id);
+		bfa_assert(0);
+	}
+}
+
+/**
+ * FC PORT state machine functions
+ */
+static void
+bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport,
+			enum bfa_fcport_sm_event event)
+{
+	bfa_trc(fcport->bfa, event);
+
+	switch (event) {
+	case BFA_FCPORT_SM_START:
+		/**
+		 * Start event after IOC is configured and BFA is started.
+		 */
+		if (bfa_fcport_send_enable(fcport)) {
+			bfa_trc(fcport->bfa, BFA_TRUE);
+			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
+		} else {
+			bfa_trc(fcport->bfa, BFA_FALSE);
+			bfa_sm_set_state(fcport,
+					bfa_fcport_sm_enabling_qwait);
+		}
+		break;
+
+	case BFA_FCPORT_SM_ENABLE:
+		/**
+		 * Port is persistently configured to be in enabled state. Do
+		 * not change state. Port enabling is done when START event is
+		 * received.
+		 */
+		break;
+
+	case BFA_FCPORT_SM_DISABLE:
+		/**
+		 * If a port is persistently configured to be disabled, the
+		 * first event will be a port disable request.
+		 */
+		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
+		break;
+
+	case BFA_FCPORT_SM_HWFAIL:
+		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
+		break;
+
+	default:
+		bfa_sm_fault(fcport->bfa, event);
+	}
+}
+
+static void
+bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport,
+				enum bfa_fcport_sm_event event)
+{
+	char pwwn_buf[BFA_STRING_32];
+	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
+	bfa_trc(fcport->bfa, event);
+
+	switch (event) {
+	case BFA_FCPORT_SM_QRESUME:
+		bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
+		bfa_fcport_send_enable(fcport);
+		break;
+
+	case BFA_FCPORT_SM_STOP:
+		bfa_reqq_wcancel(&fcport->reqq_wait);
+		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
+		break;
+
+	case BFA_FCPORT_SM_ENABLE:
+		/**
+		 * Already enable is in progress.
+		 */
+		break;
+
+	case BFA_FCPORT_SM_DISABLE:
+		/**
+		 * Enable request has not reached firmware yet; cancel the
+		 * request-queue wait and mark the port disabled.
+		 */
+		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
+		bfa_reqq_wcancel(&fcport->reqq_wait);
+		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
+				BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
+		wwn2str(pwwn_buf, fcport->pwwn);
+		BFA_LOG(KERN_INFO, bfad, log_level,
+			"Base port disabled: WWN = %s\n", pwwn_buf);
+		break;
+
+	case BFA_FCPORT_SM_LINKUP:
+	case BFA_FCPORT_SM_LINKDOWN:
+		/**
+		 * Possible to get link events when doing back-to-back
+		 * enable/disables.
+		 */
+		break;
+
+	case BFA_FCPORT_SM_HWFAIL:
+		bfa_reqq_wcancel(&fcport->reqq_wait);
+		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
+		break;
+
+	default:
+		bfa_sm_fault(fcport->bfa, event);
+	}
+}
+
+static void
+bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport,
+						enum bfa_fcport_sm_event event)
+{
+	char pwwn_buf[BFA_STRING_32];
+	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
+	bfa_trc(fcport->bfa, event);
+
+	switch (event) {
+	case BFA_FCPORT_SM_FWRSP:
+	case BFA_FCPORT_SM_LINKDOWN:
+		bfa_sm_set_state(fcport, bfa_fcport_sm_linkdown);
+		break;
+
+	case BFA_FCPORT_SM_LINKUP:
+		bfa_fcport_update_linkinfo(fcport);
+		bfa_sm_set_state(fcport, bfa_fcport_sm_linkup);
+
+		bfa_assert(fcport->event_cbfn);
+		bfa_fcport_scn(fcport, BFA_PORT_LINKUP, BFA_FALSE);
+		break;
+
+	case BFA_FCPORT_SM_ENABLE:
+		/**
+		 * Already being enabled.
+		 */
+		break;
+
+	case BFA_FCPORT_SM_DISABLE:
+		if (bfa_fcport_send_disable(fcport))
+			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
+		else
+			bfa_sm_set_state(fcport,
+					 bfa_fcport_sm_disabling_qwait);
+
+		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
+				BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
+		wwn2str(pwwn_buf, fcport->pwwn);
+		BFA_LOG(KERN_INFO, bfad, log_level,
+			"Base port disabled: WWN = %s\n", pwwn_buf);
+		break;
+
+	case BFA_FCPORT_SM_STOP:
+		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
+		break;
+
+	case BFA_FCPORT_SM_HWFAIL:
+		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
+		break;
+
+	default:
+		bfa_sm_fault(fcport->bfa, event);
+	}
+}
+
+static void
+bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport,
+						enum bfa_fcport_sm_event event)
+{
+	struct bfi_fcport_event_s *pevent = fcport->event_arg.i2hmsg.event;
+	char pwwn_buf[BFA_STRING_32];
+	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
+
+	bfa_trc(fcport->bfa, event);
+
+	switch (event) {
+	case BFA_FCPORT_SM_LINKUP:
+		bfa_fcport_update_linkinfo(fcport);
+		bfa_sm_set_state(fcport, bfa_fcport_sm_linkup);
+		bfa_assert(fcport->event_cbfn);
+		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
+				BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkup");
+		if (!bfa_ioc_get_fcmode(&fcport->bfa->ioc)) {
+
+			bfa_trc(fcport->bfa,
+				pevent->link_state.vc_fcf.fcf.fipenabled);
+			bfa_trc(fcport->bfa,
+				pevent->link_state.vc_fcf.fcf.fipfailed);
+
+			if (pevent->link_state.vc_fcf.fcf.fipfailed)
+				bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
+					BFA_PL_EID_FIP_FCF_DISC, 0,
+					"FIP FCF Discovery Failed");
+			else
+				bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
+					BFA_PL_EID_FIP_FCF_DISC, 0,
+					"FIP FCF Discovered");
+		}
+
+		bfa_fcport_scn(fcport, BFA_PORT_LINKUP, BFA_FALSE);
+		wwn2str(pwwn_buf, fcport->pwwn);
+		BFA_LOG(KERN_INFO, bfad, log_level,
+			"Base port online: WWN = %s\n", pwwn_buf);
+		break;
+
+	case BFA_FCPORT_SM_LINKDOWN:
+		/**
+		 * Possible to get link down event.
+		 */
+		break;
+
+	case BFA_FCPORT_SM_ENABLE:
+		/**
+		 * Already enabled.
+		 */
+		break;
+
+	case BFA_FCPORT_SM_DISABLE:
+		if (bfa_fcport_send_disable(fcport))
+			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
+		else
+			bfa_sm_set_state(fcport,
+					 bfa_fcport_sm_disabling_qwait);
+
+		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
+				BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
+		wwn2str(pwwn_buf, fcport->pwwn);
+		BFA_LOG(KERN_INFO, bfad, log_level,
+			"Base port disabled: WWN = %s\n", pwwn_buf);
+		break;
+
+	case BFA_FCPORT_SM_STOP:
+		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
+		break;
+
+	case BFA_FCPORT_SM_HWFAIL:
+		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
+		break;
+
+	default:
+		bfa_sm_fault(fcport->bfa, event);
+	}
+}
+
+static void
+bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport,
+	enum bfa_fcport_sm_event event)
+{
+	char pwwn_buf[BFA_STRING_32];
+	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
+
+	bfa_trc(fcport->bfa, event);
+
+	switch (event) {
+	case BFA_FCPORT_SM_ENABLE:
+		/**
+		 * Already enabled.
+		 */
+		break;
+
+	case BFA_FCPORT_SM_DISABLE:
+		if (bfa_fcport_send_disable(fcport))
+			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
+		else
+			bfa_sm_set_state(fcport,
+					 bfa_fcport_sm_disabling_qwait);
+
+		bfa_fcport_reset_linkinfo(fcport);
+		bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
+		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
+				BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
+		wwn2str(pwwn_buf, fcport->pwwn);
+		BFA_LOG(KERN_INFO, bfad, log_level,
+			"Base port offline: WWN = %s\n", pwwn_buf);
+		BFA_LOG(KERN_INFO, bfad, log_level,
+			"Base port disabled: WWN = %s\n", pwwn_buf);
+		break;
+
+	case BFA_FCPORT_SM_LINKDOWN:
+		bfa_sm_set_state(fcport, bfa_fcport_sm_linkdown);
+		bfa_fcport_reset_linkinfo(fcport);
+		bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
+		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
+				BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkdown");
+		wwn2str(pwwn_buf, fcport->pwwn);
+		if (BFA_PORT_IS_DISABLED(fcport->bfa))
+			BFA_LOG(KERN_INFO, bfad, log_level,
+				"Base port offline: WWN = %s\n", pwwn_buf);
+		else
+			BFA_LOG(KERN_ERR, bfad, log_level,
+				"Base port (WWN = %s) "
+				"lost fabric connectivity\n", pwwn_buf);
+		break;
+
+	case BFA_FCPORT_SM_STOP:
+		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
+		bfa_fcport_reset_linkinfo(fcport);
+		wwn2str(pwwn_buf, fcport->pwwn);
+		if (BFA_PORT_IS_DISABLED(fcport->bfa))
+			BFA_LOG(KERN_INFO, bfad, log_level,
+				"Base port offline: WWN = %s\n", pwwn_buf);
+		else
+			BFA_LOG(KERN_ERR, bfad, log_level,
+				"Base port (WWN = %s) "
+				"lost fabric connectivity\n", pwwn_buf);
+		break;
+
+	case BFA_FCPORT_SM_HWFAIL:
+		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
+		bfa_fcport_reset_linkinfo(fcport);
+		bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
+		wwn2str(pwwn_buf, fcport->pwwn);
+		if (BFA_PORT_IS_DISABLED(fcport->bfa))
+			BFA_LOG(KERN_INFO, bfad, log_level,
+				"Base port offline: WWN = %s\n", pwwn_buf);
+		else
+			BFA_LOG(KERN_ERR, bfad, log_level,
+				"Base port (WWN = %s) "
+				"lost fabric connectivity\n", pwwn_buf);
+		break;
+
+	default:
+		bfa_sm_fault(fcport->bfa, event);
+	}
+}
+
+static void
+bfa_fcport_sm_disabling_qwait(struct bfa_fcport_s *fcport,
+				 enum bfa_fcport_sm_event event)
+{
+	bfa_trc(fcport->bfa, event);
+
+	switch (event) {
+	case BFA_FCPORT_SM_QRESUME:
+		bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
+		bfa_fcport_send_disable(fcport);
+		break;
+
+	case BFA_FCPORT_SM_STOP:
+		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
+		bfa_reqq_wcancel(&fcport->reqq_wait);
+		break;
+
+	case BFA_FCPORT_SM_ENABLE:
+		bfa_sm_set_state(fcport, bfa_fcport_sm_toggling_qwait);
+		break;
+
+	case BFA_FCPORT_SM_DISABLE:
+		/**
+		 * Already being disabled.
+		 */
+		break;
+
+	case BFA_FCPORT_SM_LINKUP:
+	case BFA_FCPORT_SM_LINKDOWN:
+		/**
+		 * Possible to get link events when doing back-to-back
+		 * enable/disables.
+		 */
+		break;
+
+	case BFA_FCPORT_SM_HWFAIL:
+		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
+		bfa_reqq_wcancel(&fcport->reqq_wait);
+		break;
+
+	default:
+		bfa_sm_fault(fcport->bfa, event);
+	}
+}
+
+static void
+bfa_fcport_sm_toggling_qwait(struct bfa_fcport_s *fcport,
+				 enum bfa_fcport_sm_event event)
+{
+	bfa_trc(fcport->bfa, event);
+
+	switch (event) {
+	case BFA_FCPORT_SM_QRESUME:
+		bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
+		bfa_fcport_send_disable(fcport);
+		if (bfa_fcport_send_enable(fcport))
+			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
+		else
+			bfa_sm_set_state(fcport,
+					 bfa_fcport_sm_enabling_qwait);
+		break;
+
+	case BFA_FCPORT_SM_STOP:
+		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
+		bfa_reqq_wcancel(&fcport->reqq_wait);
+		break;
+
+	case BFA_FCPORT_SM_ENABLE:
+		break;
+
+	case BFA_FCPORT_SM_DISABLE:
+		bfa_sm_set_state(fcport, bfa_fcport_sm_disabling_qwait);
+		break;
+
+	case BFA_FCPORT_SM_LINKUP:
+	case BFA_FCPORT_SM_LINKDOWN:
+		/**
+		 * Possible to get link events when doing back-to-back
+		 * enable/disables.
+		 */
+		break;
+
+	case BFA_FCPORT_SM_HWFAIL:
+		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
+		bfa_reqq_wcancel(&fcport->reqq_wait);
+		break;
+
+	default:
+		bfa_sm_fault(fcport->bfa, event);
+	}
+}
+
+static void
+bfa_fcport_sm_disabling(struct bfa_fcport_s *fcport,
+						enum bfa_fcport_sm_event event)
+{
+	char pwwn_buf[BFA_STRING_32];
+	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
+	bfa_trc(fcport->bfa, event);
+
+	switch (event) {
+	case BFA_FCPORT_SM_FWRSP:
+		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
+		break;
+
+	case BFA_FCPORT_SM_DISABLE:
+		/**
+		 * Already being disabled.
+		 */
+		break;
+
+	case BFA_FCPORT_SM_ENABLE:
+		if (bfa_fcport_send_enable(fcport))
+			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
+		else
+			bfa_sm_set_state(fcport,
+					 bfa_fcport_sm_enabling_qwait);
+
+		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
+				BFA_PL_EID_PORT_ENABLE, 0, "Port Enable");
+		wwn2str(pwwn_buf, fcport->pwwn);
+		BFA_LOG(KERN_INFO, bfad, log_level,
+			"Base port enabled: WWN = %s\n", pwwn_buf);
+		break;
+
+	case BFA_FCPORT_SM_STOP:
+		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
+		break;
+
+	case BFA_FCPORT_SM_LINKUP:
+	case BFA_FCPORT_SM_LINKDOWN:
+		/**
+		 * Possible to get link events when doing back-to-back
+		 * enable/disables.
+		 */
+		break;
+
+	case BFA_FCPORT_SM_HWFAIL:
+		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
+		break;
+
+	default:
+		bfa_sm_fault(fcport->bfa, event);
+	}
+}
+
+static void
+bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport,
+						enum bfa_fcport_sm_event event)
+{
+	char pwwn_buf[BFA_STRING_32];
+	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
+	bfa_trc(fcport->bfa, event);
+
+	switch (event) {
+	case BFA_FCPORT_SM_START:
+		/**
+		 * Ignore start event for a port that is disabled.
+		 */
+		break;
+
+	case BFA_FCPORT_SM_STOP:
+		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
+		break;
+
+	case BFA_FCPORT_SM_ENABLE:
+		if (bfa_fcport_send_enable(fcport))
+			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
+		else
+			bfa_sm_set_state(fcport,
+					 bfa_fcport_sm_enabling_qwait);
+
+		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
+				BFA_PL_EID_PORT_ENABLE, 0, "Port Enable");
+		wwn2str(pwwn_buf, fcport->pwwn);
+		BFA_LOG(KERN_INFO, bfad, log_level,
+			"Base port enabled: WWN = %s\n", pwwn_buf);
+		break;
+
+	case BFA_FCPORT_SM_DISABLE:
+		/**
+		 * Already disabled.
+		 */
+		break;
+
+	case BFA_FCPORT_SM_HWFAIL:
+		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
+		break;
+
+	default:
+		bfa_sm_fault(fcport->bfa, event);
+	}
+}
+
+static void
+bfa_fcport_sm_stopped(struct bfa_fcport_s *fcport,
+			 enum bfa_fcport_sm_event event)
+{
+	bfa_trc(fcport->bfa, event);
+
+	switch (event) {
+	case BFA_FCPORT_SM_START:
+		if (bfa_fcport_send_enable(fcport))
+			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
+		else
+			bfa_sm_set_state(fcport,
+					 bfa_fcport_sm_enabling_qwait);
+		break;
+
+	default:
+		/**
+		 * Ignore all other events.
+		 */
+		;
+	}
+}
+
+/**
+ * Port is enabled. IOC is down/failed.
+ */
+static void
+bfa_fcport_sm_iocdown(struct bfa_fcport_s *fcport,
+			 enum bfa_fcport_sm_event event)
+{
+	bfa_trc(fcport->bfa, event);
+
+	switch (event) {
+	case BFA_FCPORT_SM_START:
+		if (bfa_fcport_send_enable(fcport))
+			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
+		else
+			bfa_sm_set_state(fcport,
+					 bfa_fcport_sm_enabling_qwait);
+		break;
+
+	default:
+		/**
+		 * Ignore all other events.
+		 */
+		;
+	}
+}
+
+/**
+ * Port is disabled. IOC is down/failed.
+ */
+static void
+bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport,
+			 enum bfa_fcport_sm_event event)
+{
+	bfa_trc(fcport->bfa, event);
+
+	switch (event) {
+	case BFA_FCPORT_SM_START:
+		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
+		break;
+
+	case BFA_FCPORT_SM_ENABLE:
+		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
+		break;
+
+	default:
+		/**
+		 * Ignore all other events.
+		 */
+		;
+	}
+}
+
+/**
+ * Link state is down
+ */
+static void
+bfa_fcport_ln_sm_dn(struct bfa_fcport_ln_s *ln,
+		enum bfa_fcport_ln_sm_event event)
+{
+	bfa_trc(ln->fcport->bfa, event);
+
+	switch (event) {
+	case BFA_FCPORT_LN_SM_LINKUP:
+		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_nf);
+		bfa_fcport_queue_cb(ln, BFA_PORT_LINKUP);
+		break;
+
+	default:
+		bfa_sm_fault(ln->fcport->bfa, event);
+	}
+}
+
+/**
+ * Link state is waiting for down notification
+ */
+static void
+bfa_fcport_ln_sm_dn_nf(struct bfa_fcport_ln_s *ln,
+		enum bfa_fcport_ln_sm_event event)
+{
+	bfa_trc(ln->fcport->bfa, event);
+
+	switch (event) {
+	case BFA_FCPORT_LN_SM_LINKUP:
+		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_up_nf);
+		break;
+
+	case BFA_FCPORT_LN_SM_NOTIFICATION:
+		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn);
+		break;
+
+	default:
+		bfa_sm_fault(ln->fcport->bfa, event);
+	}
+}
+
+/**
+ * Link state is waiting for down notification and there is a pending up
+ */
+static void
+bfa_fcport_ln_sm_dn_up_nf(struct bfa_fcport_ln_s *ln,
+		enum bfa_fcport_ln_sm_event event)
+{
+	bfa_trc(ln->fcport->bfa, event);
+
+	switch (event) {
+	case BFA_FCPORT_LN_SM_LINKDOWN:
+		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
+		break;
+
+	case BFA_FCPORT_LN_SM_NOTIFICATION:
+		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_nf);
+		bfa_fcport_queue_cb(ln, BFA_PORT_LINKUP);
+		break;
+
+	default:
+		bfa_sm_fault(ln->fcport->bfa, event);
+	}
+}
+
+/**
+ * Link state is up
+ */
+static void
+bfa_fcport_ln_sm_up(struct bfa_fcport_ln_s *ln,
+		enum bfa_fcport_ln_sm_event event)
+{
+	bfa_trc(ln->fcport->bfa, event);
+
+	switch (event) {
+	case BFA_FCPORT_LN_SM_LINKDOWN:
+		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
+		bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN);
+		break;
+
+	default:
+		bfa_sm_fault(ln->fcport->bfa, event);
+	}
+}
+
+/**
+ * Link state is waiting for up notification
+ */
+static void
+bfa_fcport_ln_sm_up_nf(struct bfa_fcport_ln_s *ln,
+		enum bfa_fcport_ln_sm_event event)
+{
+	bfa_trc(ln->fcport->bfa, event);
+
+	switch (event) {
+	case BFA_FCPORT_LN_SM_LINKDOWN:
+		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_nf);
+		break;
+
+	case BFA_FCPORT_LN_SM_NOTIFICATION:
+		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up);
+		break;
+
+	default:
+		bfa_sm_fault(ln->fcport->bfa, event);
+	}
+}
+
+/**
+ * Link state is waiting for up notification and there is a pending down
+ */
+static void
+bfa_fcport_ln_sm_up_dn_nf(struct bfa_fcport_ln_s *ln,
+		enum bfa_fcport_ln_sm_event event)
+{
+	bfa_trc(ln->fcport->bfa, event);
+
+	switch (event) {
+	case BFA_FCPORT_LN_SM_LINKUP:
+		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_up_nf);
+		break;
+
+	case BFA_FCPORT_LN_SM_NOTIFICATION:
+		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
+		bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN);
+		break;
+
+	default:
+		bfa_sm_fault(ln->fcport->bfa, event);
+	}
+}
+
+/**
+ * Link state is waiting for up notification and there are pending down and up
+ */
+static void
+bfa_fcport_ln_sm_up_dn_up_nf(struct bfa_fcport_ln_s *ln,
+			enum bfa_fcport_ln_sm_event event)
+{
+	bfa_trc(ln->fcport->bfa, event);
+
+	switch (event) {
+	case BFA_FCPORT_LN_SM_LINKDOWN:
+		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_nf);
+		break;
+
+	case BFA_FCPORT_LN_SM_NOTIFICATION:
+		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_up_nf);
+		bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN);
+		break;
+
+	default:
+		bfa_sm_fault(ln->fcport->bfa, event);
+	}
+}
+
+
+
+/**
+ *  hal_port_private
+ */
+
+static void
+__bfa_cb_fcport_event(void *cbarg, bfa_boolean_t complete)
+{
+	struct bfa_fcport_ln_s *ln = cbarg;
+
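+	/*
+	 * On completion, deliver the queued link event through the
+	 * registered callback; otherwise only notify the link-notify
+	 * state machine.
+	 */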
+	if (complete)
+		ln->fcport->event_cbfn(ln->fcport->event_cbarg, ln->ln_event);
+	else
+		bfa_sm_send_event(ln, BFA_FCPORT_LN_SM_NOTIFICATION);
+}
+
+/**
+ * Send SCN notification to upper layers.
+ * trunk - pass false when the caller is fcport, so that fcport events are
+ * ignored while the port is in trunked mode
+ */
+static void
+bfa_fcport_scn(struct bfa_fcport_s *fcport, enum bfa_port_linkstate event,
+	bfa_boolean_t trunk)
+{
+	if (fcport->cfg.trunked && !trunk)
+		return;
+
+	switch (event) {
+	case BFA_PORT_LINKUP:
+		bfa_sm_send_event(&fcport->ln, BFA_FCPORT_LN_SM_LINKUP);
+		break;
+	case BFA_PORT_LINKDOWN:
+		bfa_sm_send_event(&fcport->ln, BFA_FCPORT_LN_SM_LINKDOWN);
+		break;
+	default:
+		bfa_assert(0);
+	}
+}
+
+static void
+bfa_fcport_queue_cb(struct bfa_fcport_ln_s *ln, enum bfa_port_linkstate event)
+{
+	struct bfa_fcport_s *fcport = ln->fcport;
+
+	if (fcport->bfa->fcs) {
+		fcport->event_cbfn(fcport->event_cbarg, event);
+		bfa_sm_send_event(ln, BFA_FCPORT_LN_SM_NOTIFICATION);
+	} else {
+		ln->ln_event = event;
+		bfa_cb_queue(fcport->bfa, &ln->ln_qe,
+			__bfa_cb_fcport_event, ln);
+	}
+}
+
+#define FCPORT_STATS_DMA_SZ (BFA_ROUNDUP(sizeof(union bfa_fcport_stats_u), \
+							BFA_CACHELINE_SZ))
+
+static void
+bfa_fcport_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len,
+		u32 *dm_len)
+{
+	*dm_len += FCPORT_STATS_DMA_SZ;
+}
+
+static void
+bfa_fcport_qresume(void *cbarg)
+{
+	struct bfa_fcport_s *fcport = cbarg;
+
+	bfa_sm_send_event(fcport, BFA_FCPORT_SM_QRESUME);
+}
+
+static void
+bfa_fcport_mem_claim(struct bfa_fcport_s *fcport, struct bfa_meminfo_s *meminfo)
+{
+	u8		*dm_kva;
+	u64	dm_pa;
+
+	dm_kva = bfa_meminfo_dma_virt(meminfo);
+	dm_pa  = bfa_meminfo_dma_phys(meminfo);
+
+	fcport->stats_kva = dm_kva;
+	fcport->stats_pa  = dm_pa;
+	fcport->stats	  = (union bfa_fcport_stats_u *) dm_kva;
+
+	dm_kva += FCPORT_STATS_DMA_SZ;
+	dm_pa  += FCPORT_STATS_DMA_SZ;
+
+	bfa_meminfo_dma_virt(meminfo) = dm_kva;
+	bfa_meminfo_dma_phys(meminfo) = dm_pa;
+}
+
+/**
+ * Claim memory and initialize the fcport module.
+ */
+static void
+bfa_fcport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
+		struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
+{
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+	struct bfa_port_cfg_s *port_cfg = &fcport->cfg;
+	struct bfa_fcport_ln_s *ln = &fcport->ln;
+	struct bfa_timeval_s tv;
+
+	bfa_os_memset(fcport, 0, sizeof(struct bfa_fcport_s));
+	fcport->bfa = bfa;
+	ln->fcport = fcport;
+
+	bfa_fcport_mem_claim(fcport, meminfo);
+
+	bfa_sm_set_state(fcport, bfa_fcport_sm_uninit);
+	bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn);
+
+	/**
+	 * initialize time stamp for stats reset
+	 */
+	bfa_os_gettimeofday(&tv);
+	fcport->stats_reset_time = tv.tv_sec;
+
+	/**
+	 * initialize and set default configuration
+	 */
+	port_cfg->topology = BFA_PORT_TOPOLOGY_P2P;
+	port_cfg->speed = BFA_PORT_SPEED_AUTO;
+	port_cfg->trunked = BFA_FALSE;
+	port_cfg->maxfrsize = 0;
+
+	port_cfg->trl_def_speed = BFA_PORT_SPEED_1GBPS;
+
+	bfa_reqq_winit(&fcport->reqq_wait, bfa_fcport_qresume, fcport);
+}
+
+static void
+bfa_fcport_detach(struct bfa_s *bfa)
+{
+}
+
+/**
+ * Called when IOC is ready.
+ */
+static void
+bfa_fcport_start(struct bfa_s *bfa)
+{
+	bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_START);
+}
+
+/**
+ * Called before IOC is stopped.
+ */
+static void
+bfa_fcport_stop(struct bfa_s *bfa)
+{
+	bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_STOP);
+	bfa_trunk_iocdisable(bfa);
+}
+
+/**
+ * Called when IOC failure is detected.
+ */
+static void
+bfa_fcport_iocdisable(struct bfa_s *bfa)
+{
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+
+	bfa_sm_send_event(fcport, BFA_FCPORT_SM_HWFAIL);
+	bfa_trunk_iocdisable(bfa);
+}
+
+static void
+bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport)
+{
+	struct bfi_fcport_event_s *pevent = fcport->event_arg.i2hmsg.event;
+	struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
+
+	fcport->speed = pevent->link_state.speed;
+	fcport->topology = pevent->link_state.topology;
+
+	if (fcport->topology == BFA_PORT_TOPOLOGY_LOOP)
+		fcport->myalpa = 0;
+
+	/* QoS Details */
+	bfa_os_assign(fcport->qos_attr, pevent->link_state.qos_attr);
+	bfa_os_assign(fcport->qos_vc_attr,
+		pevent->link_state.vc_fcf.qos_vc_attr);
+
+	/**
+	 * update trunk state if applicable
+	 */
+	if (!fcport->cfg.trunked)
+		trunk->attr.state = BFA_TRUNK_DISABLED;
+
+	/* update FCoE specific */
+	fcport->fcoe_vlan = bfa_os_ntohs(pevent->link_state.vc_fcf.fcf.vlan);
+
+	bfa_trc(fcport->bfa, fcport->speed);
+	bfa_trc(fcport->bfa, fcport->topology);
+}
+
+static void
+bfa_fcport_reset_linkinfo(struct bfa_fcport_s *fcport)
+{
+	fcport->speed = BFA_PORT_SPEED_UNKNOWN;
+	fcport->topology = BFA_PORT_TOPOLOGY_NONE;
+}
+
+/**
+ * Send port enable message to firmware.
+ */
+static bfa_boolean_t
+bfa_fcport_send_enable(struct bfa_fcport_s *fcport)
+{
+	struct bfi_fcport_enable_req_s *m;
+
+	/**
+	 * Increment message tag before queue check, so that responses to old
+	 * requests are discarded.
+	 */
+	fcport->msgtag++;
+
+	/**
+	 * check for room in queue to send request now
+	 */
+	m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
+	if (!m) {
+		bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
+							&fcport->reqq_wait);
+		return BFA_FALSE;
+	}
+
+	bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_ENABLE_REQ,
+			bfa_lpuid(fcport->bfa));
+	m->nwwn = fcport->nwwn;
+	m->pwwn = fcport->pwwn;
+	m->port_cfg = fcport->cfg;
+	m->msgtag = fcport->msgtag;
+	m->port_cfg.maxfrsize = bfa_os_htons(fcport->cfg.maxfrsize);
+	bfa_dma_be_addr_set(m->stats_dma_addr, fcport->stats_pa);
+	bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_lo);
+	bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_hi);
+
+	/**
+	 * queue I/O message to firmware
+	 */
+	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
+	return BFA_TRUE;
+}
+
+/**
+ * Send port disable message to firmware.
+ */
+static bfa_boolean_t
+bfa_fcport_send_disable(struct bfa_fcport_s *fcport)
+{
+	struct bfi_fcport_req_s *m;
+
+	/**
+	 * Increment message tag before queue check, so that responses to old
+	 * requests are discarded.
+	 */
+	fcport->msgtag++;
+
+	/**
+	 * check for room in queue to send request now
+	 */
+	m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
+	if (!m) {
+		bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
+							&fcport->reqq_wait);
+		return BFA_FALSE;
+	}
+
+	bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_DISABLE_REQ,
+			bfa_lpuid(fcport->bfa));
+	m->msgtag = fcport->msgtag;
+
+	/**
+	 * queue I/O message to firmware
+	 */
+	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
+
+	return BFA_TRUE;
+}
+
+static void
+bfa_fcport_set_wwns(struct bfa_fcport_s *fcport)
+{
+	fcport->pwwn = bfa_ioc_get_pwwn(&fcport->bfa->ioc);
+	fcport->nwwn = bfa_ioc_get_nwwn(&fcport->bfa->ioc);
+
+	bfa_trc(fcport->bfa, fcport->pwwn);
+	bfa_trc(fcport->bfa, fcport->nwwn);
+}
+
+static void
+bfa_fcport_send_txcredit(void *port_cbarg)
+{
+	struct bfa_fcport_s *fcport = port_cbarg;
+	struct bfi_fcport_set_svc_params_req_s *m;
+
+	/**
+	 * check for room in queue to send request now
+	 */
+	m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
+	if (!m) {
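+		/*
+		 * No request queue entry available; the credit update is
+		 * dropped and only a trace is left behind.
+		 */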
+		bfa_trc(fcport->bfa, fcport->cfg.tx_bbcredit);
+		return;
+	}
+
+	bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_SET_SVC_PARAMS_REQ,
+			bfa_lpuid(fcport->bfa));
+	m->tx_bbcredit = bfa_os_htons((u16)fcport->cfg.tx_bbcredit);
+
+	/**
+	 * queue I/O message to firmware
+	 */
+	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
+}
+
+static void
+bfa_fcport_qos_stats_swap(struct bfa_qos_stats_s *d,
+	struct bfa_qos_stats_s *s)
+{
+	u32	*dip = (u32 *) d;
+	u32	*sip = (u32 *) s;
+	int		i;
+
+	/* Now swap the 32 bit fields */
+	for (i = 0; i < (sizeof(struct bfa_qos_stats_s)/sizeof(u32)); ++i)
+		dip[i] = bfa_os_ntohl(sip[i]);
+}
+
+static void
+bfa_fcport_fcoe_stats_swap(struct bfa_fcoe_stats_s *d,
+	struct bfa_fcoe_stats_s *s)
+{
+	u32	*dip = (u32 *) d;
+	u32	*sip = (u32 *) s;
+	int		i;
+
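+	/*
+	 * FCoE counters are 64 bit wide: byte swap each 32 bit word and,
+	 * on little endian hosts, also swap the two words of each counter.
+	 */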
+	for (i = 0; i < ((sizeof(struct bfa_fcoe_stats_s))/sizeof(u32));
+	     i = i + 2) {
+#ifdef __BIGENDIAN
+		dip[i] = bfa_os_ntohl(sip[i]);
+		dip[i + 1] = bfa_os_ntohl(sip[i + 1]);
+#else
+		dip[i] = bfa_os_ntohl(sip[i + 1]);
+		dip[i + 1] = bfa_os_ntohl(sip[i]);
+#endif
+	}
+}
+
+static void
+__bfa_cb_fcport_stats_get(void *cbarg, bfa_boolean_t complete)
+{
+	struct bfa_fcport_s *fcport = cbarg;
+
+	if (complete) {
+		if (fcport->stats_status == BFA_STATUS_OK) {
+			struct bfa_timeval_s tv;
+
+			/* Swap FC QoS or FCoE stats */
+			if (bfa_ioc_get_fcmode(&fcport->bfa->ioc)) {
+				bfa_fcport_qos_stats_swap(
+					&fcport->stats_ret->fcqos,
+					&fcport->stats->fcqos);
+			} else {
+				bfa_fcport_fcoe_stats_swap(
+					&fcport->stats_ret->fcoe,
+					&fcport->stats->fcoe);
+
+				bfa_os_gettimeofday(&tv);
+				fcport->stats_ret->fcoe.secs_reset =
+					tv.tv_sec - fcport->stats_reset_time;
+			}
+		}
+		fcport->stats_cbfn(fcport->stats_cbarg, fcport->stats_status);
+	} else {
+		fcport->stats_busy = BFA_FALSE;
+		fcport->stats_status = BFA_STATUS_OK;
+	}
+}
+
+static void
+bfa_fcport_stats_get_timeout(void *cbarg)
+{
+	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
+
+	bfa_trc(fcport->bfa, fcport->stats_qfull);
+
+	if (fcport->stats_qfull) {
+		bfa_reqq_wcancel(&fcport->stats_reqq_wait);
+		fcport->stats_qfull = BFA_FALSE;
+	}
+
+	fcport->stats_status = BFA_STATUS_ETIMER;
+	bfa_cb_queue(fcport->bfa, &fcport->hcb_qe, __bfa_cb_fcport_stats_get,
+		fcport);
+}
+
+static void
+bfa_fcport_send_stats_get(void *cbarg)
+{
+	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
+	struct bfi_fcport_req_s *msg;
+
+	msg = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
+
+	if (!msg) {
+		fcport->stats_qfull = BFA_TRUE;
+		bfa_reqq_winit(&fcport->stats_reqq_wait,
+				bfa_fcport_send_stats_get, fcport);
+		bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
+				&fcport->stats_reqq_wait);
+		return;
+	}
+	fcport->stats_qfull = BFA_FALSE;
+
+	bfa_os_memset(msg, 0, sizeof(struct bfi_fcport_req_s));
+	bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_GET_REQ,
+			bfa_lpuid(fcport->bfa));
+	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
+}
+
+static void
+__bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete)
+{
+	struct bfa_fcport_s *fcport = cbarg;
+
+	if (complete) {
+		struct bfa_timeval_s tv;
+
+		/**
+		 * re-initialize time stamp for stats reset
+		 */
+		bfa_os_gettimeofday(&tv);
+		fcport->stats_reset_time = tv.tv_sec;
+
+		fcport->stats_cbfn(fcport->stats_cbarg, fcport->stats_status);
+	} else {
+		fcport->stats_busy = BFA_FALSE;
+		fcport->stats_status = BFA_STATUS_OK;
+	}
+}
+
+static void
+bfa_fcport_stats_clr_timeout(void *cbarg)
+{
+	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
+
+	bfa_trc(fcport->bfa, fcport->stats_qfull);
+
+	if (fcport->stats_qfull) {
+		bfa_reqq_wcancel(&fcport->stats_reqq_wait);
+		fcport->stats_qfull = BFA_FALSE;
+	}
+
+	fcport->stats_status = BFA_STATUS_ETIMER;
+	bfa_cb_queue(fcport->bfa, &fcport->hcb_qe,
+			__bfa_cb_fcport_stats_clr, fcport);
+}
+
+static void
+bfa_fcport_send_stats_clear(void *cbarg)
+{
+	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
+	struct bfi_fcport_req_s *msg;
+
+	msg = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
+
+	if (!msg) {
+		fcport->stats_qfull = BFA_TRUE;
+		bfa_reqq_winit(&fcport->stats_reqq_wait,
+				bfa_fcport_send_stats_clear, fcport);
+		bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
+						&fcport->stats_reqq_wait);
+		return;
+	}
+	fcport->stats_qfull = BFA_FALSE;
+
+	bfa_os_memset(msg, 0, sizeof(struct bfi_fcport_req_s));
+	bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_CLEAR_REQ,
+			bfa_lpuid(fcport->bfa));
+	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
+}
+
+/**
+ * Handle trunk SCN event from firmware.
+ */
+static void
+bfa_trunk_scn(struct bfa_fcport_s *fcport, struct bfi_fcport_trunk_scn_s *scn)
+{
+	struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
+	struct bfi_fcport_trunk_link_s *tlink;
+	struct bfa_trunk_link_attr_s *lattr;
+	enum bfa_trunk_state state_prev;
+	int i;
+	int link_bm = 0;
+
+	bfa_trc(fcport->bfa, fcport->cfg.trunked);
+	bfa_assert(scn->trunk_state == BFA_TRUNK_ONLINE ||
+		   scn->trunk_state == BFA_TRUNK_OFFLINE);
+
+	bfa_trc(fcport->bfa, trunk->attr.state);
+	bfa_trc(fcport->bfa, scn->trunk_state);
+	bfa_trc(fcport->bfa, scn->trunk_speed);
+
+	/**
+	 * Save off new state for trunk attribute query
+	 */
+	state_prev = trunk->attr.state;
+	if (fcport->cfg.trunked && (trunk->attr.state != BFA_TRUNK_DISABLED))
+		trunk->attr.state = scn->trunk_state;
+	trunk->attr.speed = scn->trunk_speed;
+	for (i = 0; i < BFA_TRUNK_MAX_PORTS; i++) {
+		lattr = &trunk->attr.link_attr[i];
+		tlink = &scn->tlink[i];
+
+		lattr->link_state = tlink->state;
+		lattr->trunk_wwn  = tlink->trunk_wwn;
+		lattr->fctl	  = tlink->fctl;
+		lattr->speed	  = tlink->speed;
+		lattr->deskew	  = bfa_os_ntohl(tlink->deskew);
+
+		if (tlink->state == BFA_TRUNK_LINK_STATE_UP) {
+			fcport->speed	 = tlink->speed;
+			fcport->topology = BFA_PORT_TOPOLOGY_P2P;
+			link_bm |= 1 << i;
+		}
+
+		bfa_trc(fcport->bfa, lattr->link_state);
+		bfa_trc(fcport->bfa, lattr->trunk_wwn);
+		bfa_trc(fcport->bfa, lattr->fctl);
+		bfa_trc(fcport->bfa, lattr->speed);
+		bfa_trc(fcport->bfa, lattr->deskew);
+	}
+
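+	/*
+	 * link_bm has bit i set for every trunk link i that is up; log
+	 * which combination of the two trunk links is active.
+	 */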
+	switch (link_bm) {
+	case 3:
+		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
+			BFA_PL_EID_TRUNK_SCN, 0, "Trunk up(0,1)");
+		break;
+	case 2:
+		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
+			BFA_PL_EID_TRUNK_SCN, 0, "Trunk up(-,1)");
+		break;
+	case 1:
+		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
+			BFA_PL_EID_TRUNK_SCN, 0, "Trunk up(0,-)");
+		break;
+	default:
+		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
+			BFA_PL_EID_TRUNK_SCN, 0, "Trunk down");
+	}
+
+	/**
+	 * Notify upper layers if trunk state changed.
+	 */
+	if ((state_prev != trunk->attr.state) ||
+		(scn->trunk_state == BFA_TRUNK_OFFLINE)) {
+		bfa_fcport_scn(fcport, (scn->trunk_state == BFA_TRUNK_ONLINE) ?
+			BFA_PORT_LINKUP : BFA_PORT_LINKDOWN, BFA_TRUE);
+	}
+}
+
+static void
+bfa_trunk_iocdisable(struct bfa_s *bfa)
+{
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+	int i = 0;
+
+	/**
+	 * In trunked mode, notify upper layers that link is down
+	 */
+	if (fcport->cfg.trunked) {
+		if (fcport->trunk.attr.state == BFA_TRUNK_ONLINE)
+			bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_TRUE);
+
+		fcport->trunk.attr.state = BFA_TRUNK_OFFLINE;
+		fcport->trunk.attr.speed = BFA_PORT_SPEED_UNKNOWN;
+		for (i = 0; i < BFA_TRUNK_MAX_PORTS; i++) {
+			fcport->trunk.attr.link_attr[i].trunk_wwn = 0;
+			fcport->trunk.attr.link_attr[i].fctl =
+						BFA_TRUNK_LINK_FCTL_NORMAL;
+			fcport->trunk.attr.link_attr[i].link_state =
+						BFA_TRUNK_LINK_STATE_DN_LINKDN;
+			fcport->trunk.attr.link_attr[i].speed =
+						BFA_PORT_SPEED_UNKNOWN;
+			fcport->trunk.attr.link_attr[i].deskew = 0;
+		}
+	}
+}
+
+
+
+/**
+ *  hal_port_public
+ */
+
+/**
+ * Called to initialize port attributes
+ */
+void
+bfa_fcport_init(struct bfa_s *bfa)
+{
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+
+	/**
+	 * Initialize port attributes from IOC hardware data.
+	 */
+	bfa_fcport_set_wwns(fcport);
+	if (fcport->cfg.maxfrsize == 0)
+		fcport->cfg.maxfrsize = bfa_ioc_maxfrsize(&bfa->ioc);
+	fcport->cfg.rx_bbcredit = bfa_ioc_rx_bbcredit(&bfa->ioc);
+	fcport->speed_sup = bfa_ioc_speed_sup(&bfa->ioc);
+
+	bfa_assert(fcport->cfg.maxfrsize);
+	bfa_assert(fcport->cfg.rx_bbcredit);
+	bfa_assert(fcport->speed_sup);
+}
+
+/**
+ * Firmware message handler.
+ */
+void
+bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
+{
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+	union bfi_fcport_i2h_msg_u i2hmsg;
+
+	i2hmsg.msg = msg;
+	fcport->event_arg.i2hmsg = i2hmsg;
+
+	bfa_trc(bfa, msg->mhdr.msg_id);
+	bfa_trc(bfa, bfa_sm_to_state(hal_port_sm_table, fcport->sm));
+
+	switch (msg->mhdr.msg_id) {
+	case BFI_FCPORT_I2H_ENABLE_RSP:
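+		/*
+		 * Responses carrying a stale message tag are ignored.
+		 */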
+		if (fcport->msgtag == i2hmsg.penable_rsp->msgtag)
+			bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP);
+		break;
+
+	case BFI_FCPORT_I2H_DISABLE_RSP:
+		if (fcport->msgtag == i2hmsg.penable_rsp->msgtag)
+			bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP);
+		break;
+
+	case BFI_FCPORT_I2H_EVENT:
+		if (i2hmsg.event->link_state.linkstate == BFA_PORT_LINKUP)
+			bfa_sm_send_event(fcport, BFA_FCPORT_SM_LINKUP);
+		else
+			bfa_sm_send_event(fcport, BFA_FCPORT_SM_LINKDOWN);
+		break;
+
+	case BFI_FCPORT_I2H_TRUNK_SCN:
+		bfa_trunk_scn(fcport, i2hmsg.trunk_scn);
+		break;
+
+	case BFI_FCPORT_I2H_STATS_GET_RSP:
+		/*
+		 * check for timer pop before processing the rsp
+		 */
+		if (fcport->stats_busy == BFA_FALSE ||
+		    fcport->stats_status == BFA_STATUS_ETIMER)
+			break;
+
+		bfa_timer_stop(&fcport->timer);
+		fcport->stats_status = i2hmsg.pstatsget_rsp->status;
+		bfa_cb_queue(fcport->bfa, &fcport->hcb_qe,
+				__bfa_cb_fcport_stats_get, fcport);
+		break;
+
+	case BFI_FCPORT_I2H_STATS_CLEAR_RSP:
+		/*
+		 * check for timer pop before processing the rsp
+		 */
+		if (fcport->stats_busy == BFA_FALSE ||
+		    fcport->stats_status == BFA_STATUS_ETIMER)
+			break;
+
+		bfa_timer_stop(&fcport->timer);
+		fcport->stats_status = BFA_STATUS_OK;
+		bfa_cb_queue(fcport->bfa, &fcport->hcb_qe,
+				__bfa_cb_fcport_stats_clr, fcport);
+		break;
+
+	case BFI_FCPORT_I2H_ENABLE_AEN:
+		bfa_sm_send_event(fcport, BFA_FCPORT_SM_ENABLE);
+		break;
+
+	case BFI_FCPORT_I2H_DISABLE_AEN:
+		bfa_sm_send_event(fcport, BFA_FCPORT_SM_DISABLE);
+		break;
+
+	default:
+		bfa_assert(0);
+		break;
+	}
+}
+
+
+
+/**
+ *  hal_port_api
+ */
+
+/**
+ * Registered callback for port events.
+ */
+void
+bfa_fcport_event_register(struct bfa_s *bfa,
+				void (*cbfn) (void *cbarg,
+				enum bfa_port_linkstate event),
+				void *cbarg)
+{
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+
+	fcport->event_cbfn = cbfn;
+	fcport->event_cbarg = cbarg;
+}
+
+bfa_status_t
+bfa_fcport_enable(struct bfa_s *bfa)
+{
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+
+	if (bfa_ioc_is_disabled(&bfa->ioc))
+		return BFA_STATUS_IOC_DISABLED;
+
+	if (fcport->diag_busy)
+		return BFA_STATUS_DIAG_BUSY;
+
+	bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_ENABLE);
+	return BFA_STATUS_OK;
+}
+
+bfa_status_t
+bfa_fcport_disable(struct bfa_s *bfa)
+{
+	if (bfa_ioc_is_disabled(&bfa->ioc))
+		return BFA_STATUS_IOC_DISABLED;
+
+	bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DISABLE);
+	return BFA_STATUS_OK;
+}
+
+/**
+ * Configure port speed.
+ */
+bfa_status_t
+bfa_fcport_cfg_speed(struct bfa_s *bfa, enum bfa_port_speed speed)
+{
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+
+	bfa_trc(bfa, speed);
+
+	if (fcport->cfg.trunked == BFA_TRUE)
+		return BFA_STATUS_TRUNK_ENABLED;
+	if ((speed != BFA_PORT_SPEED_AUTO) && (speed > fcport->speed_sup)) {
+		bfa_trc(bfa, fcport->speed_sup);
+		return BFA_STATUS_UNSUPP_SPEED;
+	}
+
+	fcport->cfg.speed = speed;
+
+	return BFA_STATUS_OK;
+}
+
+/**
+ * Get current speed.
+ */
+enum bfa_port_speed
+bfa_fcport_get_speed(struct bfa_s *bfa)
+{
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+
+	return fcport->speed;
+}
+
+/**
+ * Configure port topology.
+ */
+bfa_status_t
+bfa_fcport_cfg_topology(struct bfa_s *bfa, enum bfa_port_topology topology)
+{
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+
+	bfa_trc(bfa, topology);
+	bfa_trc(bfa, fcport->cfg.topology);
+
+	switch (topology) {
+	case BFA_PORT_TOPOLOGY_P2P:
+	case BFA_PORT_TOPOLOGY_LOOP:
+	case BFA_PORT_TOPOLOGY_AUTO:
+		break;
+
+	default:
+		return BFA_STATUS_EINVAL;
+	}
+
+	fcport->cfg.topology = topology;
+	return BFA_STATUS_OK;
+}
+
+/**
+ * Get current topology.
+ */
+enum bfa_port_topology
+bfa_fcport_get_topology(struct bfa_s *bfa)
+{
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+
+	return fcport->topology;
+}
+
+bfa_status_t
+bfa_fcport_cfg_hardalpa(struct bfa_s *bfa, u8 alpa)
+{
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+
+	bfa_trc(bfa, alpa);
+	bfa_trc(bfa, fcport->cfg.cfg_hardalpa);
+	bfa_trc(bfa, fcport->cfg.hardalpa);
+
+	fcport->cfg.cfg_hardalpa = BFA_TRUE;
+	fcport->cfg.hardalpa = alpa;
+
+	return BFA_STATUS_OK;
+}
+
+bfa_status_t
+bfa_fcport_clr_hardalpa(struct bfa_s *bfa)
+{
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+
+	bfa_trc(bfa, fcport->cfg.cfg_hardalpa);
+	bfa_trc(bfa, fcport->cfg.hardalpa);
+
+	fcport->cfg.cfg_hardalpa = BFA_FALSE;
+	return BFA_STATUS_OK;
+}
+
+bfa_boolean_t
+bfa_fcport_get_hardalpa(struct bfa_s *bfa, u8 *alpa)
+{
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+
+	*alpa = fcport->cfg.hardalpa;
+	return fcport->cfg.cfg_hardalpa;
+}
+
+u8
+bfa_fcport_get_myalpa(struct bfa_s *bfa)
+{
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+
+	return fcport->myalpa;
+}
+
+bfa_status_t
+bfa_fcport_cfg_maxfrsize(struct bfa_s *bfa, u16 maxfrsize)
+{
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+
+	bfa_trc(bfa, maxfrsize);
+	bfa_trc(bfa, fcport->cfg.maxfrsize);
+
+	/* must be within the valid range */
+	if ((maxfrsize > FC_MAX_PDUSZ) || (maxfrsize < FC_MIN_PDUSZ))
+		return BFA_STATUS_INVLD_DFSZ;
+
+	/* must be a power of 2, unless it equals the maximum frame size of 2112 */
+	if ((maxfrsize != FC_MAX_PDUSZ) && (maxfrsize & (maxfrsize - 1)))
+		return BFA_STATUS_INVLD_DFSZ;
+
+	fcport->cfg.maxfrsize = maxfrsize;
+	return BFA_STATUS_OK;
+}
+
+u16
+bfa_fcport_get_maxfrsize(struct bfa_s *bfa)
+{
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+
+	return fcport->cfg.maxfrsize;
+}
+
+u8
+bfa_fcport_get_rx_bbcredit(struct bfa_s *bfa)
+{
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+
+	return fcport->cfg.rx_bbcredit;
+}
+
+void
+bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit)
+{
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+
+	fcport->cfg.tx_bbcredit = (u8)tx_bbcredit;
+	bfa_fcport_send_txcredit(fcport);
+}
+
+/**
+ * Get port attributes.
+ */
+
+wwn_t
+bfa_fcport_get_wwn(struct bfa_s *bfa, bfa_boolean_t node)
+{
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+	if (node)
+		return fcport->nwwn;
+	else
+		return fcport->pwwn;
+}
+
+void
+bfa_fcport_get_attr(struct bfa_s *bfa, struct bfa_port_attr_s *attr)
+{
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+
+	bfa_os_memset(attr, 0, sizeof(struct bfa_port_attr_s));
+
+	attr->nwwn = fcport->nwwn;
+	attr->pwwn = fcport->pwwn;
+
+	attr->factorypwwn =  bfa_ioc_get_mfg_pwwn(&bfa->ioc);
+	attr->factorynwwn =  bfa_ioc_get_mfg_nwwn(&bfa->ioc);
+
+	bfa_os_memcpy(&attr->pport_cfg, &fcport->cfg,
+		sizeof(struct bfa_port_cfg_s));
+	/* speed attributes */
+	attr->pport_cfg.speed = fcport->cfg.speed;
+	attr->speed_supported = fcport->speed_sup;
+	attr->speed = fcport->speed;
+	attr->cos_supported = FC_CLASS_3;
+
+	/* topology attributes */
+	attr->pport_cfg.topology = fcport->cfg.topology;
+	attr->topology = fcport->topology;
+	attr->pport_cfg.trunked = fcport->cfg.trunked;
+
+	/* beacon attributes */
+	attr->beacon = fcport->beacon;
+	attr->link_e2e_beacon = fcport->link_e2e_beacon;
+	attr->plog_enabled = bfa_plog_get_setting(fcport->bfa->plog);
+	attr->io_profile = bfa_fcpim_get_io_profile(fcport->bfa);
+
+	attr->pport_cfg.path_tov  = bfa_fcpim_path_tov_get(bfa);
+	attr->pport_cfg.q_depth  = bfa_fcpim_qdepth_get(bfa);
+	attr->port_state = bfa_sm_to_state(hal_port_sm_table, fcport->sm);
+	if (bfa_ioc_is_disabled(&fcport->bfa->ioc))
+		attr->port_state = BFA_PORT_ST_IOCDIS;
+	else if (bfa_ioc_fw_mismatch(&fcport->bfa->ioc))
+		attr->port_state = BFA_PORT_ST_FWMISMATCH;
+
+	/* FCoE vlan */
+	attr->fcoe_vlan = fcport->fcoe_vlan;
+}
+
+#define BFA_FCPORT_STATS_TOV	1000
+
+/**
+ * Fetch port statistics (FCQoS or FCoE).
+ */
+bfa_status_t
+bfa_fcport_get_stats(struct bfa_s *bfa, union bfa_fcport_stats_u *stats,
+	bfa_cb_port_t cbfn, void *cbarg)
+{
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+
+	if (fcport->stats_busy) {
+		bfa_trc(bfa, fcport->stats_busy);
+		return BFA_STATUS_DEVBUSY;
+	}
+
+	fcport->stats_busy  = BFA_TRUE;
+	fcport->stats_ret   = stats;
+	fcport->stats_cbfn  = cbfn;
+	fcport->stats_cbarg = cbarg;
+
+	bfa_fcport_send_stats_get(fcport);
+
+	bfa_timer_start(bfa, &fcport->timer, bfa_fcport_stats_get_timeout,
+			fcport, BFA_FCPORT_STATS_TOV);
+	return BFA_STATUS_OK;
+}
+
+/**
+ * Reset port statistics (FCQoS or FCoE).
+ */
+bfa_status_t
+bfa_fcport_clear_stats(struct bfa_s *bfa, bfa_cb_port_t cbfn, void *cbarg)
+{
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+
+	if (fcport->stats_busy) {
+		bfa_trc(bfa, fcport->stats_busy);
+		return BFA_STATUS_DEVBUSY;
+	}
+
+	fcport->stats_busy  = BFA_TRUE;
+	fcport->stats_cbfn  = cbfn;
+	fcport->stats_cbarg = cbarg;
+
+	bfa_fcport_send_stats_clear(fcport);
+
+	bfa_timer_start(bfa, &fcport->timer, bfa_fcport_stats_clr_timeout,
+			fcport, BFA_FCPORT_STATS_TOV);
+	return BFA_STATUS_OK;
+}
+
+/**
+ * Fetch FCQoS port statistics
+ */
+bfa_status_t
+bfa_fcport_get_qos_stats(struct bfa_s *bfa, union bfa_fcport_stats_u *stats,
+	bfa_cb_port_t cbfn, void *cbarg)
+{
+	/* Meaningful only for FC mode */
+	bfa_assert(bfa_ioc_get_fcmode(&bfa->ioc));
+
+	return bfa_fcport_get_stats(bfa, stats, cbfn, cbarg);
+}
+
+/**
+ * Reset FCQoS port statistics
+ */
+bfa_status_t
+bfa_fcport_clear_qos_stats(struct bfa_s *bfa, bfa_cb_port_t cbfn, void *cbarg)
+{
+	/* Meaningful only for FC mode */
+	bfa_assert(bfa_ioc_get_fcmode(&bfa->ioc));
+
+	return bfa_fcport_clear_stats(bfa, cbfn, cbarg);
+}
+
+/**
+ * Fetch FCoE port statistics
+ */
+bfa_status_t
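+	/*
+	 * With FCS attached the event is delivered synchronously;
+	 * otherwise it is queued and delivered from the completion
+	 * callback.
+	 */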
+bfa_fcport_get_fcoe_stats(struct bfa_s *bfa, union bfa_fcport_stats_u *stats,
+	bfa_cb_port_t cbfn, void *cbarg)
+{
+	/* Meaningful only for FCoE mode */
+	bfa_assert(!bfa_ioc_get_fcmode(&bfa->ioc));
+
+	return bfa_fcport_get_stats(bfa, stats, cbfn, cbarg);
+}
+
+/**
+ * Reset FCoE port statistics
+ */
+bfa_status_t
+bfa_fcport_clear_fcoe_stats(struct bfa_s *bfa, bfa_cb_port_t cbfn, void *cbarg)
+{
+	/* Meaningful only for FCoE mode */
+	bfa_assert(!bfa_ioc_get_fcmode(&bfa->ioc));
+
+	return bfa_fcport_clear_stats(bfa, cbfn, cbarg);
+}
+
+void
+bfa_fcport_qos_get_attr(struct bfa_s *bfa, struct bfa_qos_attr_s *qos_attr)
+{
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+
+	qos_attr->state = fcport->qos_attr.state;
+	qos_attr->total_bb_cr = bfa_os_ntohl(fcport->qos_attr.total_bb_cr);
+}
+
+void
+bfa_fcport_qos_get_vc_attr(struct bfa_s *bfa,
+	struct bfa_qos_vc_attr_s *qos_vc_attr)
+{
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+	struct bfa_qos_vc_attr_s *bfa_vc_attr = &fcport->qos_vc_attr;
+	u32 i = 0;
+
+	qos_vc_attr->total_vc_count = bfa_os_ntohs(bfa_vc_attr->total_vc_count);
+	qos_vc_attr->shared_credit  = bfa_os_ntohs(bfa_vc_attr->shared_credit);
+	qos_vc_attr->elp_opmode_flags  =
+			bfa_os_ntohl(bfa_vc_attr->elp_opmode_flags);
+
+	/* Individual VC info */
+	while (i < qos_vc_attr->total_vc_count) {
+		qos_vc_attr->vc_info[i].vc_credit	=
+				bfa_vc_attr->vc_info[i].vc_credit;
+		qos_vc_attr->vc_info[i].borrow_credit	=
+				bfa_vc_attr->vc_info[i].borrow_credit;
+		qos_vc_attr->vc_info[i].priority	=
+				bfa_vc_attr->vc_info[i].priority;
+		++i;
+	}
+}
+
+/**
+ * Check whether the port is in the disabled state.
+ */
+bfa_boolean_t
+bfa_fcport_is_disabled(struct bfa_s *bfa)
+{
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+
+	return bfa_sm_to_state(hal_port_sm_table, fcport->sm) ==
+		BFA_PORT_ST_DISABLED;
+}
+
+bfa_boolean_t
+bfa_fcport_is_ratelim(struct bfa_s *bfa)
+{
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+
+	return fcport->cfg.ratelimit ? BFA_TRUE : BFA_FALSE;
+}
+
+void
+bfa_fcport_cfg_qos(struct bfa_s *bfa, bfa_boolean_t on_off)
+{
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+	enum bfa_ioc_type_e ioc_type = bfa_get_type(bfa);
+
+	bfa_trc(bfa, on_off);
+	bfa_trc(bfa, fcport->cfg.qos_enabled);
+
+	bfa_trc(bfa, ioc_type);
+
+	if (ioc_type == BFA_IOC_TYPE_FC) {
+		fcport->cfg.qos_enabled = on_off;
+		/**
+		 * Notify fcpim of the change in QoS state
+		 */
+		bfa_fcpim_update_ioredirect(bfa);
+	}
+}
+
+void
+bfa_fcport_cfg_ratelim(struct bfa_s *bfa, bfa_boolean_t on_off)
+{
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+
+	bfa_trc(bfa, on_off);
+	bfa_trc(bfa, fcport->cfg.ratelimit);
+
+	fcport->cfg.ratelimit = on_off;
+	if (fcport->cfg.trl_def_speed == BFA_PORT_SPEED_UNKNOWN)
+		fcport->cfg.trl_def_speed = BFA_PORT_SPEED_1GBPS;
+}
+
+/**
+ * Configure default minimum ratelim speed
+ */
+bfa_status_t
+bfa_fcport_cfg_ratelim_speed(struct bfa_s *bfa, enum bfa_port_speed speed)
+{
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+
+	bfa_trc(bfa, speed);
+
+	/* Auto and speeds greater than the supported speed are invalid */
+	if ((speed == BFA_PORT_SPEED_AUTO) || (speed > fcport->speed_sup)) {
+		bfa_trc(bfa, fcport->speed_sup);
+		return BFA_STATUS_UNSUPP_SPEED;
+	}
+
+	fcport->cfg.trl_def_speed = speed;
+
+	return BFA_STATUS_OK;
+}
+
+/**
+ * Get default minimum ratelim speed
+ */
+enum bfa_port_speed
+bfa_fcport_get_ratelim_speed(struct bfa_s *bfa)
+{
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+
+	bfa_trc(bfa, fcport->cfg.trl_def_speed);
+	return fcport->cfg.trl_def_speed;
+}
+
+void
+bfa_fcport_busy(struct bfa_s *bfa, bfa_boolean_t status)
+{
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+
+	bfa_trc(bfa, status);
+	bfa_trc(bfa, fcport->diag_busy);
+
+	fcport->diag_busy = status;
+}
+
+void
+bfa_fcport_beacon(void *dev, bfa_boolean_t beacon,
+	bfa_boolean_t link_e2e_beacon)
+{
+	struct bfa_s *bfa = dev;
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+
+	bfa_trc(bfa, beacon);
+	bfa_trc(bfa, link_e2e_beacon);
+	bfa_trc(bfa, fcport->beacon);
+	bfa_trc(bfa, fcport->link_e2e_beacon);
+
+	fcport->beacon = beacon;
+	fcport->link_e2e_beacon = link_e2e_beacon;
+}
+
+bfa_boolean_t
+bfa_fcport_is_linkup(struct bfa_s *bfa)
+{
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+
+	return	(!fcport->cfg.trunked &&
+		 bfa_sm_cmp_state(fcport, bfa_fcport_sm_linkup)) ||
+		(fcport->cfg.trunked &&
+		 fcport->trunk.attr.state == BFA_TRUNK_ONLINE);
+}
+
+bfa_boolean_t
+bfa_fcport_is_qos_enabled(struct bfa_s *bfa)
+{
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+
+	return fcport->cfg.qos_enabled;
+}
+
+bfa_status_t
+bfa_trunk_get_attr(struct bfa_s *bfa, struct bfa_trunk_attr_s *attr)
+{
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+	struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
+
+	bfa_trc(bfa, fcport->cfg.trunked);
+	bfa_trc(bfa, trunk->attr.state);
+	*attr = trunk->attr;
+	attr->port_id = bfa_lps_get_base_pid(bfa);
+
+	return BFA_STATUS_OK;
+}
+
+void
+bfa_trunk_enable_cfg(struct bfa_s *bfa)
+{
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+	struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
+
+	bfa_trc(bfa, 1);
+	trunk->attr.state = BFA_TRUNK_OFFLINE;
+	fcport->cfg.trunked = BFA_TRUE;
+}
+
+bfa_status_t
+bfa_trunk_enable(struct bfa_s *bfa)
+{
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+	struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
+
+	bfa_trc(bfa, 1);
+
+	trunk->attr.state   = BFA_TRUNK_OFFLINE;
+	bfa_fcport_disable(bfa);
+	fcport->cfg.trunked = BFA_TRUE;
+	bfa_fcport_enable(bfa);
+
+	return BFA_STATUS_OK;
+}
+
+bfa_status_t
+bfa_trunk_disable(struct bfa_s *bfa)
+{
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+	struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
+
+	bfa_trc(bfa, 0);
+	trunk->attr.state   = BFA_TRUNK_DISABLED;
+	bfa_fcport_disable(bfa);
+	fcport->cfg.trunked = BFA_FALSE;
+	bfa_fcport_enable(bfa);
+	return BFA_STATUS_OK;
+}
+
+
+/**
+ * Rport State machine functions
+ */
+/**
+ * Beginning state, only the create event is expected.
+ */
+static void
+bfa_rport_sm_uninit(struct bfa_rport_s *rp, enum bfa_rport_event event)
+{
+	bfa_trc(rp->bfa, rp->rport_tag);
+	bfa_trc(rp->bfa, event);
+
+	switch (event) {
+	case BFA_RPORT_SM_CREATE:
+		bfa_stats(rp, sm_un_cr);
+		bfa_sm_set_state(rp, bfa_rport_sm_created);
+		break;
+
+	default:
+		bfa_stats(rp, sm_un_unexp);
+		bfa_sm_fault(rp->bfa, event);
+	}
+}
+
+static void
+bfa_rport_sm_created(struct bfa_rport_s *rp, enum bfa_rport_event event)
+{
+	bfa_trc(rp->bfa, rp->rport_tag);
+	bfa_trc(rp->bfa, event);
+
+	switch (event) {
+	case BFA_RPORT_SM_ONLINE:
+		bfa_stats(rp, sm_cr_on);
+		if (bfa_rport_send_fwcreate(rp))
+			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
+		else
+			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
+		break;
+
+	case BFA_RPORT_SM_DELETE:
+		bfa_stats(rp, sm_cr_del);
+		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
+		bfa_rport_free(rp);
+		break;
+
+	case BFA_RPORT_SM_HWFAIL:
+		bfa_stats(rp, sm_cr_hwf);
+		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
+		break;
+
+	default:
+		bfa_stats(rp, sm_cr_unexp);
+		bfa_sm_fault(rp->bfa, event);
+	}
+}
+
+/**
+ * Waiting for rport create response from firmware.
+ */
+static void
+bfa_rport_sm_fwcreate(struct bfa_rport_s *rp, enum bfa_rport_event event)
+{
+	bfa_trc(rp->bfa, rp->rport_tag);
+	bfa_trc(rp->bfa, event);
+
+	switch (event) {
+	case BFA_RPORT_SM_FWRSP:
+		bfa_stats(rp, sm_fwc_rsp);
+		bfa_sm_set_state(rp, bfa_rport_sm_online);
+		bfa_rport_online_cb(rp);
+		break;
+
+	case BFA_RPORT_SM_DELETE:
+		bfa_stats(rp, sm_fwc_del);
+		bfa_sm_set_state(rp, bfa_rport_sm_delete_pending);
+		break;
+
+	case BFA_RPORT_SM_OFFLINE:
+		bfa_stats(rp, sm_fwc_off);
+		bfa_sm_set_state(rp, bfa_rport_sm_offline_pending);
+		break;
+
+	case BFA_RPORT_SM_HWFAIL:
+		bfa_stats(rp, sm_fwc_hwf);
+		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
+		break;
+
+	default:
+		bfa_stats(rp, sm_fwc_unexp);
+		bfa_sm_fault(rp->bfa, event);
+	}
+}
+
+/**
+ * Request queue is full, awaiting queue resume to send create request.
+ */
+static void
+bfa_rport_sm_fwcreate_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
+{
+	bfa_trc(rp->bfa, rp->rport_tag);
+	bfa_trc(rp->bfa, event);
+
+	switch (event) {
+	case BFA_RPORT_SM_QRESUME:
+		bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
+		bfa_rport_send_fwcreate(rp);
+		break;
+
+	case BFA_RPORT_SM_DELETE:
+		bfa_stats(rp, sm_fwc_del);
+		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
+		bfa_reqq_wcancel(&rp->reqq_wait);
+		bfa_rport_free(rp);
+		break;
+
+	case BFA_RPORT_SM_OFFLINE:
+		bfa_stats(rp, sm_fwc_off);
+		bfa_sm_set_state(rp, bfa_rport_sm_offline);
+		bfa_reqq_wcancel(&rp->reqq_wait);
+		bfa_rport_offline_cb(rp);
+		break;
+
+	case BFA_RPORT_SM_HWFAIL:
+		bfa_stats(rp, sm_fwc_hwf);
+		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
+		bfa_reqq_wcancel(&rp->reqq_wait);
+		break;
+
+	default:
+		bfa_stats(rp, sm_fwc_unexp);
+		bfa_sm_fault(rp->bfa, event);
+	}
+}
+
+/**
+ * Online state - normal parking state.
+ */
+static void
+bfa_rport_sm_online(struct bfa_rport_s *rp, enum bfa_rport_event event)
+{
+	struct bfi_rport_qos_scn_s *qos_scn;
+
+	bfa_trc(rp->bfa, rp->rport_tag);
+	bfa_trc(rp->bfa, event);
+
+	switch (event) {
+	case BFA_RPORT_SM_OFFLINE:
+		bfa_stats(rp, sm_on_off);
+		if (bfa_rport_send_fwdelete(rp))
+			bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
+		else
+			bfa_sm_set_state(rp, bfa_rport_sm_fwdelete_qfull);
+		break;
+
+	case BFA_RPORT_SM_DELETE:
+		bfa_stats(rp, sm_on_del);
+		if (bfa_rport_send_fwdelete(rp))
+			bfa_sm_set_state(rp, bfa_rport_sm_deleting);
+		else
+			bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
+		break;
+
+	case BFA_RPORT_SM_HWFAIL:
+		bfa_stats(rp, sm_on_hwf);
+		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
+		break;
+
+	case BFA_RPORT_SM_SET_SPEED:
+		bfa_rport_send_fwspeed(rp);
+		break;
+
+	case BFA_RPORT_SM_QOS_SCN:
+		qos_scn = (struct bfi_rport_qos_scn_s *) rp->event_arg.fw_msg;
+		rp->qos_attr = qos_scn->new_qos_attr;
+		bfa_trc(rp->bfa, qos_scn->old_qos_attr.qos_flow_id);
+		bfa_trc(rp->bfa, qos_scn->new_qos_attr.qos_flow_id);
+		bfa_trc(rp->bfa, qos_scn->old_qos_attr.qos_priority);
+		bfa_trc(rp->bfa, qos_scn->new_qos_attr.qos_priority);
+
+		qos_scn->old_qos_attr.qos_flow_id  =
+			bfa_os_ntohl(qos_scn->old_qos_attr.qos_flow_id);
+		qos_scn->new_qos_attr.qos_flow_id  =
+			bfa_os_ntohl(qos_scn->new_qos_attr.qos_flow_id);
+
+		if (qos_scn->old_qos_attr.qos_flow_id !=
+			qos_scn->new_qos_attr.qos_flow_id)
+			bfa_cb_rport_qos_scn_flowid(rp->rport_drv,
+						    qos_scn->old_qos_attr,
+						    qos_scn->new_qos_attr);
+		if (qos_scn->old_qos_attr.qos_priority !=
+			qos_scn->new_qos_attr.qos_priority)
+			bfa_cb_rport_qos_scn_prio(rp->rport_drv,
+						  qos_scn->old_qos_attr,
+						  qos_scn->new_qos_attr);
+		break;
+
+	default:
+		bfa_stats(rp, sm_on_unexp);
+		bfa_sm_fault(rp->bfa, event);
+	}
+}
+
+/**
+ * Firmware rport is being deleted - awaiting f/w response.
+ */
+static void
+bfa_rport_sm_fwdelete(struct bfa_rport_s *rp, enum bfa_rport_event event)
+{
+	bfa_trc(rp->bfa, rp->rport_tag);
+	bfa_trc(rp->bfa, event);
+
+	switch (event) {
+	case BFA_RPORT_SM_FWRSP:
+		bfa_stats(rp, sm_fwd_rsp);
+		bfa_sm_set_state(rp, bfa_rport_sm_offline);
+		bfa_rport_offline_cb(rp);
+		break;
+
+	case BFA_RPORT_SM_DELETE:
+		bfa_stats(rp, sm_fwd_del);
+		bfa_sm_set_state(rp, bfa_rport_sm_deleting);
+		break;
+
+	case BFA_RPORT_SM_HWFAIL:
+		bfa_stats(rp, sm_fwd_hwf);
+		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
+		bfa_rport_offline_cb(rp);
+		break;
+
+	default:
+		bfa_stats(rp, sm_fwd_unexp);
+		bfa_sm_fault(rp->bfa, event);
+	}
+}
+
+static void
+bfa_rport_sm_fwdelete_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
+{
+	bfa_trc(rp->bfa, rp->rport_tag);
+	bfa_trc(rp->bfa, event);
+
+	switch (event) {
+	case BFA_RPORT_SM_QRESUME:
+		bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
+		bfa_rport_send_fwdelete(rp);
+		break;
+
+	case BFA_RPORT_SM_DELETE:
+		bfa_stats(rp, sm_fwd_del);
+		bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
+		break;
+
+	case BFA_RPORT_SM_HWFAIL:
+		bfa_stats(rp, sm_fwd_hwf);
+		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
+		bfa_reqq_wcancel(&rp->reqq_wait);
+		bfa_rport_offline_cb(rp);
+		break;
+
+	default:
+		bfa_stats(rp, sm_fwd_unexp);
+		bfa_sm_fault(rp->bfa, event);
+	}
+}
+
+/**
+ * Offline state.
+ */
+static void
+bfa_rport_sm_offline(struct bfa_rport_s *rp, enum bfa_rport_event event)
+{
+	bfa_trc(rp->bfa, rp->rport_tag);
+	bfa_trc(rp->bfa, event);
+
+	switch (event) {
+	case BFA_RPORT_SM_DELETE:
+		bfa_stats(rp, sm_off_del);
+		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
+		bfa_rport_free(rp);
+		break;
+
+	case BFA_RPORT_SM_ONLINE:
+		bfa_stats(rp, sm_off_on);
+		if (bfa_rport_send_fwcreate(rp))
+			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
+		else
+			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
+		break;
+
+	case BFA_RPORT_SM_HWFAIL:
+		bfa_stats(rp, sm_off_hwf);
+		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
+		break;
+
+	default:
+		bfa_stats(rp, sm_off_unexp);
+		bfa_sm_fault(rp->bfa, event);
+	}
+}
+
+/**
+ * Rport is deleted, waiting for firmware response to delete.
+ */
+static void
+bfa_rport_sm_deleting(struct bfa_rport_s *rp, enum bfa_rport_event event)
+{
+	bfa_trc(rp->bfa, rp->rport_tag);
+	bfa_trc(rp->bfa, event);
+
+	switch (event) {
+	case BFA_RPORT_SM_FWRSP:
+		bfa_stats(rp, sm_del_fwrsp);
+		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
+		bfa_rport_free(rp);
+		break;
+
+	case BFA_RPORT_SM_HWFAIL:
+		bfa_stats(rp, sm_del_hwf);
+		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
+		bfa_rport_free(rp);
+		break;
+
+	default:
+		bfa_sm_fault(rp->bfa, event);
+	}
+}
+
+static void
+bfa_rport_sm_deleting_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
+{
+	bfa_trc(rp->bfa, rp->rport_tag);
+	bfa_trc(rp->bfa, event);
+
+	switch (event) {
+	case BFA_RPORT_SM_QRESUME:
+		bfa_stats(rp, sm_del_fwrsp);
+		bfa_sm_set_state(rp, bfa_rport_sm_deleting);
+		bfa_rport_send_fwdelete(rp);
+		break;
+
+	case BFA_RPORT_SM_HWFAIL:
+		bfa_stats(rp, sm_del_hwf);
+		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
+		bfa_reqq_wcancel(&rp->reqq_wait);
+		bfa_rport_free(rp);
+		break;
+
+	default:
+		bfa_sm_fault(rp->bfa, event);
+	}
+}
+
+/**
+ * Waiting for rport create response from firmware. A delete is pending.
+ */
+static void
+bfa_rport_sm_delete_pending(struct bfa_rport_s *rp,
+				enum bfa_rport_event event)
+{
+	bfa_trc(rp->bfa, rp->rport_tag);
+	bfa_trc(rp->bfa, event);
+
+	switch (event) {
+	case BFA_RPORT_SM_FWRSP:
+		bfa_stats(rp, sm_delp_fwrsp);
+		if (bfa_rport_send_fwdelete(rp))
+			bfa_sm_set_state(rp, bfa_rport_sm_deleting);
+		else
+			bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
+		break;
+
+	case BFA_RPORT_SM_HWFAIL:
+		bfa_stats(rp, sm_delp_hwf);
+		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
+		bfa_rport_free(rp);
+		break;
+
+	default:
+		bfa_stats(rp, sm_delp_unexp);
+		bfa_sm_fault(rp->bfa, event);
+	}
+}
+
+/**
+ * Waiting for rport create response from firmware. Rport offline is pending.
+ */
+static void
+bfa_rport_sm_offline_pending(struct bfa_rport_s *rp,
+				 enum bfa_rport_event event)
+{
+	bfa_trc(rp->bfa, rp->rport_tag);
+	bfa_trc(rp->bfa, event);
+
+	switch (event) {
+	case BFA_RPORT_SM_FWRSP:
+		bfa_stats(rp, sm_offp_fwrsp);
+		if (bfa_rport_send_fwdelete(rp))
+			bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
+		else
+			bfa_sm_set_state(rp, bfa_rport_sm_fwdelete_qfull);
+		break;
+
+	case BFA_RPORT_SM_DELETE:
+		bfa_stats(rp, sm_offp_del);
+		bfa_sm_set_state(rp, bfa_rport_sm_delete_pending);
+		break;
+
+	case BFA_RPORT_SM_HWFAIL:
+		bfa_stats(rp, sm_offp_hwf);
+		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
+		break;
+
+	default:
+		bfa_stats(rp, sm_offp_unexp);
+		bfa_sm_fault(rp->bfa, event);
+	}
+}
+
+/**
+ * IOC h/w failed.
+ */
+static void
+bfa_rport_sm_iocdisable(struct bfa_rport_s *rp, enum bfa_rport_event event)
+{
+	bfa_trc(rp->bfa, rp->rport_tag);
+	bfa_trc(rp->bfa, event);
+
+	switch (event) {
+	case BFA_RPORT_SM_OFFLINE:
+		bfa_stats(rp, sm_iocd_off);
+		bfa_rport_offline_cb(rp);
+		break;
+
+	case BFA_RPORT_SM_DELETE:
+		bfa_stats(rp, sm_iocd_del);
+		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
+		bfa_rport_free(rp);
+		break;
+
+	case BFA_RPORT_SM_ONLINE:
+		bfa_stats(rp, sm_iocd_on);
+		if (bfa_rport_send_fwcreate(rp))
+			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
+		else
+			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
+		break;
+
+	case BFA_RPORT_SM_HWFAIL:
+		break;
+
+	default:
+		bfa_stats(rp, sm_iocd_unexp);
+		bfa_sm_fault(rp->bfa, event);
+	}
+}
+
+
+
+/**
+ *  bfa_rport_private BFA rport private functions
+ */
+
+static void
+__bfa_cb_rport_online(void *cbarg, bfa_boolean_t complete)
+{
+	struct bfa_rport_s *rp = cbarg;
+
+	if (complete)
+		bfa_cb_rport_online(rp->rport_drv);
+}
+
+static void
+__bfa_cb_rport_offline(void *cbarg, bfa_boolean_t complete)
+{
+	struct bfa_rport_s *rp = cbarg;
+
+	if (complete)
+		bfa_cb_rport_offline(rp->rport_drv);
+}
+
+static void
+bfa_rport_qresume(void *cbarg)
+{
+	struct bfa_rport_s	*rp = cbarg;
+
+	bfa_sm_send_event(rp, BFA_RPORT_SM_QRESUME);
+}
+
+static void
+bfa_rport_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
+		u32 *dm_len)
+{
+	if (cfg->fwcfg.num_rports < BFA_RPORT_MIN)
+		cfg->fwcfg.num_rports = BFA_RPORT_MIN;
+
+	*km_len += cfg->fwcfg.num_rports * sizeof(struct bfa_rport_s);
+}
+
+static void
+bfa_rport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
+		     struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
+{
+	struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(bfa);
+	struct bfa_rport_s *rp;
+	u16 i;
+
+	INIT_LIST_HEAD(&mod->rp_free_q);
+	INIT_LIST_HEAD(&mod->rp_active_q);
+
+	rp = (struct bfa_rport_s *) bfa_meminfo_kva(meminfo);
+	mod->rps_list = rp;
+	mod->num_rports = cfg->fwcfg.num_rports;
+
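+	/* num_rports must be a non-zero power of two */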
+	bfa_assert(mod->num_rports &&
+		   !(mod->num_rports & (mod->num_rports - 1)));
+
+	for (i = 0; i < mod->num_rports; i++, rp++) {
+		bfa_os_memset(rp, 0, sizeof(struct bfa_rport_s));
+		rp->bfa = bfa;
+		rp->rport_tag = i;
+		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
+
+		/**
+		 * rport tag 0 is reserved and is not added to the free queue
+		 */
+		if (i)
+			list_add_tail(&rp->qe, &mod->rp_free_q);
+
+		bfa_reqq_winit(&rp->reqq_wait, bfa_rport_qresume, rp);
+	}
+
+	/**
+	 * consume memory
+	 */
+	bfa_meminfo_kva(meminfo) = (u8 *) rp;
+}
+
+static void
+bfa_rport_detach(struct bfa_s *bfa)
+{
+}
+
+static void
+bfa_rport_start(struct bfa_s *bfa)
+{
+}
+
+static void
+bfa_rport_stop(struct bfa_s *bfa)
+{
+}
+
+static void
+bfa_rport_iocdisable(struct bfa_s *bfa)
+{
+	struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(bfa);
+	struct bfa_rport_s *rport;
+	struct list_head *qe, *qen;
+
+	list_for_each_safe(qe, qen, &mod->rp_active_q) {
+		rport = (struct bfa_rport_s *) qe;
+		bfa_sm_send_event(rport, BFA_RPORT_SM_HWFAIL);
+	}
+}
+
+static struct bfa_rport_s *
+bfa_rport_alloc(struct bfa_rport_mod_s *mod)
+{
+	struct bfa_rport_s *rport;
+
+	bfa_q_deq(&mod->rp_free_q, &rport);
+	if (rport)
+		list_add_tail(&rport->qe, &mod->rp_active_q);
+
+	return rport;
+}
+
+static void
+bfa_rport_free(struct bfa_rport_s *rport)
+{
+	struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(rport->bfa);
+
+	bfa_assert(bfa_q_is_on_q(&mod->rp_active_q, rport));
+	list_del(&rport->qe);
+	list_add_tail(&rport->qe, &mod->rp_free_q);
+}
+
+static bfa_boolean_t
+bfa_rport_send_fwcreate(struct bfa_rport_s *rp)
+{
+	struct bfi_rport_create_req_s *m;
+
+	/**
+	 * check for room in queue to send request now
+	 */
+	m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
+	if (!m) {
+		bfa_reqq_wait(rp->bfa, BFA_REQQ_RPORT, &rp->reqq_wait);
+		return BFA_FALSE;
+	}
+
+	bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_CREATE_REQ,
+			bfa_lpuid(rp->bfa));
+	m->bfa_handle = rp->rport_tag;
+	m->max_frmsz = bfa_os_htons(rp->rport_info.max_frmsz);
+	m->pid = rp->rport_info.pid;
+	m->lp_tag = rp->rport_info.lp_tag;
+	m->local_pid = rp->rport_info.local_pid;
+	m->fc_class = rp->rport_info.fc_class;
+	m->vf_en = rp->rport_info.vf_en;
+	m->vf_id = rp->rport_info.vf_id;
+	m->cisc = rp->rport_info.cisc;
+
+	/**
+	 * queue I/O message to firmware
+	 */
+	bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT);
+	return BFA_TRUE;
+}
+
+static bfa_boolean_t
+bfa_rport_send_fwdelete(struct bfa_rport_s *rp)
+{
+	struct bfi_rport_delete_req_s *m;
+
+	/**
+	 * check for room in queue to send request now
+	 */
+	m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
+	if (!m) {
+		bfa_reqq_wait(rp->bfa, BFA_REQQ_RPORT, &rp->reqq_wait);
+		return BFA_FALSE;
+	}
+
+	bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_DELETE_REQ,
+			bfa_lpuid(rp->bfa));
+	m->fw_handle = rp->fw_handle;
+
+	/**
+	 * queue I/O message to firmware
+	 */
+	bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT);
+	return BFA_TRUE;
+}
+
+static bfa_boolean_t
+bfa_rport_send_fwspeed(struct bfa_rport_s *rp)
+{
+	struct bfa_rport_speed_req_s *m;
+
+	/**
+	 * check for room in queue to send request now
+	 */
+	m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
+	if (!m) {
+		bfa_trc(rp->bfa, rp->rport_info.speed);
+		return BFA_FALSE;
+	}
+
+	bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_SET_SPEED_REQ,
+			bfa_lpuid(rp->bfa));
+	m->fw_handle = rp->fw_handle;
+	m->speed = (u8)rp->rport_info.speed;
+
+	/**
+	 * queue I/O message to firmware
+	 */
+	bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT);
+	return BFA_TRUE;
+}
+
+
+
+/**
+ *  bfa_rport_public
+ */
+
+/**
+ * Rport interrupt processing.
+ */
+void
+bfa_rport_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
+{
+	union bfi_rport_i2h_msg_u msg;
+	struct bfa_rport_s *rp;
+
+	bfa_trc(bfa, m->mhdr.msg_id);
+
+	msg.msg = m;
+
+	switch (m->mhdr.msg_id) {
+	case BFI_RPORT_I2H_CREATE_RSP:
+		rp = BFA_RPORT_FROM_TAG(bfa, msg.create_rsp->bfa_handle);
+		rp->fw_handle = msg.create_rsp->fw_handle;
+		rp->qos_attr = msg.create_rsp->qos_attr;
+		bfa_assert(msg.create_rsp->status == BFA_STATUS_OK);
+		bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP);
+		break;
+
+	case BFI_RPORT_I2H_DELETE_RSP:
+		rp = BFA_RPORT_FROM_TAG(bfa, msg.delete_rsp->bfa_handle);
+		bfa_assert(msg.delete_rsp->status == BFA_STATUS_OK);
+		bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP);
+		break;
+
+	case BFI_RPORT_I2H_QOS_SCN:
+		rp = BFA_RPORT_FROM_TAG(bfa, msg.qos_scn_evt->bfa_handle);
+		rp->event_arg.fw_msg = msg.qos_scn_evt;
+		bfa_sm_send_event(rp, BFA_RPORT_SM_QOS_SCN);
+		break;
+
+	default:
+		bfa_trc(bfa, m->mhdr.msg_id);
+		bfa_assert(0);
+	}
+}
+
+
+
+/**
+ *  bfa_rport_api
+ */
+
+struct bfa_rport_s *
+bfa_rport_create(struct bfa_s *bfa, void *rport_drv)
+{
+	struct bfa_rport_s *rp;
+
+	rp = bfa_rport_alloc(BFA_RPORT_MOD(bfa));
+
+	if (rp == NULL)
+		return NULL;
+
+	rp->bfa = bfa;
+	rp->rport_drv = rport_drv;
+	bfa_rport_clear_stats(rp);
+
+	bfa_assert(bfa_sm_cmp_state(rp, bfa_rport_sm_uninit));
+	bfa_sm_send_event(rp, BFA_RPORT_SM_CREATE);
+
+	return rp;
+}
+
+void
+bfa_rport_delete(struct bfa_rport_s *rport)
+{
+	bfa_sm_send_event(rport, BFA_RPORT_SM_DELETE);
+}
+
+void
+bfa_rport_online(struct bfa_rport_s *rport, struct bfa_rport_info_s *rport_info)
+{
+	bfa_assert(rport_info->max_frmsz != 0);
+
+	/**
+	 * Some JBODs are seen to be not setting PDU size correctly in PLOGI
+	 * responses. Default to minimum size.
+	 */
+	if (rport_info->max_frmsz == 0) {
+		bfa_trc(rport->bfa, rport->rport_tag);
+		rport_info->max_frmsz = FC_MIN_PDUSZ;
+	}
+
+	bfa_os_assign(rport->rport_info, *rport_info);
+	bfa_sm_send_event(rport, BFA_RPORT_SM_ONLINE);
+}
+
+void
+bfa_rport_offline(struct bfa_rport_s *rport)
+{
+	bfa_sm_send_event(rport, BFA_RPORT_SM_OFFLINE);
+}
+
+void
+bfa_rport_speed(struct bfa_rport_s *rport, enum bfa_port_speed speed)
+{
+	bfa_assert(speed != 0);
+	bfa_assert(speed != BFA_PORT_SPEED_AUTO);
+
+	rport->rport_info.speed = speed;
+	bfa_sm_send_event(rport, BFA_RPORT_SM_SET_SPEED);
+}
+
+void
+bfa_rport_get_stats(struct bfa_rport_s *rport,
+	struct bfa_rport_hal_stats_s *stats)
+{
+	*stats = rport->stats;
+}
+
+void
+bfa_rport_get_qos_attr(struct bfa_rport_s *rport,
+					struct bfa_rport_qos_attr_s *qos_attr)
+{
+	qos_attr->qos_priority  = rport->qos_attr.qos_priority;
+	qos_attr->qos_flow_id  = bfa_os_ntohl(rport->qos_attr.qos_flow_id);
+}
+
+void
+bfa_rport_clear_stats(struct bfa_rport_s *rport)
+{
+	bfa_os_memset(&rport->stats, 0, sizeof(rport->stats));
+}
+
+
+/**
+ * SGPG related functions
+ */
+
+/**
+ * Compute and return memory needed by the SGPG module.
+ */
+static void
+bfa_sgpg_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
+		u32 *dm_len)
+{
+	if (cfg->drvcfg.num_sgpgs < BFA_SGPG_MIN)
+		cfg->drvcfg.num_sgpgs = BFA_SGPG_MIN;
+
+	*km_len += (cfg->drvcfg.num_sgpgs + 1) * sizeof(struct bfa_sgpg_s);
+	*dm_len += (cfg->drvcfg.num_sgpgs + 1) * sizeof(struct bfi_sgpg_s);
+}
+
+
+static void
+bfa_sgpg_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
+		    struct bfa_meminfo_s *minfo, struct bfa_pcidev_s *pcidev)
+{
+	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
+	int i;
+	struct bfa_sgpg_s *hsgpg;
+	struct bfi_sgpg_s *sgpg;
+	u64 align_len;
+
+	union {
+		u64 pa;
+		union bfi_addr_u addr;
+	} sgpg_pa, sgpg_pa_tmp;
+
+	INIT_LIST_HEAD(&mod->sgpg_q);
+	INIT_LIST_HEAD(&mod->sgpg_wait_q);
+
+	bfa_trc(bfa, cfg->drvcfg.num_sgpgs);
+
+	mod->num_sgpgs = cfg->drvcfg.num_sgpgs;
+	mod->sgpg_arr_pa = bfa_meminfo_dma_phys(minfo);
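+	/*
+	 * Round the DMA start address up to an SGPG boundary so that
+	 * every SGPG in the array is aligned to its own size.
+	 */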
+	align_len = (BFA_SGPG_ROUNDUP(mod->sgpg_arr_pa) - mod->sgpg_arr_pa);
+	mod->sgpg_arr_pa += align_len;
+	mod->hsgpg_arr = (struct bfa_sgpg_s *) (bfa_meminfo_kva(minfo) +
+						align_len);
+	mod->sgpg_arr = (struct bfi_sgpg_s *) (bfa_meminfo_dma_virt(minfo) +
+						align_len);
+
+	hsgpg = mod->hsgpg_arr;
+	sgpg = mod->sgpg_arr;
+	sgpg_pa.pa = mod->sgpg_arr_pa;
+	mod->free_sgpgs = mod->num_sgpgs;
+
+	bfa_assert(!(sgpg_pa.pa & (sizeof(struct bfi_sgpg_s) - 1)));
+
+	for (i = 0; i < mod->num_sgpgs; i++) {
+		bfa_os_memset(hsgpg, 0, sizeof(*hsgpg));
+		bfa_os_memset(sgpg, 0, sizeof(*sgpg));
+
+		hsgpg->sgpg = sgpg;
+		sgpg_pa_tmp.pa = bfa_sgaddr_le(sgpg_pa.pa);
+		hsgpg->sgpg_pa = sgpg_pa_tmp.addr;
+		list_add_tail(&hsgpg->qe, &mod->sgpg_q);
+
+		hsgpg++;
+		sgpg++;
+		sgpg_pa.pa += sizeof(struct bfi_sgpg_s);
+	}
+
+	bfa_meminfo_kva(minfo) = (u8 *) hsgpg;
+	bfa_meminfo_dma_virt(minfo) = (u8 *) sgpg;
+	bfa_meminfo_dma_phys(minfo) = sgpg_pa.pa;
+}
+
+static void
+bfa_sgpg_detach(struct bfa_s *bfa)
+{
+}
+
+static void
+bfa_sgpg_start(struct bfa_s *bfa)
+{
+}
+
+static void
+bfa_sgpg_stop(struct bfa_s *bfa)
+{
+}
+
+static void
+bfa_sgpg_iocdisable(struct bfa_s *bfa)
+{
+}
+
+
+
+/**
+ *  hal_sgpg_public BFA SGPG public functions
+ */
+
+bfa_status_t
+bfa_sgpg_malloc(struct bfa_s *bfa, struct list_head *sgpg_q, int nsgpgs)
+{
+	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
+	struct bfa_sgpg_s *hsgpg;
+	int i;
+
+	bfa_trc_fp(bfa, nsgpgs);
+
+	if (mod->free_sgpgs < nsgpgs)
+		return BFA_STATUS_ENOMEM;
+
+	for (i = 0; i < nsgpgs; i++) {
+		bfa_q_deq(&mod->sgpg_q, &hsgpg);
+		bfa_assert(hsgpg);
+		list_add_tail(&hsgpg->qe, sgpg_q);
+	}
+
+	mod->free_sgpgs -= nsgpgs;
+	return BFA_STATUS_OK;
+}
+
+void
+bfa_sgpg_mfree(struct bfa_s *bfa, struct list_head *sgpg_q, int nsgpg)
+{
+	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
+	struct bfa_sgpg_wqe_s *wqe;
+
+	bfa_trc_fp(bfa, nsgpg);
+
+	mod->free_sgpgs += nsgpg;
+	bfa_assert(mod->free_sgpgs <= mod->num_sgpgs);
+
+	list_splice_tail_init(sgpg_q, &mod->sgpg_q);
+
+	if (list_empty(&mod->sgpg_wait_q))
+		return;
+
+	/**
+	 * satisfy as many waiting requests as possible
+	 */
+	do {
+		wqe = bfa_q_first(&mod->sgpg_wait_q);
+		if (mod->free_sgpgs < wqe->nsgpg)
+			nsgpg = mod->free_sgpgs;
+		else
+			nsgpg = wqe->nsgpg;
+		bfa_sgpg_malloc(bfa, &wqe->sgpg_q, nsgpg);
+		wqe->nsgpg -= nsgpg;
+		if (wqe->nsgpg == 0) {
+			list_del(&wqe->qe);
+			wqe->cbfn(wqe->cbarg);
+		}
+	} while (mod->free_sgpgs && !list_empty(&mod->sgpg_wait_q));
+}
+
+void
+bfa_sgpg_wait(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe, int nsgpg)
+{
+	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
+
+	bfa_assert(nsgpg > 0);
+	bfa_assert(nsgpg > mod->free_sgpgs);
+
+	wqe->nsgpg_total = wqe->nsgpg = nsgpg;
+
+	/**
+	 * hand any remaining free SGPGs to this waiting request first
+	 */
+	if (mod->free_sgpgs) {
+		/**
+		 * no one else is waiting for SGPG
+		 */
+		bfa_assert(list_empty(&mod->sgpg_wait_q));
+		list_splice_tail_init(&mod->sgpg_q, &wqe->sgpg_q);
+		wqe->nsgpg -= mod->free_sgpgs;
+		mod->free_sgpgs = 0;
+	}
+
+	list_add_tail(&wqe->qe, &mod->sgpg_wait_q);
+}
+
+void
+bfa_sgpg_wcancel(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe)
+{
+	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
+
+	bfa_assert(bfa_q_is_on_q(&mod->sgpg_wait_q, wqe));
+	list_del(&wqe->qe);
+
+	if (wqe->nsgpg_total != wqe->nsgpg)
+		bfa_sgpg_mfree(bfa, &wqe->sgpg_q,
+				   wqe->nsgpg_total - wqe->nsgpg);
+}
+
+void
+bfa_sgpg_winit(struct bfa_sgpg_wqe_s *wqe, void (*cbfn) (void *cbarg),
+		   void *cbarg)
+{
+	INIT_LIST_HEAD(&wqe->sgpg_q);
+	wqe->cbfn = cbfn;
+	wqe->cbarg = cbarg;
+}
+
+/**
+ *  UF related functions
+ */
+/*
+ *****************************************************************************
+ * Internal functions
+ *****************************************************************************
+ */
+static void
+__bfa_cb_uf_recv(void *cbarg, bfa_boolean_t complete)
+{
+	struct bfa_uf_s   *uf = cbarg;
+	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(uf->bfa);
+
+	if (complete)
+		ufm->ufrecv(ufm->cbarg, uf);
+}
+
+static void
+claim_uf_pbs(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi)
+{
+	u32 uf_pb_tot_sz;
+
+	ufm->uf_pbs_kva = (struct bfa_uf_buf_s *) bfa_meminfo_dma_virt(mi);
+	ufm->uf_pbs_pa = bfa_meminfo_dma_phys(mi);
+	uf_pb_tot_sz = BFA_ROUNDUP((sizeof(struct bfa_uf_buf_s) * ufm->num_ufs),
+							BFA_DMA_ALIGN_SZ);
+
+	bfa_meminfo_dma_virt(mi) += uf_pb_tot_sz;
+	bfa_meminfo_dma_phys(mi) += uf_pb_tot_sz;
+
+	bfa_os_memset((void *)ufm->uf_pbs_kva, 0, uf_pb_tot_sz);
+}
+
+static void
+claim_uf_post_msgs(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi)
+{
+	struct bfi_uf_buf_post_s *uf_bp_msg;
+	struct bfi_sge_s      *sge;
+	union bfi_addr_u      sga_zero = { {0} };
+	u16 i;
+	u16 buf_len;
+
+	ufm->uf_buf_posts = (struct bfi_uf_buf_post_s *) bfa_meminfo_kva(mi);
+	uf_bp_msg = ufm->uf_buf_posts;
+
+	for (i = 0, uf_bp_msg = ufm->uf_buf_posts; i < ufm->num_ufs;
+	     i++, uf_bp_msg++) {
+		bfa_os_memset(uf_bp_msg, 0, sizeof(struct bfi_uf_buf_post_s));
+
+		uf_bp_msg->buf_tag = i;
+		buf_len = sizeof(struct bfa_uf_buf_s);
+		uf_bp_msg->buf_len = bfa_os_htons(buf_len);
+		bfi_h2i_set(uf_bp_msg->mh, BFI_MC_UF, BFI_UF_H2I_BUF_POST,
+			    bfa_lpuid(ufm->bfa));
+
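+		/*
+		 * SGE 0 points at the posted receive buffer; SGE 1 carries
+		 * the buffer length with a zero address and the
+		 * page-data-length flag.
+		 */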
+		sge = uf_bp_msg->sge;
+		sge[0].sg_len = buf_len;
+		sge[0].flags = BFI_SGE_DATA_LAST;
+		bfa_dma_addr_set(sge[0].sga, ufm_pbs_pa(ufm, i));
+		bfa_sge_to_be(sge);
+
+		sge[1].sg_len = buf_len;
+		sge[1].flags = BFI_SGE_PGDLEN;
+		sge[1].sga = sga_zero;
+		bfa_sge_to_be(&sge[1]);
+	}
+
+	/**
+	 * advance pointer beyond consumed memory
+	 */
+	bfa_meminfo_kva(mi) = (u8 *) uf_bp_msg;
+}
+
+static void
+claim_ufs(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi)
+{
+	u16 i;
+	struct bfa_uf_s   *uf;
+
+	/*
+	 * Claim block of memory for UF list
+	 */
+	ufm->uf_list = (struct bfa_uf_s *) bfa_meminfo_kva(mi);
+
+	/*
+	 * Initialize UFs and queue them in the UF free queue
+	 */
+	for (i = 0, uf = ufm->uf_list; i < ufm->num_ufs; i++, uf++) {
+		bfa_os_memset(uf, 0, sizeof(struct bfa_uf_s));
+		uf->bfa = ufm->bfa;
+		uf->uf_tag = i;
+		uf->pb_len = sizeof(struct bfa_uf_buf_s);
+		uf->buf_kva = (void *)&ufm->uf_pbs_kva[i];
+		uf->buf_pa = ufm_pbs_pa(ufm, i);
+		list_add_tail(&uf->qe, &ufm->uf_free_q);
+	}
+
+	/**
+	 * advance memory pointer
+	 */
+	bfa_meminfo_kva(mi) = (u8 *) uf;
+}
+
+static void
+uf_mem_claim(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi)
+{
+	claim_uf_pbs(ufm, mi);
+	claim_ufs(ufm, mi);
+	claim_uf_post_msgs(ufm, mi);
+}
+
+static void
+bfa_uf_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len, u32 *dm_len)
+{
+	u32 num_ufs = cfg->fwcfg.num_uf_bufs;
+
+	/*
+	 * dma-able memory for UF posted bufs
+	 */
+	*dm_len += BFA_ROUNDUP((sizeof(struct bfa_uf_buf_s) * num_ufs),
+							BFA_DMA_ALIGN_SZ);
+
+	/*
+	 * kernel Virtual memory for UFs and UF buf post msg copies
+	 */
+	*ndm_len += sizeof(struct bfa_uf_s) * num_ufs;
+	*ndm_len += sizeof(struct bfi_uf_buf_post_s) * num_ufs;
+}
+
+static void
+bfa_uf_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
+		  struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
+{
+	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
+
+	bfa_os_memset(ufm, 0, sizeof(struct bfa_uf_mod_s));
+	ufm->bfa = bfa;
+	ufm->num_ufs = cfg->fwcfg.num_uf_bufs;
+	INIT_LIST_HEAD(&ufm->uf_free_q);
+	INIT_LIST_HEAD(&ufm->uf_posted_q);
+
+	uf_mem_claim(ufm, meminfo);
+}
+
+static void
+bfa_uf_detach(struct bfa_s *bfa)
+{
+}
+
+static struct bfa_uf_s *
+bfa_uf_get(struct bfa_uf_mod_s *uf_mod)
+{
+	struct bfa_uf_s   *uf;
+
+	bfa_q_deq(&uf_mod->uf_free_q, &uf);
+	return uf;
+}
+
+static void
+bfa_uf_put(struct bfa_uf_mod_s *uf_mod, struct bfa_uf_s *uf)
+{
+	list_add_tail(&uf->qe, &uf_mod->uf_free_q);
+}
+
+static bfa_status_t
+bfa_uf_post(struct bfa_uf_mod_s *ufm, struct bfa_uf_s *uf)
+{
+	struct bfi_uf_buf_post_s *uf_post_msg;
+
+	uf_post_msg = bfa_reqq_next(ufm->bfa, BFA_REQQ_FCXP);
+	if (!uf_post_msg)
+		return BFA_STATUS_FAILED;
+
+	bfa_os_memcpy(uf_post_msg, &ufm->uf_buf_posts[uf->uf_tag],
+		      sizeof(struct bfi_uf_buf_post_s));
+	bfa_reqq_produce(ufm->bfa, BFA_REQQ_FCXP);
+
+	bfa_trc(ufm->bfa, uf->uf_tag);
+
+	list_add_tail(&uf->qe, &ufm->uf_posted_q);
+	return BFA_STATUS_OK;
+}
+
+static void
+bfa_uf_post_all(struct bfa_uf_mod_s *uf_mod)
+{
+	struct bfa_uf_s   *uf;
+
+	while ((uf = bfa_uf_get(uf_mod)) != NULL) {
+		if (bfa_uf_post(uf_mod, uf) != BFA_STATUS_OK)
+			break;
+	}
+}
+
+static void
+uf_recv(struct bfa_s *bfa, struct bfi_uf_frm_rcvd_s *m)
+{
+	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
+	u16 uf_tag = m->buf_tag;
+	struct bfa_uf_buf_s *uf_buf = &ufm->uf_pbs_kva[uf_tag];
+	struct bfa_uf_s *uf = &ufm->uf_list[uf_tag];
+	u8 *buf = &uf_buf->d[0];
+	struct fchs_s *fchs;
+
+	m->frm_len = bfa_os_ntohs(m->frm_len);
+	m->xfr_len = bfa_os_ntohs(m->xfr_len);
+
+	fchs = (struct fchs_s *)uf_buf;
+
+	list_del(&uf->qe);	/* dequeue from posted queue */
+
+	uf->data_ptr = buf;
+	uf->data_len = m->xfr_len;
+
+	bfa_assert(uf->data_len >= sizeof(struct fchs_s));
+
+	if (uf->data_len == sizeof(struct fchs_s)) {
+		bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_UF, BFA_PL_EID_RX,
+			       uf->data_len, (struct fchs_s *)buf);
+	} else {
+		u32 pld_w0 = *((u32 *) (buf + sizeof(struct fchs_s)));
+		bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_UF,
+				      BFA_PL_EID_RX, uf->data_len,
+				      (struct fchs_s *)buf, pld_w0);
+	}
+
+	if (bfa->fcs)
+		__bfa_cb_uf_recv(uf, BFA_TRUE);
+	else
+		bfa_cb_queue(bfa, &uf->hcb_qe, __bfa_cb_uf_recv, uf);
+}
+
+static void
+bfa_uf_stop(struct bfa_s *bfa)
+{
+}
+
+static void
+bfa_uf_iocdisable(struct bfa_s *bfa)
+{
+	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
+	struct bfa_uf_s *uf;
+	struct list_head *qe, *qen;
+
+	list_for_each_safe(qe, qen, &ufm->uf_posted_q) {
+		uf = (struct bfa_uf_s *) qe;
+		list_del(&uf->qe);
+		bfa_uf_put(ufm, uf);
+	}
+}
+
+static void
+bfa_uf_start(struct bfa_s *bfa)
+{
+	bfa_uf_post_all(BFA_UF_MOD(bfa));
+}
+
+
+
+/**
+ *  hal_uf_api
+ */
+
+/**
+ * Register handler for all unsolicited receive frames.
+ *
+ * @param[in]	bfa		BFA instance
+ * @param[in]	ufrecv	receive handler function
+ * @param[in]	cbarg	receive handler arg
+ */
+void
+bfa_uf_recv_register(struct bfa_s *bfa, bfa_cb_uf_recv_t ufrecv, void *cbarg)
+{
+	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
+
+	ufm->ufrecv = ufrecv;
+	ufm->cbarg = cbarg;
+}
+
+/**
+ *	Free an unsolicited frame back to BFA.
+ *
+ * @param[in]		uf		unsolicited frame to be freed
+ *
+ * @return None
+ */
+void
+bfa_uf_free(struct bfa_uf_s *uf)
+{
+	bfa_uf_put(BFA_UF_MOD(uf->bfa), uf);
+	bfa_uf_post_all(BFA_UF_MOD(uf->bfa));
+}
+
+
+
+/**
+ *  uf_pub BFA uf module public functions
+ */
+void
+bfa_uf_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
+{
+	bfa_trc(bfa, msg->mhdr.msg_id);
+
+	switch (msg->mhdr.msg_id) {
+	case BFI_UF_I2H_FRM_RCVD:
+		uf_recv(bfa, (struct bfi_uf_frm_rcvd_s *) msg);
+		break;
+
+	default:
+		bfa_trc(bfa, msg->mhdr.msg_id);
+		bfa_assert(0);
+	}
+}
+
+
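As a reading aid, here is a minimal sketch of how a client layer (for example the FCS) would consume the unsolicited-frame interface above, using the bfa_uf_get_frmbuf()/bfa_uf_get_frmlen() accessors declared in bfa_svc.h below. It is not part of the patch; the example_* identifiers and the client context type are assumptions made for illustration only.

static void
example_uf_recv(void *cbarg, struct bfa_uf_s *uf)
{
	struct example_fcs_s *client = cbarg;		/* assumed client context */
	u8 *frame = bfa_uf_get_frmbuf(uf);		/* FC header followed by payload */
	u16 len = bfa_uf_get_frmlen(uf);

	example_handle_frame(client, frame, len);	/* assumed helper */

	/* hand the buffer back so bfa_uf_post_all() can re-post it to the IOC */
	bfa_uf_free(uf);
}

static void
example_uf_init(struct bfa_s *bfa, struct example_fcs_s *client)
{
	/* one handler per BFA instance; called for every received UF */
	bfa_uf_recv_register(bfa, example_uf_recv, client);
}

Depending on whether the FCS owns the instance, the handler is invoked directly from uf_recv() or from the deferred callback queue; either way the frame must be returned with bfa_uf_free() to keep the receive pool full.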

+ 657 - 0
drivers/scsi/bfa/bfa_svc.h

@@ -0,0 +1,657 @@
+/*
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ *
+ * Linux driver for Brocade Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef __BFA_SVC_H__
+#define __BFA_SVC_H__
+
+#include "bfa_cs.h"
+#include "bfi_ms.h"
+
+
+/**
+ * Scatter-gather DMA related defines
+ */
+#define BFA_SGPG_MIN	(16)
+
+/**
+ * Alignment macro for SG page allocation
+ */
+#define BFA_SGPG_ROUNDUP(_l) (((_l) + (sizeof(struct bfi_sgpg_s) - 1))	\
+			      & ~(sizeof(struct bfi_sgpg_s) - 1))
+
+struct bfa_sgpg_wqe_s {
+	struct list_head qe;	/*  queue sg page element	*/
+	int	nsgpg;		/*  pages to be allocated	*/
+	int	nsgpg_total;	/*  total pages required	*/
+	void	(*cbfn) (void *cbarg);	/*  callback function	*/
+	void	*cbarg;		/*  callback arg		*/
+	struct list_head sgpg_q;	/*  queue of alloced sgpgs	*/
+};
+
+struct bfa_sgpg_s {
+	struct list_head  qe;	/*  queue sg page element	*/
+	struct bfi_sgpg_s *sgpg;	/*  va of SG page		*/
+	union bfi_addr_u sgpg_pa;	/*  pa of SG page		*/
+};
+
+/**
+ * Given number of SG elements, BFA_SGPG_NPAGE() returns the number of
+ * SG pages required.
+ */
+#define BFA_SGPG_NPAGE(_nsges)  (((_nsges) / BFI_SGPG_DATA_SGES) + 1)
+
+struct bfa_sgpg_mod_s {
+	struct bfa_s *bfa;
+	int		num_sgpgs;	/*  number of SG pages		*/
+	int		free_sgpgs;	/*  number of free SG pages	*/
+	struct bfa_sgpg_s	*hsgpg_arr;	/*  BFA SG page array	*/
+	struct bfi_sgpg_s *sgpg_arr;	/*  actual SG page array	*/
+	u64	sgpg_arr_pa;	/*  SG page array DMA addr	*/
+	struct list_head	sgpg_q;		/*  queue of free SG pages */
+	struct list_head	sgpg_wait_q;	/*  wait queue for SG pages */
+};
+#define BFA_SGPG_MOD(__bfa)	(&(__bfa)->modules.sgpg_mod)
+
+bfa_status_t bfa_sgpg_malloc(struct bfa_s *bfa, struct list_head *sgpg_q,
+			     int nsgpgs);
+void bfa_sgpg_mfree(struct bfa_s *bfa, struct list_head *sgpg_q, int nsgpgs);
+void bfa_sgpg_winit(struct bfa_sgpg_wqe_s *wqe,
+		    void (*cbfn) (void *cbarg), void *cbarg);
+void bfa_sgpg_wait(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe, int nsgpgs);
+void bfa_sgpg_wcancel(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe);
+
+
+/**
+ * FCXP related defines
+ */
+#define BFA_FCXP_MIN		(1)
+#define BFA_FCXP_MAX_IBUF_SZ	(2 * 1024 + 256)
+#define BFA_FCXP_MAX_LBUF_SZ	(4 * 1024 + 256)
+
+struct bfa_fcxp_mod_s {
+	struct bfa_s      *bfa;		/* backpointer to BFA */
+	struct bfa_fcxp_s *fcxp_list;	/* array of FCXPs */
+	u16	num_fcxps;	/* max num FCXP requests */
+	struct list_head  fcxp_free_q;	/* free FCXPs */
+	struct list_head  fcxp_active_q;	/* active FCXPs */
+	void		*req_pld_list_kva;	/* list of FCXP req pld */
+	u64	req_pld_list_pa;	/* list of FCXP req pld */
+	void		*rsp_pld_list_kva;	/* list of FCXP resp pld */
+	u64	rsp_pld_list_pa;	/* list of FCXP resp pld */
+	struct list_head  wait_q;		/* wait queue for free fcxp */
+	u32	req_pld_sz;
+	u32	rsp_pld_sz;
+};
+
+#define BFA_FCXP_MOD(__bfa)		(&(__bfa)->modules.fcxp_mod)
+#define BFA_FCXP_FROM_TAG(__mod, __tag)	(&(__mod)->fcxp_list[__tag])
+
+typedef void    (*fcxp_send_cb_t) (struct bfa_s *ioc, struct bfa_fcxp_s *fcxp,
+				   void *cb_arg, bfa_status_t req_status,
+				   u32 rsp_len, u32 resid_len,
+				   struct fchs_s *rsp_fchs);
+
+typedef u64 (*bfa_fcxp_get_sgaddr_t) (void *bfad_fcxp, int sgeid);
+typedef u32 (*bfa_fcxp_get_sglen_t) (void *bfad_fcxp, int sgeid);
+typedef void (*bfa_cb_fcxp_send_t) (void *bfad_fcxp, struct bfa_fcxp_s *fcxp,
+				    void *cbarg, enum bfa_status req_status,
+				    u32 rsp_len, u32 resid_len,
+				    struct fchs_s *rsp_fchs);
+typedef void (*bfa_fcxp_alloc_cbfn_t) (void *cbarg, struct bfa_fcxp_s *fcxp);
+
+
+
+/**
+ * Information needed for a FCXP request
+ */
+struct bfa_fcxp_req_info_s {
+	struct bfa_rport_s *bfa_rport;
+					/** Pointer to the bfa rport that was
+					 * returned from bfa_rport_create().
+					 * This could be left NULL for WKA or
+					 * for FCXP interactions before the
+					 * rport nexus is established
+					 */
+	struct fchs_s	fchs;	/*  request FC header structure */
+	u8		cts;	/*  continuous sequence */
+	u8		class;	/*  FC class for the request/response */
+	u16	max_frmsz;	/*  max send frame size */
+	u16	vf_id;	/*  vsan tag if applicable */
+	u8		lp_tag;	/*  lport tag */
+	u32	req_tot_len;	/*  request payload total length */
+};
+
+struct bfa_fcxp_rsp_info_s {
+	struct fchs_s	rsp_fchs;
+				/** !< Response frame's FC header will
+				 * be sent back in this field */
+	u8		rsp_timeout;
+				/** !< timeout in seconds, 0-no response
+				 */
+	u8		rsvd2[3];
+	u32	rsp_maxlen;	/*  max response length expected */
+};
+
+struct bfa_fcxp_s {
+	struct list_head	qe;		/*  fcxp queue element */
+	bfa_sm_t	sm;		/*  state machine */
+	void		*caller;	/*  driver or fcs */
+	struct bfa_fcxp_mod_s *fcxp_mod;
+	/*  back pointer to fcxp mod */
+	u16	fcxp_tag;	/*  internal tag */
+	struct bfa_fcxp_req_info_s req_info;
+	/*  request info */
+	struct bfa_fcxp_rsp_info_s rsp_info;
+	/*  response info */
+	u8	use_ireqbuf;	/*  use internal req buf */
+	u8		use_irspbuf;	/*  use internal rsp buf */
+	u32	nreq_sgles;	/*  num request SGLEs */
+	u32	nrsp_sgles;	/*  num response SGLEs */
+	struct list_head req_sgpg_q;	/*  SG pages for request buf */
+	struct list_head req_sgpg_wqe;	/*  wait queue for req SG page */
+	struct list_head rsp_sgpg_q;	/*  SG pages for response buf */
+	struct list_head rsp_sgpg_wqe;	/*  wait queue for rsp SG page */
+
+	bfa_fcxp_get_sgaddr_t req_sga_cbfn;
+	/*  SG elem addr user function */
+	bfa_fcxp_get_sglen_t req_sglen_cbfn;
+	/*  SG elem len user function */
+	bfa_fcxp_get_sgaddr_t rsp_sga_cbfn;
+	/*  SG elem addr user function */
+	bfa_fcxp_get_sglen_t rsp_sglen_cbfn;
+	/*  SG elem len user function */
+	bfa_cb_fcxp_send_t send_cbfn;   /*  send completion callback */
+	void		*send_cbarg;	/*  callback arg */
+	struct bfa_sge_s   req_sge[BFA_FCXP_MAX_SGES];
+	/*  req SG elems */
+	struct bfa_sge_s   rsp_sge[BFA_FCXP_MAX_SGES];
+	/*  rsp SG elems */
+	u8		rsp_status;	/*  comp: rsp status */
+	u32	rsp_len;	/*  comp: actual response len */
+	u32	residue_len;	/*  comp: residual rsp length */
+	struct fchs_s	rsp_fchs;	/*  comp: response fchs */
+	struct bfa_cb_qe_s    hcb_qe;	/*  comp: callback qelem */
+	struct bfa_reqq_wait_s	reqq_wqe;
+	bfa_boolean_t	reqq_waiting;
+};
+
+struct bfa_fcxp_wqe_s {
+	struct list_head		qe;
+	bfa_fcxp_alloc_cbfn_t	alloc_cbfn;
+	void		*alloc_cbarg;
+	void		*caller;
+	struct bfa_s	*bfa;
+	int		nreq_sgles;
+	int		nrsp_sgles;
+	bfa_fcxp_get_sgaddr_t	req_sga_cbfn;
+	bfa_fcxp_get_sglen_t	req_sglen_cbfn;
+	bfa_fcxp_get_sgaddr_t	rsp_sga_cbfn;
+	bfa_fcxp_get_sglen_t	rsp_sglen_cbfn;
+};
+
+#define BFA_FCXP_REQ_PLD(_fcxp)		(bfa_fcxp_get_reqbuf(_fcxp))
+#define BFA_FCXP_RSP_FCHS(_fcxp)	(&((_fcxp)->rsp_info.fchs))
+#define BFA_FCXP_RSP_PLD(_fcxp)		(bfa_fcxp_get_rspbuf(_fcxp))
+
+#define BFA_FCXP_REQ_PLD_PA(_fcxp)				\
+	((_fcxp)->fcxp_mod->req_pld_list_pa +			\
+	 ((_fcxp)->fcxp_mod->req_pld_sz  * (_fcxp)->fcxp_tag))
+
+#define BFA_FCXP_RSP_PLD_PA(_fcxp)				\
+	((_fcxp)->fcxp_mod->rsp_pld_list_pa +			\
+	 ((_fcxp)->fcxp_mod->rsp_pld_sz * (_fcxp)->fcxp_tag))
+
+void	bfa_fcxp_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
+
+
+/**
+ * RPORT related defines
+ */
+#define BFA_RPORT_MIN	4
+
+struct bfa_rport_mod_s {
+	struct bfa_rport_s *rps_list;	/*  list of rports	*/
+	struct list_head	rp_free_q;	/*  free bfa_rports	*/
+	struct list_head	rp_active_q;	/*  active bfa_rports	*/
+	u16	num_rports;	/*  number of rports	*/
+};
+
+#define BFA_RPORT_MOD(__bfa)	(&(__bfa)->modules.rport_mod)
+
+/**
+ * Convert rport tag to RPORT
+ */
+#define BFA_RPORT_FROM_TAG(__bfa, _tag)				\
+	(BFA_RPORT_MOD(__bfa)->rps_list +			\
+	 ((_tag) & (BFA_RPORT_MOD(__bfa)->num_rports - 1)))
+
+/*
+ * protected functions
+ */
+void	bfa_rport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
+
+/**
+ *	BFA rport information.
+ */
+struct bfa_rport_info_s {
+	u16	max_frmsz;	/*  max rcv pdu size		    */
+	u32	pid:24,	/*  remote port ID		    */
+		lp_tag:8;	/*  tag			    */
+	u32	local_pid:24,	/*  local port ID		    */
+		cisc:8;	/*  CIRO supported		    */
+	u8	fc_class;	/*  supported FC classes. enum fc_cos */
+	u8	vf_en;		/*  virtual fabric enable	    */
+	u16	vf_id;		/*  virtual fabric ID		    */
+	enum bfa_port_speed speed;	/*  Rport's current speed	    */
+};
+
+/**
+ * BFA rport data structure
+ */
+struct bfa_rport_s {
+	struct list_head	qe;	/*  queue element		    */
+	bfa_sm_t	sm;		/*  state machine		    */
+	struct bfa_s	*bfa;		/*  backpointer to BFA		    */
+	void		*rport_drv;	/*  fcs/driver rport object	    */
+	u16	fw_handle;	/*  firmware rport handle	    */
+	u16	rport_tag;	/*  BFA rport tag		    */
+	struct bfa_rport_info_s rport_info; /*  rport info from fcs/driver */
+	struct bfa_reqq_wait_s reqq_wait; /*  to wait for room in reqq     */
+	struct bfa_cb_qe_s hcb_qe;	/*  BFA callback qelem		    */
+	struct bfa_rport_hal_stats_s stats; /*  BFA rport statistics	    */
+	struct bfa_rport_qos_attr_s qos_attr;
+	union a {
+		bfa_status_t	status;	/*  f/w status */
+		void		*fw_msg; /*  QoS scn event		    */
+	} event_arg;
+};
+#define BFA_RPORT_FC_COS(_rport)	((_rport)->rport_info.fc_class)
+
+
+/**
+ * UF - unsolicited receive related defines
+ */
+
+#define BFA_UF_MIN	(4)
+
+
+struct bfa_uf_s {
+	struct list_head	qe;	/*  queue element		*/
+	struct bfa_s		*bfa;	/*  bfa instance		*/
+	u16	uf_tag;		/*  identifying tag fw msgs	*/
+	u16	vf_id;
+	u16	src_rport_handle;
+	u16	rsvd;
+	u8		*data_ptr;
+	u16	data_len;	/*  actual receive length	*/
+	u16	pb_len;		/*  posted buffer length	*/
+	void		*buf_kva;	/*  buffer virtual address	*/
+	u64	buf_pa;		/*  buffer physical address	*/
+	struct bfa_cb_qe_s hcb_qe;	/*  comp: BFA comp qelem	*/
+	struct bfa_sge_s sges[BFI_SGE_INLINE_MAX];
+};
+
+/**
+ *      Callback prototype for unsolicited frame receive handler.
+ *
+ * @param[in]           cbarg           callback arg for receive handler
+ * @param[in]           uf              unsolicited frame descriptor
+ *
+ * @return None
+ */
+typedef void (*bfa_cb_uf_recv_t) (void *cbarg, struct bfa_uf_s *uf);
+
+struct bfa_uf_mod_s {
+	struct bfa_s *bfa;		/*  back pointer to BFA */
+	struct bfa_uf_s *uf_list;	/*  array of UFs */
+	u16	num_ufs;	/*  num unsolicited rx frames */
+	struct list_head	uf_free_q;	/*  free UFs */
+	struct list_head	uf_posted_q;	/*  UFs posted to IOC */
+	struct bfa_uf_buf_s *uf_pbs_kva;	/*  list UF bufs request pld */
+	u64	uf_pbs_pa;	/*  phy addr for UF bufs */
+	struct bfi_uf_buf_post_s *uf_buf_posts;
+	/*  pre-built UF post msgs */
+	bfa_cb_uf_recv_t ufrecv;	/*  uf recv handler function */
+	void		*cbarg;		/*  uf receive handler arg */
+};
+
+#define BFA_UF_MOD(__bfa)	(&(__bfa)->modules.uf_mod)
+
+#define ufm_pbs_pa(_ufmod, _uftag)					\
+	((_ufmod)->uf_pbs_pa + sizeof(struct bfa_uf_buf_s) * (_uftag))
+
+void	bfa_uf_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
+
+#define BFA_UF_BUFSZ	(2 * 1024 + 256)
+
+/**
+ * @todo private
+ */
+struct bfa_uf_buf_s {
+	u8		d[BFA_UF_BUFSZ];
+};
+
+
+/**
+ * LPS - bfa lport login/logout service interface
+ */
+struct bfa_lps_s {
+	struct list_head	qe;	/*  queue element		*/
+	struct bfa_s	*bfa;		/*  parent bfa instance	*/
+	bfa_sm_t	sm;		/*  finite state machine	*/
+	u8		lp_tag;		/*  lport tag			*/
+	u8		reqq;		/*  lport request queue	*/
+	u8		alpa;		/*  ALPA for loop topologies	*/
+	u32	lp_pid;		/*  lport port ID		*/
+	bfa_boolean_t	fdisc;		/*  snd FDISC instead of FLOGI	*/
+	bfa_boolean_t	auth_en;	/*  enable authentication	*/
+	bfa_boolean_t	auth_req;	/*  authentication required	*/
+	bfa_boolean_t	npiv_en;	/*  NPIV is allowed by peer	*/
+	bfa_boolean_t	fport;		/*  attached peer is F_PORT	*/
+	bfa_boolean_t	brcd_switch;	/*  attached peer is brcd sw	*/
+	bfa_status_t	status;		/*  login status		*/
+	u16		pdusz;		/*  max receive PDU size	*/
+	u16		pr_bbcred;	/*  BB_CREDIT from peer		*/
+	u8		lsrjt_rsn;	/*  LSRJT reason		*/
+	u8		lsrjt_expl;	/*  LSRJT explanation		*/
+	wwn_t		pwwn;		/*  port wwn of lport		*/
+	wwn_t		nwwn;		/*  node wwn of lport		*/
+	wwn_t		pr_pwwn;	/*  port wwn of lport peer	*/
+	wwn_t		pr_nwwn;	/*  node wwn of lport peer	*/
+	mac_t		lp_mac;		/*  fpma/spma MAC for lport	*/
+	mac_t		fcf_mac;	/*  FCF MAC of lport		*/
+	struct bfa_reqq_wait_s	wqe;	/*  request wait queue element	*/
+	void		*uarg;		/*  user callback arg		*/
+	struct bfa_cb_qe_s hcb_qe;	/*  comp: callback qelem	*/
+	struct bfi_lps_login_rsp_s *loginrsp;
+	bfa_eproto_status_t ext_status;
+};
+
+struct bfa_lps_mod_s {
+	struct list_head		lps_free_q;
+	struct list_head		lps_active_q;
+	struct bfa_lps_s	*lps_arr;
+	int			num_lps;
+};
+
+#define BFA_LPS_MOD(__bfa)		(&(__bfa)->modules.lps_mod)
+#define BFA_LPS_FROM_TAG(__mod, __tag)	(&(__mod)->lps_arr[__tag])
+
+/*
+ * external functions
+ */
+void	bfa_lps_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
+
+
+/**
+ * FCPORT related defines
+ */
+
+#define BFA_FCPORT(_bfa)	(&((_bfa)->modules.port))
+typedef void (*bfa_cb_port_t) (void *cbarg, enum bfa_status status);
+
+/**
+ * Link notification data structure
+ */
+struct bfa_fcport_ln_s {
+	struct bfa_fcport_s	*fcport;
+	bfa_sm_t		sm;
+	struct bfa_cb_qe_s	ln_qe;	/*  BFA callback queue elem for ln */
+	enum bfa_port_linkstate ln_event; /*  ln event for callback */
+};
+
+struct bfa_fcport_trunk_s {
+	struct bfa_trunk_attr_s	attr;
+};
+
+/**
+ * BFA FC port data structure
+ */
+struct bfa_fcport_s {
+	struct bfa_s		*bfa;	/*  parent BFA instance */
+	bfa_sm_t		sm;	/*  port state machine */
+	wwn_t			nwwn;	/*  node wwn of physical port */
+	wwn_t			pwwn;	/*  port wwn of physical port */
+	enum bfa_port_speed speed_sup;
+	/*  supported speeds */
+	enum bfa_port_speed speed;	/*  current speed */
+	enum bfa_port_topology topology;	/*  current topology */
+	u8			myalpa;	/*  my ALPA in LOOP topology */
+	u8			rsvd[3];
+	struct bfa_port_cfg_s	cfg;	/*  current port configuration */
+	struct bfa_qos_attr_s  qos_attr;   /* QoS Attributes */
+	struct bfa_qos_vc_attr_s qos_vc_attr;  /*  VC info from ELP */
+	struct bfa_reqq_wait_s	reqq_wait;
+	/*  to wait for room in reqq */
+	struct bfa_reqq_wait_s	svcreq_wait;
+	/*  to wait for room in reqq */
+	struct bfa_reqq_wait_s	stats_reqq_wait;
+	/*  to wait for room in reqq (stats) */
+	void			*event_cbarg;
+	void			(*event_cbfn) (void *cbarg,
+					       enum bfa_port_linkstate event);
+	union {
+		union bfi_fcport_i2h_msg_u i2hmsg;
+	} event_arg;
+	void			*bfad;	/*  BFA driver handle */
+	struct bfa_fcport_ln_s	ln; /*  Link Notification */
+	struct bfa_cb_qe_s	hcb_qe;	/*  BFA callback queue elem */
+	struct bfa_timer_s	timer;	/*  timer */
+	u32		msgtag;	/*  firmware msg tag for reply */
+	u8			*stats_kva;
+	u64		stats_pa;
+	union bfa_fcport_stats_u *stats;
+	union bfa_fcport_stats_u *stats_ret; /*  driver stats location */
+	bfa_status_t		stats_status; /*  stats/statsclr status */
+	bfa_boolean_t		stats_busy; /*  outstanding stats/statsclr */
+	bfa_boolean_t		stats_qfull;
+	u32		stats_reset_time; /*  stats reset time stamp */
+	bfa_cb_port_t		stats_cbfn; /*  driver callback function */
+	void			*stats_cbarg; /*  user callback arg */
+	bfa_boolean_t		diag_busy; /*  diag busy status */
+	bfa_boolean_t		beacon; /*  port beacon status */
+	bfa_boolean_t		link_e2e_beacon; /*  link beacon status */
+	struct bfa_fcport_trunk_s trunk;
+	u16		fcoe_vlan;
+};
+
+#define BFA_FCPORT_MOD(__bfa)	(&(__bfa)->modules.fcport)
+
+/*
+ * protected functions
+ */
+void bfa_fcport_init(struct bfa_s *bfa);
+void bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
+
+/*
+ * bfa fcport API functions
+ */
+bfa_status_t bfa_fcport_enable(struct bfa_s *bfa);
+bfa_status_t bfa_fcport_disable(struct bfa_s *bfa);
+bfa_status_t bfa_fcport_cfg_speed(struct bfa_s *bfa,
+				  enum bfa_port_speed speed);
+enum bfa_port_speed bfa_fcport_get_speed(struct bfa_s *bfa);
+bfa_status_t bfa_fcport_cfg_topology(struct bfa_s *bfa,
+				     enum bfa_port_topology topo);
+enum bfa_port_topology bfa_fcport_get_topology(struct bfa_s *bfa);
+bfa_status_t bfa_fcport_cfg_hardalpa(struct bfa_s *bfa, u8 alpa);
+bfa_boolean_t bfa_fcport_get_hardalpa(struct bfa_s *bfa, u8 *alpa);
+u8 bfa_fcport_get_myalpa(struct bfa_s *bfa);
+bfa_status_t bfa_fcport_clr_hardalpa(struct bfa_s *bfa);
+bfa_status_t bfa_fcport_cfg_maxfrsize(struct bfa_s *bfa, u16 maxsize);
+u16 bfa_fcport_get_maxfrsize(struct bfa_s *bfa);
+u8 bfa_fcport_get_rx_bbcredit(struct bfa_s *bfa);
+void bfa_fcport_get_attr(struct bfa_s *bfa, struct bfa_port_attr_s *attr);
+wwn_t bfa_fcport_get_wwn(struct bfa_s *bfa, bfa_boolean_t node);
+void bfa_fcport_event_register(struct bfa_s *bfa,
+			void (*event_cbfn) (void *cbarg,
+			enum bfa_port_linkstate event), void *event_cbarg);
+bfa_boolean_t bfa_fcport_is_disabled(struct bfa_s *bfa);
+void bfa_fcport_cfg_qos(struct bfa_s *bfa, bfa_boolean_t on_off);
+void bfa_fcport_cfg_ratelim(struct bfa_s *bfa, bfa_boolean_t on_off);
+bfa_status_t bfa_fcport_cfg_ratelim_speed(struct bfa_s *bfa,
+					  enum bfa_port_speed speed);
+enum bfa_port_speed bfa_fcport_get_ratelim_speed(struct bfa_s *bfa);
+
+void bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit);
+void bfa_fcport_busy(struct bfa_s *bfa, bfa_boolean_t status);
+void bfa_fcport_beacon(void *dev, bfa_boolean_t beacon,
+		       bfa_boolean_t link_e2e_beacon);
+void bfa_fcport_qos_get_attr(struct bfa_s *bfa,
+			     struct bfa_qos_attr_s *qos_attr);
+void bfa_fcport_qos_get_vc_attr(struct bfa_s *bfa,
+				struct bfa_qos_vc_attr_s *qos_vc_attr);
+bfa_status_t bfa_fcport_get_qos_stats(struct bfa_s *bfa,
+				      union bfa_fcport_stats_u *stats,
+				      bfa_cb_port_t cbfn, void *cbarg);
+bfa_status_t bfa_fcport_clear_qos_stats(struct bfa_s *bfa, bfa_cb_port_t cbfn,
+					void *cbarg);
+bfa_status_t bfa_fcport_get_fcoe_stats(struct bfa_s *bfa,
+				       union bfa_fcport_stats_u *stats,
+				       bfa_cb_port_t cbfn, void *cbarg);
+bfa_status_t bfa_fcport_clear_fcoe_stats(struct bfa_s *bfa, bfa_cb_port_t cbfn,
+					 void *cbarg);
+bfa_boolean_t     bfa_fcport_is_ratelim(struct bfa_s *bfa);
+bfa_boolean_t	bfa_fcport_is_linkup(struct bfa_s *bfa);
+bfa_status_t bfa_fcport_get_stats(struct bfa_s *bfa,
+				  union bfa_fcport_stats_u *stats,
+				  bfa_cb_port_t cbfn, void *cbarg);
+bfa_status_t bfa_fcport_clear_stats(struct bfa_s *bfa, bfa_cb_port_t cbfn,
+				    void *cbarg);
+bfa_boolean_t bfa_fcport_is_qos_enabled(struct bfa_s *bfa);
+
+/*
+ * bfa rport API functions
+ */
+struct bfa_rport_s *bfa_rport_create(struct bfa_s *bfa, void *rport_drv);
+void bfa_rport_delete(struct bfa_rport_s *rport);
+void bfa_rport_online(struct bfa_rport_s *rport,
+		      struct bfa_rport_info_s *rport_info);
+void bfa_rport_offline(struct bfa_rport_s *rport);
+void bfa_rport_speed(struct bfa_rport_s *rport, enum bfa_port_speed speed);
+void bfa_rport_get_stats(struct bfa_rport_s *rport,
+			 struct bfa_rport_hal_stats_s *stats);
+void bfa_rport_clear_stats(struct bfa_rport_s *rport);
+void bfa_cb_rport_online(void *rport);
+void bfa_cb_rport_offline(void *rport);
+void bfa_cb_rport_qos_scn_flowid(void *rport,
+				 struct bfa_rport_qos_attr_s old_qos_attr,
+				 struct bfa_rport_qos_attr_s new_qos_attr);
+void bfa_cb_rport_qos_scn_prio(void *rport,
+			       struct bfa_rport_qos_attr_s old_qos_attr,
+			       struct bfa_rport_qos_attr_s new_qos_attr);
+void bfa_rport_get_qos_attr(struct bfa_rport_s *rport,
+			    struct bfa_rport_qos_attr_s *qos_attr);
+
+/*
+ * bfa fcxp API functions
+ */
+struct bfa_fcxp_s *bfa_fcxp_alloc(void *bfad_fcxp, struct bfa_s *bfa,
+				  int nreq_sgles, int nrsp_sgles,
+				  bfa_fcxp_get_sgaddr_t get_req_sga,
+				  bfa_fcxp_get_sglen_t get_req_sglen,
+				  bfa_fcxp_get_sgaddr_t get_rsp_sga,
+				  bfa_fcxp_get_sglen_t get_rsp_sglen);
+void bfa_fcxp_alloc_wait(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe,
+				bfa_fcxp_alloc_cbfn_t alloc_cbfn,
+				void *cbarg, void *bfad_fcxp,
+				int nreq_sgles, int nrsp_sgles,
+				bfa_fcxp_get_sgaddr_t get_req_sga,
+				bfa_fcxp_get_sglen_t get_req_sglen,
+				bfa_fcxp_get_sgaddr_t get_rsp_sga,
+				bfa_fcxp_get_sglen_t get_rsp_sglen);
+void bfa_fcxp_walloc_cancel(struct bfa_s *bfa,
+			    struct bfa_fcxp_wqe_s *wqe);
+void bfa_fcxp_discard(struct bfa_fcxp_s *fcxp);
+
+void *bfa_fcxp_get_reqbuf(struct bfa_fcxp_s *fcxp);
+void *bfa_fcxp_get_rspbuf(struct bfa_fcxp_s *fcxp);
+
+void bfa_fcxp_free(struct bfa_fcxp_s *fcxp);
+
+void bfa_fcxp_send(struct bfa_fcxp_s *fcxp, struct bfa_rport_s *rport,
+		   u16 vf_id, u8 lp_tag,
+		   bfa_boolean_t cts, enum fc_cos cos,
+		   u32 reqlen, struct fchs_s *fchs,
+		   bfa_cb_fcxp_send_t cbfn,
+		   void *cbarg,
+		   u32 rsp_maxlen, u8 rsp_timeout);
+bfa_status_t bfa_fcxp_abort(struct bfa_fcxp_s *fcxp);
+u32 bfa_fcxp_get_reqbufsz(struct bfa_fcxp_s *fcxp);
+u32 bfa_fcxp_get_maxrsp(struct bfa_s *bfa);
+
+static inline void *
+bfa_uf_get_frmbuf(struct bfa_uf_s *uf)
+{
+	return uf->data_ptr;
+}
+
+static inline   u16
+bfa_uf_get_frmlen(struct bfa_uf_s *uf)
+{
+	return uf->data_len;
+}
+
+/*
+ * bfa uf API functions
+ */
+void bfa_uf_recv_register(struct bfa_s *bfa, bfa_cb_uf_recv_t ufrecv,
+			  void *cbarg);
+void bfa_uf_free(struct bfa_uf_s *uf);
+
+/**
+ * bfa lport service api
+ */
+
+u32 bfa_lps_get_max_vport(struct bfa_s *bfa);
+struct bfa_lps_s *bfa_lps_alloc(struct bfa_s *bfa);
+void bfa_lps_delete(struct bfa_lps_s *lps);
+void bfa_lps_discard(struct bfa_lps_s *lps);
+void bfa_lps_flogi(struct bfa_lps_s *lps, void *uarg, u8 alpa,
+		   u16 pdusz, wwn_t pwwn, wwn_t nwwn,
+		   bfa_boolean_t auth_en);
+void bfa_lps_fdisc(struct bfa_lps_s *lps, void *uarg, u16 pdusz,
+		   wwn_t pwwn, wwn_t nwwn);
+void bfa_lps_flogo(struct bfa_lps_s *lps);
+void bfa_lps_fdisclogo(struct bfa_lps_s *lps);
+u8 bfa_lps_get_tag(struct bfa_lps_s *lps);
+bfa_boolean_t bfa_lps_is_npiv_en(struct bfa_lps_s *lps);
+bfa_boolean_t bfa_lps_is_fport(struct bfa_lps_s *lps);
+bfa_boolean_t bfa_lps_is_brcd_fabric(struct bfa_lps_s *lps);
+bfa_boolean_t bfa_lps_is_authreq(struct bfa_lps_s *lps);
+bfa_eproto_status_t bfa_lps_get_extstatus(struct bfa_lps_s *lps);
+u32 bfa_lps_get_pid(struct bfa_lps_s *lps);
+u32 bfa_lps_get_base_pid(struct bfa_s *bfa);
+u8 bfa_lps_get_tag_from_pid(struct bfa_s *bfa, u32 pid);
+u16 bfa_lps_get_peer_bbcredit(struct bfa_lps_s *lps);
+wwn_t bfa_lps_get_peer_pwwn(struct bfa_lps_s *lps);
+wwn_t bfa_lps_get_peer_nwwn(struct bfa_lps_s *lps);
+u8 bfa_lps_get_lsrjt_rsn(struct bfa_lps_s *lps);
+u8 bfa_lps_get_lsrjt_expl(struct bfa_lps_s *lps);
+mac_t bfa_lps_get_lp_mac(struct bfa_lps_s *lps);
+void bfa_cb_lps_flogi_comp(void *bfad, void *uarg, bfa_status_t status);
+void bfa_cb_lps_fdisc_comp(void *bfad, void *uarg, bfa_status_t status);
+void bfa_cb_lps_fdisclogo_comp(void *bfad, void *uarg);
+void bfa_cb_lps_cvl_event(void *bfad, void *uarg);
+
+void bfa_trunk_enable_cfg(struct bfa_s *bfa);
+bfa_status_t bfa_trunk_enable(struct bfa_s *bfa);
+bfa_status_t bfa_trunk_disable(struct bfa_s *bfa);
+bfa_status_t bfa_trunk_get_attr(struct bfa_s *bfa,
+		struct bfa_trunk_attr_s *attr);
+
+#endif /* __BFA_SVC_H__ */
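Because the SG page (SGPG) pool declared above is shared across requests, bfa_sgpg_malloc() can fail transiently and callers are expected to fall back to the wait-queue interface. The sketch below shows the assumed calling pattern; it is illustrative only, and the example_* names are not in the driver.

struct example_req_s {
	struct bfa_s		*bfa;
	struct list_head	sgpg_q;		/* pages owned by this request */
	struct bfa_sgpg_wqe_s	sgpg_wqe;	/* wait element when the pool is low */
	int			nsgpg;		/* pages needed */
};

static void example_req_resume(struct example_req_s *req);	/* assumed helper */

static void
example_sgpg_avail(void *cbarg)
{
	struct example_req_s *req = cbarg;

	/* pages were delivered into the wait element's queue as they freed up */
	list_splice_tail_init(&req->sgpg_wqe.sgpg_q, &req->sgpg_q);
	example_req_resume(req);
}

static void
example_req_start(struct example_req_s *req, int nsges)
{
	INIT_LIST_HEAD(&req->sgpg_q);
	req->nsgpg = BFA_SGPG_NPAGE(nsges);

	if (bfa_sgpg_malloc(req->bfa, &req->sgpg_q, req->nsgpg) ==
	    BFA_STATUS_OK)
		return;					/* got everything, proceed */

	/* pool exhausted: ask to be called back once enough pages are freed */
	bfa_sgpg_winit(&req->sgpg_wqe, example_sgpg_avail, req);
	bfa_sgpg_wait(req->bfa, &req->sgpg_wqe, req->nsgpg);
}

If the request is aborted while waiting, bfa_sgpg_wcancel() removes the wait element and returns any partially collected pages; on completion the pages go back through bfa_sgpg_mfree(), which is what satisfies the next waiter.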

+ 0 - 90
drivers/scsi/bfa/bfa_timer.c

@@ -1,90 +0,0 @@
-/*
- * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
- * All rights reserved
- * www.brocade.com
- *
- * Linux driver for Brocade Fibre Channel Host Bus Adapter.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License (GPL) Version 2 as
- * published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- */
-
-#include <bfa_timer.h>
-#include <cs/bfa_debug.h>
-
-void
-bfa_timer_init(struct bfa_timer_mod_s *mod)
-{
-	INIT_LIST_HEAD(&mod->timer_q);
-}
-
-void
-bfa_timer_beat(struct bfa_timer_mod_s *mod)
-{
-	struct list_head        *qh = &mod->timer_q;
-	struct list_head        *qe, *qe_next;
-	struct bfa_timer_s *elem;
-	struct list_head         timedout_q;
-
-	INIT_LIST_HEAD(&timedout_q);
-
-	qe = bfa_q_next(qh);
-
-	while (qe != qh) {
-		qe_next = bfa_q_next(qe);
-
-		elem = (struct bfa_timer_s *) qe;
-		if (elem->timeout <= BFA_TIMER_FREQ) {
-			elem->timeout = 0;
-			list_del(&elem->qe);
-			list_add_tail(&elem->qe, &timedout_q);
-		} else {
-			elem->timeout -= BFA_TIMER_FREQ;
-		}
-
-		qe = qe_next;	/* go to next elem */
-	}
-
-	/*
-	 * Pop all the timeout entries
-	 */
-	while (!list_empty(&timedout_q)) {
-		bfa_q_deq(&timedout_q, &elem);
-		elem->timercb(elem->arg);
-	}
-}
-
-/**
- * Should be called with lock protection
- */
-void
-bfa_timer_begin(struct bfa_timer_mod_s *mod, struct bfa_timer_s *timer,
-		    void (*timercb) (void *), void *arg, unsigned int timeout)
-{
-
-	bfa_assert(timercb != NULL);
-	bfa_assert(!bfa_q_is_on_q(&mod->timer_q, timer));
-
-	timer->timeout = timeout;
-	timer->timercb = timercb;
-	timer->arg = arg;
-
-	list_add_tail(&timer->qe, &mod->timer_q);
-}
-
-/**
- * Should be called with lock protection
- */
-void
-bfa_timer_stop(struct bfa_timer_s *timer)
-{
-	bfa_assert(!list_empty(&timer->qe));
-
-	list_del(&timer->qe);
-}

+ 0 - 64
drivers/scsi/bfa/bfa_trcmod_priv.h

@@ -1,64 +0,0 @@
-/*
- * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
- * All rights reserved
- * www.brocade.com
- *
- * Linux driver for Brocade Fibre Channel Host Bus Adapter.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License (GPL) Version 2 as
- * published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- */
-
-/**
- *  hal_trcmod.h BFA trace modules
- */
-
-#ifndef __BFA_TRCMOD_PRIV_H__
-#define __BFA_TRCMOD_PRIV_H__
-
-#include <cs/bfa_trc.h>
-
-/*
- * !!! Only append to the enums defined here to avoid any versioning
- * !!! needed between trace utility and driver version
- */
-enum {
-	BFA_TRC_HAL_INTR	= 1,
-	BFA_TRC_HAL_FCXP	= 2,
-	BFA_TRC_HAL_UF		= 3,
-	BFA_TRC_HAL_RPORT	= 4,
-	BFA_TRC_HAL_FCPIM	= 5,
-	BFA_TRC_HAL_IOIM	= 6,
-	BFA_TRC_HAL_TSKIM	= 7,
-	BFA_TRC_HAL_ITNIM	= 8,
-	BFA_TRC_HAL_FCPORT	= 9,
-	BFA_TRC_HAL_SGPG	= 10,
-	BFA_TRC_HAL_FLASH	= 11,
-	BFA_TRC_HAL_DEBUG	= 12,
-	BFA_TRC_HAL_WWN		= 13,
-	BFA_TRC_HAL_FLASH_RAW	= 14,
-	BFA_TRC_HAL_SBOOT	= 15,
-	BFA_TRC_HAL_SBOOT_IO	= 16,
-	BFA_TRC_HAL_SBOOT_INTR	= 17,
-	BFA_TRC_HAL_SBTEST	= 18,
-	BFA_TRC_HAL_IPFC	= 19,
-	BFA_TRC_HAL_IOCFC	= 20,
-	BFA_TRC_HAL_FCPTM	= 21,
-	BFA_TRC_HAL_IOTM	= 22,
-	BFA_TRC_HAL_TSKTM	= 23,
-	BFA_TRC_HAL_TIN		= 24,
-	BFA_TRC_HAL_LPS		= 25,
-	BFA_TRC_HAL_FCDIAG	= 26,
-	BFA_TRC_HAL_PBIND	= 27,
-	BFA_TRC_HAL_IOCFC_CT	= 28,
-	BFA_TRC_HAL_IOCFC_CB	= 29,
-	BFA_TRC_HAL_IOCFC_Q	= 30,
-};
-
-#endif /* __BFA_TRCMOD_PRIV_H__ */

+ 0 - 690
drivers/scsi/bfa/bfa_tskim.c

@@ -1,690 +0,0 @@
-/*
- * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
- * All rights reserved
- * www.brocade.com
- *
- * Linux driver for Brocade Fibre Channel Host Bus Adapter.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License (GPL) Version 2 as
- * published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- */
-
-#include <bfa.h>
-#include <bfa_cb_ioim_macros.h>
-
-BFA_TRC_FILE(HAL, TSKIM);
-
-/**
- * task management completion handling
- */
-#define bfa_tskim_qcomp(__tskim, __cbfn) do {			\
-	bfa_cb_queue((__tskim)->bfa, &(__tskim)->hcb_qe,	\
-			 __cbfn, (__tskim));      \
-	bfa_tskim_notify_comp(__tskim);      \
-} while (0)
-
-#define bfa_tskim_notify_comp(__tskim) do {			 \
-	if ((__tskim)->notify)					 \
-		bfa_itnim_tskdone((__tskim)->itnim);      \
-} while (0)
-
-/*
- * forward declarations
- */
-static void     __bfa_cb_tskim_done(void *cbarg, bfa_boolean_t complete);
-static void     __bfa_cb_tskim_failed(void *cbarg, bfa_boolean_t complete);
-static bfa_boolean_t bfa_tskim_match_scope(struct bfa_tskim_s *tskim,
-					       lun_t lun);
-static void     bfa_tskim_gather_ios(struct bfa_tskim_s *tskim);
-static void     bfa_tskim_cleanp_comp(void *tskim_cbarg);
-static void     bfa_tskim_cleanup_ios(struct bfa_tskim_s *tskim);
-static bfa_boolean_t bfa_tskim_send(struct bfa_tskim_s *tskim);
-static bfa_boolean_t bfa_tskim_send_abort(struct bfa_tskim_s *tskim);
-static void     bfa_tskim_iocdisable_ios(struct bfa_tskim_s *tskim);
-
-/**
- *  bfa_tskim_sm
- */
-
-enum bfa_tskim_event {
-	BFA_TSKIM_SM_START        = 1,  /*  TM command start            */
-	BFA_TSKIM_SM_DONE         = 2,  /*  TM completion               */
-	BFA_TSKIM_SM_QRESUME      = 3,  /*  resume after qfull          */
-	BFA_TSKIM_SM_HWFAIL       = 5,  /*  IOC h/w failure event       */
-	BFA_TSKIM_SM_HCB          = 6,  /*  BFA callback completion     */
-	BFA_TSKIM_SM_IOS_DONE     = 7,  /*  IO and sub TM completions   */
-	BFA_TSKIM_SM_CLEANUP      = 8,  /*  TM cleanup on ITN offline   */
-	BFA_TSKIM_SM_CLEANUP_DONE = 9,  /*  TM abort completion         */
-};
-
-static void     bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim,
-					enum bfa_tskim_event event);
-static void     bfa_tskim_sm_active(struct bfa_tskim_s *tskim,
-					enum bfa_tskim_event event);
-static void     bfa_tskim_sm_cleanup(struct bfa_tskim_s *tskim,
-					 enum bfa_tskim_event event);
-static void     bfa_tskim_sm_iocleanup(struct bfa_tskim_s *tskim,
-					 enum bfa_tskim_event event);
-static void     bfa_tskim_sm_qfull(struct bfa_tskim_s *tskim,
-				       enum bfa_tskim_event event);
-static void     bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim,
-				       enum bfa_tskim_event event);
-static void     bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim,
-				     enum bfa_tskim_event event);
-
-/**
- *      Task management command beginning state.
- */
-static void
-bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
-{
-	bfa_trc(tskim->bfa, event);
-
-	switch (event) {
-	case BFA_TSKIM_SM_START:
-		bfa_sm_set_state(tskim, bfa_tskim_sm_active);
-		bfa_tskim_gather_ios(tskim);
-
-		/**
-		 * If device is offline, do not send TM on wire. Just cleanup
-		 * any pending IO requests and complete TM request.
-		 */
-		if (!bfa_itnim_is_online(tskim->itnim)) {
-			bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
-			tskim->tsk_status = BFI_TSKIM_STS_OK;
-			bfa_tskim_cleanup_ios(tskim);
-			return;
-		}
-
-		if (!bfa_tskim_send(tskim)) {
-			bfa_sm_set_state(tskim, bfa_tskim_sm_qfull);
-			bfa_reqq_wait(tskim->bfa, tskim->itnim->reqq,
-					  &tskim->reqq_wait);
-		}
-		break;
-
-	default:
-		bfa_sm_fault(tskim->bfa, event);
-	}
-}
-
-/**
- * brief
- *	TM command is active, awaiting completion from firmware to
- *	cleanup IO requests in TM scope.
- */
-static void
-bfa_tskim_sm_active(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
-{
-	bfa_trc(tskim->bfa, event);
-
-	switch (event) {
-	case BFA_TSKIM_SM_DONE:
-		bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
-		bfa_tskim_cleanup_ios(tskim);
-		break;
-
-	case BFA_TSKIM_SM_CLEANUP:
-		bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup);
-		if (!bfa_tskim_send_abort(tskim)) {
-			bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup_qfull);
-			bfa_reqq_wait(tskim->bfa, tskim->itnim->reqq,
-				&tskim->reqq_wait);
-		}
-		break;
-
-	case BFA_TSKIM_SM_HWFAIL:
-		bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
-		bfa_tskim_iocdisable_ios(tskim);
-		bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
-		break;
-
-	default:
-		bfa_sm_fault(tskim->bfa, event);
-	}
-}
-
-/**
- *	An active TM is being cleaned up since ITN is offline. Awaiting cleanup
- *	completion event from firmware.
- */
-static void
-bfa_tskim_sm_cleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
-{
-	bfa_trc(tskim->bfa, event);
-
-	switch (event) {
-	case BFA_TSKIM_SM_DONE:
-		/**
-		 * Ignore and wait for ABORT completion from firmware.
-		 */
-		break;
-
-	case BFA_TSKIM_SM_CLEANUP_DONE:
-		bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
-		bfa_tskim_cleanup_ios(tskim);
-		break;
-
-	case BFA_TSKIM_SM_HWFAIL:
-		bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
-		bfa_tskim_iocdisable_ios(tskim);
-		bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
-		break;
-
-	default:
-		bfa_sm_fault(tskim->bfa, event);
-	}
-}
-
-static void
-bfa_tskim_sm_iocleanup(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
-{
-	bfa_trc(tskim->bfa, event);
-
-	switch (event) {
-	case BFA_TSKIM_SM_IOS_DONE:
-		bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
-		bfa_tskim_qcomp(tskim, __bfa_cb_tskim_done);
-		break;
-
-	case BFA_TSKIM_SM_CLEANUP:
-		/**
-		 * Ignore, TM command completed on wire.
-		 * Notify TM conmpletion on IO cleanup completion.
-		 */
-		break;
-
-	case BFA_TSKIM_SM_HWFAIL:
-		bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
-		bfa_tskim_iocdisable_ios(tskim);
-		bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
-		break;
-
-	default:
-		bfa_sm_fault(tskim->bfa, event);
-	}
-}
-
-/**
- *      Task management command is waiting for room in request CQ
- */
-static void
-bfa_tskim_sm_qfull(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
-{
-	bfa_trc(tskim->bfa, event);
-
-	switch (event) {
-	case BFA_TSKIM_SM_QRESUME:
-		bfa_sm_set_state(tskim, bfa_tskim_sm_active);
-		bfa_tskim_send(tskim);
-		break;
-
-	case BFA_TSKIM_SM_CLEANUP:
-		/**
-		 * No need to send TM on wire since ITN is offline.
-		 */
-		bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
-		bfa_reqq_wcancel(&tskim->reqq_wait);
-		bfa_tskim_cleanup_ios(tskim);
-		break;
-
-	case BFA_TSKIM_SM_HWFAIL:
-		bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
-		bfa_reqq_wcancel(&tskim->reqq_wait);
-		bfa_tskim_iocdisable_ios(tskim);
-		bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
-		break;
-
-	default:
-		bfa_sm_fault(tskim->bfa, event);
-	}
-}
-
-/**
- *      Task management command is active, awaiting for room in request CQ
- *	to send clean up request.
- */
-static void
-bfa_tskim_sm_cleanup_qfull(struct bfa_tskim_s *tskim,
-		enum bfa_tskim_event event)
-{
-	bfa_trc(tskim->bfa, event);
-
-	switch (event) {
-	case BFA_TSKIM_SM_DONE:
-		bfa_reqq_wcancel(&tskim->reqq_wait);
-		/**
-		 *
-		 * Fall through !!!
-		 */
-
-	case BFA_TSKIM_SM_QRESUME:
-		bfa_sm_set_state(tskim, bfa_tskim_sm_cleanup);
-		bfa_tskim_send_abort(tskim);
-		break;
-
-	case BFA_TSKIM_SM_HWFAIL:
-		bfa_sm_set_state(tskim, bfa_tskim_sm_hcb);
-		bfa_reqq_wcancel(&tskim->reqq_wait);
-		bfa_tskim_iocdisable_ios(tskim);
-		bfa_tskim_qcomp(tskim, __bfa_cb_tskim_failed);
-		break;
-
-	default:
-		bfa_sm_fault(tskim->bfa, event);
-	}
-}
-
-/**
- *      BFA callback is pending
- */
-static void
-bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim, enum bfa_tskim_event event)
-{
-	bfa_trc(tskim->bfa, event);
-
-	switch (event) {
-	case BFA_TSKIM_SM_HCB:
-		bfa_sm_set_state(tskim, bfa_tskim_sm_uninit);
-		bfa_tskim_free(tskim);
-		break;
-
-	case BFA_TSKIM_SM_CLEANUP:
-		bfa_tskim_notify_comp(tskim);
-		break;
-
-	case BFA_TSKIM_SM_HWFAIL:
-		break;
-
-	default:
-		bfa_sm_fault(tskim->bfa, event);
-	}
-}
-
-
-
-/**
- *  bfa_tskim_private
- */
-
-static void
-__bfa_cb_tskim_done(void *cbarg, bfa_boolean_t complete)
-{
-	struct bfa_tskim_s *tskim = cbarg;
-
-	if (!complete) {
-		bfa_sm_send_event(tskim, BFA_TSKIM_SM_HCB);
-		return;
-	}
-
-	bfa_stats(tskim->itnim, tm_success);
-	bfa_cb_tskim_done(tskim->bfa->bfad, tskim->dtsk, tskim->tsk_status);
-}
-
-static void
-__bfa_cb_tskim_failed(void *cbarg, bfa_boolean_t complete)
-{
-	struct bfa_tskim_s *tskim = cbarg;
-
-	if (!complete) {
-		bfa_sm_send_event(tskim, BFA_TSKIM_SM_HCB);
-		return;
-	}
-
-	bfa_stats(tskim->itnim, tm_failures);
-	bfa_cb_tskim_done(tskim->bfa->bfad, tskim->dtsk,
-			   BFI_TSKIM_STS_FAILED);
-}
-
-static          bfa_boolean_t
-bfa_tskim_match_scope(struct bfa_tskim_s *tskim, lun_t lun)
-{
-	switch (tskim->tm_cmnd) {
-	case FCP_TM_TARGET_RESET:
-		return BFA_TRUE;
-
-	case FCP_TM_ABORT_TASK_SET:
-	case FCP_TM_CLEAR_TASK_SET:
-	case FCP_TM_LUN_RESET:
-	case FCP_TM_CLEAR_ACA:
-		return (tskim->lun == lun);
-
-	default:
-		bfa_assert(0);
-	}
-
-	return BFA_FALSE;
-}
-
-/**
- *      Gather affected IO requests and task management commands.
- */
-static void
-bfa_tskim_gather_ios(struct bfa_tskim_s *tskim)
-{
-	struct bfa_itnim_s *itnim = tskim->itnim;
-	struct bfa_ioim_s *ioim;
-	struct list_head        *qe, *qen;
-
-	INIT_LIST_HEAD(&tskim->io_q);
-
-	/**
-	 * Gather any active IO requests first.
-	 */
-	list_for_each_safe(qe, qen, &itnim->io_q) {
-		ioim = (struct bfa_ioim_s *) qe;
-		if (bfa_tskim_match_scope
-		    (tskim, bfa_cb_ioim_get_lun(ioim->dio))) {
-			list_del(&ioim->qe);
-			list_add_tail(&ioim->qe, &tskim->io_q);
-		}
-	}
-
-	/**
-	 * Failback any pending IO requests immediately.
-	 */
-	list_for_each_safe(qe, qen, &itnim->pending_q) {
-		ioim = (struct bfa_ioim_s *) qe;
-		if (bfa_tskim_match_scope
-		    (tskim, bfa_cb_ioim_get_lun(ioim->dio))) {
-			list_del(&ioim->qe);
-			list_add_tail(&ioim->qe, &ioim->fcpim->ioim_comp_q);
-			bfa_ioim_tov(ioim);
-		}
-	}
-}
-
-/**
- * 		IO cleanup completion
- */
-static void
-bfa_tskim_cleanp_comp(void *tskim_cbarg)
-{
-	struct bfa_tskim_s *tskim = tskim_cbarg;
-
-	bfa_stats(tskim->itnim, tm_io_comps);
-	bfa_sm_send_event(tskim, BFA_TSKIM_SM_IOS_DONE);
-}
-
-/**
- *      Gather affected IO requests and task management commands.
- */
-static void
-bfa_tskim_cleanup_ios(struct bfa_tskim_s *tskim)
-{
-	struct bfa_ioim_s *ioim;
-	struct list_head        *qe, *qen;
-
-	bfa_wc_init(&tskim->wc, bfa_tskim_cleanp_comp, tskim);
-
-	list_for_each_safe(qe, qen, &tskim->io_q) {
-		ioim = (struct bfa_ioim_s *) qe;
-		bfa_wc_up(&tskim->wc);
-		bfa_ioim_cleanup_tm(ioim, tskim);
-	}
-
-	bfa_wc_wait(&tskim->wc);
-}
-
-/**
- *      Send task management request to firmware.
- */
-static bfa_boolean_t
-bfa_tskim_send(struct bfa_tskim_s *tskim)
-{
-	struct bfa_itnim_s *itnim = tskim->itnim;
-	struct bfi_tskim_req_s *m;
-
-	/**
-	 * check for room in queue to send request now
-	 */
-	m = bfa_reqq_next(tskim->bfa, itnim->reqq);
-	if (!m)
-		return BFA_FALSE;
-
-	/**
-	 * build i/o request message next
-	 */
-	bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_TM_REQ,
-			bfa_lpuid(tskim->bfa));
-
-	m->tsk_tag = bfa_os_htons(tskim->tsk_tag);
-	m->itn_fhdl = tskim->itnim->rport->fw_handle;
-	m->t_secs = tskim->tsecs;
-	m->lun = tskim->lun;
-	m->tm_flags = tskim->tm_cmnd;
-
-	/**
-	 * queue I/O message to firmware
-	 */
-	bfa_reqq_produce(tskim->bfa, itnim->reqq);
-	return BFA_TRUE;
-}
-
-/**
- *      Send abort request to cleanup an active TM to firmware.
- */
-static bfa_boolean_t
-bfa_tskim_send_abort(struct bfa_tskim_s *tskim)
-{
-	struct bfa_itnim_s             *itnim = tskim->itnim;
-	struct bfi_tskim_abortreq_s    *m;
-
-	/**
-	 * check for room in queue to send request now
-	 */
-	m = bfa_reqq_next(tskim->bfa, itnim->reqq);
-	if (!m)
-		return BFA_FALSE;
-
-	/**
-	 * build i/o request message next
-	 */
-	bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_ABORT_REQ,
-			bfa_lpuid(tskim->bfa));
-
-	m->tsk_tag  = bfa_os_htons(tskim->tsk_tag);
-
-	/**
-	 * queue I/O message to firmware
-	 */
-	bfa_reqq_produce(tskim->bfa, itnim->reqq);
-	return BFA_TRUE;
-}
-
-/**
- *      Call to resume task management cmnd waiting for room in request queue.
- */
-static void
-bfa_tskim_qresume(void *cbarg)
-{
-	struct bfa_tskim_s *tskim = cbarg;
-
-	bfa_fcpim_stats(tskim->fcpim, qresumes);
-	bfa_stats(tskim->itnim, tm_qresumes);
-	bfa_sm_send_event(tskim, BFA_TSKIM_SM_QRESUME);
-}
-
-/**
- * Cleanup IOs associated with a task mangement command on IOC failures.
- */
-static void
-bfa_tskim_iocdisable_ios(struct bfa_tskim_s *tskim)
-{
-	struct bfa_ioim_s *ioim;
-	struct list_head        *qe, *qen;
-
-	list_for_each_safe(qe, qen, &tskim->io_q) {
-		ioim = (struct bfa_ioim_s *) qe;
-		bfa_ioim_iocdisable(ioim);
-	}
-}
-
-
-
-/**
- *  bfa_tskim_friend
- */
-
-/**
- * Notification on completions from related ioim.
- */
-void
-bfa_tskim_iodone(struct bfa_tskim_s *tskim)
-{
-	bfa_wc_down(&tskim->wc);
-}
-
-/**
- * Handle IOC h/w failure notification from itnim.
- */
-void
-bfa_tskim_iocdisable(struct bfa_tskim_s *tskim)
-{
-	tskim->notify = BFA_FALSE;
-	bfa_stats(tskim->itnim, tm_iocdowns);
-	bfa_sm_send_event(tskim, BFA_TSKIM_SM_HWFAIL);
-}
-
-/**
- * Cleanup TM command and associated IOs as part of ITNIM offline.
- */
-void
-bfa_tskim_cleanup(struct bfa_tskim_s *tskim)
-{
-	tskim->notify = BFA_TRUE;
-	bfa_stats(tskim->itnim, tm_cleanups);
-	bfa_sm_send_event(tskim, BFA_TSKIM_SM_CLEANUP);
-}
-
-/**
- *      Memory allocation and initialization.
- */
-void
-bfa_tskim_attach(struct bfa_fcpim_mod_s *fcpim, struct bfa_meminfo_s *minfo)
-{
-	struct bfa_tskim_s *tskim;
-	u16        i;
-
-	INIT_LIST_HEAD(&fcpim->tskim_free_q);
-
-	tskim = (struct bfa_tskim_s *) bfa_meminfo_kva(minfo);
-	fcpim->tskim_arr = tskim;
-
-	for (i = 0; i < fcpim->num_tskim_reqs; i++, tskim++) {
-		/*
-		 * initialize TSKIM
-		 */
-		bfa_os_memset(tskim, 0, sizeof(struct bfa_tskim_s));
-		tskim->tsk_tag = i;
-		tskim->bfa     = fcpim->bfa;
-		tskim->fcpim   = fcpim;
-		tskim->notify  = BFA_FALSE;
-		bfa_reqq_winit(&tskim->reqq_wait, bfa_tskim_qresume,
-				   tskim);
-		bfa_sm_set_state(tskim, bfa_tskim_sm_uninit);
-
-		list_add_tail(&tskim->qe, &fcpim->tskim_free_q);
-	}
-
-	bfa_meminfo_kva(minfo) = (u8 *) tskim;
-}
-
-void
-bfa_tskim_detach(struct bfa_fcpim_mod_s *fcpim)
-{
-    /**
-     * @todo
-     */
-}
-
-void
-bfa_tskim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
-{
-	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
-	struct bfi_tskim_rsp_s *rsp = (struct bfi_tskim_rsp_s *) m;
-	struct bfa_tskim_s *tskim;
-	u16        tsk_tag = bfa_os_ntohs(rsp->tsk_tag);
-
-	tskim = BFA_TSKIM_FROM_TAG(fcpim, tsk_tag);
-	bfa_assert(tskim->tsk_tag == tsk_tag);
-
-	tskim->tsk_status = rsp->tsk_status;
-
-	/**
-	 * Firmware sends BFI_TSKIM_STS_ABORTED status for abort
-	 * requests. All other statuses are for normal completions.
-	 */
-	if (rsp->tsk_status == BFI_TSKIM_STS_ABORTED) {
-		bfa_stats(tskim->itnim, tm_cleanup_comps);
-		bfa_sm_send_event(tskim, BFA_TSKIM_SM_CLEANUP_DONE);
-	} else {
-		bfa_stats(tskim->itnim, tm_fw_rsps);
-		bfa_sm_send_event(tskim, BFA_TSKIM_SM_DONE);
-	}
-}
-
-
-
-/**
- *  bfa_tskim_api
- */
-
-
-struct bfa_tskim_s *
-bfa_tskim_alloc(struct bfa_s *bfa, struct bfad_tskim_s *dtsk)
-{
-	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
-	struct bfa_tskim_s *tskim;
-
-	bfa_q_deq(&fcpim->tskim_free_q, &tskim);
-
-	if (!tskim)
-		bfa_fcpim_stats(fcpim, no_tskims);
-	else
-		tskim->dtsk = dtsk;
-
-	return tskim;
-}
-
-void
-bfa_tskim_free(struct bfa_tskim_s *tskim)
-{
-	bfa_assert(bfa_q_is_on_q_func(&tskim->itnim->tsk_q, &tskim->qe));
-	list_del(&tskim->qe);
-	list_add_tail(&tskim->qe, &tskim->fcpim->tskim_free_q);
-}
-
-/**
- *      Start a task management command.
- *
- * @param[in]       tskim       BFA task management command instance
- * @param[in]       itnim       i-t nexus for the task management command
- * @param[in]       lun         lun, if applicable
- * @param[in]       tm_cmnd     Task management command code.
- * @param[in]       t_secs      Timeout in seconds
- *
- * @return None.
- */
-void
-bfa_tskim_start(struct bfa_tskim_s *tskim, struct bfa_itnim_s *itnim, lun_t lun,
-		    enum fcp_tm_cmnd tm_cmnd, u8 tsecs)
-{
-	tskim->itnim   = itnim;
-	tskim->lun     = lun;
-	tskim->tm_cmnd = tm_cmnd;
-	tskim->tsecs   = tsecs;
-	tskim->notify  = BFA_FALSE;
-	bfa_stats(itnim, tm_cmnds);
-
-	list_add_tail(&tskim->qe, &itnim->tsk_q);
-	bfa_sm_send_event(tskim, BFA_TSKIM_SM_START);
-}
-
-
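The file removed above implemented the task-management (TM) command state machine; in this reorganization its logic appears to move into the consolidated FCP initiator-mode code rather than being dropped. As a reading aid, a caller would drive the interface shown above roughly as in the sketch below; it is illustrative only, and the example_* name plus the 10-second timeout are assumptions.

static bfa_status_t
example_lun_reset(struct bfa_s *bfa, struct bfad_tskim_s *dtsk,
		  struct bfa_itnim_s *itnim, lun_t lun)
{
	struct bfa_tskim_s *tskim;

	tskim = bfa_tskim_alloc(bfa, dtsk);	/* NULL when the pool is empty */
	if (!tskim)
		return BFA_STATUS_FAILED;

	/* completion is reported later through bfa_cb_tskim_done() */
	bfa_tskim_start(tskim, itnim, lun, FCP_TM_LUN_RESET, 10);
	return BFA_STATUS_OK;
}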

+ 0 - 343
drivers/scsi/bfa/bfa_uf.c

@@ -1,343 +0,0 @@
-/*
- * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
- * All rights reserved
- * www.brocade.com
- *
- * Linux driver for Brocade Fibre Channel Host Bus Adapter.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License (GPL) Version 2 as
- * published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- */
-
-/**
- *  bfa_uf.c BFA unsolicited frame receive implementation
- */
-
-#include <bfa.h>
-#include <bfa_svc.h>
-#include <bfi/bfi_uf.h>
-#include <cs/bfa_debug.h>
-
-BFA_TRC_FILE(HAL, UF);
-BFA_MODULE(uf);
-
-/*
- *****************************************************************************
- * Internal functions
- *****************************************************************************
- */
-static void
-__bfa_cb_uf_recv(void *cbarg, bfa_boolean_t complete)
-{
-	struct bfa_uf_s   *uf = cbarg;
-	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(uf->bfa);
-
-	if (complete)
-		ufm->ufrecv(ufm->cbarg, uf);
-}
-
-static void
-claim_uf_pbs(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi)
-{
-	u32        uf_pb_tot_sz;
-
-	ufm->uf_pbs_kva = (struct bfa_uf_buf_s *) bfa_meminfo_dma_virt(mi);
-	ufm->uf_pbs_pa = bfa_meminfo_dma_phys(mi);
-	uf_pb_tot_sz = BFA_ROUNDUP((sizeof(struct bfa_uf_buf_s) * ufm->num_ufs),
-							BFA_DMA_ALIGN_SZ);
-
-	bfa_meminfo_dma_virt(mi) += uf_pb_tot_sz;
-	bfa_meminfo_dma_phys(mi) += uf_pb_tot_sz;
-
-	bfa_os_memset((void *)ufm->uf_pbs_kva, 0, uf_pb_tot_sz);
-}
-
-static void
-claim_uf_post_msgs(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi)
-{
-	struct bfi_uf_buf_post_s *uf_bp_msg;
-	struct bfi_sge_s      *sge;
-	union bfi_addr_u      sga_zero = { {0} };
-	u16        i;
-	u16        buf_len;
-
-	ufm->uf_buf_posts = (struct bfi_uf_buf_post_s *) bfa_meminfo_kva(mi);
-	uf_bp_msg = ufm->uf_buf_posts;
-
-	for (i = 0, uf_bp_msg = ufm->uf_buf_posts; i < ufm->num_ufs;
-	     i++, uf_bp_msg++) {
-		bfa_os_memset(uf_bp_msg, 0, sizeof(struct bfi_uf_buf_post_s));
-
-		uf_bp_msg->buf_tag = i;
-		buf_len = sizeof(struct bfa_uf_buf_s);
-		uf_bp_msg->buf_len = bfa_os_htons(buf_len);
-		bfi_h2i_set(uf_bp_msg->mh, BFI_MC_UF, BFI_UF_H2I_BUF_POST,
-			    bfa_lpuid(ufm->bfa));
-
-		sge = uf_bp_msg->sge;
-		sge[0].sg_len = buf_len;
-		sge[0].flags = BFI_SGE_DATA_LAST;
-		bfa_dma_addr_set(sge[0].sga, ufm_pbs_pa(ufm, i));
-		bfa_sge_to_be(sge);
-
-		sge[1].sg_len = buf_len;
-		sge[1].flags = BFI_SGE_PGDLEN;
-		sge[1].sga = sga_zero;
-		bfa_sge_to_be(&sge[1]);
-	}
-
-	/**
-	 * advance pointer beyond consumed memory
-	 */
-	bfa_meminfo_kva(mi) = (u8 *) uf_bp_msg;
-}
-
-static void
-claim_ufs(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi)
-{
-	u16        i;
-	struct bfa_uf_s   *uf;
-
-	/*
-	 * Claim block of memory for UF list
-	 */
-	ufm->uf_list = (struct bfa_uf_s *) bfa_meminfo_kva(mi);
-
-	/*
-	 * Initialize UFs and queue it in UF free queue
-	 */
-	for (i = 0, uf = ufm->uf_list; i < ufm->num_ufs; i++, uf++) {
-		bfa_os_memset(uf, 0, sizeof(struct bfa_uf_s));
-		uf->bfa = ufm->bfa;
-		uf->uf_tag = i;
-		uf->pb_len = sizeof(struct bfa_uf_buf_s);
-		uf->buf_kva = (void *)&ufm->uf_pbs_kva[i];
-		uf->buf_pa = ufm_pbs_pa(ufm, i);
-		list_add_tail(&uf->qe, &ufm->uf_free_q);
-	}
-
-	/**
-	 * advance memory pointer
-	 */
-	bfa_meminfo_kva(mi) = (u8 *) uf;
-}
-
-static void
-uf_mem_claim(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi)
-{
-	claim_uf_pbs(ufm, mi);
-	claim_ufs(ufm, mi);
-	claim_uf_post_msgs(ufm, mi);
-}
-
-static void
-bfa_uf_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len, u32 *dm_len)
-{
-	u32        num_ufs = cfg->fwcfg.num_uf_bufs;
-
-	/*
-	 * dma-able memory for UF posted bufs
-	 */
-	*dm_len += BFA_ROUNDUP((sizeof(struct bfa_uf_buf_s) * num_ufs),
-							BFA_DMA_ALIGN_SZ);
-
-	/*
-	 * kernel Virtual memory for UFs and UF buf post msg copies
-	 */
-	*ndm_len += sizeof(struct bfa_uf_s) * num_ufs;
-	*ndm_len += sizeof(struct bfi_uf_buf_post_s) * num_ufs;
-}
-
-static void
-bfa_uf_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
-		  struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
-{
-	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
-
-	bfa_os_memset(ufm, 0, sizeof(struct bfa_uf_mod_s));
-	ufm->bfa = bfa;
-	ufm->num_ufs = cfg->fwcfg.num_uf_bufs;
-	INIT_LIST_HEAD(&ufm->uf_free_q);
-	INIT_LIST_HEAD(&ufm->uf_posted_q);
-
-	uf_mem_claim(ufm, meminfo);
-}
-
-static void
-bfa_uf_detach(struct bfa_s *bfa)
-{
-}
-
-static struct bfa_uf_s *
-bfa_uf_get(struct bfa_uf_mod_s *uf_mod)
-{
-	struct bfa_uf_s   *uf;
-
-	bfa_q_deq(&uf_mod->uf_free_q, &uf);
-	return uf;
-}
-
-static void
-bfa_uf_put(struct bfa_uf_mod_s *uf_mod, struct bfa_uf_s *uf)
-{
-	list_add_tail(&uf->qe, &uf_mod->uf_free_q);
-}
-
-static bfa_status_t
-bfa_uf_post(struct bfa_uf_mod_s *ufm, struct bfa_uf_s *uf)
-{
-	struct bfi_uf_buf_post_s *uf_post_msg;
-
-	uf_post_msg = bfa_reqq_next(ufm->bfa, BFA_REQQ_FCXP);
-	if (!uf_post_msg)
-		return BFA_STATUS_FAILED;
-
-	bfa_os_memcpy(uf_post_msg, &ufm->uf_buf_posts[uf->uf_tag],
-		      sizeof(struct bfi_uf_buf_post_s));
-	bfa_reqq_produce(ufm->bfa, BFA_REQQ_FCXP);
-
-	bfa_trc(ufm->bfa, uf->uf_tag);
-
-	list_add_tail(&uf->qe, &ufm->uf_posted_q);
-	return BFA_STATUS_OK;
-}
-
-static void
-bfa_uf_post_all(struct bfa_uf_mod_s *uf_mod)
-{
-	struct bfa_uf_s   *uf;
-
-	while ((uf = bfa_uf_get(uf_mod)) != NULL) {
-		if (bfa_uf_post(uf_mod, uf) != BFA_STATUS_OK)
-			break;
-	}
-}
-
-static void
-uf_recv(struct bfa_s *bfa, struct bfi_uf_frm_rcvd_s *m)
-{
-	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
-	u16        uf_tag = m->buf_tag;
-	struct bfa_uf_buf_s *uf_buf = &ufm->uf_pbs_kva[uf_tag];
-	struct bfa_uf_s   *uf = &ufm->uf_list[uf_tag];
-	u8        *buf = &uf_buf->d[0];
-	struct fchs_s         *fchs;
-
-	m->frm_len = bfa_os_ntohs(m->frm_len);
-	m->xfr_len = bfa_os_ntohs(m->xfr_len);
-
-	fchs = (struct fchs_s *) uf_buf;
-
-	list_del(&uf->qe);	/* dequeue from posted queue */
-
-	uf->data_ptr = buf;
-	uf->data_len = m->xfr_len;
-
-	bfa_assert(uf->data_len >= sizeof(struct fchs_s));
-
-	if (uf->data_len == sizeof(struct fchs_s)) {
-		bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_UF, BFA_PL_EID_RX,
-			       uf->data_len, (struct fchs_s *) buf);
-	} else {
-		u32        pld_w0 = *((u32 *) (buf + sizeof(struct fchs_s)));
-		bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_UF,
-				      BFA_PL_EID_RX, uf->data_len,
-				      (struct fchs_s *) buf, pld_w0);
-	}
-
-	if (bfa->fcs)
-		__bfa_cb_uf_recv(uf, BFA_TRUE);
-	else
-		bfa_cb_queue(bfa, &uf->hcb_qe, __bfa_cb_uf_recv, uf);
-}
-
-static void
-bfa_uf_stop(struct bfa_s *bfa)
-{
-}
-
-static void
-bfa_uf_iocdisable(struct bfa_s *bfa)
-{
-	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
-	struct bfa_uf_s   *uf;
-	struct list_head        *qe, *qen;
-
-	list_for_each_safe(qe, qen, &ufm->uf_posted_q) {
-		uf = (struct bfa_uf_s *) qe;
-		list_del(&uf->qe);
-		bfa_uf_put(ufm, uf);
-	}
-}
-
-static void
-bfa_uf_start(struct bfa_s *bfa)
-{
-	bfa_uf_post_all(BFA_UF_MOD(bfa));
-}
-
-
-
-/**
- *  bfa_uf_api
- */
-
-/**
- * 		Register handler for all unsolicted recieve frames.
- *
- * @param[in]	bfa		BFA instance
- * @param[in]	ufrecv	receive handler function
- * @param[in]	cbarg	receive handler arg
- */
-void
-bfa_uf_recv_register(struct bfa_s *bfa, bfa_cb_uf_recv_t ufrecv, void *cbarg)
-{
-	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
-
-	ufm->ufrecv = ufrecv;
-	ufm->cbarg = cbarg;
-}
-
-/**
- * 		Free an unsolicited frame back to BFA.
- *
- * @param[in]		uf		unsolicited frame to be freed
- *
- * @return None
- */
-void
-bfa_uf_free(struct bfa_uf_s *uf)
-{
-	bfa_uf_put(BFA_UF_MOD(uf->bfa), uf);
-	bfa_uf_post_all(BFA_UF_MOD(uf->bfa));
-}
-
-
-
-/**
- *  uf_pub BFA uf module public functions
- */
-
-void
-bfa_uf_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
-{
-	bfa_trc(bfa, msg->mhdr.msg_id);
-
-	switch (msg->mhdr.msg_id) {
-	case BFI_UF_I2H_FRM_RCVD:
-		uf_recv(bfa, (struct bfi_uf_frm_rcvd_s *) msg);
-		break;
-
-	default:
-		bfa_trc(bfa, msg->mhdr.msg_id);
-		bfa_assert(0);
-	}
-}
-
-

+ 0 - 47
drivers/scsi/bfa/bfa_uf_priv.h

@@ -1,47 +0,0 @@
-/*
- * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
- * All rights reserved
- * www.brocade.com
- *
- * Linux driver for Brocade Fibre Channel Host Bus Adapter.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License (GPL) Version 2 as
- * published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- */
-#ifndef __BFA_UF_PRIV_H__
-#define __BFA_UF_PRIV_H__
-
-#include <cs/bfa_sm.h>
-#include <bfa_svc.h>
-#include <bfi/bfi_uf.h>
-
-#define BFA_UF_MIN	(4)
-
-struct bfa_uf_mod_s {
-	struct bfa_s *bfa;		/*  back pointer to BFA */
-	struct bfa_uf_s *uf_list;	/*  array of UFs */
-	u16	num_ufs;	/*  num unsolicited rx frames */
-	struct list_head 	uf_free_q;	/*  free UFs */
-	struct list_head 	uf_posted_q;	/*  UFs posted to IOC */
-	struct bfa_uf_buf_s *uf_pbs_kva;	/*  list UF bufs request pld */
-	u64	uf_pbs_pa;	/*  phy addr for UF bufs */
-	struct bfi_uf_buf_post_s *uf_buf_posts;
-					/*  pre-built UF post msgs */
-	bfa_cb_uf_recv_t ufrecv;	/*  uf recv handler function */
-	void		*cbarg;		/*  uf receive handler arg */
-};
-
-#define BFA_UF_MOD(__bfa)	(&(__bfa)->modules.uf_mod)
-
-#define ufm_pbs_pa(_ufmod, _uftag)	\
-	((_ufmod)->uf_pbs_pa + sizeof(struct bfa_uf_buf_s) * (_uftag))
-
-void	bfa_uf_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
-
-#endif /* __BFA_UF_PRIV_H__ */

File diff suppressed because it is too large
+ 420 - 308
drivers/scsi/bfa/bfad.c


+ 145 - 96
drivers/scsi/bfa/bfad_attr.c

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
  * All rights reserved
  * www.brocade.com
  *
@@ -19,15 +19,8 @@
  *  bfa_attr.c Linux driver configuration interface module.
  */
 
-#include <linux/slab.h>
 #include "bfad_drv.h"
 #include "bfad_im.h"
-#include "bfad_trcmod.h"
-#include "bfad_attr.h"
-
-/**
- *  FC_transport_template FC transport template
- */
 
 /**
  * FC transport template entry, get SCSI target port ID.
@@ -42,7 +35,7 @@ bfad_im_get_starget_port_id(struct scsi_target *starget)
 	u32        fc_id = -1;
 	unsigned long   flags;
 
-	shost = bfad_os_starget_to_shost(starget);
+	shost = dev_to_shost(starget->dev.parent);
 	im_port = (struct bfad_im_port_s *) shost->hostdata[0];
 	bfad = im_port->bfad;
 	spin_lock_irqsave(&bfad->bfad_lock, flags);
@@ -68,7 +61,7 @@ bfad_im_get_starget_node_name(struct scsi_target *starget)
 	u64             node_name = 0;
 	unsigned long   flags;
 
-	shost = bfad_os_starget_to_shost(starget);
+	shost = dev_to_shost(starget->dev.parent);
 	im_port = (struct bfad_im_port_s *) shost->hostdata[0];
 	bfad = im_port->bfad;
 	spin_lock_irqsave(&bfad->bfad_lock, flags);
@@ -94,7 +87,7 @@ bfad_im_get_starget_port_name(struct scsi_target *starget)
 	u64             port_name = 0;
 	unsigned long   flags;
 
-	shost = bfad_os_starget_to_shost(starget);
+	shost = dev_to_shost(starget->dev.parent);
 	im_port = (struct bfad_im_port_s *) shost->hostdata[0];
 	bfad = im_port->bfad;
 	spin_lock_irqsave(&bfad->bfad_lock, flags);
@@ -118,17 +111,7 @@ bfad_im_get_host_port_id(struct Scsi_Host *shost)
 	struct bfad_port_s    *port = im_port->port;
 
 	fc_host_port_id(shost) =
-			bfa_os_hton3b(bfa_fcs_port_get_fcid(port->fcs_port));
-}
-
-
-
-
-
-struct Scsi_Host *
-bfad_os_starget_to_shost(struct scsi_target *starget)
-{
-	return dev_to_shost(starget->dev.parent);
+			bfa_os_hton3b(bfa_fcs_lport_get_fcid(port->fcs_port));
 }
 
 /**
@@ -140,21 +123,21 @@ bfad_im_get_host_port_type(struct Scsi_Host *shost)
 	struct bfad_im_port_s *im_port =
 			(struct bfad_im_port_s *) shost->hostdata[0];
 	struct bfad_s         *bfad = im_port->bfad;
-	struct bfa_pport_attr_s attr;
+	struct bfa_lport_attr_s port_attr;
 
-	bfa_fcport_get_attr(&bfad->bfa, &attr);
+	bfa_fcs_lport_get_attr(&bfad->bfa_fcs.fabric.bport, &port_attr);
 
-	switch (attr.port_type) {
-	case BFA_PPORT_TYPE_NPORT:
+	switch (port_attr.port_type) {
+	case BFA_PORT_TYPE_NPORT:
 		fc_host_port_type(shost) = FC_PORTTYPE_NPORT;
 		break;
-	case BFA_PPORT_TYPE_NLPORT:
+	case BFA_PORT_TYPE_NLPORT:
 		fc_host_port_type(shost) = FC_PORTTYPE_NLPORT;
 		break;
-	case BFA_PPORT_TYPE_P2P:
+	case BFA_PORT_TYPE_P2P:
 		fc_host_port_type(shost) = FC_PORTTYPE_PTP;
 		break;
-	case BFA_PPORT_TYPE_LPORT:
+	case BFA_PORT_TYPE_LPORT:
 		fc_host_port_type(shost) = FC_PORTTYPE_LPORT;
 		break;
 	default:
@@ -172,25 +155,28 @@ bfad_im_get_host_port_state(struct Scsi_Host *shost)
 	struct bfad_im_port_s *im_port =
 			(struct bfad_im_port_s *) shost->hostdata[0];
 	struct bfad_s         *bfad = im_port->bfad;
-	struct bfa_pport_attr_s attr;
+	struct bfa_port_attr_s attr;
 
 	bfa_fcport_get_attr(&bfad->bfa, &attr);
 
 	switch (attr.port_state) {
-	case BFA_PPORT_ST_LINKDOWN:
+	case BFA_PORT_ST_LINKDOWN:
 		fc_host_port_state(shost) = FC_PORTSTATE_LINKDOWN;
 		break;
-	case BFA_PPORT_ST_LINKUP:
+	case BFA_PORT_ST_LINKUP:
 		fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
 		break;
-	case BFA_PPORT_ST_UNINIT:
-	case BFA_PPORT_ST_ENABLING_QWAIT:
-	case BFA_PPORT_ST_ENABLING:
-	case BFA_PPORT_ST_DISABLING_QWAIT:
-	case BFA_PPORT_ST_DISABLING:
-	case BFA_PPORT_ST_DISABLED:
-	case BFA_PPORT_ST_STOPPED:
-	case BFA_PPORT_ST_IOCDOWN:
+	case BFA_PORT_ST_DISABLED:
+	case BFA_PORT_ST_STOPPED:
+	case BFA_PORT_ST_IOCDOWN:
+	case BFA_PORT_ST_IOCDIS:
+		fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
+		break;
+	case BFA_PORT_ST_UNINIT:
+	case BFA_PORT_ST_ENABLING_QWAIT:
+	case BFA_PORT_ST_ENABLING:
+	case BFA_PORT_ST_DISABLING_QWAIT:
+	case BFA_PORT_ST_DISABLING:
 	default:
 		fc_host_port_state(shost) = FC_PORTSTATE_UNKNOWN;
 		break;
@@ -210,13 +196,9 @@ bfad_im_get_host_active_fc4s(struct Scsi_Host *shost)
 	memset(fc_host_active_fc4s(shost), 0,
 	       sizeof(fc_host_active_fc4s(shost)));
 
-	if (port->supported_fc4s &
-		(BFA_PORT_ROLE_FCP_IM | BFA_PORT_ROLE_FCP_TM))
+	if (port->supported_fc4s & BFA_LPORT_ROLE_FCP_IM)
 		fc_host_active_fc4s(shost)[2] = 1;
 
-	if (port->supported_fc4s & BFA_PORT_ROLE_FCP_IPFC)
-		fc_host_active_fc4s(shost)[3] = 0x20;
-
 	fc_host_active_fc4s(shost)[7] = 1;
 }
 
@@ -229,29 +211,29 @@ bfad_im_get_host_speed(struct Scsi_Host *shost)
 	struct bfad_im_port_s *im_port =
 			(struct bfad_im_port_s *) shost->hostdata[0];
 	struct bfad_s         *bfad = im_port->bfad;
-	struct bfa_pport_attr_s attr;
-	unsigned long   flags;
+	struct bfa_port_attr_s attr;
 
-	spin_lock_irqsave(shost->host_lock, flags);
 	bfa_fcport_get_attr(&bfad->bfa, &attr);
 	switch (attr.speed) {
-	case BFA_PPORT_SPEED_8GBPS:
+	case BFA_PORT_SPEED_10GBPS:
+		fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
+		break;
+	case BFA_PORT_SPEED_8GBPS:
 		fc_host_speed(shost) = FC_PORTSPEED_8GBIT;
 		break;
-	case BFA_PPORT_SPEED_4GBPS:
+	case BFA_PORT_SPEED_4GBPS:
 		fc_host_speed(shost) = FC_PORTSPEED_4GBIT;
 		break;
-	case BFA_PPORT_SPEED_2GBPS:
+	case BFA_PORT_SPEED_2GBPS:
 		fc_host_speed(shost) = FC_PORTSPEED_2GBIT;
 		break;
-	case BFA_PPORT_SPEED_1GBPS:
+	case BFA_PORT_SPEED_1GBPS:
 		fc_host_speed(shost) = FC_PORTSPEED_1GBIT;
 		break;
 	default:
 		fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
 		break;
 	}
-	spin_unlock_irqrestore(shost->host_lock, flags);
 }
 
 /**
@@ -265,7 +247,7 @@ bfad_im_get_host_fabric_name(struct Scsi_Host *shost)
 	struct bfad_port_s    *port = im_port->port;
 	wwn_t           fabric_nwwn = 0;
 
-	fabric_nwwn = bfa_fcs_port_get_fabric_name(port->fcs_port);
+	fabric_nwwn = bfa_fcs_lport_get_fabric_name(port->fcs_port);
 
 	fc_host_fabric_name(shost) = bfa_os_htonll(fabric_nwwn);
 
@@ -281,23 +263,44 @@ bfad_im_get_stats(struct Scsi_Host *shost)
 			(struct bfad_im_port_s *) shost->hostdata[0];
 	struct bfad_s         *bfad = im_port->bfad;
 	struct bfad_hal_comp fcomp;
+	union bfa_port_stats_u *fcstats;
 	struct fc_host_statistics *hstats;
 	bfa_status_t    rc;
 	unsigned long   flags;
 
+	fcstats = kzalloc(sizeof(union bfa_port_stats_u), GFP_KERNEL);
+	if (fcstats == NULL)
+		return NULL;
+
 	hstats = &bfad->link_stats;
 	init_completion(&fcomp.comp);
 	spin_lock_irqsave(&bfad->bfad_lock, flags);
 	memset(hstats, 0, sizeof(struct fc_host_statistics));
-	rc =  bfa_port_get_stats(BFA_FCPORT(&bfad->bfa),
-				     (union bfa_pport_stats_u *) hstats,
-				     bfad_hcb_comp, &fcomp);
+	rc = bfa_port_get_stats(BFA_FCPORT(&bfad->bfa),
+				fcstats, bfad_hcb_comp, &fcomp);
 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
 	if (rc != BFA_STATUS_OK)
 		return NULL;
 
 	wait_for_completion(&fcomp.comp);
 
+	/* Fill the fc_host_statistics structure */
+	hstats->seconds_since_last_reset = fcstats->fc.secs_reset;
+	hstats->tx_frames = fcstats->fc.tx_frames;
+	hstats->tx_words  = fcstats->fc.tx_words;
+	hstats->rx_frames = fcstats->fc.rx_frames;
+	hstats->rx_words  = fcstats->fc.rx_words;
+	hstats->lip_count = fcstats->fc.lip_count;
+	hstats->nos_count = fcstats->fc.nos_count;
+	hstats->error_frames = fcstats->fc.error_frames;
+	hstats->dumped_frames = fcstats->fc.dropped_frames;
+	hstats->link_failure_count = fcstats->fc.link_failures;
+	hstats->loss_of_sync_count = fcstats->fc.loss_of_syncs;
+	hstats->loss_of_signal_count = fcstats->fc.loss_of_signals;
+	hstats->prim_seq_protocol_err_count = fcstats->fc.primseq_errs;
+	hstats->invalid_crc_count = fcstats->fc.invalid_crcs;
+
+	kfree(fcstats);
 	return hstats;
 }
 
@@ -317,7 +320,7 @@ bfad_im_reset_stats(struct Scsi_Host *shost)
 	init_completion(&fcomp.comp);
 	spin_lock_irqsave(&bfad->bfad_lock, flags);
 	rc = bfa_port_clear_stats(BFA_FCPORT(&bfad->bfa), bfad_hcb_comp,
-		&fcomp);
+					&fcomp);
 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
 
 	if (rc != BFA_STATUS_OK)
@@ -372,8 +375,8 @@ bfad_im_vport_create(struct fc_vport *fc_vport, bool disable)
 	struct bfad_im_port_s *im_port =
 		(struct bfad_im_port_s *) shost->hostdata[0];
 	struct bfad_s *bfad = im_port->bfad;
-	struct bfa_port_cfg_s port_cfg;
-	struct bfad_pcfg_s *pcfg;
+	struct bfa_lport_cfg_s port_cfg;
+	struct bfad_vport_s *vp;
 	int status = 0, rc;
 	unsigned long flags;
 
@@ -382,12 +385,14 @@ bfad_im_vport_create(struct fc_vport *fc_vport, bool disable)
 	u64_to_wwn(fc_vport->port_name, (u8 *)&port_cfg.pwwn);
 	if (strlen(vname) > 0)
 		strcpy((char *)&port_cfg.sym_name, vname);
-	port_cfg.roles = BFA_PORT_ROLE_FCP_IM;
+	port_cfg.roles = BFA_LPORT_ROLE_FCP_IM;
 
 	spin_lock_irqsave(&bfad->bfad_lock, flags);
-	list_for_each_entry(pcfg, &bfad->pbc_pcfg_list, list_entry) {
-		if (port_cfg.pwwn == pcfg->port_cfg.pwwn) {
-			port_cfg.preboot_vp = pcfg->port_cfg.preboot_vp;
+	list_for_each_entry(vp, &bfad->pbc_vport_list, list_entry) {
+		if (port_cfg.pwwn ==
+				vp->fcs_vport.lport.port_cfg.pwwn) {
+			port_cfg.preboot_vp =
+				vp->fcs_vport.lport.port_cfg.preboot_vp;
 			break;
 		}
 	}
@@ -638,7 +643,7 @@ bfad_im_serial_num_show(struct device *dev, struct device_attribute *attr,
 	struct Scsi_Host *shost = class_to_shost(dev);
 	struct bfad_im_port_s *im_port =
 			(struct bfad_im_port_s *) shost->hostdata[0];
-	struct bfad_s         *bfad = im_port->bfad;
+	struct bfad_s *bfad = im_port->bfad;
 	char serial_num[BFA_ADAPTER_SERIAL_NUM_LEN];
 
 	bfa_get_adapter_serial_num(&bfad->bfa, serial_num);
@@ -652,7 +657,7 @@ bfad_im_model_show(struct device *dev, struct device_attribute *attr,
 	struct Scsi_Host *shost = class_to_shost(dev);
 	struct bfad_im_port_s *im_port =
 			(struct bfad_im_port_s *) shost->hostdata[0];
-	struct bfad_s         *bfad = im_port->bfad;
+	struct bfad_s *bfad = im_port->bfad;
 	char model[BFA_ADAPTER_MODEL_NAME_LEN];
 
 	bfa_get_adapter_model(&bfad->bfa, model);
@@ -666,10 +671,54 @@ bfad_im_model_desc_show(struct device *dev, struct device_attribute *attr,
 	struct Scsi_Host *shost = class_to_shost(dev);
 	struct bfad_im_port_s *im_port =
 			(struct bfad_im_port_s *) shost->hostdata[0];
-	struct bfad_s         *bfad = im_port->bfad;
+	struct bfad_s *bfad = im_port->bfad;
+	char model[BFA_ADAPTER_MODEL_NAME_LEN];
 	char model_descr[BFA_ADAPTER_MODEL_DESCR_LEN];
 
-	bfa_get_adapter_model(&bfad->bfa, model_descr);
+	bfa_get_adapter_model(&bfad->bfa, model);
+	if (!strcmp(model, "Brocade-425"))
+		snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
+			"Brocade 4Gbps PCIe dual port FC HBA");
+	else if (!strcmp(model, "Brocade-825"))
+		snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
+			"Brocade 8Gbps PCIe dual port FC HBA");
+	else if (!strcmp(model, "Brocade-42B"))
+		snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
+			"HP 4Gbps PCIe dual port FC HBA");
+	else if (!strcmp(model, "Brocade-82B"))
+		snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
+			"HP 8Gbps PCIe dual port FC HBA");
+	else if (!strcmp(model, "Brocade-1010"))
+		snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
+			"Brocade 10Gbps single port CNA");
+	else if (!strcmp(model, "Brocade-1020"))
+		snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
+			"Brocade 10Gbps dual port CNA");
+	else if (!strcmp(model, "Brocade-1007"))
+		snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
+			"Brocade 10Gbps CNA");
+	else if (!strcmp(model, "Brocade-415"))
+		snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
+			"Brocade 4Gbps PCIe single port FC HBA");
+	else if (!strcmp(model, "Brocade-815"))
+		snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
+			"Brocade 8Gbps PCIe single port FC HBA");
+	else if (!strcmp(model, "Brocade-41B"))
+		snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
+			"HP 4Gbps PCIe single port FC HBA");
+	else if (!strcmp(model, "Brocade-81B"))
+		snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
+			"HP 8Gbps PCIe single port FC HBA");
+	else if (!strcmp(model, "Brocade-804"))
+		snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
+			"HP Bladesystem C-class 8Gbps FC HBA");
+	else if (!strcmp(model, "Brocade-902"))
+		snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
+			"Brocade 10Gbps CNA");
+	else
+		snprintf(model_descr, BFA_ADAPTER_MODEL_DESCR_LEN,
+			"Invalid Model");
+
 	return snprintf(buf, PAGE_SIZE, "%s\n", model_descr);
 }
 
@@ -683,7 +732,7 @@ bfad_im_node_name_show(struct device *dev, struct device_attribute *attr,
 	struct bfad_port_s    *port = im_port->port;
 	u64        nwwn;
 
-	nwwn = bfa_fcs_port_get_nwwn(port->fcs_port);
+	nwwn = bfa_fcs_lport_get_nwwn(port->fcs_port);
 	return snprintf(buf, PAGE_SIZE, "0x%llx\n", bfa_os_htonll(nwwn));
 }
 
@@ -694,14 +743,14 @@ bfad_im_symbolic_name_show(struct device *dev, struct device_attribute *attr,
 	struct Scsi_Host *shost = class_to_shost(dev);
 	struct bfad_im_port_s *im_port =
 			(struct bfad_im_port_s *) shost->hostdata[0];
-	struct bfad_s         *bfad = im_port->bfad;
-	char model[BFA_ADAPTER_MODEL_NAME_LEN];
-	char fw_ver[BFA_VERSION_LEN];
+	struct bfad_s *bfad = im_port->bfad;
+	struct bfa_lport_attr_s port_attr;
+	char symname[BFA_SYMNAME_MAXLEN];
 
-	bfa_get_adapter_model(&bfad->bfa, model);
-	bfa_get_adapter_fw_ver(&bfad->bfa, fw_ver);
-	return snprintf(buf, PAGE_SIZE, "Brocade %s FV%s DV%s\n",
-		model, fw_ver, BFAD_DRIVER_VERSION);
+	bfa_fcs_lport_get_attr(&bfad->bfa_fcs.fabric.bport, &port_attr);
+	strncpy(symname, port_attr.port_cfg.sym_name.symname,
+			BFA_SYMNAME_MAXLEN);
+	return snprintf(buf, PAGE_SIZE, "%s\n", symname);
 }
 
 static ssize_t
@@ -711,7 +760,7 @@ bfad_im_hw_version_show(struct device *dev, struct device_attribute *attr,
 	struct Scsi_Host *shost = class_to_shost(dev);
 	struct bfad_im_port_s *im_port =
 			(struct bfad_im_port_s *) shost->hostdata[0];
-	struct bfad_s         *bfad = im_port->bfad;
+	struct bfad_s *bfad = im_port->bfad;
 	char hw_ver[BFA_VERSION_LEN];
 
 	bfa_get_pci_chip_rev(&bfad->bfa, hw_ver);
@@ -732,7 +781,7 @@ bfad_im_optionrom_version_show(struct device *dev,
 	struct Scsi_Host *shost = class_to_shost(dev);
 	struct bfad_im_port_s *im_port =
 			(struct bfad_im_port_s *) shost->hostdata[0];
-	struct bfad_s         *bfad = im_port->bfad;
+	struct bfad_s *bfad = im_port->bfad;
 	char optrom_ver[BFA_VERSION_LEN];
 
 	bfa_get_adapter_optrom_ver(&bfad->bfa, optrom_ver);
@@ -746,7 +795,7 @@ bfad_im_fw_version_show(struct device *dev, struct device_attribute *attr,
 	struct Scsi_Host *shost = class_to_shost(dev);
 	struct bfad_im_port_s *im_port =
 			(struct bfad_im_port_s *) shost->hostdata[0];
-	struct bfad_s         *bfad = im_port->bfad;
+	struct bfad_s *bfad = im_port->bfad;
 	char fw_ver[BFA_VERSION_LEN];
 
 	bfa_get_adapter_fw_ver(&bfad->bfa, fw_ver);
@@ -760,10 +809,10 @@ bfad_im_num_of_ports_show(struct device *dev, struct device_attribute *attr,
 	struct Scsi_Host *shost = class_to_shost(dev);
 	struct bfad_im_port_s *im_port =
 			(struct bfad_im_port_s *) shost->hostdata[0];
-	struct bfad_s         *bfad = im_port->bfad;
+	struct bfad_s *bfad = im_port->bfad;
 
 	return snprintf(buf, PAGE_SIZE, "%d\n",
-		bfa_get_nports(&bfad->bfa));
+			bfa_get_nports(&bfad->bfa));
 }
 
 static ssize_t
@@ -788,10 +837,10 @@ bfad_im_num_of_discovered_ports_show(struct device *dev,
 
 	rports = kzalloc(sizeof(wwn_t) * nrports , GFP_ATOMIC);
 	if (rports == NULL)
-		return -ENOMEM;
+		return snprintf(buf, PAGE_SIZE, "Failed\n");
 
 	spin_lock_irqsave(&bfad->bfad_lock, flags);
-	bfa_fcs_port_get_rports(port->fcs_port, rports, &nrports);
+	bfa_fcs_lport_get_rports(port->fcs_port, rports, &nrports);
 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
 	kfree(rports);
 
@@ -837,19 +886,19 @@ struct device_attribute *bfad_im_host_attrs[] = {
 };
 
 struct device_attribute *bfad_im_vport_attrs[] = {
-    &dev_attr_serial_number,
-    &dev_attr_model,
-    &dev_attr_model_description,
-    &dev_attr_node_name,
-    &dev_attr_symbolic_name,
-    &dev_attr_hardware_version,
-    &dev_attr_driver_version,
-    &dev_attr_option_rom_version,
-    &dev_attr_firmware_version,
-    &dev_attr_number_of_ports,
-    &dev_attr_driver_name,
-    &dev_attr_number_of_discovered_ports,
-    NULL,
+	&dev_attr_serial_number,
+	&dev_attr_model,
+	&dev_attr_model_description,
+	&dev_attr_node_name,
+	&dev_attr_symbolic_name,
+	&dev_attr_hardware_version,
+	&dev_attr_driver_version,
+	&dev_attr_option_rom_version,
+	&dev_attr_firmware_version,
+	&dev_attr_number_of_ports,
+	&dev_attr_driver_name,
+	&dev_attr_number_of_discovered_ports,
+	NULL,
 };
 
 

+ 0 - 56
drivers/scsi/bfa/bfad_attr.h

@@ -1,56 +0,0 @@
-/*
- * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
- * All rights reserved
- * www.brocade.com
- *
- * Linux driver for Brocade Fibre Channel Host Bus Adapter.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License (GPL) Version 2 as
- * published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- */
-
-#ifndef __BFAD_ATTR_H__
-#define __BFAD_ATTR_H__
-
-/**
- *  FC_transport_template FC transport template
- */
-
-struct Scsi_Host*
-bfad_os_dev_to_shost(struct scsi_target *starget);
-
-/**
- * FC transport template entry, get SCSI target port ID.
- */
-void
-bfad_im_get_starget_port_id(struct scsi_target *starget);
-
-/**
- * FC transport template entry, get SCSI target nwwn.
- */
-void
-bfad_im_get_starget_node_name(struct scsi_target *starget);
-
-/**
- * FC transport template entry, get SCSI target pwwn.
- */
-void
-bfad_im_get_starget_port_name(struct scsi_target *starget);
-
-/**
- * FC transport template entry, get SCSI host port ID.
- */
-void
-bfad_im_get_host_port_id(struct Scsi_Host *shost);
-
-struct Scsi_Host*
-bfad_os_starget_to_shost(struct scsi_target *starget);
-
-
-#endif /*  __BFAD_ATTR_H__ */

+ 5 - 5
drivers/scsi/bfa/bfad_debugfs.c

@@ -17,8 +17,8 @@
 
 #include <linux/debugfs.h>
 
-#include <bfad_drv.h>
-#include <bfad_im.h>
+#include "bfad_drv.h"
+#include "bfad_im.h"
 
 /*
  * BFA debufs interface
@@ -28,7 +28,7 @@
  * mount -t debugfs none /sys/kernel/debug
  *
  * BFA Hierarchy:
- * 	- bfa/host#
+ *	- bfa/host#
  * where the host number corresponds to the one under /sys/class/scsi_host/host#
  *
  * Debugging service available per host:
@@ -217,7 +217,7 @@ bfad_debugfs_read(struct file *file, char __user *buf,
 #define BFA_REG_ADDRSZ(__bfa)	\
 	((bfa_ioc_devid(&(__bfa)->ioc) == BFA_PCI_DEVICE_ID_CT) ?	\
 		BFA_REG_CT_ADDRSZ : BFA_REG_CB_ADDRSZ)
-#define BFA_REG_ADDRMSK(__bfa)  ((uint32_t)(BFA_REG_ADDRSZ(__bfa) - 1))
+#define BFA_REG_ADDRMSK(__bfa)  ((u32)(BFA_REG_ADDRSZ(__bfa) - 1))
 
 static bfa_status_t
 bfad_reg_offset_check(struct bfa_s *bfa, u32 offset, u32 len)
@@ -359,7 +359,7 @@ bfad_debugfs_write_regwr(struct file *file, const char __user *buf,
 		return -EINVAL;
 	}
 
-	reg_addr = (uint32_t *) ((uint8_t *) bfa_ioc_bar0(ioc) + addr);
+	reg_addr = (u32 *) ((u8 *) bfa_ioc_bar0(ioc) + addr);
 	spin_lock_irqsave(&bfad->bfad_lock, flags);
 	bfa_reg_write(reg_addr, val);
 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);

+ 151 - 103
drivers/scsi/bfa/bfad_drv.h

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
  * All rights reserved
  * www.brocade.com
  *
@@ -28,30 +28,27 @@
 
 #include "bfa_os_inc.h"
 
-#include <bfa.h>
-#include <bfa_svc.h>
-#include <fcs/bfa_fcs.h>
-#include <defs/bfa_defs_pci.h>
-#include <defs/bfa_defs_port.h>
-#include <defs/bfa_defs_rport.h>
-#include <fcs/bfa_fcs_rport.h>
-#include <defs/bfa_defs_vport.h>
-#include <fcs/bfa_fcs_vport.h>
-
-#include <cs/bfa_plog.h>
-#include "aen/bfa_aen.h"
-#include <log/bfa_log_linux.h>
-
-#define BFAD_DRIVER_NAME        "bfa"
+#include "bfa_modules.h"
+#include "bfa_fcs.h"
+#include "bfa_defs_fcs.h"
+
+#include "bfa_plog.h"
+#include "bfa_cs.h"
+
+#define BFAD_DRIVER_NAME	"bfa"
 #ifdef BFA_DRIVER_VERSION
 #define BFAD_DRIVER_VERSION    BFA_DRIVER_VERSION
 #else
-#define BFAD_DRIVER_VERSION    "2.2.2.1"
+#define BFAD_DRIVER_VERSION    "2.3.2.0"
 #endif
 
-
+#define BFAD_PROTO_NAME FCPI_NAME
 #define BFAD_IRQ_FLAGS IRQF_SHARED
 
+#ifndef FC_PORTSPEED_8GBIT
+#define FC_PORTSPEED_8GBIT 0x10
+#endif
+
 /*
  * BFAD flags
  */
@@ -62,9 +59,9 @@
 #define BFAD_HAL_START_DONE			0x00000010
 #define BFAD_PORT_ONLINE			0x00000020
 #define BFAD_RPORT_ONLINE			0x00000040
-#define BFAD_FCS_INIT_DONE                      0x00000080
-#define BFAD_HAL_INIT_FAIL                      0x00000100
-#define BFAD_FC4_PROBE_DONE                     0x00000200
+#define BFAD_FCS_INIT_DONE			0x00000080
+#define BFAD_HAL_INIT_FAIL			0x00000100
+#define BFAD_FC4_PROBE_DONE			0x00000200
 #define BFAD_PORT_DELETE			0x00000001
 
 /*
@@ -77,8 +74,8 @@
 /*
  * BFAD configuration parameter default values
  */
-#define BFAD_LUN_QUEUE_DEPTH 		32
-#define BFAD_IO_MAX_SGE 		SG_ALL
+#define BFAD_LUN_QUEUE_DEPTH	32
+#define BFAD_IO_MAX_SGE		SG_ALL
 
 #define bfad_isr_t irq_handler_t
 
@@ -87,6 +84,16 @@
 struct bfad_msix_s {
 	struct bfad_s *bfad;
 	struct msix_entry msix;
+	char name[32];
+};
+
+/*
+ * Only append to the enums defined here to avoid any versioning
+ * needed between trace utility and driver version
+ */
+enum {
+	BFA_TRC_LDRV_BFAD		= 1,
+	BFA_TRC_LDRV_IM			= 2,
 };
 
 enum bfad_port_pvb_type {
@@ -101,17 +108,13 @@ enum bfad_port_pvb_type {
  */
 struct bfad_port_s {
 	struct list_head list_entry;
-	struct bfad_s         *bfad;
-	struct bfa_fcs_port_s *fcs_port;
-	u32        roles;
-	s32         flags;
-	u32        supported_fc4s;
-	u8		ipfc_flags;
+	struct bfad_s	*bfad;
+	struct bfa_fcs_lport_s *fcs_port;
+	u32	roles;
+	s32		flags;
+	u32	supported_fc4s;
 	enum bfad_port_pvb_type pvb_type;
 	struct bfad_im_port_s *im_port;	/* IM specific data */
-	struct bfad_tm_port_s *tm_port;	/* TM specific data */
-	struct bfad_ipfc_port_s *ipfc_port;	/* IPFC specific data */
-
 	/* port debugfs specific data */
 	struct dentry *port_debugfs_root;
 };
@@ -124,7 +127,6 @@ struct bfad_vport_s {
 	struct bfa_fcs_vport_s fcs_vport;
 	struct completion *comp_del;
 	struct list_head list_entry;
-	struct bfa_port_cfg_s port_cfg;
 };
 
 /*
@@ -137,20 +139,35 @@ struct bfad_vf_s {
 };
 
 struct bfad_cfg_param_s {
-	u32        rport_del_timeout;
-	u32        ioc_queue_depth;
-	u32        lun_queue_depth;
-	u32        io_max_sge;
-	u32        binding_method;
+	u32	rport_del_timeout;
+	u32	ioc_queue_depth;
+	u32	lun_queue_depth;
+	u32	io_max_sge;
+	u32	binding_method;
+};
+
+union bfad_tmp_buf {
+	/* From struct bfa_adapter_attr_s */
+	char		manufacturer[BFA_ADAPTER_MFG_NAME_LEN];
+	char		serial_num[BFA_ADAPTER_SERIAL_NUM_LEN];
+	char		model[BFA_ADAPTER_MODEL_NAME_LEN];
+	char		fw_ver[BFA_VERSION_LEN];
+	char		optrom_ver[BFA_VERSION_LEN];
+
+	/* From struct bfa_ioc_pci_attr_s */
+	u8		chip_rev[BFA_IOC_CHIP_REV_LEN];  /*  chip revision */
+
+	wwn_t		wwn[BFA_FCS_MAX_LPORTS];
 };
 
 /*
  * BFAD (PCI function) data structure
 */
 struct bfad_s {
+	bfa_sm_t	sm;	/* state machine */
 	struct list_head list_entry;
-	struct bfa_s       bfa;
-	struct bfa_fcs_s       bfa_fcs;
+	struct bfa_s	bfa;
+	struct bfa_fcs_s bfa_fcs;
 	struct pci_dev *pcidev;
 	const char *pci_name;
 	struct bfa_pcidev_s hal_pcidev;
@@ -163,41 +180,41 @@ struct bfad_s {
 	struct bfad_port_s     pport;	/* physical port of the BFAD */
 	struct bfa_meminfo_s meminfo;
 	struct bfa_iocfc_cfg_s   ioc_cfg;
-	u32        inst_no;	/* BFAD instance number */
-	u32        bfad_flags;
+	u32	inst_no;	/* BFAD instance number */
+	u32	bfad_flags;
 	spinlock_t      bfad_lock;
 	struct task_struct *bfad_tsk;
 	struct bfad_cfg_param_s cfg_data;
 	struct bfad_msix_s msix_tab[MAX_MSIX_ENTRY];
-	int             nvec;
-	char            adapter_name[BFA_ADAPTER_SYM_NAME_LEN];
-	char            port_name[BFA_ADAPTER_SYM_NAME_LEN];
+	int		nvec;
+	char	adapter_name[BFA_ADAPTER_SYM_NAME_LEN];
+	char	port_name[BFA_ADAPTER_SYM_NAME_LEN];
 	struct timer_list hal_tmo;
 	unsigned long   hs_start;
 	struct bfad_im_s *im;		/* IM specific data */
-	struct bfad_tm_s *tm;		/* TM specific data */
-	struct bfad_ipfc_s *ipfc;	/* IPFC specific data */
-	struct bfa_log_mod_s   log_data;
 	struct bfa_trc_mod_s  *trcmod;
-	struct bfa_log_mod_s  *logmod;
-	struct bfa_aen_s      *aen;
-	struct bfa_aen_s       aen_buf;
-	void		*file_map[BFA_AEN_MAX_APP];
 	struct bfa_plog_s      plog_buf;
-	int             ref_count;
-	bfa_boolean_t	ipfc_enabled;
+	int		ref_count;
+	union bfad_tmp_buf tmp_buf;
 	struct fc_host_statistics link_stats;
-	struct list_head pbc_pcfg_list;
-	atomic_t wq_reqcnt;
+	struct list_head pbc_vport_list;
 	/* debugfs specific data */
 	char *regdata;
 	u32 reglen;
 	struct dentry *bfad_dentry_files[5];
 };
 
-struct bfad_pcfg_s {
-	struct list_head list_entry;
-	struct bfa_port_cfg_s port_cfg;
+/* BFAD state machine events */
+enum bfad_sm_event {
+	BFAD_E_CREATE			= 1,
+	BFAD_E_KTHREAD_CREATE_FAILED	= 2,
+	BFAD_E_INIT			= 3,
+	BFAD_E_INIT_SUCCESS		= 4,
+	BFAD_E_INIT_FAILED		= 5,
+	BFAD_E_INTR_INIT_FAILED		= 6,
+	BFAD_E_FCS_EXIT_COMP		= 7,
+	BFAD_E_EXIT_COMP		= 8,
+	BFAD_E_STOP			= 9
 };
 
 /*
@@ -208,30 +225,30 @@ struct bfad_rport_s {
 };
 
 struct bfad_buf_info {
-	void           *virt;
+	void		*virt;
 	dma_addr_t      phys;
-	u32        size;
+	u32	size;
 };
 
 struct bfad_fcxp {
 	struct bfad_port_s    *port;
 	struct bfa_rport_s *bfa_rport;
 	bfa_status_t    req_status;
-	u16        tag;
-	u16        rsp_len;
-	u16        rsp_maxlen;
-	u8         use_ireqbuf;
-	u8         use_irspbuf;
-	u32        num_req_sgles;
-	u32        num_rsp_sgles;
-	struct fchs_s          fchs;
-	void           *reqbuf_info;
-	void           *rspbuf_info;
+	u16	tag;
+	u16	rsp_len;
+	u16	rsp_maxlen;
+	u8		use_ireqbuf;
+	u8		use_irspbuf;
+	u32	num_req_sgles;
+	u32	num_rsp_sgles;
+	struct fchs_s	fchs;
+	void		*reqbuf_info;
+	void		*rspbuf_info;
 	struct bfa_sge_s  *req_sge;
 	struct bfa_sge_s  *rsp_sge;
 	fcxp_send_cb_t  send_cbfn;
-	void           *send_cbarg;
-	void           *bfa_fcxp;
+	void		*send_cbarg;
+	void		*bfa_fcxp;
 	struct completion comp;
 };
 
@@ -244,34 +261,48 @@ struct bfad_hal_comp {
  * Macro to obtain the immediate lower power
  * of two for the integer.
 */
-#define nextLowerInt(x)                         	\
-do {                                            	\
-	int j;                                  	\
-	(*x)--;    		                	\
-	for (j = 1; j < (sizeof(int) * 8); j <<= 1)     \
-		(*x) = (*x) | (*x) >> j;        	\
-	(*x)++;                  	        	\
-	(*x) = (*x) >> 1;                       	\
+#define nextLowerInt(x)                         \
+do {                                            \
+	int i;                                  \
+	(*x)--;					\
+	for (i = 1; i < (sizeof(int)*8); i <<= 1) \
+		(*x) = (*x) | (*x) >> i;	\
+	(*x)++;					\
+	(*x) = (*x) >> 1;			\
 } while (0)
 
 
-bfa_status_t    bfad_vport_create(struct bfad_s *bfad, u16 vf_id,
-			  struct bfa_port_cfg_s *port_cfg, struct device *dev);
-bfa_status_t    bfad_vf_create(struct bfad_s *bfad, u16 vf_id,
-			       struct bfa_port_cfg_s *port_cfg);
-bfa_status_t    bfad_cfg_pport(struct bfad_s *bfad, enum bfa_port_role role);
-bfa_status_t    bfad_drv_init(struct bfad_s *bfad);
-bfa_status_t	bfad_start_ops(struct bfad_s *bfad);
-void            bfad_drv_start(struct bfad_s *bfad);
-void            bfad_uncfg_pport(struct bfad_s *bfad);
-void            bfad_drv_stop(struct bfad_s *bfad);
-void            bfad_remove_intr(struct bfad_s *bfad);
-void            bfad_hal_mem_release(struct bfad_s *bfad);
-void            bfad_hcb_comp(void *arg, bfa_status_t status);
-
-int             bfad_setup_intr(struct bfad_s *bfad);
-void            bfad_remove_intr(struct bfad_s *bfad);
+#define list_remove_head(list, entry, type, member)		\
+do {								\
+	entry = NULL;                                           \
+	if (!list_empty(list)) {                                \
+		entry = list_entry((list)->next, type, member);	\
+		list_del_init(&entry->member);			\
+	}							\
+} while (0)
 
+#define list_get_first(list, type, member)				\
+((list_empty(list)) ? NULL :						\
+	list_entry((list)->next, type, member))
+
+bfa_status_t	bfad_vport_create(struct bfad_s *bfad, u16 vf_id,
+				  struct bfa_lport_cfg_s *port_cfg,
+				  struct device *dev);
+bfa_status_t	bfad_vf_create(struct bfad_s *bfad, u16 vf_id,
+			       struct bfa_lport_cfg_s *port_cfg);
+bfa_status_t	bfad_cfg_pport(struct bfad_s *bfad, enum bfa_lport_role role);
+bfa_status_t	bfad_drv_init(struct bfad_s *bfad);
+bfa_status_t	bfad_start_ops(struct bfad_s *bfad);
+void		bfad_drv_start(struct bfad_s *bfad);
+void		bfad_uncfg_pport(struct bfad_s *bfad);
+void		bfad_stop(struct bfad_s *bfad);
+void		bfad_fcs_stop(struct bfad_s *bfad);
+void		bfad_remove_intr(struct bfad_s *bfad);
+void		bfad_hal_mem_release(struct bfad_s *bfad);
+void		bfad_hcb_comp(void *arg, bfa_status_t status);
+
+int		bfad_setup_intr(struct bfad_s *bfad);
+void		bfad_remove_intr(struct bfad_s *bfad);
 void		bfad_update_hal_cfg(struct bfa_iocfc_cfg_s *bfa_cfg);
 bfa_status_t	bfad_hal_mem_alloc(struct bfad_s *bfad);
 void		bfad_bfa_tmo(unsigned long data);
@@ -280,9 +311,6 @@ int		bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad);
 void		bfad_pci_uninit(struct pci_dev *pdev, struct bfad_s *bfad);
 void		bfad_fcs_port_cfg(struct bfad_s *bfad);
 void		bfad_drv_uninit(struct bfad_s *bfad);
-void		bfad_drv_log_level_set(struct bfad_s *bfad);
-bfa_status_t	bfad_fc4_module_init(void);
-void		bfad_fc4_module_exit(void);
 int		bfad_worker(void *ptr);
 void		bfad_debugfs_init(struct bfad_port_s *port);
 void		bfad_debugfs_exit(struct bfad_port_s *port);
@@ -294,10 +322,30 @@ int bfad_os_get_linkup_delay(struct bfad_s *bfad);
 int bfad_install_msix_handler(struct bfad_s *bfad);
 
 extern struct idr bfad_im_port_index;
+extern struct pci_device_id bfad_id_table[];
 extern struct list_head bfad_list;
-extern int bfa_lun_queue_depth;
-extern int bfad_supported_fc4s;
-extern int bfa_linkup_delay;
+extern char	*os_name;
+extern char	*os_patch;
+extern char	*host_name;
+extern int	num_rports;
+extern int	num_ios;
+extern int	num_tms;
+extern int	num_fcxps;
+extern int	num_ufbufs;
+extern int	reqq_size;
+extern int	rspq_size;
+extern int	num_sgpgs;
+extern int      rport_del_timeout;
+extern int      bfa_lun_queue_depth;
+extern int      bfa_io_max_sge;
+extern int      log_level;
+extern int      ioc_auto_recover;
+extern int      bfa_linkup_delay;
+extern int      msix_disable_cb;
+extern int      msix_disable_ct;
+extern int      fdmi_enable;
+extern int      supported_fc4s;
+extern int	pcie_max_read_reqsz;
 extern int bfa_debugfs_enable;
 extern struct mutex bfad_mutex;
 

+ 0 - 131
drivers/scsi/bfa/bfad_fwimg.c

@@ -1,131 +0,0 @@
-/*
- * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
- * All rights reserved
- * www.brocade.com
- *
- * Linux driver for Brocade Fibre Channel Host Bus Adapter.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License (GPL) Version 2 as
- * published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- */
-
-/**
- *  bfad_fwimg.c Linux driver PCI interface module.
- */
-#include <bfa_os_inc.h>
-#include <bfad_drv.h>
-#include <bfad_im_compat.h>
-#include <defs/bfa_defs_version.h>
-#include <linux/errno.h>
-#include <linux/sched.h>
-#include <linux/init.h>
-#include <linux/fs.h>
-#include <asm/uaccess.h>
-#include <asm/fcntl.h>
-#include <linux/pci.h>
-#include <linux/firmware.h>
-#include <bfa_fwimg_priv.h>
-#include <bfa.h>
-
-u32 bfi_image_ct_fc_size;
-u32 bfi_image_ct_cna_size;
-u32 bfi_image_cb_fc_size;
-u32 *bfi_image_ct_fc;
-u32 *bfi_image_ct_cna;
-u32 *bfi_image_cb_fc;
-
-
-#define	BFAD_FW_FILE_CT_FC	"ctfw_fc.bin"
-#define	BFAD_FW_FILE_CT_CNA	"ctfw_cna.bin"
-#define	BFAD_FW_FILE_CB_FC	"cbfw_fc.bin"
-MODULE_FIRMWARE(BFAD_FW_FILE_CT_FC);
-MODULE_FIRMWARE(BFAD_FW_FILE_CT_CNA);
-MODULE_FIRMWARE(BFAD_FW_FILE_CB_FC);
-
-u32 *
-bfad_read_firmware(struct pci_dev *pdev, u32 **bfi_image,
-			u32 *bfi_image_size, char *fw_name)
-{
-	const struct firmware *fw;
-
-	if (request_firmware(&fw, fw_name, &pdev->dev)) {
-		printk(KERN_ALERT "Can't locate firmware %s\n", fw_name);
-		goto error;
-	}
-
-	*bfi_image = vmalloc(fw->size);
-	if (NULL == *bfi_image) {
-		printk(KERN_ALERT "Fail to allocate buffer for fw image "
-			"size=%x!\n", (u32) fw->size);
-		goto error;
-	}
-
-	memcpy(*bfi_image, fw->data, fw->size);
-	*bfi_image_size = fw->size/sizeof(u32);
-
-	return *bfi_image;
-
-error:
-	return NULL;
-}
-
-u32 *
-bfad_get_firmware_buf(struct pci_dev *pdev)
-{
-	if (pdev->device == BFA_PCI_DEVICE_ID_CT_FC) {
-		if (bfi_image_ct_fc_size == 0)
-			bfad_read_firmware(pdev, &bfi_image_ct_fc,
-				&bfi_image_ct_fc_size, BFAD_FW_FILE_CT_FC);
-		return bfi_image_ct_fc;
-	} else if (pdev->device == BFA_PCI_DEVICE_ID_CT) {
-		if (bfi_image_ct_cna_size == 0)
-			bfad_read_firmware(pdev, &bfi_image_ct_cna,
-				&bfi_image_ct_cna_size, BFAD_FW_FILE_CT_CNA);
-		return bfi_image_ct_cna;
-	} else {
-		if (bfi_image_cb_fc_size == 0)
-			bfad_read_firmware(pdev, &bfi_image_cb_fc,
-				&bfi_image_cb_fc_size, BFAD_FW_FILE_CB_FC);
-		return bfi_image_cb_fc;
-	}
-}
-
-u32 *
-bfi_image_ct_fc_get_chunk(u32 off)
-{ return (u32 *)(bfi_image_ct_fc + off); }
-
-u32 *
-bfi_image_ct_cna_get_chunk(u32 off)
-{ return (u32 *)(bfi_image_ct_cna + off); }
-
-u32 *
-bfi_image_cb_fc_get_chunk(u32 off)
-{ return (u32 *)(bfi_image_cb_fc + off); }
-
-uint32_t *
-bfi_image_get_chunk(int type, uint32_t off)
-{
-	switch (type) {
-	case BFI_IMAGE_CT_FC: return bfi_image_ct_fc_get_chunk(off); break;
-	case BFI_IMAGE_CT_CNA: return bfi_image_ct_cna_get_chunk(off); break;
-	case BFI_IMAGE_CB_FC: return bfi_image_cb_fc_get_chunk(off); break;
-	default: return 0; break;
-	}
-}
-
-uint32_t
-bfi_image_get_size(int type)
-{
-	switch (type) {
-	case BFI_IMAGE_CT_FC: return bfi_image_ct_fc_size; break;
-	case BFI_IMAGE_CT_CNA: return bfi_image_ct_cna_size; break;
-	case BFI_IMAGE_CB_FC: return bfi_image_cb_fc_size; break;
-	default: return 0; break;
-	}
-}

+ 123 - 134
drivers/scsi/bfa/bfad_im.c

@@ -1,5 +1,5 @@
 /*
 /*
- * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
  * All rights reserved
  * All rights reserved
  * www.brocade.com
  * www.brocade.com
  *
  *
@@ -19,12 +19,10 @@
  *  bfad_im.c Linux driver IM module.
  *  bfad_im.c Linux driver IM module.
  */
  */
 
 
-#include <linux/slab.h>
 #include "bfad_drv.h"
 #include "bfad_drv.h"
 #include "bfad_im.h"
 #include "bfad_im.h"
-#include "bfad_trcmod.h"
-#include "bfa_cb_ioim_macros.h"
-#include <fcb/bfa_fcb_fcpim.h>
+#include "bfa_cb_ioim.h"
+#include "bfa_fcs.h"
 
 
 BFA_TRC_FILE(LDRV, IM);
 BFA_TRC_FILE(LDRV, IM);
 
 
@@ -33,8 +31,10 @@ struct scsi_transport_template *bfad_im_scsi_transport_template;
 struct scsi_transport_template *bfad_im_scsi_vport_transport_template;
 struct scsi_transport_template *bfad_im_scsi_vport_transport_template;
 static void bfad_im_itnim_work_handler(struct work_struct *work);
 static void bfad_im_itnim_work_handler(struct work_struct *work);
 static int bfad_im_queuecommand(struct scsi_cmnd *cmnd,
 static int bfad_im_queuecommand(struct scsi_cmnd *cmnd,
-		void (*done)(struct scsi_cmnd *));
+				void (*done)(struct scsi_cmnd *));
 static int bfad_im_slave_alloc(struct scsi_device *sdev);
 static int bfad_im_slave_alloc(struct scsi_device *sdev);
+static void bfad_im_fc_rport_add(struct bfad_im_port_s  *im_port,
+				struct bfad_itnim_s *itnim);
 
 
 void
 void
 bfa_cb_ioim_done(void *drv, struct bfad_ioim_s *dio,
 bfa_cb_ioim_done(void *drv, struct bfad_ioim_s *dio,
@@ -58,6 +58,7 @@ bfa_cb_ioim_done(void *drv, struct bfad_ioim_s *dio,
 				sns_len = SCSI_SENSE_BUFFERSIZE;
 				sns_len = SCSI_SENSE_BUFFERSIZE;
 			memcpy(cmnd->sense_buffer, sns_info, sns_len);
 			memcpy(cmnd->sense_buffer, sns_info, sns_len);
 		}
 		}
+
 		if (residue > 0) {
 		if (residue > 0) {
 			bfa_trc(bfad, residue);
 			bfa_trc(bfad, residue);
 			scsi_set_resid(cmnd, residue);
 			scsi_set_resid(cmnd, residue);
@@ -76,7 +77,8 @@ bfa_cb_ioim_done(void *drv, struct bfad_ioim_s *dio,
 	case BFI_IOIM_STS_TIMEDOUT:
 	case BFI_IOIM_STS_TIMEDOUT:
 	case BFI_IOIM_STS_PATHTOV:
 	case BFI_IOIM_STS_PATHTOV:
 	default:
 	default:
-		cmnd->result = ScsiResult(DID_ERROR, 0);
+		host_status = DID_ERROR;
+		cmnd->result = ScsiResult(host_status, 0);
 	}
 	}
 
 
 	/* Unmap DMA, if host is NULL, it means a scsi passthru cmd */
 	/* Unmap DMA, if host is NULL, it means a scsi passthru cmd */
@@ -162,11 +164,6 @@ bfa_cb_tskim_done(void *bfad, struct bfad_tskim_s *dtsk,
 		wake_up(wq);
 		wake_up(wq);
 }
 }
 
 
-void
-bfa_cb_ioim_resfree(void *drv)
-{
-}
-
 /**
 /**
  *  Scsi_Host_template SCSI host template
  *  Scsi_Host_template SCSI host template
  */
  */
@@ -179,15 +176,23 @@ bfad_im_info(struct Scsi_Host *shost)
 	static char     bfa_buf[256];
 	static char     bfa_buf[256];
 	struct bfad_im_port_s *im_port =
 	struct bfad_im_port_s *im_port =
 			(struct bfad_im_port_s *) shost->hostdata[0];
 			(struct bfad_im_port_s *) shost->hostdata[0];
-	struct bfad_s         *bfad = im_port->bfad;
+	struct bfad_s *bfad = im_port->bfad;
+	struct bfa_s *bfa = &bfad->bfa;
+	struct bfa_ioc_s *ioc = &bfa->ioc;
 	char model[BFA_ADAPTER_MODEL_NAME_LEN];
 	char model[BFA_ADAPTER_MODEL_NAME_LEN];
 
 
-	bfa_get_adapter_model(&bfad->bfa, model);
+	bfa_get_adapter_model(bfa, model);
 
 
 	memset(bfa_buf, 0, sizeof(bfa_buf));
 	memset(bfa_buf, 0, sizeof(bfa_buf));
-	snprintf(bfa_buf, sizeof(bfa_buf),
-		"Brocade FC/FCOE Adapter, " "model: %s hwpath: %s driver: %s",
+	if (ioc->ctdev)
+		snprintf(bfa_buf, sizeof(bfa_buf),
+		"Brocade FCOE Adapter, " "model: %s hwpath: %s driver: %s",
+		 model, bfad->pci_name, BFAD_DRIVER_VERSION);
+	else
+		snprintf(bfa_buf, sizeof(bfa_buf),
+		"Brocade FC Adapter, " "model: %s hwpath: %s driver: %s",
 		model, bfad->pci_name, BFAD_DRIVER_VERSION);
 		model, bfad->pci_name, BFAD_DRIVER_VERSION);
+
 	return bfa_buf;
 	return bfa_buf;
 }
 }
 
 
@@ -221,9 +226,9 @@ bfad_im_abort_handler(struct scsi_cmnd *cmnd)
 	}
 	}
 
 
 	bfa_trc(bfad, hal_io->iotag);
 	bfa_trc(bfad, hal_io->iotag);
-	bfa_log(bfad->logmod, BFA_LOG_LINUX_SCSI_ABORT,
+	BFA_LOG(KERN_INFO, bfad, log_level, "scsi%d: abort cmnd %p iotag %x\n",
 		im_port->shost->host_no, cmnd, hal_io->iotag);
 		im_port->shost->host_no, cmnd, hal_io->iotag);
-	bfa_ioim_abort(hal_io);
+	(void) bfa_ioim_abort(hal_io);
 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
 
 
 	/* Need to wait until the command get aborted */
 	/* Need to wait until the command get aborted */
@@ -237,7 +242,8 @@ bfad_im_abort_handler(struct scsi_cmnd *cmnd)
 
 
 	cmnd->scsi_done(cmnd);
 	cmnd->scsi_done(cmnd);
 	bfa_trc(bfad, hal_io->iotag);
 	bfa_trc(bfad, hal_io->iotag);
-	bfa_log(bfad->logmod, BFA_LOG_LINUX_SCSI_ABORT_COMP,
+	BFA_LOG(KERN_INFO, bfad, log_level,
+		"scsi%d: complete abort 0x%p iotag 0x%x\n",
 		im_port->shost->host_no, cmnd, hal_io->iotag);
 		im_port->shost->host_no, cmnd, hal_io->iotag);
 	return SUCCESS;
 	return SUCCESS;
 out:
 out:
@@ -255,8 +261,8 @@ bfad_im_target_reset_send(struct bfad_s *bfad, struct scsi_cmnd *cmnd,
 
 
 	tskim = bfa_tskim_alloc(&bfad->bfa, (struct bfad_tskim_s *) cmnd);
 	tskim = bfa_tskim_alloc(&bfad->bfa, (struct bfad_tskim_s *) cmnd);
 	if (!tskim) {
 	if (!tskim) {
-		BFA_DEV_PRINTF(bfad, BFA_ERR,
-			       "target reset, fail to allocate tskim\n");
+		BFA_LOG(KERN_ERR, bfad, log_level,
+			"target reset, fail to allocate tskim\n");
 		rc = BFA_STATUS_FAILED;
 		rc = BFA_STATUS_FAILED;
 		goto out;
 		goto out;
 	}
 	}
@@ -306,7 +312,7 @@ bfad_im_reset_lun_handler(struct scsi_cmnd *cmnd)
 
 
 	tskim = bfa_tskim_alloc(&bfad->bfa, (struct bfad_tskim_s *) cmnd);
 	tskim = bfa_tskim_alloc(&bfad->bfa, (struct bfad_tskim_s *) cmnd);
 	if (!tskim) {
 	if (!tskim) {
-		BFA_DEV_PRINTF(bfad, BFA_ERR,
+		BFA_LOG(KERN_ERR, bfad, log_level,
 				"LUN reset, fail to allocate tskim");
 				"LUN reset, fail to allocate tskim");
 		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
 		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
 		rc = FAILED;
 		rc = FAILED;
@@ -331,8 +337,8 @@ bfad_im_reset_lun_handler(struct scsi_cmnd *cmnd)
 
 
 	task_status = cmnd->SCp.Status >> 1;
 	task_status = cmnd->SCp.Status >> 1;
 	if (task_status != BFI_TSKIM_STS_OK) {
 	if (task_status != BFI_TSKIM_STS_OK) {
-		BFA_DEV_PRINTF(bfad, BFA_ERR, "LUN reset failure, status: %d\n",
-			       task_status);
+		BFA_LOG(KERN_ERR, bfad, log_level,
+			"LUN reset failure, status: %d\n", task_status);
 		rc = FAILED;
 		rc = FAILED;
 	}
 	}
 
 
@@ -375,7 +381,7 @@ bfad_im_reset_bus_handler(struct scsi_cmnd *cmnd)
 
 
 			task_status = cmnd->SCp.Status >> 1;
 			task_status = cmnd->SCp.Status >> 1;
 			if (task_status != BFI_TSKIM_STS_OK) {
 			if (task_status != BFI_TSKIM_STS_OK) {
-				BFA_DEV_PRINTF(bfad, BFA_ERR,
+				BFA_LOG(KERN_ERR, bfad, log_level,
 					"target reset failure,"
 					"target reset failure,"
 					" status: %d\n", task_status);
 					" status: %d\n", task_status);
 				err_cnt++;
 				err_cnt++;
@@ -438,6 +444,7 @@ bfa_fcb_itnim_free(struct bfad_s *bfad, struct bfad_itnim_s *itnim_drv)
 	wwn_t wwpn;
 	wwn_t wwpn;
 	u32 fcid;
 	u32 fcid;
 	char wwpn_str[32], fcid_str[16];
 	char wwpn_str[32], fcid_str[16];
+	struct bfad_im_s	*im = itnim_drv->im;
 
 
 	/* online to free state transtion should not happen */
 	/* online to free state transtion should not happen */
 	bfa_assert(itnim_drv->state != ITNIM_STATE_ONLINE);
 	bfa_assert(itnim_drv->state != ITNIM_STATE_ONLINE);
@@ -454,10 +461,14 @@ bfa_fcb_itnim_free(struct bfad_s *bfad, struct bfad_itnim_s *itnim_drv)
 	fcid = bfa_fcs_itnim_get_fcid(&itnim_drv->fcs_itnim);
 	fcid = bfa_fcs_itnim_get_fcid(&itnim_drv->fcs_itnim);
 	wwn2str(wwpn_str, wwpn);
 	wwn2str(wwpn_str, wwpn);
 	fcid2str(fcid_str, fcid);
 	fcid2str(fcid_str, fcid);
-	bfa_log(bfad->logmod, BFA_LOG_LINUX_ITNIM_FREE,
+	BFA_LOG(KERN_INFO, bfad, log_level,
+		"ITNIM FREE scsi%d: FCID: %s WWPN: %s\n",
 		port->im_port->shost->host_no,
 		port->im_port->shost->host_no,
 		fcid_str, wwpn_str);
 		fcid_str, wwpn_str);
-	bfad_os_itnim_process(itnim_drv);
+
+	/* ITNIM processing */
+	if (itnim_drv->queue_work)
+		queue_work(im->drv_workq, &itnim_drv->itnim_work);
 }
 }
 
 
 /**
@@ -468,13 +479,17 @@ void
 bfa_fcb_itnim_online(struct bfad_itnim_s *itnim_drv)
 {
 	struct bfad_port_s    *port;
+	struct bfad_im_s	*im = itnim_drv->im;

 	itnim_drv->bfa_itnim = bfa_fcs_itnim_get_halitn(&itnim_drv->fcs_itnim);
 	port = bfa_fcs_itnim_get_drvport(&itnim_drv->fcs_itnim);
 	itnim_drv->state = ITNIM_STATE_ONLINE;
 	itnim_drv->queue_work = 1;
 	itnim_drv->im_port = port->im_port;
-	bfad_os_itnim_process(itnim_drv);
+
+	/* ITNIM processing */
+	if (itnim_drv->queue_work)
+		queue_work(im->drv_workq, &itnim_drv->itnim_work);
 }

 /**
@@ -486,6 +501,7 @@ bfa_fcb_itnim_offline(struct bfad_itnim_s *itnim_drv)
 {
 	struct bfad_port_s    *port;
 	struct bfad_s *bfad;
+	struct bfad_im_s	*im = itnim_drv->im;

 	port = bfa_fcs_itnim_get_drvport(&itnim_drv->fcs_itnim);
 	bfad = port->bfad;
@@ -497,16 +513,10 @@ bfa_fcb_itnim_offline(struct bfad_itnim_s *itnim_drv)
 	itnim_drv->im_port = port->im_port;
 	itnim_drv->state = ITNIM_STATE_OFFLINE_PENDING;
 	itnim_drv->queue_work = 1;
-	bfad_os_itnim_process(itnim_drv);
-}

-/**
- * BFA FCS itnim timeout callback.
- * Context: Interrupt. bfad_lock is held
- */
-void bfa_fcb_itnim_tov(struct bfad_itnim_s *itnim)
-{
-	itnim->state = ITNIM_STATE_TIMEOUT;
+	/* ITNIM processing */
+	if (itnim_drv->queue_work)
+		queue_work(im->drv_workq, &itnim_drv->itnim_work);
 }

 /**
@@ -514,7 +524,7 @@ void bfa_fcb_itnim_tov(struct bfad_itnim_s *itnim)
  */
 int
 bfad_im_scsi_host_alloc(struct bfad_s *bfad, struct bfad_im_port_s *im_port,
-				struct device *dev)
+			struct device *dev)
 {
 	int error = 1;

@@ -580,7 +590,7 @@ void
 bfad_im_scsi_host_free(struct bfad_s *bfad, struct bfad_im_port_s *im_port)
 {
 	bfa_trc(bfad, bfad->inst_no);
-	bfa_log(bfad->logmod, BFA_LOG_LINUX_SCSI_HOST_FREE,
+	BFA_LOG(KERN_INFO, bfad, log_level, "Free scsi%d\n",
 			im_port->shost->host_no);

 	fc_remove_host(im_port->shost);
@@ -598,14 +608,11 @@ bfad_im_port_delete_handler(struct work_struct *work)
 {
 	struct bfad_im_port_s *im_port =
 		container_of(work, struct bfad_im_port_s, port_delete_work);
-	struct bfad_s *bfad = im_port->bfad;

 	if (im_port->port->pvb_type != BFAD_PORT_PHYS_BASE) {
 		im_port->flags |= BFAD_PORT_DELETE;
 		fc_vport_terminate(im_port->fc_vport);
-		atomic_dec(&bfad->wq_reqcnt);
 	}
-
 }

 bfa_status_t
@@ -636,11 +643,8 @@ bfad_im_port_delete(struct bfad_s *bfad, struct bfad_port_s *port)
 {
 	struct bfad_im_port_s *im_port = port->im_port;

-	if (im_port->port->pvb_type != BFAD_PORT_PHYS_BASE) {
-		atomic_inc(&bfad->wq_reqcnt);
-		queue_work(bfad->im->drv_workq,
+	queue_work(bfad->im->drv_workq,
 				&im_port->port_delete_work);
-	}
 }

 void
@@ -663,16 +667,6 @@ bfad_im_port_clean(struct bfad_im_port_s *im_port)
 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
 }

-void
-bfad_im_port_online(struct bfad_s *bfad, struct bfad_port_s *port)
-{
-}
-
-void
-bfad_im_port_offline(struct bfad_s *bfad, struct bfad_port_s *port)
-{
-}
-
 bfa_status_t
 bfad_im_probe(struct bfad_s *bfad)
 {
@@ -701,27 +695,12 @@ void
 bfad_im_probe_undo(struct bfad_s *bfad)
 {
 	if (bfad->im) {
-		while (atomic_read(&bfad->wq_reqcnt)) {
-			printk(KERN_INFO "bfa %s: waiting workq processing,"
-				" wq_reqcnt:%x\n", bfad->pci_name,
-				atomic_read(&bfad->wq_reqcnt));
-			schedule_timeout_uninterruptible(HZ);
-		}
 		bfad_os_destroy_workq(bfad->im);
 		kfree(bfad->im);
 		bfad->im = NULL;
 	}
 }

-/**
- * Call back function to handle IO redirection state change
- */
-void
-bfa_cb_ioredirect_state_change(void *hcb_bfad, bfa_boolean_t ioredirect)
-{
-	/* Do nothing */
-}
-
 struct Scsi_Host *
 bfad_os_scsi_host_alloc(struct bfad_im_port_s *im_port, struct bfad_s *bfad)
 {
@@ -751,6 +730,7 @@ void
 bfad_os_destroy_workq(struct bfad_im_s *im)
 {
 	if (im && im->drv_workq) {
+		flush_workqueue(im->drv_workq);
 		destroy_workqueue(im->drv_workq);
 		im->drv_workq = NULL;
 	}
@@ -762,7 +742,7 @@ bfad_os_thread_workq(struct bfad_s *bfad)
 	struct bfad_im_s      *im = bfad->im;

 	bfa_trc(bfad, 0);
-	snprintf(im->drv_workq_name, BFAD_KOBJ_NAME_LEN, "bfad_wq_%d",
+	snprintf(im->drv_workq_name, KOBJ_NAME_LEN, "bfad_wq_%d",
 		 bfad->inst_no);
 	im->drv_workq = create_singlethread_workqueue(im->drv_workq_name);
 	if (!im->drv_workq)
@@ -832,12 +812,6 @@ struct scsi_host_template bfad_im_vport_template = {
 	.max_sectors = 0xFFFF,
 };

-void
-bfad_im_probe_post(struct bfad_im_s *im)
-{
-	flush_workqueue(im->drv_workq);
-}
-
 bfa_status_t
 bfad_im_module_init(void)
 {
@@ -861,19 +835,11 @@ bfad_im_module_exit(void)
 {
 	if (bfad_im_scsi_transport_template)
 		fc_release_transport(bfad_im_scsi_transport_template);
+
 	if (bfad_im_scsi_vport_transport_template)
 		fc_release_transport(bfad_im_scsi_vport_transport_template);
 }

-void
-bfad_os_itnim_process(struct bfad_itnim_s *itnim_drv)
-{
-	struct bfad_im_s      *im = itnim_drv->im;
-
-	if (itnim_drv->queue_work)
-		queue_work(im->drv_workq, &itnim_drv->itnim_work);
-}
-
 void
 bfad_os_ramp_up_qdepth(struct bfad_itnim_s *itnim, struct scsi_device *sdev)
 {
@@ -916,9 +882,6 @@ bfad_os_handle_qfull(struct bfad_itnim_s *itnim, struct scsi_device *sdev)
 	}
 }

-
-
-
 struct bfad_itnim_s *
 bfad_os_get_itnim(struct bfad_im_port_s *im_port, int id)
 {
@@ -949,44 +912,64 @@ bfad_im_slave_alloc(struct scsi_device *sdev)
 	return 0;
 }

+static u32
+bfad_im_supported_speeds(struct bfa_s *bfa)
+{
+	struct bfa_ioc_attr_s ioc_attr;
+	u32 supported_speed = 0;
+
+	bfa_get_attr(bfa, &ioc_attr);
+	if (ioc_attr.adapter_attr.max_speed == BFA_PORT_SPEED_8GBPS) {
+		if (ioc_attr.adapter_attr.is_mezz) {
+			supported_speed |= FC_PORTSPEED_8GBIT |
+				FC_PORTSPEED_4GBIT |
+				FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT;
+		} else {
+			supported_speed |= FC_PORTSPEED_8GBIT |
+				FC_PORTSPEED_4GBIT |
+				FC_PORTSPEED_2GBIT;
+		}
+	} else if (ioc_attr.adapter_attr.max_speed == BFA_PORT_SPEED_4GBPS) {
+		supported_speed |=  FC_PORTSPEED_4GBIT | FC_PORTSPEED_2GBIT |
+				FC_PORTSPEED_1GBIT;
+	} else if (ioc_attr.adapter_attr.max_speed == BFA_PORT_SPEED_10GBPS) {
+		supported_speed |= FC_PORTSPEED_10GBIT;
+	}
+	return supported_speed;
+}
+
 void
 bfad_os_fc_host_init(struct bfad_im_port_s *im_port)
 {
 	struct Scsi_Host *host = im_port->shost;
 	struct bfad_s         *bfad = im_port->bfad;
 	struct bfad_port_s    *port = im_port->port;
-	struct bfa_pport_attr_s pattr;
-	char model[BFA_ADAPTER_MODEL_NAME_LEN];
-	char fw_ver[BFA_VERSION_LEN];
+	struct bfa_port_attr_s pattr;
+	struct bfa_lport_attr_s port_attr;
+	char symname[BFA_SYMNAME_MAXLEN];

 	fc_host_node_name(host) =
-		bfa_os_htonll((bfa_fcs_port_get_nwwn(port->fcs_port)));
+		bfa_os_htonll((bfa_fcs_lport_get_nwwn(port->fcs_port)));
 	fc_host_port_name(host) =
-		bfa_os_htonll((bfa_fcs_port_get_pwwn(port->fcs_port)));
+		bfa_os_htonll((bfa_fcs_lport_get_pwwn(port->fcs_port)));
 	fc_host_max_npiv_vports(host) = bfa_lps_get_max_vport(&bfad->bfa);

 	fc_host_supported_classes(host) = FC_COS_CLASS3;

 	memset(fc_host_supported_fc4s(host), 0,
 	       sizeof(fc_host_supported_fc4s(host)));
-	if (bfad_supported_fc4s & (BFA_PORT_ROLE_FCP_IM | BFA_PORT_ROLE_FCP_TM))
+	if (supported_fc4s & BFA_LPORT_ROLE_FCP_IM)
 		/* For FCP type 0x08 */
 		fc_host_supported_fc4s(host)[2] = 1;
-	if (bfad_supported_fc4s & BFA_PORT_ROLE_FCP_IPFC)
-		/* For LLC/SNAP type 0x05 */
-		fc_host_supported_fc4s(host)[3] = 0x20;
 	/* For fibre channel services type 0x20 */
 	fc_host_supported_fc4s(host)[7] = 1;

-	bfa_get_adapter_model(&bfad->bfa, model);
-	bfa_get_adapter_fw_ver(&bfad->bfa, fw_ver);
-	sprintf(fc_host_symbolic_name(host), "Brocade %s FV%s DV%s",
-		model, fw_ver, BFAD_DRIVER_VERSION);
+	bfa_fcs_lport_get_attr(&bfad->bfa_fcs.fabric.bport, &port_attr);
+	strncpy(symname, port_attr.port_cfg.sym_name.symname,
+		BFA_SYMNAME_MAXLEN);
+	sprintf(fc_host_symbolic_name(host), "%s", symname);

-	fc_host_supported_speeds(host) = 0;
-	fc_host_supported_speeds(host) |=
-		FC_PORTSPEED_8GBIT | FC_PORTSPEED_4GBIT | FC_PORTSPEED_2GBIT |
-		FC_PORTSPEED_1GBIT;
+	fc_host_supported_speeds(host) = bfad_im_supported_speeds(&bfad->bfa);

 	bfa_fcport_get_attr(&bfad->bfa, &pattr);
 	fc_host_maxframe_size(host) = pattr.pport_cfg.maxfrsize;
@@ -1065,7 +1048,9 @@ bfad_im_itnim_work_handler(struct work_struct *work)
 			fcid2str(fcid_str, fcid);
 			list_add_tail(&itnim->list_entry,
 				&im_port->itnim_mapped_list);
-			bfa_log(bfad->logmod, BFA_LOG_LINUX_ITNIM_ONLINE,
+			BFA_LOG(KERN_INFO, bfad, log_level,
+				"ITNIM ONLINE Target: %d:0:%d "
+				"FCID: %s WWPN: %s\n",
 				im_port->shost->host_no,
 				itnim->scsi_tgt_id,
 				fcid_str, wwpn_str);
@@ -1096,7 +1081,9 @@ bfad_im_itnim_work_handler(struct work_struct *work)
 			wwn2str(wwpn_str, wwpn);
 			fcid2str(fcid_str, fcid);
 			list_del(&itnim->list_entry);
-			bfa_log(bfad->logmod, BFA_LOG_LINUX_ITNIM_OFFLINE,
+			BFA_LOG(KERN_INFO, bfad, log_level,
+				"ITNIM OFFLINE Target: %d:0:%d "
+				"FCID: %s WWPN: %s\n",
 				im_port->shost->host_no,
 				itnim->scsi_tgt_id,
 				fcid_str, wwpn_str);
@@ -1142,7 +1129,7 @@ bfad_im_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
 	struct bfa_ioim_s *hal_io;
 	unsigned long   flags;
 	int             rc;
-	s16        sg_cnt = 0;
+	int       sg_cnt = 0;
 	struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device));

 	rc = fc_remote_port_chkready(rport);
@@ -1153,7 +1140,6 @@ bfad_im_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
 	}

 	sg_cnt = scsi_dma_map(cmnd);
-
 	if (sg_cnt < 0)
 		return SCSI_MLQUEUE_HOST_BUSY;

@@ -1168,6 +1154,7 @@ bfad_im_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
 		goto out_fail_cmd;
 	}

+
 	itnim = itnim_data->itnim;
 	if (!itnim) {
 		cmnd->result = ScsiResult(DID_IMM_RETRY, 0);
@@ -1206,47 +1193,49 @@ bfad_os_rport_online_wait(struct bfad_s *bfad)
 	int rport_delay = 10;

 	for (i = 0; !(bfad->bfad_flags & BFAD_PORT_ONLINE)
-		 && i < bfa_linkup_delay; i++)
-		schedule_timeout_uninterruptible(HZ);
+		&& i < bfa_linkup_delay; i++) {
+		set_current_state(TASK_UNINTERRUPTIBLE);
+		schedule_timeout(HZ);
+	}

 	if (bfad->bfad_flags & BFAD_PORT_ONLINE) {
 		rport_delay = rport_delay < bfa_linkup_delay ?
-				 rport_delay : bfa_linkup_delay;
+			rport_delay : bfa_linkup_delay;
 		for (i = 0; !(bfad->bfad_flags & BFAD_RPORT_ONLINE)
-			 && i < rport_delay; i++)
-			schedule_timeout_uninterruptible(HZ);
+			&& i < rport_delay; i++) {
+			set_current_state(TASK_UNINTERRUPTIBLE);
+			schedule_timeout(HZ);
+		}

-		if (rport_delay > 0 && (bfad->bfad_flags & BFAD_RPORT_ONLINE))
-			schedule_timeout_uninterruptible(rport_delay * HZ);
+		if (rport_delay > 0 && (bfad->bfad_flags & BFAD_RPORT_ONLINE)) {
+			set_current_state(TASK_UNINTERRUPTIBLE);
+			schedule_timeout(rport_delay * HZ);
+		}
 	}
 }

 int
 bfad_os_get_linkup_delay(struct bfad_s *bfad)
 {
-
-	u8      nwwns = 0;
-	wwn_t	wwns[BFA_PREBOOT_BOOTLUN_MAX];
-	int     ldelay;
+	u8		nwwns = 0;
+	wwn_t		wwns[BFA_PREBOOT_BOOTLUN_MAX];
+	int		linkup_delay;

 	/*
 	 * Querying for the boot target port wwns
 	 * -- read from boot information in flash.
-	 * If nwwns > 0 => boot over SAN and set bfa_linkup_delay = 30
-	 * else => local boot machine set bfa_linkup_delay = 10
+	 * If nwwns > 0 => boot over SAN and set linkup_delay = 30
+	 * else => local boot machine set linkup_delay = 0
 	 */

 	bfa_iocfc_get_bootwwns(&bfad->bfa, &nwwns, wwns);

-	if (nwwns > 0) {
-		/* If boot over SAN; linkup_delay = 30sec */
-		ldelay = 30;
-	} else {
-		/* If local boot; linkup_delay = 10sec */
-		ldelay = 0;
-	}
+	if (nwwns > 0)
+		/* If Boot over SAN set linkup_delay = 30sec */
+		linkup_delay = 30;
+	else
+		/* If local boot; no linkup_delay */
+		linkup_delay = 0;

-	return ldelay;
+	return linkup_delay;
 }
-
-

+ 38 - 18
drivers/scsi/bfa/bfad_im.h

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
+ * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
  * All rights reserved
  * www.brocade.com
  *
@@ -18,20 +18,20 @@
 #ifndef __BFAD_IM_H__
 #define __BFAD_IM_H__

-#include "fcs/bfa_fcs_fcpim.h"
-#include "bfad_im_compat.h"
+#include "bfa_fcs.h"

 #define FCPI_NAME " fcpim"

+#ifndef KOBJ_NAME_LEN
+#define KOBJ_NAME_LEN           20
+#endif
+
 bfa_status_t bfad_im_module_init(void);
 void bfad_im_module_exit(void);
 bfa_status_t bfad_im_probe(struct bfad_s *bfad);
 void bfad_im_probe_undo(struct bfad_s *bfad);
-void bfad_im_probe_post(struct bfad_im_s *im);
 bfa_status_t bfad_im_port_new(struct bfad_s *bfad, struct bfad_port_s *port);
 void bfad_im_port_delete(struct bfad_s *bfad, struct bfad_port_s *port);
-void bfad_im_port_online(struct bfad_s *bfad, struct bfad_port_s *port);
-void bfad_im_port_offline(struct bfad_s *bfad, struct bfad_port_s *port);
 void bfad_im_port_clean(struct bfad_im_port_s *im_port);
 int  bfad_im_scsi_host_alloc(struct bfad_s *bfad,
 		struct bfad_im_port_s *im_port, struct device *dev);
@@ -44,14 +44,10 @@ void bfad_im_scsi_host_free(struct bfad_s *bfad,
 #define BFAD_LUN_RESET_TMO 60
 #define ScsiResult(host_code, scsi_code) (((host_code) << 16) | scsi_code)
 #define BFA_QUEUE_FULL_RAMP_UP_TIME 120
-#define BFAD_KOBJ_NAME_LEN 20

 /*
  * itnim flags
  */
-#define ITNIM_MAPPED		0x00000001
-
-#define SCSI_TASK_MGMT		0x00000001
 #define IO_DONE_BIT			0

 struct bfad_itnim_data_s {
@@ -64,7 +60,7 @@ struct bfad_im_port_s {
 	struct work_struct port_delete_work;
 	int             idr_id;
 	u16        cur_scsi_id;
-	u16	   flags;
+	u16	flags;
 	struct list_head binding_list;
 	struct Scsi_Host *shost;
 	struct list_head itnim_mapped_list;
@@ -118,14 +114,13 @@ struct bfad_fcp_binding {
 struct bfad_im_s {
 	struct bfad_s         *bfad;
 	struct workqueue_struct *drv_workq;
-	char   drv_workq_name[BFAD_KOBJ_NAME_LEN];
+	char            drv_workq_name[KOBJ_NAME_LEN];
 };

 struct Scsi_Host *bfad_os_scsi_host_alloc(struct bfad_im_port_s *im_port,
 				struct bfad_s *);
 bfa_status_t bfad_os_thread_workq(struct bfad_s *bfad);
 void bfad_os_destroy_workq(struct bfad_im_s *im);
-void bfad_os_itnim_process(struct bfad_itnim_s *itnim_drv);
 void bfad_os_fc_host_init(struct bfad_im_port_s *im_port);
 void bfad_os_scsi_host_free(struct bfad_s *bfad,
 				 struct bfad_im_port_s *im_port);
@@ -133,11 +128,6 @@ void bfad_os_ramp_up_qdepth(struct bfad_itnim_s *itnim,
 				 struct scsi_device *sdev);
 void bfad_os_handle_qfull(struct bfad_itnim_s *itnim, struct scsi_device *sdev);
 struct bfad_itnim_s *bfad_os_get_itnim(struct bfad_im_port_s *im_port, int id);
-int bfad_os_scsi_add_host(struct Scsi_Host *shost,
-		struct bfad_im_port_s *im_port, struct bfad_s *bfad);
-
-void bfad_im_itnim_unmap(struct bfad_im_port_s  *im_port,
-			 struct bfad_itnim_s *itnim);

 extern struct scsi_host_template bfad_im_scsi_host_template;
 extern struct scsi_host_template bfad_im_vport_template;
@@ -146,4 +136,34 @@ extern struct fc_function_template bfad_im_vport_fc_function_template;
 extern struct scsi_transport_template *bfad_im_scsi_transport_template;
 extern struct scsi_transport_template *bfad_im_scsi_vport_transport_template;

+extern struct device_attribute *bfad_im_host_attrs[];
+extern struct device_attribute *bfad_im_vport_attrs[];
+
+irqreturn_t bfad_intx(int irq, void *dev_id);
+
+/* Firmware releated */
+#define BFAD_FW_FILE_CT_FC      "ctfw_fc.bin"
+#define BFAD_FW_FILE_CT_CNA     "ctfw_cna.bin"
+#define BFAD_FW_FILE_CB_FC      "cbfw_fc.bin"
+
+u32 *bfad_get_firmware_buf(struct pci_dev *pdev);
+u32 *bfad_read_firmware(struct pci_dev *pdev, u32 **bfi_image,
+		u32 *bfi_image_size, char *fw_name);
+
+static inline u32 *
+bfad_load_fwimg(struct pci_dev *pdev)
+{
+	return bfad_get_firmware_buf(pdev);
+}
+
+static inline void
+bfad_free_fwimg(void)
+{
+	if (bfi_image_ct_fc_size && bfi_image_ct_fc)
+		vfree(bfi_image_ct_fc);
+	if (bfi_image_ct_cna_size && bfi_image_ct_cna)
+		vfree(bfi_image_ct_cna);
+	if (bfi_image_cb_fc_size && bfi_image_cb_fc)
+		vfree(bfi_image_cb_fc);
+}
 #endif

+ 0 - 45
drivers/scsi/bfa/bfad_im_compat.h

@@ -1,45 +0,0 @@
-/*
- * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
- * All rights reserved
- * www.brocade.com
- *
- * Linux driver for Brocade Fibre Channel Host Bus Adapter.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License (GPL) Version 2 as
- * published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- */
-
-#ifndef __BFAD_IM_COMPAT_H__
-#define __BFAD_IM_COMPAT_H__
-
-extern struct device_attribute *bfad_im_host_attrs[];
-extern struct device_attribute *bfad_im_vport_attrs[];
-
-u32 *bfad_get_firmware_buf(struct pci_dev *pdev);
-u32 *bfad_read_firmware(struct pci_dev *pdev, u32 **bfi_image,
-			u32 *bfi_image_size, char *fw_name);
-
-static inline u32 *
-bfad_load_fwimg(struct pci_dev *pdev)
-{
-	return bfad_get_firmware_buf(pdev);
-}
-
-static inline void
-bfad_free_fwimg(void)
-{
-	if (bfi_image_ct_fc_size && bfi_image_ct_fc)
-		vfree(bfi_image_ct_fc);
-	if (bfi_image_ct_cna_size && bfi_image_ct_cna)
-		vfree(bfi_image_ct_cna);
-	if (bfi_image_cb_fc_size && bfi_image_cb_fc)
-		vfree(bfi_image_cb_fc);
-}
-
-#endif

+ 0 - 222
drivers/scsi/bfa/bfad_intr.c

@@ -1,222 +0,0 @@
-/*
- * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
- * All rights reserved
- * www.brocade.com
- *
- * Linux driver for Brocade Fibre Channel Host Bus Adapter.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License (GPL) Version 2 as
- * published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- */
-
-#include "bfad_drv.h"
-#include "bfad_trcmod.h"
-
-BFA_TRC_FILE(LDRV, INTR);
-
-/**
- *  bfa_isr BFA driver interrupt functions
- */
-static int msix_disable_cb;
-static int msix_disable_ct;
-module_param(msix_disable_cb, int, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(msix_disable_cb, "Disable MSIX for Brocade-415/425/815/825"
-		" cards, default=0, Range[false:0|true:1]");
-module_param(msix_disable_ct, int, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(msix_disable_ct, "Disable MSIX for Brocade-1010/1020/804"
-		" cards, default=0, Range[false:0|true:1]");
-/**
- * Line based interrupt handler.
- */
-static irqreturn_t
-bfad_intx(int irq, void *dev_id)
-{
-	struct bfad_s         *bfad = dev_id;
-	struct list_head         doneq;
-	unsigned long   flags;
-	bfa_boolean_t rc;
-
-	spin_lock_irqsave(&bfad->bfad_lock, flags);
-	rc = bfa_intx(&bfad->bfa);
-	if (!rc) {
-		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
-		return IRQ_NONE;
-	}
-
-	bfa_comp_deq(&bfad->bfa, &doneq);
-	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
-
-	if (!list_empty(&doneq)) {
-		bfa_comp_process(&bfad->bfa, &doneq);
-
-		spin_lock_irqsave(&bfad->bfad_lock, flags);
-		bfa_comp_free(&bfad->bfa, &doneq);
-		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
-		bfa_trc_fp(bfad, irq);
-	}
-
-	return IRQ_HANDLED;
-
-}
-
-static irqreturn_t
-bfad_msix(int irq, void *dev_id)
-{
-	struct bfad_msix_s *vec = dev_id;
-	struct bfad_s *bfad = vec->bfad;
-	struct list_head doneq;
-	unsigned long   flags;
-
-	spin_lock_irqsave(&bfad->bfad_lock, flags);
-
-	bfa_msix(&bfad->bfa, vec->msix.entry);
-	bfa_comp_deq(&bfad->bfa, &doneq);
-	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
-
-	if (!list_empty(&doneq)) {
-		bfa_comp_process(&bfad->bfa, &doneq);
-
-		spin_lock_irqsave(&bfad->bfad_lock, flags);
-		bfa_comp_free(&bfad->bfa, &doneq);
-		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
-	}
-
-	return IRQ_HANDLED;
-}
-
-/**
- * Initialize the MSIX entry table.
- */
-static void
-bfad_init_msix_entry(struct bfad_s *bfad, struct msix_entry *msix_entries,
-			 int mask, int max_bit)
-{
-	int             i;
-	int             match = 0x00000001;
-
-	for (i = 0, bfad->nvec = 0; i < MAX_MSIX_ENTRY; i++) {
-		if (mask & match) {
-			bfad->msix_tab[bfad->nvec].msix.entry = i;
-			bfad->msix_tab[bfad->nvec].bfad = bfad;
-			msix_entries[bfad->nvec].entry = i;
-			bfad->nvec++;
-		}
-
-		match <<= 1;
-	}
-
-}
-
-int
-bfad_install_msix_handler(struct bfad_s *bfad)
-{
-	int             i, error = 0;
-
-	for (i = 0; i < bfad->nvec; i++) {
-		error = request_irq(bfad->msix_tab[i].msix.vector,
-				    (irq_handler_t) bfad_msix, 0,
-				    BFAD_DRIVER_NAME, &bfad->msix_tab[i]);
-		bfa_trc(bfad, i);
-		bfa_trc(bfad, bfad->msix_tab[i].msix.vector);
-		if (error) {
-			int             j;
-
-			for (j = 0; j < i; j++)
-				free_irq(bfad->msix_tab[j].msix.vector,
-						&bfad->msix_tab[j]);
-
-			return 1;
-		}
-	}
-
-	return 0;
-}
-
-/**
- * Setup MSIX based interrupt.
- */
-int
-bfad_setup_intr(struct bfad_s *bfad)
-{
-	int error = 0;
-	u32 mask = 0, i, num_bit = 0, max_bit = 0;
-	struct msix_entry msix_entries[MAX_MSIX_ENTRY];
-	struct pci_dev *pdev = bfad->pcidev;
-
-	/* Call BFA to get the msix map for this PCI function.  */
-	bfa_msix_getvecs(&bfad->bfa, &mask, &num_bit, &max_bit);
-
-	/* Set up the msix entry table */
-	bfad_init_msix_entry(bfad, msix_entries, mask, max_bit);
-
-	if ((bfa_asic_id_ct(pdev->device) && !msix_disable_ct) ||
-		(!bfa_asic_id_ct(pdev->device) && !msix_disable_cb)) {
-
-		error = pci_enable_msix(bfad->pcidev, msix_entries, bfad->nvec);
-		if (error) {
-			/*
-			 * Only error number of vector is available.
-			 * We don't have a mechanism to map multiple
-			 * interrupts into one vector, so even if we
-			 * can try to request less vectors, we don't
-			 * know how to associate interrupt events to
-			 *  vectors. Linux doesn't dupicate vectors
-			 * in the MSIX table for this case.
-			 */
-
-			printk(KERN_WARNING "bfad%d: "
-				"pci_enable_msix failed (%d),"
-				" use line based.\n", bfad->inst_no, error);
-
-			goto line_based;
-		}
-
-		/* Save the vectors */
-		for (i = 0; i < bfad->nvec; i++) {
-			bfa_trc(bfad, msix_entries[i].vector);
-			bfad->msix_tab[i].msix.vector = msix_entries[i].vector;
-		}
-
-		bfa_msix_init(&bfad->bfa, bfad->nvec);
-
-		bfad->bfad_flags |= BFAD_MSIX_ON;
-
-		return error;
-	}
-
-line_based:
-	error = 0;
-	if (request_irq
-	    (bfad->pcidev->irq, (irq_handler_t) bfad_intx, BFAD_IRQ_FLAGS,
-	     BFAD_DRIVER_NAME, bfad) != 0) {
-		/* Enable interrupt handler failed */
-		return 1;
-	}
-
-	return error;
-}
-
-void
-bfad_remove_intr(struct bfad_s *bfad)
-{
-	int             i;
-
-	if (bfad->bfad_flags & BFAD_MSIX_ON) {
-		for (i = 0; i < bfad->nvec; i++)
-			free_irq(bfad->msix_tab[i].msix.vector,
-					&bfad->msix_tab[i]);
-
-		pci_disable_msix(bfad->pcidev);
-		bfad->bfad_flags &= ~BFAD_MSIX_ON;
-	} else {
-		free_irq(bfad->pcidev->irq, bfad);
-	}
-}
-
-

Too many files changed in this diff, so some files are not shown.